def test_pass_remaining_cli_args_to_task(result):
lines = result.stdout_.splitlines()
assert lines == ["--arg something!"]
# --- source file: tests/cases/task/pass_remaining_cli_args_to_task/test_pass_remaining_cli_args_to_task.py (repo: vantage-org/vantage, license: mit) ---
import time
import numpy as nm
from sfepy.base.base import output, iter_dict_of_lists, get_default, Struct
import fea
from sfepy.fem.mesh import Mesh, make_point_cells
def parse_approx_order(approx_order):
"""
Parse the uniform approximation order value (str or int).
"""
ao_msg = 'unsupported approximation order! (%s)'
force_bubble = False
discontinuous = False
try:
ao = int(approx_order)
except ValueError:
mode = approx_order[-1].lower()
if mode == 'b':
ao = int(approx_order[:-1])
force_bubble = True
elif mode == 'd':
ao = int(approx_order[:-1])
discontinuous = True
else:
raise ValueError(ao_msg % approx_order)
if ao < 0:
raise ValueError(ao_msg % approx_order)
return ao, force_bubble, discontinuous
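# Added usage sketch (not part of the original module): a plain integer is
# accepted as-is, while a 'b'/'B' or 'd'/'D' suffix selects the bubble or
# discontinuous variant.
def _sketch_parse_approx_order():
    assert parse_approx_order(2) == (2, False, False)
    assert parse_approx_order('1B') == (1, True, False)
    assert parse_approx_order('3d') == (3, False, True)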
def create_dof_conn(conn, dpn):
    """Given an element node connectivity, create the dof connectivity."""
if dpn == 1:
dc = conn.copy()
else:
n_el, n_ep = conn.shape
n_ed = n_ep * dpn
dc = nm.empty( (n_el, n_ed), dtype = conn.dtype )
for ic in range( n_ed ):
inod = ic / dpn
idof = ic % dpn
## iloc = ic
iloc = n_ep * idof + inod # Hack: For DBD order.
dc[:,iloc] = dpn * conn[:,inod] + idof
return dc
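# Added illustration (not in the original sfepy source): with dpn == 2 each
# node index expands into two consecutive dof indices, column-ordered by the
# DBD hack above.
def _sketch_create_dof_conn():
    conn = nm.array([[0, 1]], dtype=nm.int32)
    assert create_dof_conn(conn, 2).tolist() == [[0, 2, 1, 3]]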
def _fix_scalar_dc(dc1, dc2):
aux = nm.empty((dc2.shape[0], 1), dtype=nm.int32)
aux.fill(dc1)
return aux
def fields_from_conf(conf, regions):
fields = {}
for key, val in conf.iteritems():
field = Field.from_conf(val, regions)
fields[field.name] = field
return fields
def setup_extra_data(conn_info):
"""
Setup extra data required for non-volume integration.
"""
for key, ii, info in iter_dict_of_lists(conn_info, return_keys=True):
## print key, ii
## print info
for var in info.all_vars:
field = var.get_field()
field.setup_extra_data(info.ps_tg, info, info.is_trace)
def setup_dof_conns(conn_info, dof_conns=None,
make_virtual=False, verbose=True):
"""
Dof connectivity key:
(field.name, var.n_components, region.name, type, ig)
"""
if verbose:
output('setting up dof connectivities...')
tt = time.clock()
dof_conns = get_default(dof_conns, {})
for key, ii, info in iter_dict_of_lists(conn_info, return_keys=True):
## print key, ii
## print info
if info.primary is not None:
var = info.primary
field = var.get_field()
field.setup_extra_data(info.ps_tg, info, info.is_trace)
field.setup_dof_conns(dof_conns, var.n_components,
info.dc_type, info.get_region())
if info.has_virtual and not info.is_trace:
# This is needed regardless of make_virtual.
var = info.virtual
field = var.get_field()
field.setup_extra_data(info.v_tg, info, False)
field.setup_dof_conns(dof_conns, var.n_components,
info.dc_type,
info.get_region(can_trace=False))
## print dof_conns
## pause()
if verbose:
output('...done in %.2f s' % (time.clock() - tt))
return dof_conns
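# Added note (illustrative values, not from the original source): a resulting
# dof_conns key has the form (field.name, var.n_components, region.name,
# type, ig), e.g. ('displacement', 3, 'Omega', 'volume', 0).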
##
# 14.07.2006, c
class Field( Struct ):
@staticmethod
def from_conf(conf, regions):
"""To refactor... very hackish now."""
space = conf.get_default_attr('space', 'H1')
poly_space_base = conf.get_default_attr('poly_space_base', 'lagrange')
approx_order = parse_approx_order(conf.approx_order)
ao, force_bubble, discontinuous = approx_order
if isinstance(conf.region, tuple):
region_name, kind = conf.region
region = regions[region_name]
if kind == 'surface':
obj = SurfaceField(conf.name, conf.dtype, conf.shape, region,
space=space,
poly_space_base=poly_space_base,
approx_order=approx_order[:2])
else:
raise ValueError('unknown field kind! (%s)' % kind)
else:
if discontinuous:
cls = DiscontinuousField
else:
cls = Field
obj = cls(conf.name, conf.dtype, conf.shape, regions[conf.region],
space=space,
poly_space_base=poly_space_base,
approx_order=approx_order[:2])
return obj
def __init__(self, name, dtype, shape, region,
space='H1', poly_space_base='lagrange', approx_order=1):
"""Create a Field.
Parameters
----------
name : str
Object name.
dtype : numpy.dtype
Field data type: float64 or complex128.
shape : int/tuple/str
Field shape: 1 or (1,) or 'scalar', space dimension (2, or
(2,) or 3 or (3,)) or 'vector'. The field shape determines
the shape of the FE base functions and can be different from
a FieldVariable instance shape. (TODO)
region : Region
The region where the field is defined.
space : str
The function space name.
poly_space_base : str
The name of polynomial space base.
approx_order : int/str
FE approximation order, e.g. 0, 1, 2, '1B' (1 with bubble).
Notes
-----
Assumes one cell type for the whole region!
"""
if isinstance(shape, str):
try:
shape = {'scalar' : (1,),
'vector' : (region.domain.shape.dim,)}[shape]
except KeyError:
raise ValueError('unsupported field shape! (%s)' % shape)
elif isinstance(shape, int):
shape = (shape,)
Struct.__init__(self,
name = name,
dtype = dtype,
shape = shape,
region = region,
space = space,
poly_space_base = poly_space_base)
self.domain = self.region.domain
self.clear_dof_conns()
self.set_approx_order(approx_order)
self.setup_geometry()
# To refactor below...
self.create_interpolant()
self.setup_approximations()
## print self.aps
## pause()
self.setup_global_base()
self.setup_coors()
def set_approx_order(self, approx_order):
"""
Set a uniform approximation order.
"""
if isinstance(approx_order, tuple):
self.approx_order = approx_order[0]
self.force_bubble = approx_order[1]
else:
self.approx_order = approx_order
self.force_bubble = False
def setup_geometry(self):
"""
Setup the field region geometry.
"""
self.gel = self.domain.groups[self.region.igs[0]].gel
def create_interpolant(self):
name = '%s_%d%s' % (self.gel.name, self.approx_order,
'B' * self.force_bubble)
self.interp = fea.Interpolant(name, self.gel, self.approx_order,
self.force_bubble)
def setup_approximations(self):
self.aps = fea.Approximations(self.interp, self.region)
##
#
def igs( self ):
return self.aps.igs
##
# 19.07.2006, c
def setup_global_base( self ):
self.aps.describe_nodes()
self.aps.setup_nodes()
aux = self.aps.setup_global_base()
self.n_nod, self.remap, self.cnt_vn, self.cnt_en = aux
## print self.n_nod, self.cnt_vn, self.cnt_en
# pause()
##
# 19.07.2006, c
def setup_coors( self ):
"""Coordinates of field nodes."""
self.aps.setup_coors( self.domain.mesh, self.cnt_vn )
def setup_extra_data(self, geometry, info, is_trace):
dct = info.dc_type.type
if geometry is not None:
geometry_flag = 'surface' in geometry
else:
geometry_flag = False
if (dct == 'surface') or (geometry_flag):
reg = info.get_region()
reg.select_cells_of_surface(reset=False)
self.aps.setup_surface_data(reg)
elif dct == 'edge':
raise NotImplementedError('dof connectivity type %s' % dct)
elif dct == 'point':
self.aps.setup_point_data(self, info.region)
elif dct not in ('volume', 'scalar'):
raise ValueError('unknown dof connectivity type! (%s)' % dct)
def clear_dof_conns(self):
self.dof_conns = {}
def setup_dof_conns(self, dof_conns, dpn, dc_type, region):
"""Setup dof connectivities of various kinds as needed by terms."""
dct = dc_type.type
##
# Expand nodes into dofs.
can_point = True
for ig, ap in self.aps.iter_aps(igs=region.igs):
region_name = region.name # True region name.
key = (self.name, dpn, region_name, dct, ig)
if key in dof_conns:
self.dof_conns[key] = dof_conns[key]
if dct == 'point':
can_point = False
continue
if dct == 'volume':
dc = create_dof_conn(ap.econn, dpn)
self.dof_conns[key] = dc
elif dct == 'surface':
sd = ap.surface_data[region_name]
dc = create_dof_conn(sd.econn, dpn)
self.dof_conns[key] = dc
elif dct == 'edge':
raise NotImplementedError('dof connectivity type %s' % dct)
elif dct == 'point':
if can_point:
# Point data only in the first group to avoid multiple
# assembling of nodes on group boundaries.
conn = ap.point_data[region_name]
dc = create_dof_conn(conn, dpn)
self.dof_conns[key] = dc
can_point = False
else:
raise ValueError('unknown dof connectivity type! (%s)' % dct)
dof_conns.update(self.dof_conns)
##
# c: 02.01.2008, r: 02.01.2008
def get_extra_nodes_as_simplices( self, iextra = None ):
dim = self.domain.mesh.dim
if iextra is None:
noft = self.aps.node_offset_table
iextra = nm.arange( noft[1,0], noft[-1,-1], dtype = nm.int32 )
extra = make_point_cells( iextra, dim )
return {2 : '2_3', 3 : '3_4'}[dim], -nm.ones_like( iextra ), extra
def create_mesh(self, extra_nodes=True):
"""
Create a mesh from the field region, optionally including the field
extra nodes.
"""
mesh = self.domain.mesh
if self.approx_order != '0':
conns, mat_ids, descs = [], [], []
for ig, ap in self.aps.iter_aps():
region = ap.region
group = region.domain.groups[ig]
if extra_nodes:
conn = ap.econn
else:
offset = group.shape.n_ep
conn = ap.econn[:,:offset]
conns.append(conn)
mat_ids.append(mesh.mat_ids[ig])
descs.append(mesh.descs[ig])
mesh = Mesh.from_data(self.name,
self.aps.coors, None, conns,
mat_ids, descs)
return mesh
##
# c: 19.07.2006, r: 27.02.2008
def write_mesh( self, name_template, field_name = None ):
"""Extra nodes are written as zero-size simplices (= points)."""
if field_name is None:
field_name = self.name
tmp = self.create_mesh(extra_nodes=False)
aux = self.get_extra_nodes_as_simplices()
tmp.descs.append( aux[0] )
tmp.mat_ids.append( aux[1] )
tmp.conns.append( aux[2] )
## print tmp
## pause()
tmp.write( io = 'auto' )
##
# c: 20.07.2006, r: 15.01.2008
def get_node_descs( self, region ):
nds = {}
for ig, ap in self.aps.iter_aps():
if ig in region.igs:
nds[ig] = self.aps.node_desc
return nds
##
# Modify me for bubble-only approximations to not generate vertex nodes.
# 12.10.2005, c
# 26.10.2005
# 26.05.2006
# 05.06.2006
# 25.07.2006
# 04.09.2006
def interp_c_vals_to_n_vals( self, vec ):
"""len( vec ) == domain.n_el"""
n_els = [sub.n_el for sub in self.domain.subs]
oel = nm.cumsum( [0] + n_els )
if sum( n_els ) != vec.shape[0]:
raise ValueError('incompatible shape! (%d == %d)' % (sum( n_els ), vec.shape[0]))
##
# Mesh vertex values.
n_vertex = self.domain.n_nod
nod_vol = nm.zeros( (n_vertex,), nm.float64 )
dim = vec.shape[1]
nod_vol_val = nm.zeros( (n_vertex, dim ), nm.float64 )
for ii, ap in enumerate( self.aps ):
sub = ap.sub
ig = sub.iseq
vg = self.vgs[ii]
volume = nm.squeeze( vg.variable( 2 ) )
for ii in range( sub.conn.shape[1] ):
cc = sub.conn[:,ii]
nod_vol[cc] += volume
val = volume[:,nm.newaxis] * vec[oel[ig]:oel[ig+1],:]
ind2, ind1 = nm.meshgrid( nm.arange( dim ), cc )
nod_vol_val[ind1,ind2] += val
nod_vol_val = nod_vol_val / nod_vol[:,nm.newaxis]
##
# Field nodes values.
enod_vol_val = self.interp_v_vals_to_n_vals( nod_vol_val )
return enod_vol_val
##
# 05.06.2006, c
# 25.07.2006
# 31.08.2006
def interp_v_vals_to_n_vals( self, vec ):
dim = vec.shape[1]
enod_vol_val = nm.zeros( (self.n_nod, dim), nm.float64 )
for ig, ap in self.aps.iter_aps():
group = self.domain.groups[ig]
offset = group.shape.n_ep
conn = ap.econn[:,:offset]
noff = ap.node_offsets.ravel()
if noff[1] == noff[-1]:
# Vertex values only...
ii = nm.unique(conn) # Probably wrong?!
enod_vol_val[ii] = vec[ii]
continue
econn = ap.econn
ginterp = ap.interp.gel.interp
coors = ap.interp.poly_spaces['v'].node_coors
bf = ginterp.poly_spaces['v'].eval_base(coors)
bf = bf[:,0,:].copy()
fea.mu.interp_vertex_data(enod_vol_val, econn, vec, group.conn,
bf, 0)
return enod_vol_val
##
# 08.08.2006, c
# 13.02.2007
def get_coor( self, nods = None, igs = None ):
    """Will igs ever be needed?"""
if nods is None:
return self.aps.coors
else:
return self.aps.coors[nods]
class DiscontinuousField(Field):
def setup_global_base( self ):
self.aps.describe_nodes()
self.aps.setup_nodes()
# Get n_dof_per_vertex, n_dof_per_facet.
# Find all unique facets in the field region.
# Define global facet dof numbers (range(n_dof_per_facet * n_facet))
aux = self.aps.setup_global_base()
self.n_nod, self.remap, self.cnt_vn, self.cnt_en = aux
class SurfaceField(Field):
"""
A field defined on a surface region.
"""
def __init__(self, name, dtype, shape, region,
space='H1', poly_space_base='lagrange', approx_order=1):
region.setup_face_indices()
Field.__init__(self, name, dtype, shape, region,
space=space, poly_space_base=poly_space_base,
approx_order=approx_order)
def setup_geometry(self):
"""
Setup the field region geometry.
"""
self.gel = self.domain.groups[self.region.igs[0]].gel.surface_facet
if self.gel is None:
raise ValueError('element group has no surface!')
def setup_approximations(self):
self.aps = fea.Approximations(self.interp, self.region, is_surface=True)
def setup_extra_data(self, geometry, info, is_trace):
dct = info.dc_type.type
if dct != 'surface':
msg = "dof connectivity type must be 'surface'! (%s)" % dct
raise ValueError(msg)
reg = info.get_region()
reg.select_cells_of_surface(reset=False)
self.aps.setup_surface_data(reg)
def setup_dof_conns(self, dof_conns, dpn, dc_type, region):
"""Setup dof connectivities of various kinds as needed by terms."""
dct = dc_type.type
if dct != 'surface':
msg = "dof connectivity type must be 'surface'! (%s)" % dct
raise ValueError(msg)
##
# Expand nodes into dofs.
for ig, ap in self.aps.iter_aps(igs=region.igs):
region_name = region.name # True region name.
key = (self.name, dpn, region_name, dct, ig)
if key in dof_conns:
self.dof_conns[key] = dof_conns[key]
continue
sd = ap.surface_data[region_name]
dc = create_dof_conn(sd.leconn, dpn)
self.dof_conns[key] = dc
dof_conns.update(self.dof_conns)
# --- source file: sfepy/fem/fields.py (repo: olivierverdier/sfepy, license: bsd-3-clause) ---
from os import environ
from twisted.internet.defer import inlineCallbacks
from twisted.python.failure import Failure
from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class MyService1(object):
@wamp.register('com.mathservice.add2')
def add2(self, x, y):
return x + y
@wamp.register('com.mathservice.mul2')
def mul2(self, x, y):
return x * y
class Component(ApplicationSession):
"""
An application component registering RPC endpoints using decorators.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
# to use this session to register all the @register decorated
# methods, we call register with the object; so here we create
# a MyService1 instance and register all the methods on it and
# on ourselves
results = []
svc1 = MyService1()
# register all @register-decorated methods from "svc1":
res = yield self.register(svc1)
results.extend(res)
# register all our own @register-decorated methods:
res = yield self.register(self)
results.extend(res)
for res in results:
if isinstance(res, Failure):
print("Failed to register procedure: {}".format(res.value))
else:
print("registration ID {}: {}".format(res.id, res.procedure))
@wamp.register('com.mathservice.square2')
def square2(self, x, y):
return x * x + y * y
@wamp.register('com.mathservice.div2')
def div2(self, x, y):
if y:
return float(x) / float(y)
else:
return 0
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
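# Added sketch (hypothetical caller, not part of this example): once the
# component has joined the realm, any WAMP session on the same router can
# invoke the registered procedures, e.g. from another ApplicationSession:
#
#     res = yield self.call('com.mathservice.add2', 2, 3)
#     print("add2 result: {}".format(res))  # -> 5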
# --- source file: examples/twisted/wamp/rpc/decorators/backend.py (repo: oberstet/autobahn-python, license: mit) ---
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
import os.path
from pyxb.exceptions_ import *
import unittest
class TestIncludeDD (unittest.TestCase):
def testDefault (self):
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../schemas/test-include-ad.xsd'))
self.assertRaises(pyxb.SchemaValidationError, pyxb.binding.generate.GeneratePython, schema_location=schema_path)
if __name__ == '__main__':
unittest.main()
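# Added note (illustrative; 'good.xsd' is a hypothetical path): for a schema
# that validates, GeneratePython returns the generated binding code as a
# string instead of raising SchemaValidationError:
#
#     code = pyxb.binding.generate.GeneratePython(schema_location='good.xsd')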
# --- source file: tests/drivers/test-include-ad.py (repo: pabigot/pyxb, license: apache-2.0) ---
'''
Created on Nov 21, 2016
@author: mjschust
'''
from __future__ import division
import conformal_blocks.cbbundle as cbd
import math, cProfile, time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Computes all non-trivial 4-point conformal block divisors of the specified Lie rank and level.
def experiment():
rank = 3
level = 10
liealg = cbd.TypeALieAlgebra(rank, store_fusion=True, exact=False)
print("Weight", "Rank", "Divisor")
trivial_x = []
trivial_y = []
trivial_z = []
nontrivial_x = []
nontrivial_y = []
nontrivial_z = []
for wt in liealg.get_weights(level):
cbb = cbd.SymmetricConformalBlocksBundle(liealg, wt, 4, level)
if cbb.get_rank() == 0: continue
#tot_weight = wt.fund_coords[0] + 2*wt.fund_coords[1] + 3*wt.fund_coords[2]
#if tot_weight <= level: continue
#tot_weight = 3*wt.fund_coords[0] + 2*wt.fund_coords[1] + wt.fund_coords[2]
#if tot_weight <= level: continue
#if level >= tot_weight // (r+1): continue
divisor = cbb.get_symmetrized_divisor()
if divisor[0] == 0:
trivial_x.append(wt[0])
trivial_y.append(wt[2])
trivial_z.append(wt[1])
else:
nontrivial_x.append(wt[0])
nontrivial_y.append(wt[2])
nontrivial_z.append(wt[1])
print(wt, cbb.get_rank(), divisor[0])
# Plot the results
#fig, ax = plt.subplots()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(trivial_x, trivial_y, zs=trivial_z, c='black', label="Trivial divisor")
ax.scatter(nontrivial_x, nontrivial_y, zs=nontrivial_z, c='red', label="Non-trivial divisor")
ax.set_xlabel('a_1')
ax.set_ylabel('a_3')
ax.set_zlabel('a_2')
ax.legend()
ax.grid(True)
plt.show()
if __name__ == '__main__':
#t0 = time.clock()
experiment()
#print(time.clock() -t0)
#cProfile.run('experiment()', sort='cumtime')
# --- source file: experiments/triviality.py (repo: mjschust/conformal-blocks, license: mit) ---
from pandajedi.jedicore import Interaction
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from JobThrottlerBase import JobThrottlerBase
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# class to throttle ATLAS analysis jobs
class AtlasAnalJobThrottler (JobThrottlerBase):
# constructor
def __init__(self,taskBufferIF):
JobThrottlerBase.__init__(self,taskBufferIF)
# check if throttled
def toBeThrottled(self,vo,prodSourceLabel,cloudName,workQueue,jobStat):
# make logger
tmpLog = MsgWrapper(logger)
tmpLog.debug('start vo={0} label={1} cloud={2} workQueue={3}'.format(vo,prodSourceLabel,cloudName,
workQueue.queue_name))
# check if unthrottled
if workQueue.queue_share is None:
tmpLog.debug(" done : unthrottled since share=None")
return self.retUnThrottled
tmpLog.debug(" done : SKIP")
return self.retThrottled
# --- source file: pandajedi/jedithrottle/AtlasAnalJobThrottler.py (repo: RRCKI/panda-jedi, license: apache-2.0) ---
from a10sdk.common.A10BaseClass import A10BaseClass
class GeneratesAvcsDebugFile(A10BaseClass):
""" :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/techsupport/vcsdebug`.
Class Generates aVCS debug file supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the "PARENT" class for this module.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "Generates aVCS debug file"
self.a10_url="/axapi/v3/techsupport/vcsdebug"
self.DeviceProxy = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
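# Added usage sketch (hypothetical; device_proxy would come from
# common/device_proxy.py as the docstring notes). Keyword arguments are
# attached verbatim via setattr:
#
#     vcsdebug = GeneratesAvcsDebugFile(DeviceProxy=device_proxy)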
# --- source file: a10sdk/core/techsupport/techsupport_vcsdebug.py (repo: a10networks/a10sdk-python, license: apache-2.0) ---
from django.db import models
from django.db.models.query import QuerySet
class QuestionQuerySet(QuerySet):
def active(self):
"""
Return only "active" (i.e. published) questions.
"""
return self.filter(status__exact=self.model.ACTIVE)
class QuestionManager(models.Manager):
def get_query_set(self):
return QuestionQuerySet(self.model)
def active(self):
return self.get_query_set().active()
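# Added usage sketch (assumes a Question model that defines a status field,
# an ACTIVE constant and objects = QuestionManager()): active() is available
# on both the manager and the queryset, so further filtering chains on it:
#
#     Question.objects.active()
#     Question.objects.active().order_by('-id')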
# --- source file: wsgi/iportalen_django/faq/managers.py (repo: I-sektionen/i-portalen, license: mit) ---
import csv
import numpy
import matplotlib
import matplotlib.pyplot as plt
from scipy import stats
num_iter = 500
num_reps = 100
file = 'lower_tier_only_500_iterations.csv'
all_repetitions_data = []
num_lowtier_rules = 7
num_hightier_rules = 0
chunk_size = 20
num_chunks = num_iter // chunk_size
rule_applications_a = []
rule_acceptance_a = []
rule_effectiveness_a = []
rule_selection_chance_a = []
rule_proportions_a = []
for i in range(0, int(num_chunks)):
rule_applications_a.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))
rule_acceptance_a.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))
rule_effectiveness_a.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))
with open(file, 'r') as sim_data_file:
csv_reader = csv.DictReader(sim_data_file)
valid_reps = []
for row in csv_reader:
if int(row['iteration']) == num_iter and len(valid_reps) < num_reps:
valid_reps.append(row['repetition'])
print('')
print(valid_reps)
print('')
print(len(valid_reps))
print('')
sim_data_file.seek(0)
next(csv_reader)
data_list = list(csv_reader)
current_data_list_index = 0
for repetition_index in range(0, len(valid_reps)):
current_rep_num = valid_reps[repetition_index]
current_rep_data = []
for i in range(0, num_iter):
current_rep_data.append([])
for data_index in range(current_data_list_index, len(data_list)):
row = data_list[data_index]
current_data_list_index += 1
if row['repetition'] == current_rep_num:
rep = int(current_rep_num)
iter = int(row['iteration'])
tier = row['rule tier']
rule = int(row['rule number'])
acceptance = int(row['rule acceptance'])
quality_before = float(row['quality before rule'])
quality_after = float(row['quality after rule'])
quality_change = quality_after - quality_before
current_rep_data[int(row['iteration'])-1].append({'rep': rep,
'iter': iter,
'tier': tier,
'rule': rule,
'acceptance': acceptance,
'quality_change': quality_change})
elif row['repetition'] in valid_reps:
current_data_list_index -= 1
break
# print(current_rep_data)
all_repetitions_data.append(current_rep_data)
# print(all_repetitions_data)
for i in range(0, len(all_repetitions_data)):
for j in range(0, len(all_repetitions_data[i])):
iteration = all_repetitions_data[i][j][0]
chunk = int((iteration['iter'] - 1) / chunk_size)
if iteration['tier'] == 'low':
rule_index = iteration['rule'] - 1
elif iteration['tier'] == 'high':
rule_index = iteration['rule'] - 1 + num_lowtier_rules
rule_applications_a[chunk][rule_index] += 1
if iteration['acceptance'] == 1:
rule_acceptance_a[chunk][rule_index] += 1
rule_effectiveness_a = numpy.divide(rule_acceptance_a, rule_applications_a)
rule_selection_chance_a = numpy.divide(rule_applications_a, len(all_repetitions_data)*chunk_size)
print(rule_acceptance_a)
for i in range(0, len(rule_acceptance_a)):
total_accepted = sum(rule_acceptance_a[i])
rule_proportions_a.append(numpy.divide(rule_acceptance_a[i], total_accepted))
error_a = []
print(len(rule_proportions_a))
for chunk in rule_proportions_a:
error_a.append(stats.sem(chunk))
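# Added note (illustrative, not part of the original script): stats.sem is
# the standard error of the mean over the per-rule proportions in a chunk,
# i.e. for a chunk x it equals numpy.std(x, ddof=1) / numpy.sqrt(len(x)).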
# print(rule_applications_a)
# print(rule_acceptance_a)
# print(rule_effectiveness_a)
# print(rule_selection_chance_a)
# print(rule_proportions_a)
# print('')
# file = 'probabilistic_selection_test_1000_iterations.csv'
#
# all_repetitions_data = []
#
# rule_applications_b = []
# rule_acceptance_b = []
# rule_effectiveness_b = []
# rule_selection_chance_b = []
# rule_proportions_b = []
#
# for i in range(0, int(num_iter/chunk_size)):
# rule_applications_b.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))
# rule_acceptance_b.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))
# rule_effectiveness_b.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))
#
# with open(file, 'r') as sim_data_file:
# csv_reader = csv.DictReader(sim_data_file)
#
# valid_reps = []
# for row in csv_reader:
# if int(row['iteration']) == num_iter and len(valid_reps) < num_reps:
# valid_reps.append(row['repetition'])
#
# print('')
# print(valid_reps)
# print('')
# print(len(valid_reps))
# print('')
#
# sim_data_file.seek(0)
#
# next(csv_reader)
#
# data_list = list(csv_reader)
# current_data_list_index = 0
#
# for repetition_index in range(0, len(valid_reps)):
# current_rep_num = valid_reps[repetition_index]
# current_rep_data = []
#
# for i in range(0, num_iter):
# current_rep_data.append([])
#
# for data_index in range(current_data_list_index, len(data_list)):
# row = data_list[data_index]
# current_data_list_index += 1
# if row['repetition'] == current_rep_num:
# rep = int(current_rep_num)
# iter = int(row['iteration'])
# tier = row['rule tier']
# rule = int(row['rule number'])
# acceptance = int(row['rule acceptance'])
#
# quality_before = float(row['quality before rule'])
# quality_after = float(row['quality after rule'])
# quality_change = quality_after - quality_before
#
# current_rep_data[int(row['iteration'])-1].append({'rep': rep,
# 'iter': iter,
# 'tier': tier,
# 'rule': rule,
# 'acceptance': acceptance,
# 'quality_change': quality_change})
#
# elif row['repetition'] in valid_reps:
# current_data_list_index -= 1
# break
#
# all_repetitions_data.append(current_rep_data)
#
# for i in range(0, len(all_repetitions_data)):
# for j in range(0, len(all_repetitions_data[i])):
# iteration = all_repetitions_data[i][j][0]
# chunk = int((iteration['iter'] - 1) / chunk_size)
#
# if iteration['tier'] == 'low':
# rule_index = iteration['rule'] - 1
# elif iteration['tier'] == 'high':
# rule_index = iteration['rule'] - 2 + num_lowtier_rules
#
# rule_applications_b[chunk][rule_index] += 1
#
# if iteration['acceptance'] == 1:
# rule_acceptance_b[chunk][rule_index] += 1
#
# rule_effectiveness_b = numpy.divide(rule_acceptance_b, rule_applications_b)
# rule_selection_chance_b = numpy.divide(rule_applications_b, len(all_repetitions_data)*chunk_size)
#
# for i in range(0, len(rule_acceptance_b)):
# total_accepted = sum(rule_acceptance_b[i])
# rule_proportions_b.append(numpy.divide(rule_acceptance_b[i], total_accepted))
#
# error_b = []
#
# for chunk in rule_proportions_b:
# error_b.append(stats.sem(chunk))
# print(rule_applications_b)
# print(rule_acceptance_b)
# print(rule_effectiveness_b)
#######################################################################################################################
#######################################################################################################################
chunk_labels = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20')
y_pos = numpy.arange(num_chunks)
bar_width = 1
# for rule in range(0, num_lowtier_rules + num_hightier_rules):
# effectiveness_a = []
# effectiveness_b = []
# for chunk in rule_proportions_a:
# effectiveness_a.append(chunk[rule])
# for chunk in rule_proportions_b:
# effectiveness_b.append(chunk[rule])
# plt.bar(y_pos, effectiveness_a, bar_width, color='g', align='center', alpha=0.5, label='Random Selection')
# # plt.bar(y_pos+bar_width, effectiveness_b, bar_width, color='c', align='center', alpha=0.5, label='Probabilistic Selection')
# # plt.errorbar(y_pos, effectiveness_a, yerr=error_a, color='g', alpha=0.5, fmt='o')
# # plt.errorbar(y_pos + bar_width, effectiveness_b, yerr=error_b, color='c', alpha=0.5, fmt='o')
# plt.xticks(y_pos, chunk_labels)
# plt.ylim(0, 0.75)
# plt.grid()
# plt.xlabel('Iteration Chunk (Every 100 Iter.)')
# plt.ylabel('Acceptance Rate of Applied Rule')
# plt.legend(loc=1)
#
# if rule < 8:
# plt.title('Lower-Tier Rule: ' + str(rule+1))
# else:
# plt.title('Higher-Tier Rule: ' + str(rule-7))
# print(effectiveness_a)
# print(effectiveness_b)
# plt.show()
all_rule_proportions = []
for rule in range(0, num_lowtier_rules + num_hightier_rules):
proportion = []
for chunk in rule_proportions_a:
proportion.append(chunk[rule])
all_rule_proportions.append(proportion)
print(all_rule_proportions)
colors = [(0.8, 0, 0), (0, 0.8, 0), (0, 0, 0.8), (0.8, 0.8, 0), (0.8, 0, 0.8), (0, 0.8, 0.8), (0.8, 0.4, 0.4), (0.4, 0.8, 0.4),
(0.4, 0.4, 0.8), (0.8, 0.2, 0.4), (0.2, 0.2, 0), (0.8, 1.0, 0.4), (0.9, 0.6, 0.2)]
last_bottom = numpy.zeros(len(rule_proportions_a))
for rule_index in range(0, len(all_rule_proportions)):
rule = all_rule_proportions[rule_index]
if rule_index < 7:
rule_name = "LT Rule: " + str(rule_index+1)
else:
rule_name = "HT Rule: "+str(rule_index-6)
# all_rule_names = ["HT 1: Increase Complexity", "HT 2: Decrease Complexity", "HT 3: Change Scale", "HT 4: Replicate Pattern", "HT 5: Standardize"]
all_rule_names = ["LT 1: Split Member", "LT 2: Join Member", "LT 3: Add Joint", "LT 4: Remove Joint", "LT 5: Switch Diagonal Member", "LT 6: Move Joint", "LT 7: Re-Size Member"]
rule_name = all_rule_names[rule_index]
plt.bar(y_pos, rule, bar_width, color=colors[rule_index], bottom=last_bottom, align='center', alpha=0.5, label=rule_name)
plt.xticks(y_pos, chunk_labels)
plt.xlim(-0.5, 9.5)
plt.ylim(0, 1.0)
plt.xlabel('Iteration Chunk (Every 20 Iter.)')
plt.ylabel('Proportion')
plt.title('Proportion of Each Rule Within All Accepted Rules per Chunk (Lower Tier Only)')
plt.legend(loc=0)
last_bottom += rule
# plt.grid()
plt.show()
#######################################################################################################################
#######################################################################################################################
# lumped_proportions = numpy.zeros(int(num_chunks))
# best_rules = [3, 5, 6, 9, 11]
#
# for rule_index in range(0, len(all_rule_proportions)):
# if rule_index not in best_rules:
# for chunk_index in range(len(rule_proportions_a)):
# lumped_proportions[chunk_index] += all_rule_proportions[rule_index][chunk_index]
# print(lumped_proportions)
#
# lumped_and_best_proportions = []
# lumped_and_best_proportions.append(lumped_proportions)
#
# for index in best_rules:
# lumped_and_best_proportions.append(all_rule_proportions[index])
#
# colors = [(0.5, 0.4, 0.2), (0, 0.6, 0), (0, 0.2, 0.5), (0.7, 0.2, 0.1), (0.4, 0.8, 0.2), (0.8, 0.5, 0)]
# last_bottom = numpy.zeros(len(rule_proportions_a))
#
# for rule_index in range(0, len(lumped_and_best_proportions)):
# rule = lumped_and_best_proportions[rule_index]
# if rule_index == 0:
# rule_name = "OTHER"
# elif rule_index == 1:
# rule_name = 'LT Rule 4'
# elif rule_index == 2:
# rule_name = 'LT Rule 6'
# elif rule_index == 3:
# rule_name = 'LT Rule 7'
# elif rule_index == 4:
# rule_name = 'HT Rule 3'
# elif rule_index == 5:
# rule_name = 'HT Rule 5'
# plt.bar(y_pos, rule, bar_width, color=colors[rule_index], bottom=last_bottom, align='center', alpha=0.5,
# label=rule_name)
# plt.xticks(y_pos, chunk_labels)
# plt.xlim(-0.5, 19.5)
# plt.ylim(0, 1.0)
# plt.xlabel('Iteration Chunk (Every 20 Iter.)')
# plt.ylabel('Proportion')
# plt.title('Proportion of Each Rule Within All Accepted Rules per Chunk (Both Tiers w/ Random Selection)')
# plt.legend(loc=1)
#
# last_bottom += rule
#
# # plt.grid()
# plt.show()
#######################################################################################################################
#######################################################################################################################
# lower_tier_proportions = numpy.zeros(int(num_chunks))
# higher_tier_proportions = numpy.zeros(int(num_chunks))
#
# for rule_index in range(len(all_rule_proportions)):
# if rule_index < 8:
# for chunk_index in range(len(all_rule_proportions[rule_index])):
# lower_tier_proportions[chunk_index] += all_rule_proportions[rule_index][chunk_index]
# print(lower_tier_proportions)
# elif rule_index >= 8:
# for chunk_index in range(len(all_rule_proportions[rule_index])):
# higher_tier_proportions[chunk_index] += all_rule_proportions[rule_index][chunk_index]
# print(higher_tier_proportions)
#
# combined_tiers_proportions = []
# combined_tiers_proportions.append(lower_tier_proportions)
# combined_tiers_proportions.append(higher_tier_proportions)
#
# colors = [(0.8, 0.8, 0), (0, 0.2, 0.8)]
#
# last_bottom = numpy.zeros(len(rule_proportions_a))
#
# for tier_index in range(len(combined_tiers_proportions)):
# tier = combined_tiers_proportions[tier_index]
# if tier_index == 0:
# tier_name = "Lower-Tier"
# elif tier_index == 1:
# tier_name = "Higher-Tier"
# plt.bar(y_pos, tier, bar_width, color=colors[tier_index], bottom=last_bottom, align='center', alpha=0.5, label=tier_name)
# plt.xticks(y_pos, chunk_labels)
# # plt.xticks([])
# plt.ylim(0, 1.0)
# plt.xlabel('Iteration Chunk (Every 25 Iter.)')
# plt.ylabel('Proportion')
# plt.title('Proportion of Each Rule Tier Within All Accepted Rules per Chunk (Both Tiers w/ Random Selection)(500 Iterations per Agent)')
# plt.legend(loc=1)
#
# last_bottom += tier
#
# plt.grid(axis='y', linestyle='-')
# plt.show()
# --- source file: tests/truss_tests/Main Truss Tests/rule_effectiveness_test.py (repo: HSDL/HeuristicBursts, license: mit) ---
"""
Handles all requests relating to volumes + cinder.
"""
import copy
import sys
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient.v1 import client as v1_client
from keystoneclient import exceptions as keystone_exception
from keystoneclient import session
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
import six
from nova import availability_zones as az
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
cinder_opts = [
cfg.StrOpt('catalog_info',
default='volumev2:cinderv2:publicURL',
help='Info to match when looking for cinder in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type>'),
cfg.StrOpt('endpoint_template',
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
cfg.StrOpt('os_region_name',
help='Region name of this node'),
cfg.IntOpt('http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
cfg.BoolOpt('cross_az_attach',
default=True,
help='Allow attach between instance and volume in different '
'availability zones.'),
]
CONF = cfg.CONF
CINDER_OPT_GROUP = 'cinder'
# cinder_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(cinder_opts, group=CINDER_OPT_GROUP)
deprecated = {'timeout': [cfg.DeprecatedOpt('http_timeout',
group=CINDER_OPT_GROUP)],
'cafile': [cfg.DeprecatedOpt('ca_certificates_file',
group=CINDER_OPT_GROUP)],
'insecure': [cfg.DeprecatedOpt('api_insecure',
group=CINDER_OPT_GROUP)]}
session.Session.register_conf_options(CONF,
CINDER_OPT_GROUP,
deprecated_opts=deprecated)
LOG = logging.getLogger(__name__)
_SESSION = None
_V1_ERROR_RAISED = False
def reset_globals():
"""Testing method to reset globals.
"""
global _SESSION
_SESSION = None
def cinderclient(context):
global _SESSION
global _V1_ERROR_RAISED
if not _SESSION:
_SESSION = session.Session.load_from_conf_options(CONF,
CINDER_OPT_GROUP)
url = None
endpoint_override = None
auth = context.get_auth_plugin()
service_type, service_name, interface = CONF.cinder.catalog_info.split(':')
service_parameters = {'service_type': service_type,
'service_name': service_name,
'interface': interface,
'region_name': CONF.cinder.os_region_name}
if CONF.cinder.endpoint_template:
url = CONF.cinder.endpoint_template % context.to_dict()
endpoint_override = url
else:
url = _SESSION.get_endpoint(auth, **service_parameters)
# TODO(jamielennox): This should be using proper version discovery from
# the cinder service rather than just inspecting the URL for certain string
# values.
version = cinder_client.get_volume_api_from_url(url)
if version == '1' and not _V1_ERROR_RAISED:
msg = _LW('Cinder V1 API is deprecated as of the Juno '
'release, and Nova is still configured to use it. '
'Enable the V2 API in Cinder and set '
'cinder.catalog_info in nova.conf to use it.')
LOG.warn(msg)
_V1_ERROR_RAISED = True
return cinder_client.Client(version,
session=_SESSION,
auth=auth,
endpoint_override=endpoint_override,
connect_retries=CONF.cinder.http_retries,
**service_parameters)
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
# and use 'description' instead of 'display_description' for volume.
if hasattr(vol, 'display_name'):
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
else:
d['display_name'] = vol.name
d['display_description'] = vol.description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['bootable'] = strutils.bool_from_string(vol.bootable)
d['volume_metadata'] = {}
for key, value in vol.metadata.items():
d['volume_metadata'][key] = value
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
# 'description' instead of 'display_description' for snapshot.
if hasattr(snapshot, 'display_name'):
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
else:
d['display_name'] = snapshot.name
d['display_description'] = snapshot.description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def translate_volume_exception(method):
"""Transforms the exception for the volume but keeps its traceback intact.
"""
def wrapper(self, ctx, volume_id, *args, **kwargs):
try:
res = method(self, ctx, volume_id, *args, **kwargs)
except (cinder_exception.ClientException,
keystone_exception.ClientException):
exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(exc_value, (keystone_exception.NotFound,
cinder_exception.NotFound)):
exc_value = exception.VolumeNotFound(volume_id=volume_id)
elif isinstance(exc_value, (keystone_exception.BadRequest,
cinder_exception.BadRequest)):
exc_value = exception.InvalidInput(
reason=six.text_type(exc_value))
six.reraise(exc_value, None, exc_trace)
except (cinder_exception.ConnectionError,
keystone_exception.ConnectionError):
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = exception.CinderConnectionFailed(
reason=six.text_type(exc_value))
six.reraise(exc_value, None, exc_trace)
return res
return wrapper
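# Added sketch (hypothetical class, not part of nova): the decorator turns
# cinderclient/keystoneclient errors into nova exceptions while preserving
# the original traceback, e.g.
#
#     class _Fake(object):
#         @translate_volume_exception
#         def get(self, ctx, volume_id):
#             raise cinder_exception.NotFound(404)
#
#     _Fake().get(None, 'vol-1')  # raises exception.VolumeNotFound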
def translate_snapshot_exception(method):
"""Transforms the exception for the snapshot but keeps its traceback
intact.
"""
def wrapper(self, ctx, snapshot_id, *args, **kwargs):
try:
res = method(self, ctx, snapshot_id, *args, **kwargs)
except (cinder_exception.ClientException,
keystone_exception.ClientException):
exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(exc_value, (keystone_exception.NotFound,
cinder_exception.NotFound)):
exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
six.reraise(exc_value, None, exc_trace)
except (cinder_exception.ConnectionError,
keystone_exception.ConnectionError):
exc_type, exc_value, exc_trace = sys.exc_info()
reason = six.text_type(exc_value)
exc_value = exception.CinderConnectionFailed(reason=reason)
six.reraise(exc_value, None, exc_trace)
return res
return wrapper
class API(object):
"""API for interacting with the volume manager."""
@translate_volume_exception
def get(self, context, volume_id):
item = cinderclient(context).volumes.get(volume_id)
return _untranslate_volume_summary_view(context, item)
def get_all(self, context, search_opts=None):
search_opts = search_opts or {}
items = cinderclient(context).volumes.list(detailed=True,
search_opts=search_opts)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attached(self, context, volume):
if volume['status'] != "in-use":
msg = _("volume '%(vol)s' status must be 'in-use'. Currently in "
"'%(status)s' status") % {"vol": volume['id'],
"status": volume['status']}
raise exception.InvalidVolume(reason=msg)
def check_attach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("volume '%(vol)s' status must be 'available'. Currently "
"in '%(status)s'") % {'vol': volume['id'],
'status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("volume %s already attached") % volume['id']
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder.cross_az_attach:
instance_az = az.get_instance_availability_zone(context, instance)
if instance_az != volume['availability_zone']:
msg = _("Instance %(instance)s and volume %(vol)s are not in "
"the same availability_zone. Instance is in "
"%(ins_zone)s. Volume is in %(vol_zone)s") % {
"instance": instance['id'],
"vol": volume['id'],
'ins_zone': instance_az,
'vol_zone': volume['availability_zone']}
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("volume %s already detached") % volume['id']
raise exception.InvalidVolume(reason=msg)
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@translate_volume_exception
def unreserve_volume(self, context, volume_id):
cinderclient(context).volumes.unreserve(volume_id)
@translate_volume_exception
def begin_detaching(self, context, volume_id):
cinderclient(context).volumes.begin_detaching(volume_id)
@translate_volume_exception
def roll_detaching(self, context, volume_id):
cinderclient(context).volumes.roll_detaching(volume_id)
@translate_volume_exception
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
cinderclient(context).volumes.attach(volume_id, instance_uuid,
mountpoint, mode=mode)
@translate_volume_exception
def detach(self, context, volume_id):
cinderclient(context).volumes.detach(volume_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):
try:
connection_info = cinderclient(
context).volumes.initialize_connection(volume_id, connector)
connection_info['connector'] = connector
return connection_info
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Initialize connection failed for volume '
'%(vol)s on host %(host)s. Error: %(msg)s '
'Code: %(code)s. Attempting to terminate '
'connection.'),
{'vol': volume_id,
'host': connector.get('host'),
'msg': six.text_type(ex),
'code': ex.code})
try:
self.terminate_connection(context, volume_id, connector)
except Exception as exc:
LOG.error(_LE('Connection between volume %(vol)s and host '
'%(host)s might have succeeded, but attempt '
'to terminate connection has failed. '
'Validate the connection and determine if '
'manual cleanup is needed. Error: %(msg)s '
'Code: %(code)s.'),
{'vol': volume_id,
'host': connector.get('host'),
'msg': six.text_type(exc),
'code': (
exc.code if hasattr(exc, 'code') else None)})
@translate_volume_exception
def terminate_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.terminate_connection(volume_id,
connector)
def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
error=False):
return cinderclient(context).volumes.migrate_volume_completion(
old_volume_id, new_volume_id, error)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
client = cinderclient(context)
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id)
if isinstance(client, v1_client.Client):
kwargs['display_name'] = name
kwargs['display_description'] = description
else:
kwargs['name'] = name
kwargs['description'] = description
try:
item = client.volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
except cinder_exception.OverLimit:
raise exception.OverQuota(overs='volumes')
except (cinder_exception.BadRequest,
keystone_exception.BadRequest) as e:
raise exception.InvalidInput(reason=e)
@translate_volume_exception
def delete(self, context, volume_id):
cinderclient(context).volumes.delete(volume_id)
@translate_volume_exception
def update(self, context, volume_id, fields):
raise NotImplementedError()
@translate_snapshot_exception
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
@translate_volume_exception
def create_snapshot(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_volume_exception
def create_snapshot_force(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_snapshot_exception
def delete_snapshot(self, context, snapshot_id):
cinderclient(context).volume_snapshots.delete(snapshot_id)
def get_volume_encryption_metadata(self, context, volume_id):
return cinderclient(context).volumes.get_encryption_metadata(volume_id)
@translate_snapshot_exception
def update_snapshot_status(self, context, snapshot_id, status):
vs = cinderclient(context).volume_snapshots
# '90%' here is used to tell Cinder that Nova is done
# with its portion of the 'creating' state. This can
# be removed when we are able to split the Cinder states
# into 'creating' and a separate state of
# 'creating_in_nova'. (Same for 'deleting' state.)
vs.update_snapshot_status(
snapshot_id,
{'status': status,
'progress': '90%'}
)
# --- source file: nova/volume/cinder.py (repo: Francis-Liu/animated-broccoli, license: apache-2.0) ---
import csv
import os
import sys
import re
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import pkgutil
import inspect
import bb
import supportedrecipesreport
class Columns(object):
"""Base class for all classes which extend the SUPPORTED_RECIPES_SOURCES report.
Typically used to add columns, hence the name. Usage of the class is:
- instantiated when starting to write a report
- extend_header() - add new columns
- extend_row() - add data for new columns to each row as it is being written
To add new classes, create a "lib/supportedrecipesreport" directory in your layer,
with an empty "__init__.py" file and one or more classes inheriting from this base
class defined in one or more regular .py files.
"""
def __init__(self, d, all_rows):
"""Initialize instance.
Gets access to the global datastore and all rows that are to be written (unmodified
and read-only).
"""
pass
def extend_header(self, row_headers):
"""Add new columns.
Called with a list of field names, in the order in which the
resulting .csv report will have them. extend_header() may then
extend the list of fields. See supportedrecipes.py for
a list of already present fields.
"""
pass
def extend_row(self, row):
"""Add data for new columns or modify existing ones.
Called with a hash mapping field names to the corresponding data.
"""
pass
def parse_regex(regex, filename, linenumber):
try:
# must match entire string, hence the '$'
return (re.compile(regex + '$'), regex)
except Exception as ex:
raise RuntimeError("%s.%d: parsing '%s' as regular expression failed: %s" % (
filename,
linenumber,
regex,
str(ex)))
class SupportedRecipe:
def __init__(self, pattern, supportedby, filename, linenumber):
self.supportedby = supportedby
self.filename = filename
self.pattern = pattern
self.linenumber = linenumber
parts = pattern.split('@')
if len(parts) != 2:
raise RuntimeError("%s.%d: entry must have format <recipe name regex>@<collection name regex>, "
"splitting by @ found %d parts instead: %s" %
(filename, linenumber, len(parts), pattern))
self.pn_re = parse_regex(parts[0], filename, linenumber)
self.collection_re = parse_regex(parts[1], filename, linenumber)
def is_supportedby(self, pn, collection):
# Returns string identifying the team supporting the recipe or
# empty string if unsupported.
supported = bool((pn is None or self.pn_re[0].match(pn)) and
(collection is None or self.collection_re[0].match(collection)))
return self.supportedby if supported else ''
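# Added usage sketch (hypothetical data, not part of the original class):
def _sketch_supported_recipe():
    r = SupportedRecipe('glibc.*@core', 'base-team', 'supported.txt', 1)
    assert r.is_supportedby('glibc-locale', 'core') == 'base-team'
    assert r.is_supportedby('busybox', 'core') == ''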
class SupportedRecipes:
def __init__(self):
self.supported = []
def append(self, recipe):
self.supported.append(recipe)
def current_recipe_supportedby(self, d):
pn = d.getVar('PN', True)
filename = d.getVar('FILE', True)
collection = bb.utils.get_file_layer(filename, d)
return self.recipe_supportedby(pn, collection)
def recipe_supportedby(self, pn, collection):
# Returns list of of teams supporting the recipe (could be
# more than one or none).
result = set()
for recipe in self.supported:
supportedby = recipe.is_supportedby(pn, collection)
if supportedby:
result.add(supportedby)
return sorted(result)
def load_supported_recipes(d):
files = []
supported_files = d.getVar('SUPPORTED_RECIPES', True)
if not supported_files:
bb.fatal('SUPPORTED_RECIPES is not set')
supported_recipes = SupportedRecipes()
for filename in supported_files.split():
try:
base = os.path.basename(filename)
supportedby = d.getVarFlag('SUPPORTED_RECIPES', base, True)
if not supportedby:
# Strip the '.txt' suffix; rstrip() would also drop any trailing
# 't', 'x' or '.' characters, not just the extension.
supportedby = base[:-4] if base.endswith('.txt') else base
with open(filename) as f:
linenumber = 1
for line in f:
if line.startswith('#'):
continue
# TODO (?): sanity check the content to catch
# obsolete entries or typos.
pn = line.strip()
if pn:
supported_recipes.append(SupportedRecipe(line.strip(),
supportedby,
filename,
linenumber))
linenumber += 1
files.append(filename)
except OSError as ex:
bb.fatal('Could not read SUPPORTED_RECIPES = %s: %s' % (supported_files, str(ex)))
return (supported_recipes, files)
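# Added note (illustrative file content): each file listed in
# SUPPORTED_RECIPES holds one <recipe regex>@<collection regex> pattern per
# line, with '#' starting a comment:
#
#     # recipes maintained by the base team
#     glibc.*@core
#     linux-yocto@core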
def strip_multiconfig_prefix(name):
parts = name.split(':')
# There's an open bug about
# shortening "multiconfig" to "mc", so support both here
# (https://bugzilla.yoctoproject.org/show_bug.cgi?id=11168).
if len(parts) >= 2 and parts[0] in ('multiconfig', 'mc', 'virtual'):
if len(parts) == 2:
# There was a bug in bitbake where it produced multiconfig:qemuarm.gcc
# in the depgraph. Support that also.
parts = parts[1].split('.', 1)
if len(parts) == 2:
name = parts[1]
else:
name = parts[-1]
return name
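# Added usage sketch: both prefix spellings and the dotted fallback form
# reduce to the bare recipe name.
def _sketch_strip_multiconfig_prefix():
    assert strip_multiconfig_prefix('multiconfig:qemuarm:glibc') == 'glibc'
    assert strip_multiconfig_prefix('mc:qemuarm:glibc') == 'glibc'
    assert strip_multiconfig_prefix('multiconfig:qemuarm.gcc') == 'gcc'
    assert strip_multiconfig_prefix('glibc') == 'glibc'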
SOURCE_FIELDS = 'component,collection,version,homepage,source,summary,license'.split(',')
def gather_sources(d):
pn = d.getVar('PN', True)
# Shorten the name because it gets copied into the output below.
pn = strip_multiconfig_prefix(pn)
filename = d.getVar('FILE', True)
collection = bb.utils.get_file_layer(filename, d)
pv = d.getVar('PV', True)
summary = d.getVar('SUMMARY', True) or ''
homepage = d.getVar('HOMEPAGE', True) or ''
src = d.getVar('SRC_URI', True).split()
license = d.getVar('LICENSE', True)
sources = []
for url in src:
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if scheme != 'file':
parts = path.split(';')
if len(parts) > 1:
path = parts[0]
params = dict([x.split('=') if '=' in x else (x, '') for x in parts[1:]])
else:
params = {}
name = params.get('name', None)
sources.append((name, '%s://%s%s' % (scheme, netloc, path)))
# Produce SOURCE_FIELDS.
rows = []
for idx, val in enumerate(sources):
name, url = val
if name and len(sources) != 1:
fullname = '%s/%s' % (pn, name)
elif idx > 0:
fullname = '%s/%d' % (pn, idx)
else:
fullname = pn
rows.append((fullname, collection, pv, homepage, url, summary, license))
return rows
# Collects information about one recipe during parsing for SUPPORTED_RECIPES_SOURCES.
# The dumped information cannot be removed because it might be needed in future
# bitbake invocations, so the default location is inside the tmp directory.
def dump_sources(d):
pn = d.getVar('PN', True)
    # We need to distinguish between different multiconfigs. In check_build()
    # below, pn comes with the multiconfig prefix unless the multiconfig is
    # 'default', so construct the same form here.
mc = d.getVar('BB_CURRENT_MC', True)
if mc and mc != 'default':
pn = 'multiconfig:%s:%s' % (mc, pn)
filename = d.getVar('FILE', True)
rows = gather_sources(d)
dumpfile = d.getVar('SUPPORTED_RECIPES_SOURCES_DIR', True) + '/' + pn + filename
bb.utils.mkdirhier(os.path.dirname(dumpfile))
with open(dumpfile, 'w') as f:
# File intentionally kept small by not writing a header
# line. Guaranteed to contain SOURCE_FIELDS.
writer = csv.writer(f)
writer.writerows(rows)
class IsNative(object):
def __init__(self, d):
# Always add a trailing $ to ensure a full match.
native_recipes = d.getVar('SUPPORTED_RECIPES_NATIVE_RECIPES', True).split()
self.isnative_exception = re.compile('(' + '|'.join(native_recipes) + ')$')
self.isnative_baseclasses = d.getVar('SUPPORTED_RECIPES_NATIVE_BASECLASSES', True).split()
def __call__(self, pn, pndata):
for inherited in pndata['inherits']:
if os.path.basename(inherited) in self.isnative_baseclasses:
return True
# Some build recipes do not inherit cross.bbclass and must be skipped explicitly.
# The "real" recipes (in cases like glibc) still get checked. Other recipes are OE-core
# internal helpers.
        if self.isnative_exception.match(pn):
            return True
        return False
class TruncatedError(Exception):
pass
def dump_dependencies(depgraph, max_lines, unsupported):
# Walk the recipe dependency tree and add one line for each path that ends in
# an unsupported recipe.
lines = []
current_line = []
# Pre-compute complete dependencies (DEPEND and RDEPEND) for each recipe
# instead of doing it each time we reach a recipe. Also identifies those
# recipes that nothing depends on. They are the start points for the build.
roots = set(depgraph['pn'])
deps = {}
for task, taskdeps in depgraph['tdepends'].items():
pn = task.split('.')[0]
pndeps = deps.setdefault(pn, set())
for taskdep in taskdeps:
pndep = taskdep.split('.')[0]
if pndep != pn:
pndeps.add(pndep)
roots.discard(pndep)
for pn in deps:
deps[pn] = sorted(deps[pn])
# We can prune the search tree a lot by keeping track of those recipes which are already
# known to not depend on an unsupported recipe.
okay = set()
def visit_recipe(pn):
if pn in okay:
return False
if pn in current_line:
            # Recursive recipe-level dependency, bail out. This can happen
            # because we flattened the task dependencies; the task graph
            # itself has no cycles.
return False
current_line.append(pn)
printed = False
for dep in deps.get(pn, []):
if visit_recipe(dep):
printed = True
if not printed and \
pn in unsupported and \
not len(current_line) == 1:
            # Current path is non-trivial, ends in an unsupported recipe and was not already
# included in a longer, printed path. Add a copy to the output.
if len(lines) >= max_lines:
raise TruncatedError()
lines.append(current_line[:])
printed = True
        if not printed and pn not in unsupported:
okay.add(pn)
del current_line[-1]
return printed
truncated = False
try:
for pn in sorted(roots):
visit_recipe(pn)
except TruncatedError:
truncated = True
return lines, truncated
def collection_hint(pn, supported_recipes):
# Determines whether the recipe would be supported in some other collection.
collections = set([supported_recipe.collection_re[1]
for supported_recipe
in supported_recipes.supported
if supported_recipe.is_supportedby(pn, None)])
return ' (would be supported in %s)' % ' '.join(collections) if collections else ''
def dump_unsupported(unsupported, supported_recipes):
    # Turns the mapping from unsupported recipe to its collection
# into a sorted list of entries in the final report.
lines = []
for pn, collection in unsupported.items():
# Left and right side of the <recipe>@<collection> entries are
# regular expressions. In contrast to re.escape(), we only
# escape + (as in gtk+3). Escaping all non-alphanumerics
# makes many entries (like linux-yocto) unnecessarily less
# readable (linux\-yocto).
pn = pn.replace('+', r'\+')
collection = collection.replace('+', r'\+')
hint = collection_hint(pn, supported_recipes)
entry = '%s@%s%s' % (pn, collection, hint)
lines.append(entry)
return sorted(lines)
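# Illustrative report entry produced by dump_unsupported() (recipe, collection
# and hint are hypothetical): an unsupported 'gtk+3' in collection 'my-layer'
# becomes the line
#   gtk\+3@my-layer (would be supported in core)
# where the hint only appears if some existing entry would match the recipe
# name in another collection.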
def check_build(d, event, tinfoil=None):
supported_recipes, files = load_supported_recipes(d)
supported_recipes_check = d.getVar('SUPPORTED_RECIPES_CHECK', True)
report_sources = d.getVar('SUPPORTED_RECIPES_SOURCES', True)
# Bail out early if nothing to do.
if not supported_recipes_check and not report_sources:
return
isnative = IsNative(d)
valid = ('note', 'warn', 'error', 'fatal', '')
if supported_recipes_check not in valid:
bb.fatal('SUPPORTED_RECIPES_CHECK must be set to one of %s, currently is: %s' %
('/'.join(valid), supported_recipes_check))
# See bitbake/lib/bb/cooker.py buildDependTree() for the content of the depgraph hash.
# Basically it mirrors the information dumped by "bitbake -g".
depgraph = event._depgraph
# import pprint
# bb.note('depgraph: %s' % pprint.pformat(depgraph))
dirname = d.getVar('SUPPORTED_RECIPES_SOURCES_DIR', True)
unsupported = {}
sources = []
bb.note('Checking active recipes')
for pn, pndata in depgraph['pn'].items():
# Both SUPPORTED_RECIPES_NATIVE_RECIPES and the mapping files in SUPPORTED_RECIPES_SOURCES
# are without the multiconfig prefix, so strip that.
pn_stripped = strip_multiconfig_prefix(pn)
# We only care about recipes compiled for the target.
# Most native ones can be detected reliably because they inherit native.bbclass,
# but some special cases have to be hard-coded.
# Image recipes also do not matter.
if not isnative(pn_stripped, pndata):
filename = pndata['filename']
collection = bb.utils.get_file_layer(strip_multiconfig_prefix(filename), d)
supportedby = supported_recipes.recipe_supportedby(pn_stripped, collection)
if not supportedby:
unsupported[pn_stripped] = collection
if report_sources:
def add_rows(rows):
for row in rows:
row_hash = {f: row[i] for i, f in enumerate(SOURCE_FIELDS)}
row_hash['supported'] = 'yes (%s)' % ' '.join(supportedby) \
if supportedby else 'no'
sources.append(row_hash)
if tinfoil:
bb.note('Parsing %s' % filename)
pn_d = tinfoil.parse_recipe_file(filename)
rows = gather_sources(pn_d)
add_rows(rows)
else:
dumpfile = os.path.join(dirname, pn + strip_multiconfig_prefix(filename))
# Work around multiconfig:qemuarm.gcc vs multiconfig:qemuarm:gcc bug.
if not os.path.exists(dumpfile):
pn = re.sub(r'^multiconfig:([^:.]*)\.', r'multiconfig:\1:', pn)
dumpfile2 = os.path.join(dirname, pn + strip_multiconfig_prefix(filename))
if os.path.exists(dumpfile2):
dumpfile = dumpfile2
with open(dumpfile) as f:
reader = csv.reader(f)
add_rows(reader)
if report_sources:
def write_report(f):
fields = SOURCE_FIELDS[:]
# Insert after 'collection'.
fields.insert(fields.index('collection') + 1, 'supported')
extensions = []
for importer, modname, ispkg in pkgutil.iter_modules(supportedrecipesreport.__path__):
module = __import__('supportedrecipesreport.' + modname, fromlist="dummy")
for name, clazz in inspect.getmembers(module, inspect.isclass):
if issubclass(clazz, Columns):
extensions.append(clazz(d, sources))
for e in extensions:
e.extend_header(fields)
writer = csv.writer(f)
writer.writerow(fields)
for row in sources:
for e in extensions:
e.extend_row(row)
# Sort by first column, then second column, etc., after extending all rows.
            # Also de-duplicate. Duplicates can occur when the exact same component
            # is used multiple times by different recipes, or when a multiconfig build
            # builds the same recipe more than once.
for row in sorted(set([tuple([r.get(f, None) for f in fields]) for r in sources])):
writer.writerow(row)
if report_sources == '-':
write_report(sys.stdout)
else:
# Always write as UTF-8, regardless of the current system locale.
# If that locale is the C locale, writing UTF-8 strings with non-ASCII
# characters would fail.
with open(report_sources, 'w', encoding='utf-8') as f:
write_report(f)
bb.note('Wrote supported recipes report to %s.' % ('stdout' if report_sources == '-' else report_sources))
if supported_recipes_check and unsupported:
max_lines = int(d.getVar('SUPPORTED_RECIPES_CHECK_DEPENDENCY_LINES', True))
dependencies, truncated = dump_dependencies(depgraph, max_lines, unsupported)
output = []
output.append('The following unsupported recipes are required for the build:')
output.extend([' ' + line for line in dump_unsupported(unsupported, supported_recipes)])
output.append('''
Each unsupported recipe is identified by the recipe name and the collection
in which it occurs and has to be marked as supported (see below) using that
format. Typically each layer has exactly one collection.''')
if dependencies:
# Add the optional dependency dump.
output.append('''
Here are the dependency chains (including DEPENDS and RDEPENDS)
which include one or more of the unsupported recipes. -> means "depends on"
and * marks unsupported recipes:''')
for line in dependencies:
line_entries = [('*' if pn in unsupported else '') + pn for pn in line]
output.append(' ' + ' -> '.join(line_entries))
if truncated:
output.append('''...
Output truncated, to see more increase SUPPORTED_RECIPES_CHECK_DEPENDENCY_LINES (currently %d).''' %
max_lines)
output.append('''
To avoid this message, several options exist:
* Check the dependency chain(s) to see why a recipe gets pulled in and perhaps
change recipe configurations or image content to avoid pulling in undesired
components.
* If the recipe is supported in some other layer, disable the unsupported one
with BBMASK.
* Add the unsupported recipes to one of the following files:
%s
Regular expressions are supported on both sides of the @ separator.
* Create a new file which lists the unsupported recipes and extend SUPPORTED_RECIPES:
SUPPORTED_RECIPES_append = " <path>/recipes-supported-by-me.txt"
  See meta-refkit/conf/layer.conf and refkit.conf for an example of how the path can be
derived automatically. The expectation is that SUPPORTED_RECIPES gets set in
distro configuration files, depending on the support provided by the distro
creator.
* Disable the check with SUPPORTED_RECIPES_CHECK = "" in local.conf.
'bitbake -g <build target>' produces .dot files showing these dependencies.
''' % '\n '.join(files))
bb.__dict__[supported_recipes_check]('\n'.join(output))
| {
"content_hash": "d120a7d4611e9282bb8544023b38196e",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 114,
"avg_line_length": 42.57815845824411,
"alnum_prop": 0.6011365922349627,
"repo_name": "klihub/intel-iot-refkit",
"id": "da7cdba7e3c6065386c8ec32c10f23b1157eb840",
"size": "19966",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "meta-refkit-core/lib/supportedrecipes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11527"
},
{
"name": "BitBake",
"bytes": "108294"
},
{
"name": "C",
"bytes": "175786"
},
{
"name": "C++",
"bytes": "467"
},
{
"name": "CMake",
"bytes": "838"
},
{
"name": "Java",
"bytes": "504"
},
{
"name": "JavaScript",
"bytes": "25003"
},
{
"name": "M4",
"bytes": "9757"
},
{
"name": "Makefile",
"bytes": "3002"
},
{
"name": "Mask",
"bytes": "787"
},
{
"name": "PHP",
"bytes": "11784"
},
{
"name": "Python",
"bytes": "480794"
},
{
"name": "Shell",
"bytes": "88088"
},
{
"name": "SourcePawn",
"bytes": "2662"
}
],
"symlink_target": ""
} |
from typing import Optional, TYPE_CHECKING
from typing import cast as typecast
from ..config import Config
from ..utils import RichStatus
from ..resource import Resource
from .irfilter import IRFilter
from .ircluster import IRCluster
if TYPE_CHECKING:
from .ir import IR # pragma: no cover
class IRBuffer (IRFilter):
def __init__(self, ir: 'IR', aconf: Config,
rkey: str="ir.buffer",
name: str="ir.buffer",
kind: str="IRBuffer",
**kwargs) -> None:
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name, **kwargs)
def setup(self, ir: 'IR', aconf: Config) -> bool:
max_request_bytes = self.pop('max_request_bytes', None)
if max_request_bytes is not None:
self["max_request_bytes"] = max_request_bytes
else:
self.post_error(RichStatus.fromError("missing required field: max_request_bytes"))
return False
if self.pop('max_request_time', None):
self.ir.aconf.post_notice("'max_request_time' is no longer supported, ignoring", self)
return True
| {
"content_hash": "37221f507a3d080a9b08d84cafdf7bd6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 98,
"avg_line_length": 31.27027027027027,
"alnum_prop": 0.6101987899740708,
"repo_name": "datawire/ambassador",
"id": "044649a0e81608fb9609ab147843d47c8a74d8b4",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ambassador/ir/irbuffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "20990"
},
{
"name": "Go",
"bytes": "564752"
},
{
"name": "HTML",
"bytes": "25150"
},
{
"name": "JavaScript",
"bytes": "32368"
},
{
"name": "Makefile",
"bytes": "113905"
},
{
"name": "Python",
"bytes": "1158187"
},
{
"name": "Shell",
"bytes": "188832"
}
],
"symlink_target": ""
} |
__author__ = 'tbeltramelli'
from UInteractive import *
class EpipolarGeometry:
_points = []
_img = None
_raw_img = None
_fundamental_matrix = None
_epipole = None
_MAX_POINT_NUMBER = 16
is_ready = False
def __init__(self, img, define_manually=True):
self._img = copy(img)
self._raw_img = img
if define_manually:
left_points, right_points = self.get_manually_selected_features()
fundamental_matrix, mask = cv2.findFundamentalMat(left_points, right_points)
self.show_lines(left_points, right_points, fundamental_matrix)
def show_lines(self, left_points, right_points, fundamental_matrix, return_result=False):
self.build_epipolar_lines(left_points, fundamental_matrix, False)
self.build_epipolar_lines(right_points, fundamental_matrix, True)
if not return_result:
UMedia.show(self._raw_img)
UInteractive.pause()
else:
return self._raw_img
def get_manually_selected_features(self):
UMedia.show(self._img)
cv2.setMouseCallback("image 0", self.mouse_event)
UInteractive.pause("Select 8 points in each image")
left_points = np.array(self._points[::2])
right_points = np.array(self._points[1::2])
return left_points, right_points
def build_epipolar_lines(self, points, fundamental_matrix, is_right, show_lines=True):
lines = cv2.computeCorrespondEpilines(points, 2 if is_right else 1, fundamental_matrix)
lines = lines.reshape(-1, 3)
if show_lines:
self.draw_lines(self._raw_img, lines, points, is_right)
def draw_lines(self, img, lines, points, is_right):
height, width, layers = img.shape
color = (0, 0, 255) if not is_right else (255, 0, 0)
x_gap_point = 0 if not is_right else width / 2
x_gap_line = 0 if is_right else width / 2
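        # Each epipolar line from cv2.computeCorrespondEpilines is given as
        # coefficients (a, b, c) of a*x + b*y + c = 0, so the endpoints below
        # evaluate y = -(c + a*x) / b at x = 0 and x = width / 2.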
        for line, row in zip(lines, points):
            x_start, y_start = map(int, [0, -line[2]/line[1]])
            x_end, y_end = map(int, [width/2, -(line[2]+line[0]*(width/2))/line[1]])
row = map(int, row)
cv2.line(img, (x_start + x_gap_line, y_start), (x_end + x_gap_line, y_end), color, 1)
cv2.circle(img, (row[0] + x_gap_point, row[1]), 3, color)
return img
def mouse_event(self, event, x, y, flag, param):
if event == cv2.EVENT_LBUTTONDOWN:
if self.is_ready:
return
height, width, layers = self._img.shape
point = (x, y)
color = (0, 0, 255) if len(self._points) % 2 == 0 else (255, 0, 0)
cv2.circle(self._img, point, 3, color, thickness=-1)
point = (point[0] - (0 if len(self._points) % 2 == 0 else width / 2), point[1])
point = (point[0], point[1], 1)
self._points.append(point)
            if len(self._points) == self._MAX_POINT_NUMBER:
print "done"
self.is_ready = True
UMedia.show(self._img)
elif event == cv2.EVENT_RBUTTONUP:
self._points = []
self._img = copy(self._raw_img)
UMedia.show(self._img) | {
"content_hash": "c7ff52656fc281257a0efa8d78d77daf",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 97,
"avg_line_length": 33.29896907216495,
"alnum_prop": 0.5739938080495356,
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"id": "6e164e7743105612cb9bb25e0b18aeccbc83a9ad",
"size": "3230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Stereo-Vision-System/tony/com.tonybeltramelli.stereo/EpipolarGeometry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "343757"
}
],
"symlink_target": ""
} |
"""Support for Ambiclimate devices."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_validation as cv
from . import config_flow
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up Ambiclimate components."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
config_flow.register_flow_implementation(
hass, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET]
)
return True
async def async_setup_entry(hass, entry):
"""Set up Ambiclimate from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "climate")
)
return True
| {
"content_hash": "cf803e5293dbca7774436733ea229753",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 71,
"avg_line_length": 22.48936170212766,
"alnum_prop": 0.6471144749290445,
"repo_name": "pschmitt/home-assistant",
"id": "490c41255bfbf9516cac9183b201e958e77349c2",
"size": "1057",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ambiclimate/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
import os.path
import pathlib
from typing import List
import requirements as rp
def collect_requirements_files() -> List[pathlib.Path]:
project_root = pathlib.Path(__file__).parents[1]
assert project_root.exists()
reqs_dir = project_root.joinpath("reqs")
assert reqs_dir.exists()
pattern = "requirements*.txt"
return list(project_root.glob(pattern)) + list(reqs_dir.glob(pattern))
def test_requirements_files():
"""requirements.txt should be a subset of requirements-dev.txt"""
req_set_dict = {}
req_files = collect_requirements_files()
for req_file in req_files:
abs_path = req_file.absolute().as_posix()
key = abs_path.rsplit(os.path.sep, 1)[-1]
with open(req_file) as f:
req_set_dict[key] = {
f'{line.name}{"".join(line.specs[0])}'
for line in rp.parse(f)
if line.specs
}
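    # Illustrative (assumed) shape of the parsed data: each set holds pinned
    # requirement strings such as 'numpy>=1.21.0'; lines without a version
    # specifier are deliberately skipped by the comprehension above.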
assert req_set_dict["requirements.txt"] <= req_set_dict["requirements-dev.txt"]
assert (
req_set_dict["requirements-dev-contrib.txt"]
| req_set_dict["requirements-dev-lite.txt"]
== req_set_dict["requirements-dev-test.txt"]
)
assert (
req_set_dict["requirements-dev-lite.txt"]
& req_set_dict["requirements-dev-spark.txt"]
== set()
)
assert (
req_set_dict["requirements-dev-spark.txt"]
& req_set_dict["requirements-dev-sqlalchemy.txt"]
& req_set_dict["requirements-dev-azure.txt"]
== set()
)
assert (
req_set_dict["requirements-dev-lite.txt"]
& req_set_dict["requirements-dev-contrib.txt"]
== set()
)
assert (
req_set_dict["requirements-dev-lite.txt"]
| req_set_dict["requirements-dev-athena.txt"]
| req_set_dict["requirements-dev-bigquery.txt"]
| req_set_dict["requirements-dev-dremio.txt"]
| req_set_dict["requirements-dev-mssql.txt"]
| req_set_dict["requirements-dev-mysql.txt"]
| req_set_dict["requirements-dev-postgresql.txt"]
| req_set_dict["requirements-dev-redshift.txt"]
| req_set_dict["requirements-dev-snowflake.txt"]
| req_set_dict["requirements-dev-teradata.txt"]
| req_set_dict["requirements-dev-trino.txt"]
| req_set_dict["requirements-dev-hive.txt"]
| req_set_dict["requirements-dev-vertica.txt"]
) == req_set_dict["requirements-dev-sqlalchemy.txt"]
assert (
req_set_dict["requirements.txt"]
| req_set_dict["requirements-dev-contrib.txt"]
| req_set_dict["requirements-dev-sqlalchemy.txt"]
| req_set_dict["requirements-dev-arrow.txt"]
| req_set_dict["requirements-dev-azure.txt"]
| req_set_dict["requirements-dev-excel.txt"]
| req_set_dict["requirements-dev-pagerduty.txt"]
| req_set_dict["requirements-dev-spark.txt"]
) == req_set_dict["requirements-dev.txt"]
assert req_set_dict["requirements-dev.txt"] - (
req_set_dict["requirements.txt"]
| req_set_dict["requirements-dev-lite.txt"]
| req_set_dict["requirements-dev-contrib.txt"]
| req_set_dict["requirements-dev-spark.txt"]
| req_set_dict["requirements-dev-sqlalchemy.txt"]
| req_set_dict["requirements-dev-arrow.txt"]
| req_set_dict["requirements-dev-athena.txt"]
| req_set_dict["requirements-dev-azure.txt"]
| req_set_dict["requirements-dev-bigquery.txt"]
| req_set_dict["requirements-dev-dremio.txt"]
| req_set_dict["requirements-dev-excel.txt"]
| req_set_dict["requirements-dev-mssql.txt"]
| req_set_dict["requirements-dev-mysql.txt"]
| req_set_dict["requirements-dev-pagerduty.txt"]
| req_set_dict["requirements-dev-postgresql.txt"]
| req_set_dict["requirements-dev-redshift.txt"]
| req_set_dict["requirements-dev-snowflake.txt"]
| req_set_dict["requirements-dev-teradata.txt"]
| req_set_dict["requirements-dev-trino.txt"]
| req_set_dict["requirements-dev-vertica.txt"]
) <= {"numpy>=1.21.0", "scipy>=1.7.0"}
| {
"content_hash": "3aacb888dba065beeccf3efc18bb30fc",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 83,
"avg_line_length": 37.9537037037037,
"alnum_prop": 0.6150280556233227,
"repo_name": "great-expectations/great_expectations",
"id": "0c3f99602887e1c97a488dc1b4959c395d84b05b",
"size": "4099",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_packaging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
"""Varnish Monitoring check."""
from __future__ import print_function
import sys
import argparse
import subprocess
import json
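# Illustrative invocations (field names other than the default are assumptions,
# not taken from the original script):
#   ./check_varnish.py -f MAIN.sess_dropped -w 5 -c 10
#   ./check_varnish.py -f MAIN.cache_hit,MAIN.cache_miss   # multiple fields: listing/graphing only
#   ./check_varnish.py -n myinstance -f MAIN.sess_dropped -w 5 -c 10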
fields=[]
instance=''
warning=''
critical=''
def check():
global fields,instance,warning,critical
keys=[]
values=[]
output=''
perfdata=''
try:
if instance:
cmd = ['/usr/bin/varnishstat','-1','-j','-n', instance]
else:
cmd = ['/usr/bin/varnishstat','-1','-j']
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output, unused_err = process.communicate()
retcode = process.poll()
except OSError as e:
print("Error: Executing command failed:")
print(' '.join(cmd))
sys.exit(2)
json_data = json.loads(output)
for field in fields:
#print(field) # Debug
#print(json_data[field]['value']) # Debug
keys.append(field)
values.append(json_data[field]['value'])
if (len(keys) == 1):
# Single value, can be compared against thresholds
if ( critical > 0 ) and ( values[0] >= critical ):
output="VARNISH CRITICAL - {} is {} (greater than threshold {})" .format(keys[0], values[0], critical)
perfdata="{}={};{};{};;" .format(keys[0], values[0], warning, critical)
print("{} | {}" .format(output, perfdata))
sys.exit(2)
        elif ( warning > 0 ) and ( values[0] >= warning ):
            output="VARNISH WARNING - {} is {} (greater than threshold {})" .format(keys[0], values[0], warning)
            perfdata="{}={};{};{};;" .format(keys[0], values[0], warning, critical)
            print("{} | {}" .format(output, perfdata))
            sys.exit(1)
        else:
            output="VARNISH OK - {} is {}" .format(keys[0], values[0])
            perfdata="{}={};{};{};;" .format(keys[0], values[0], warning, critical)
            print("{} | {}" .format(output, perfdata))
            sys.exit(0)
else:
# Multiple values checked, no thresholds just listing (main purpose: graphing)
x=0
multiout=''
multiperfdata=''
for key in keys:
multiout += "{} is {} - " .format(keys[x], values[x])
multiperfdata += "{}={};{};{};; " .format(keys[x], values[x], warning, critical)
x+=1
print("VARNISH OK - {} | {}" .format(multiout, multiperfdata))
sys.exit(0)
# ----------------------------------------------------------------------
def getopts():
global fields,instance,warning,critical
argp = argparse.ArgumentParser(description=__doc__)
argp.add_argument('-w', '--warning', metavar='RANGE', dest='arg_warning', default=0,
help='return warning if value is outside RANGE')
argp.add_argument('-c', '--critical', metavar='RANGE', dest='arg_critical', default=0,
help='return critical if value is outside RANGE')
req = argp.add_mutually_exclusive_group(required=True)
req.add_argument('-f', '--field', metavar='FIELD', dest='arg_field', action='store', default='MAIN.sess_dropped',
help='field to query')
argp.add_argument('-n', '--name', metavar='NAME', dest='arg_name', action='store', default='',
help='name of Varnish instance (optional)')
args = argp.parse_args()
fields=args.arg_field.split(',')
instance=args.arg_name
warning=int(args.arg_warning)
critical=int(args.arg_critical)
# ----------------------------------------------------------------------
getopts()
check()
| {
"content_hash": "59552b793ddef3f2679e804a9f797a64",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 117,
"avg_line_length": 34.608247422680414,
"alnum_prop": 0.5719392314566577,
"repo_name": "olivierHa/check_varnish",
"id": "3515ea689ac9ed7cbb7cac90fb691136c3e5552b",
"size": "4680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_varnish.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4610"
}
],
"symlink_target": ""
} |
import base64
from unittest import TestCase
from otp import OTP
__author__ = 'Terry Chia'
class TestOTP(TestCase):
def setUp(self):
self.secret = base64.b32encode('12345678901234567890')
def test_generate_hotp(self):
# Test vectors taken from RFC 4226, Appendix E
self.assertEqual('755224', OTP.generate_hotp(self.secret, 0))
self.assertEqual('287082', OTP.generate_hotp(self.secret, 1))
self.assertEqual('359152', OTP.generate_hotp(self.secret, 2))
self.assertEqual('969429', OTP.generate_hotp(self.secret, 3))
self.assertEqual('338314', OTP.generate_hotp(self.secret, 4))
self.assertEqual('254676', OTP.generate_hotp(self.secret, 5))
self.assertEqual('287922', OTP.generate_hotp(self.secret, 6))
self.assertEqual('162583', OTP.generate_hotp(self.secret, 7))
self.assertEqual('399871', OTP.generate_hotp(self.secret, 8))
self.assertEqual('520489', OTP.generate_hotp(self.secret, 9))
def test_generate_totp(self):
# Test vectors taken from RFC 6238, Appendix B
self.assertEqual('94287082', OTP.generate_totp(self.secret, 59, 8))
self.assertEqual('07081804', OTP.generate_totp(self.secret, 1111111109, 8))
self.assertEqual('14050471', OTP.generate_totp(self.secret, 1111111111, 8))
self.assertEqual('89005924', OTP.generate_totp(self.secret, 1234567890, 8))
self.assertEqual('69279037', OTP.generate_totp(self.secret, 2000000000, 8))
self.assertEqual('65353130', OTP.generate_totp(self.secret, 20000000000, 8))
def test_validate_hotp(self):
self.assertTrue(OTP.validate_hotp('755224', self.secret, 0))
self.assertTrue(OTP.validate_hotp('287082', self.secret, 0))
self.assertFalse(OTP.validate_hotp('969429', self.secret, 0))
def test_validate_totp(self):
self.assertTrue(OTP.validate_totp('07081804', self.secret, 1111111109, 8))
self.assertTrue(OTP.validate_totp('07081804', self.secret, 1111111084, 8))
self.assertFalse(OTP.validate_totp('07081804', self.secret, 1111111078, 8))
self.assertFalse(OTP.validate_totp('07081804', self.secret, 1111111140, 8)) | {
"content_hash": "e77354732abb4fad13980cca6bed41b5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 84,
"avg_line_length": 48.71111111111111,
"alnum_prop": 0.6843065693430657,
"repo_name": "Ayrx/py-otp",
"id": "a9e6181177d3004b584e0b3b9f443717698c076b",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_OTP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17787"
},
{
"name": "Shell",
"bytes": "6695"
}
],
"symlink_target": ""
} |
from . import core
from .core import *
__all__ = ['core'] + core.__all__
| {
"content_hash": "07538546434958f3af040ef8a6534751",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 22.5,
"alnum_prop": 0.4666666666666667,
"repo_name": "jmunar/pymc3-kalman",
"id": "5d4d61620ed8e13cc518bbaecc62da9f3642b7ab",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kalman/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "196779"
},
{
"name": "Python",
"bytes": "7904"
}
],
"symlink_target": ""
} |
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
have_setuptools = True
else:
import distutils.command.install as old_install_mod
have_setuptools = False
old_install = old_install_mod.install
from distutils.file_util import write_file
class install(old_install):
# Always run install_clib - the command is cheap, so no need to bypass it
sub_commands = old_install.sub_commands + [('install_clib', lambda x: True)]
def finalize_options (self):
old_install.finalize_options(self)
self.install_lib = self.install_libbase
def setuptools_run(self):
""" The setuptools version of the .run() method.
We must pull in the entire code so we can override the level used in the
_getframe() call since we wrap this call by one more level.
"""
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return old_install_mod._install.run(self)
# Attempt to detect whether we were called from setup() or by another
# command. If we were called by setup(), our caller will be the
# 'run_command' method in 'distutils.dist', and *its* caller will be
# the 'run_commands' method. If we were called any other way, our
# immediate caller *might* be 'run_command', but it won't have been
# called by 'run_commands'. This is slightly kludgy, but seems to
# work.
#
caller = sys._getframe(3)
caller_module = caller.f_globals.get('__name__','')
caller_name = caller.f_code.co_name
if caller_module != 'distutils.dist' or caller_name!='run_commands':
# We weren't called from the command line or setup(), so we
# should run in backward-compatibility mode to support bdist_*
# commands.
old_install_mod._install.run(self)
else:
self.do_egg_install()
def run(self):
if not have_setuptools:
r = old_install.run(self)
else:
r = self.setuptools_run()
if self.record:
# bdist_rpm fails when INSTALLED_FILES contains
# paths with spaces. Such paths must be enclosed
# with double-quotes.
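            # Illustrative (hypothetical path): an entry such as
            #   /usr/lib/python2.5/site-packages/my pkg/data.txt
            # is rewritten below as
            #   "/usr/lib/python2.5/site-packages/my pkg/data.txt"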
f = open(self.record,'r')
lines = []
need_rewrite = False
for l in f.readlines():
l = l.rstrip()
if ' ' in l:
need_rewrite = True
l = '"%s"' % (l)
lines.append(l)
f.close()
if need_rewrite:
self.execute(write_file,
(self.record, lines),
"re-writing list of installed files to '%s'" %
self.record)
return r
| {
"content_hash": "61077feb830c430a2f8b54ccf77773a2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 39.445945945945944,
"alnum_prop": 0.5758821514217197,
"repo_name": "illume/numpy3k",
"id": "099ad5c16502ebaa078dab4a5b38be7249c9c5e6",
"size": "2919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/distutils/command/install.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4499625"
},
{
"name": "C++",
"bytes": "22396"
},
{
"name": "FORTRAN",
"bytes": "8946"
},
{
"name": "Python",
"bytes": "3740754"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
from slicc.ast.DeclAST import DeclAST
from slicc.symbols import Var
class ObjDeclAST(DeclAST):
def __init__(self, slicc, type_ast, ident, pairs, rvalue, pointer):
super(ObjDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.ident = ident
self.rvalue = rvalue
self.pointer = pointer
def __repr__(self):
return "[ObjDecl: %r]" % self.ident
def generate(self, parent = None):
if "network" in self and not ("virtual_network" in self or
"physical_network" in self) :
self.error("Network queues require a 'virtual_network' attribute")
type = self.type_ast.type
if type.isBuffer and "ordered" not in self:
self.error("Buffer object decls require an 'ordered' attribute")
if "ordered" in self:
value = self["ordered"]
if value not in ("true", "false"):
                self.error("The 'ordered' attribute is '%s' but " + \
                           "must be 'true' or 'false'.", value)
if "random" in self:
value = self["random"]
if value not in ("true", "false"):
                self.error("The 'random' attribute is '%s' but " + \
                           "must be 'true' or 'false'.", value)
# FIXME : should all use accessors here to avoid public member
# variables
if self.ident == "version":
c_code = "m_version"
elif self.ident == "machineID":
c_code = "m_machineID"
elif self.ident == "clusterID":
c_code = "m_clusterID"
else:
c_code = "(*m_%s_ptr)" % (self.ident)
# check type if this is a initialization
init_code = ""
if self.rvalue:
rvalue_type,init_code = self.rvalue.inline(True)
if type != rvalue_type:
self.error("Initialization type mismatch '%s' and '%s'" % \
(type, rvalue_type))
machine = self.symtab.state_machine
v = Var(self.symtab, self.ident, self.location, type, c_code,
self.pairs, machine)
# Add data member to the parent type
if parent:
if not parent.addDataMember(self.ident, type, self.pairs, init_code):
self.error("Duplicate data member: %s:%s" % (parent, self.ident))
elif machine:
machine.addObject(v)
else:
self.symtab.newSymbol(v)
| {
"content_hash": "85aba30e5d21cea55cb78454a6c81fae",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 81,
"avg_line_length": 34.736111111111114,
"alnum_prop": 0.5325869652139145,
"repo_name": "kaiyuanl/gem5",
"id": "92ff15d521341b3179ede2458746034c0aa10bc3",
"size": "4104",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "src/mem/slicc/ast/ObjDeclAST.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "241510"
},
{
"name": "C",
"bytes": "1003474"
},
{
"name": "C++",
"bytes": "14707972"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "M4",
"bytes": "49620"
},
{
"name": "Makefile",
"bytes": "27976"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "7033"
},
{
"name": "Python",
"bytes": "3906788"
},
{
"name": "Shell",
"bytes": "49333"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import mock
from nose.tools import raises
from unittest import TestCase
from minio import Minio
from minio.api import _DEFAULT_USER_AGENT
from minio.error import ResponseError
from .minio_mocks import MockResponse, MockConnection
from .helpers import generate_error
class GetObjectTest(TestCase):
@raises(TypeError)
def test_object_is_string(self):
client = Minio('localhost:9000')
client.get_object('hello', 1234)
@raises(ValueError)
def test_object_is_not_empty_string(self):
client = Minio('localhost:9000')
client.get_object('hello', ' \t \n ')
@mock.patch('urllib3.PoolManager')
@raises(ResponseError)
def test_get_object_throws_fail(self, mock_connection):
error_xml = generate_error('code', 'message', 'request_id',
'host_id', 'resource', 'bucket',
'object')
mock_server = MockConnection()
mock_connection.return_value = mock_server
mock_server.mock_add_request(MockResponse('GET',
'https://localhost:9000/hello/key',
{'User-Agent': _DEFAULT_USER_AGENT},
404, content=error_xml))
client = Minio('localhost:9000')
client.get_object('hello', 'key')
| {
"content_hash": "43a993ab5c86997455fac3fd3591e34a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 86,
"avg_line_length": 38.52777777777778,
"alnum_prop": 0.5796683489545782,
"repo_name": "harshavardhana/minio-py",
"id": "fa49c6596a6140976c0cb773a9ad53188f487a94",
"size": "2042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/get_object_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "201306"
}
],
"symlink_target": ""
} |
__author__ = 'Haleemur Ali'
"""
Tests to validate that the correct delete statement is
issued for the redshift dialect of SQL.
These tests use a simple transaction schema.
For simple delete statements that don't have a ``WHERE`` clause
or whose ``WHERE`` clause only refers to columns from the
target table, the emitted query should match that emitted
for the postgresql dialect.
However, for more complex queries, an extra ``USING`` clause is required.
For example, the following is valid in Postgresql:
.. :code-block: sql
DELETE FROM customers
WHERE customers.id = orders.customer_id
AND orders.id < 100
This same query needs to be written like this in Redshift:
.. :code-block: sql
DELETE FROM customers
USING orders
WHERE customers.id = orders.customer_id
AND orders.id < 100
"""
import sqlalchemy as sa
from packaging.version import Version
from rs_sqla_test_utils.utils import clean, compile_query
sa_version = Version(sa.__version__)
meta = sa.MetaData()
customers = sa.Table(
'customers', meta,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('first_name', sa.String(128)),
sa.Column('last_name', sa.String(128)),
sa.Column('email', sa.String(255))
)
orders = sa.Table(
'orders', meta,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('customer_id', sa.Integer),
sa.Column('total_invoiced', sa.Numeric(12, 4)),
sa.Column('discount_invoiced', sa.Numeric(12, 4)),
sa.Column('grandtotal_invoiced', sa.Numeric(12, 4)),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime)
)
items = sa.Table(
'items', meta,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('order_id', sa.Integer),
sa.Column('product_id', sa.Integer),
sa.Column('name', sa.String(255)),
sa.Column('qty', sa.Numeric(12, 4)),
sa.Column('price', sa.Numeric(12, 4)),
sa.Column('total_invoiced', sa.Numeric(12, 4)),
sa.Column('discount_invoiced', sa.Numeric(12, 4)),
sa.Column('grandtotal_invoiced', sa.Numeric(12, 4)),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime)
)
product = sa.Table(
'products', meta,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('parent_id', sa.Integer),
sa.Column('name', sa.String(255)),
sa.Column('price', sa.Numeric(12, 4))
)
ham = sa.Table(
'ham', meta,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False)
)
spam = sa.Table(
'spam', meta,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False)
)
hammy_spam = sa.Table(
'ham, spam', meta,
sa.Column('ham_id', sa.Integer, sa.ForeignKey('ham.id')),
sa.Column('spam_id', sa.Integer, sa.ForeignKey('spam.id'))
)
def test_delete_stmt_nowhereclause(stub_redshift_dialect):
del_stmt = sa.delete(customers)
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
'DELETE FROM customers'
def test_delete_stmt_simplewhereclause1(stub_redshift_dialect):
del_stmt = sa.delete(customers).where(
customers.c.email == 'test@test.test'
)
expected = """
DELETE FROM customers
WHERE customers.email = 'test@test.test'"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_simplewhereclause2(stub_redshift_dialect):
del_stmt = sa.delete(customers).where(
customers.c.email.endswith('test.com')
)
if sa_version >= Version('1.4.0'):
expected = """
DELETE FROM customers
WHERE (customers.email LIKE '%%' || 'test.com')"""
else:
expected = """
DELETE FROM customers
WHERE customers.email LIKE '%%' || 'test.com'"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_joinedwhereclause1(stub_redshift_dialect):
del_stmt = sa.delete(orders).where(
orders.c.customer_id == customers.c.id
)
expected = """
DELETE FROM orders
USING customers
WHERE orders.customer_id = customers.id"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_joinedwhereclause2(stub_redshift_dialect):
del_stmt = sa.delete(
orders
).where(
orders.c.customer_id == customers.c.id
).where(
orders.c.id == items.c.order_id
).where(
customers.c.email.endswith('test.com')
).where(
items.c.name == 'test product'
)
expected = """
DELETE FROM orders
USING customers, items
WHERE orders.customer_id = customers.id
AND orders.id = items.order_id
AND (customers.email LIKE '%%' || 'test.com')
AND items.name = 'test product'"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_subqueryplusjoin(stub_redshift_dialect):
del_stmt = sa.delete(
orders
).where(
orders.c.customer_id.in_(
sa.select(
[customers.c.id]
).where(customers.c.email.endswith('test.com'))
)
).where(
orders.c.id == items.c.order_id
).where(
items.c.name == 'test product'
)
expected = """
DELETE FROM orders
USING items
WHERE orders.customer_id IN
(SELECT customers.id
FROM customers
WHERE (customers.email LIKE '%%' || 'test.com'))
AND orders.id = items.order_id
AND items.name = 'test product'"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_subquery(stub_redshift_dialect):
del_stmt = sa.delete(
orders
).where(
orders.c.customer_id.in_(
sa.select(
[customers.c.id]
).where(customers.c.email.endswith('test.com'))
)
)
expected = """
DELETE FROM orders
WHERE orders.customer_id IN
(SELECT customers.id
FROM customers
WHERE (customers.email LIKE '%%' || 'test.com'))"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_on_subquerycomma(stub_redshift_dialect):
del_stmt = sa.delete(
ham
).where(
ham.c.id.in_(
sa.select(
[hammy_spam.c.ham_id]
)
)
)
expected = """
DELETE FROM ham
WHERE ham.id IN
(SELECT "ham, spam".ham_id
FROM "ham, spam")"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_on_comma(stub_redshift_dialect):
del_stmt = sa.delete(ham).where(ham.c.id == hammy_spam.c.ham_id)
expected = """
DELETE FROM ham USING "ham, spam"
WHERE ham.id = "ham, spam".ham_id"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_on_alias(stub_redshift_dialect):
parent_ = sa.alias(product)
del_stmt = sa.delete(
product
).where(product.c.parent_id == parent_.c.id)
expected = """
DELETE FROM products
USING products AS products_1
WHERE products.parent_id = products_1.id"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
def test_delete_stmt_with_comma_subquery_alias_join(stub_redshift_dialect):
parent_ = sa.alias(product)
del_stmt = sa.delete(
items
).where(
items.c.order_id == orders.c.id
).where(
orders.c.customer_id.in_(
sa.select([customers.c.id]).where(
customers.c.email.endswith('test.com')
)
)
).where(
items.c.product_id == product.c.id
).where(
product.c.parent_id == parent_.c.id
).where(
parent_.c.id != hammy_spam.c.ham_id
)
expected = """
DELETE FROM items
USING orders, products, products AS products_1, "ham, spam"
WHERE items.order_id = orders.id
AND orders.customer_id IN
(SELECT customers.id
FROM customers
WHERE (customers.email LIKE '%%' || 'test.com'))
AND items.product_id = products.id
AND products.parent_id = products_1.id
AND products_1.id != "ham, spam".ham_id"""
assert clean(compile_query(del_stmt, stub_redshift_dialect)) == \
clean(expected)
| {
"content_hash": "168d833ce179d519d41b5aa56ddf57df",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 75,
"avg_line_length": 29.244897959183675,
"alnum_prop": 0.6157245871132822,
"repo_name": "sqlalchemy-redshift/sqlalchemy-redshift",
"id": "62089c04bdb576a253fe2d91edcd02e515543785",
"size": "8598",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_delete_stmt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187150"
},
{
"name": "Shell",
"bytes": "523"
}
],
"symlink_target": ""
} |
import datetime
from decimal import Decimal
import logging as std_logging
import pyelasticsearch
import requests
from django.conf import settings
from django.test import TestCase
from haystack import connections, reset_search_queries
from haystack import indexes
from haystack.inputs import AutoQuery
from haystack.models import SearchResult
from haystack.query import SearchQuerySet, RelatedSearchQuerySet, SQ
from haystack.utils import log as logging
from haystack.utils.loading import UnifiedIndex
from core.models import (MockModel, AnotherMockModel,
AFourthMockModel, ASixthMockModel)
from core.tests.mocks import MockSearchResult
test_pickling = True
try:
import cPickle as pickle
except ImportError:
try:
import pickle
except ImportError:
test_pickling = False
def clear_elasticsearch_index():
# Wipe it clean.
print 'Clearing out Elasticsearch...'
raw_es = pyelasticsearch.ElasticSearch(settings.HAYSTACK_CONNECTIONS['default']['URL'])
try:
raw_es.delete_index(settings.HAYSTACK_CONNECTIONS['default']['INDEX_NAME'])
raw_es.refresh()
except (requests.RequestException, pyelasticsearch.ElasticHttpError):
pass
class ElasticsearchMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='author', faceted=True)
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return MockModel
class ElasticsearchMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
month = indexes.CharField(indexed=False)
pub_date = indexes.DateField(model_attr='pub_date')
def prepare_month(self, obj):
return "%02d" % obj.pub_date.month
def get_model(self):
return MockModel
class ElasticsearchMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='foo', document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return MockModel
class ElasticsearchAnotherMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return AnotherMockModel
def prepare_text(self, obj):
return u"You might be searching for the user %s" % obj.author
class ElasticsearchBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True,
template_name='search/indexes/core/mockmodel_template.txt'
)
author = indexes.CharField(model_attr='author', weight=2.0)
editor = indexes.CharField(model_attr='editor')
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return AFourthMockModel
class ElasticsearchRoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, default='')
name = indexes.CharField()
is_active = indexes.BooleanField()
post_count = indexes.IntegerField()
average_rating = indexes.FloatField()
price = indexes.DecimalField()
pub_date = indexes.DateField()
created = indexes.DateTimeField()
tags = indexes.MultiValueField()
sites = indexes.MultiValueField()
def get_model(self):
return MockModel
def prepare(self, obj):
prepped = super(ElasticsearchRoundTripSearchIndex, self).prepare(obj)
prepped.update({
'text': 'This is some example text.',
'name': 'Mister Pants',
'is_active': True,
'post_count': 25,
'average_rating': 3.6,
'price': Decimal('24.99'),
'pub_date': datetime.date(2009, 11, 21),
'created': datetime.datetime(2009, 11, 21, 21, 31, 00),
'tags': ['staff', 'outdoor', 'activist', 'scientist'],
'sites': [3, 5, 1],
})
return prepped
class ElasticsearchComplexFacetsMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, default='')
name = indexes.CharField(faceted=True)
is_active = indexes.BooleanField(faceted=True)
post_count = indexes.IntegerField()
post_count_i = indexes.FacetIntegerField(facet_for='post_count')
average_rating = indexes.FloatField(faceted=True)
pub_date = indexes.DateField(faceted=True)
created = indexes.DateTimeField(faceted=True)
sites = indexes.MultiValueField(faceted=True)
def get_model(self):
return MockModel
class ElasticsearchAutocompleteMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='foo', document=True)
name = indexes.CharField(model_attr='author')
pub_date = indexes.DateField(model_attr='pub_date')
text_auto = indexes.EdgeNgramField(model_attr='foo')
name_auto = indexes.EdgeNgramField(model_attr='author')
def get_model(self):
return MockModel
class ElasticsearchSpatialSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='name', document=True)
location = indexes.LocationField()
def prepare_location(self, obj):
return "%s,%s" % (obj.lat, obj.lon)
def get_model(self):
return ASixthMockModel
class ElasticsearchSearchBackendTestCase(TestCase):
def setUp(self):
super(ElasticsearchSearchBackendTestCase, self).setUp()
# Wipe it clean.
self.raw_es = pyelasticsearch.ElasticSearch(settings.HAYSTACK_CONNECTIONS['default']['URL'])
clear_elasticsearch_index()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchMockSearchIndex()
self.smtmmi = ElasticsearchMaintainTypeMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sb = connections['default'].get_backend()
self.sample_objs = []
for i in xrange(1, 4):
mock = MockModel()
mock.id = i
mock.author = 'daniel%s' % i
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections['default']._index = self.old_ui
super(ElasticsearchSearchBackendTestCase, self).tearDown()
def raw_search(self, query):
try:
return self.raw_es.search('*:*', index=settings.HAYSTACK_CONNECTIONS['default']['INDEX_NAME'])
except (requests.RequestException, pyelasticsearch.ElasticHttpError):
return {}
def test_non_silent(self):
bad_sb = connections['default'].backend('bad', URL='http://omg.wtf.bbq:1000/', INDEX_NAME='whatver', SILENTLY_FAIL=False, TIMEOUT=1)
try:
bad_sb.update(self.smmi, self.sample_objs)
self.fail()
except:
pass
try:
bad_sb.remove('core.mockmodel.1')
self.fail()
except:
pass
try:
bad_sb.clear()
self.fail()
except:
pass
try:
bad_sb.search('foo')
self.fail()
except:
pass
def test_update(self):
self.sb.update(self.smmi, self.sample_objs)
# Check what Elasticsearch thinks is there.
self.assertEqual(self.raw_search('*:*')['hits']['total'], 3)
self.assertEqual(sorted([res['_source'] for res in self.raw_search('*:*')['hits']['hits']], cmp=lambda x, y: cmp(x['id'], y['id'])), [
{
'django_id': '1',
'django_ct': 'core.mockmodel',
'name': 'daniel1',
'name_exact': 'daniel1',
'text': 'Indexed!\n1',
'pub_date': '2009-02-24T00:00:00',
'id': 'core.mockmodel.1'
},
{
'django_id': '2',
'django_ct': 'core.mockmodel',
'name': 'daniel2',
'name_exact': 'daniel2',
'text': 'Indexed!\n2',
'pub_date': '2009-02-23T00:00:00',
'id': 'core.mockmodel.2'
},
{
'django_id': '3',
'django_ct': 'core.mockmodel',
'name': 'daniel3',
'name_exact': 'daniel3',
'text': 'Indexed!\n3',
'pub_date': '2009-02-22T00:00:00',
'id': 'core.mockmodel.3'
}
])
def test_remove(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*')['hits']['total'], 3)
self.sb.remove(self.sample_objs[0])
self.assertEqual(self.raw_search('*:*')['hits']['total'], 2)
self.assertEqual([res['_source'] for res in self.raw_search('*:*')['hits']['hits']], [
{
'django_id': '2',
'django_ct': 'core.mockmodel',
'name': 'daniel2',
'name_exact': 'daniel2',
'text': 'Indexed!\n2',
'pub_date': '2009-02-23T00:00:00',
'id': 'core.mockmodel.2'
},
{
'django_id': '3',
'django_ct': 'core.mockmodel',
'name': 'daniel3',
'name_exact': 'daniel3',
'text': 'Indexed!\n3',
'pub_date': '2009-02-22T00:00:00',
'id': 'core.mockmodel.3'
}
])
def test_clear(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 3)
self.sb.clear()
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 0)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 3)
self.sb.clear([AnotherMockModel])
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 3)
self.sb.clear([MockModel])
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 0)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 3)
self.sb.clear([AnotherMockModel, MockModel])
self.assertEqual(self.raw_search('*:*').get('hits', {}).get('total', 0), 0)
def test_search(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*')['hits']['total'], 3)
self.assertEqual(self.sb.search(''), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('*:*')['hits'], 3)
self.assertEqual([result.pk for result in self.sb.search('*:*')['results']], [u'2', u'1', u'3'])
self.assertEqual(self.sb.search('', highlight=True), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('Index', highlight=True)['hits'], 3)
self.assertEqual([result.highlighted for result in self.sb.search('Index', highlight=True)['results']],
[[u'<em>Indexed</em>!\n2'], [u'<em>Indexed</em>!\n1'], [u'<em>Indexed</em>!\n3']])
self.assertEqual(self.sb.search('Indx')['hits'], 0)
self.assertEqual(self.sb.search('indax')['spelling_suggestion'], None)
self.assertEqual(self.sb.search('Indx', spelling_query='indexy')['spelling_suggestion'], None)
self.assertEqual(self.sb.search('', facets=['name']), {'hits': 0, 'results': []})
results = self.sb.search('Index', facets=['name'])
self.assertEqual(results['hits'], 3)
self.assertEqual(results['facets']['fields']['name'], [('daniel3', 1), ('daniel2', 1), ('daniel1', 1)])
self.assertEqual(self.sb.search('', date_facets={'pub_date': {'start_date': datetime.date(2008, 1, 1), 'end_date': datetime.date(2009, 4, 1), 'gap_by': 'month', 'gap_amount': 1}}), {'hits': 0, 'results': []})
results = self.sb.search('Index', date_facets={'pub_date': {'start_date': datetime.date(2008, 1, 1), 'end_date': datetime.date(2009, 4, 1), 'gap_by': 'month', 'gap_amount': 1}})
self.assertEqual(results['hits'], 3)
self.assertEqual(results['facets']['dates']['pub_date'], [(datetime.datetime(2009, 2, 1, 0, 0), 3)])
self.assertEqual(self.sb.search('', query_facets=[('name', '[* TO e]')]), {'hits': 0, 'results': []})
results = self.sb.search('Index', query_facets=[('name', '[* TO e]')])
self.assertEqual(results['hits'], 3)
self.assertEqual(results['facets']['queries'], {u'name': 3})
self.assertEqual(self.sb.search('', narrow_queries=set(['name:daniel1'])), {'hits': 0, 'results': []})
results = self.sb.search('Index', narrow_queries=set(['name:daniel1']))
self.assertEqual(results['hits'], 1)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.sb.search(u'index', result_class=MockSearchResult)['results'][0], MockSearchResult))
# Check the use of ``limit_to_registered_models``.
self.assertEqual(self.sb.search('', limit_to_registered_models=False), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('*:*', limit_to_registered_models=False)['hits'], 3)
self.assertEqual(sorted([result.pk for result in self.sb.search('*:*', limit_to_registered_models=False)['results']]), ['1', '2', '3'])
# Stow.
old_limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False
self.assertEqual(self.sb.search(''), {'hits': 0, 'results': []})
self.assertEqual(self.sb.search('*:*')['hits'], 3)
self.assertEqual(sorted([result.pk for result in self.sb.search('*:*')['results']]), ['1', '2', '3'])
# Restore.
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models
def test_more_like_this(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*')['hits']['total'], 3)
# A functional MLT example with enough data to work is below. Rely on
# this to ensure the API is correct enough.
self.assertEqual(self.sb.more_like_this(self.sample_objs[0])['hits'], 0)
self.assertEqual([result.pk for result in self.sb.more_like_this(self.sample_objs[0])['results']], [])
def test_build_schema(self):
old_ui = connections['default'].get_unified_index()
(content_field_name, mapping) = self.sb.build_schema(old_ui.all_searchfields())
self.assertEqual(content_field_name, 'text')
self.assertEqual(len(mapping), 4)
self.assertEqual(mapping, {
'text': {'index': 'analyzed', 'term_vector': 'with_positions_offsets', 'type': 'string', 'analyzer': 'snowball', 'boost': 1.0, 'store': 'yes'},
'pub_date': {'index': 'analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'date'},
'name': {'index': 'analyzed', 'term_vector': 'with_positions_offsets', 'type': 'string', 'analyzer': 'snowball', 'boost': 1.0, 'store': 'yes'},
'name_exact': {'index': 'not_analyzed', 'term_vector': 'with_positions_offsets', 'boost': 1.0, 'store': 'yes', 'type': 'string'}
})
ui = UnifiedIndex()
ui.build(indexes=[ElasticsearchComplexFacetsMockSearchIndex()])
(content_field_name, mapping) = self.sb.build_schema(ui.all_searchfields())
self.assertEqual(content_field_name, 'text')
self.assertEqual(len(mapping), 15)
self.assertEqual(mapping, {
'name': {'index': 'analyzed', 'term_vector': 'with_positions_offsets', 'type': 'string', 'analyzer': 'snowball', 'boost': 1.0, 'store': 'yes'},
'is_active_exact': {'index': 'not_analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'boolean'},
'created': {'index': 'analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'date'},
'post_count': {'index': 'analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'long'},
'created_exact': {'index': 'not_analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'date'},
'sites_exact': {'index': 'not_analyzed', 'term_vector': 'with_positions_offsets', 'boost': 1.0, 'store': 'yes', 'type': 'string'},
'is_active': {'index': 'analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'boolean'},
'sites': {'index': 'analyzed', 'term_vector': 'with_positions_offsets', 'type': 'string', 'analyzer': 'snowball', 'boost': 1.0, 'store': 'yes'},
'post_count_i': {'index': 'not_analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'long'},
'average_rating': {'index': 'analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'float'},
'text': {'index': 'analyzed', 'term_vector': 'with_positions_offsets', 'type': 'string', 'analyzer': 'snowball', 'boost': 1.0, 'store': 'yes'},
'pub_date_exact': {'index': 'not_analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'date'},
'name_exact': {'index': 'not_analyzed', 'term_vector': 'with_positions_offsets', 'boost': 1.0, 'store': 'yes', 'type': 'string'},
'pub_date': {'index': 'analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'date'},
'average_rating_exact': {'index': 'not_analyzed', 'boost': 1.0, 'store': 'yes', 'type': 'float'}
})
def test_verify_type(self):
old_ui = connections['default'].get_unified_index()
ui = UnifiedIndex()
smtmmi = ElasticsearchMaintainTypeMockSearchIndex()
ui.build(indexes=[smtmmi])
connections['default']._index = ui
sb = connections['default'].get_backend()
sb.update(smtmmi, self.sample_objs)
self.assertEqual(sb.search('*:*')['hits'], 3)
self.assertEqual([result.month for result in sb.search('*:*')['results']], [u'02', u'02', u'02'])
connections['default']._index = old_ui
class CaptureHandler(std_logging.Handler):
logs_seen = []
def emit(self, record):
CaptureHandler.logs_seen.append(record)
class FailedElasticsearchSearchBackendTestCase(TestCase):
def setUp(self):
self.sample_objs = []
for i in xrange(1, 4):
mock = MockModel()
mock.id = i
mock.author = 'daniel%s' % i
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
# Stow.
# Point the backend at a URL that doesn't exist so we can watch the
# sparks fly.
self.old_es_url = settings.HAYSTACK_CONNECTIONS['default']['URL']
settings.HAYSTACK_CONNECTIONS['default']['URL'] = "%s/foo/" % self.old_es_url
self.cap = CaptureHandler()
logging.getLogger('haystack').addHandler(self.cap)
import haystack
logging.getLogger('haystack').removeHandler(haystack.stream)
# Setup the rest of the bits.
self.old_ui = connections['default'].get_unified_index()
ui = UnifiedIndex()
self.smmi = ElasticsearchMockSearchIndex()
ui.build(indexes=[self.smmi])
connections['default']._index = ui
self.sb = connections['default'].get_backend()
def tearDown(self):
import haystack
# Restore.
settings.HAYSTACK_CONNECTIONS['default']['URL'] = self.old_es_url
connections['default']._index = self.old_ui
logging.getLogger('haystack').removeHandler(self.cap)
logging.getLogger('haystack').addHandler(haystack.stream)
def test_all_cases(self):
# Prior to the addition of the try/except bits, these would all fail miserably.
self.assertEqual(len(CaptureHandler.logs_seen), 0)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(len(CaptureHandler.logs_seen), 1)
self.sb.remove(self.sample_objs[0])
self.assertEqual(len(CaptureHandler.logs_seen), 2)
self.sb.search('search')
self.assertEqual(len(CaptureHandler.logs_seen), 3)
self.sb.more_like_this(self.sample_objs[0])
self.assertEqual(len(CaptureHandler.logs_seen), 4)
self.sb.clear([MockModel])
self.assertEqual(len(CaptureHandler.logs_seen), 5)
self.sb.clear()
self.assertEqual(len(CaptureHandler.logs_seen), 6)
class LiveElasticsearchSearchQueryTestCase(TestCase):
fixtures = ['initial_data.json']
def setUp(self):
super(LiveElasticsearchSearchQueryTestCase, self).setUp()
# Wipe it clean.
clear_elasticsearch_index()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sb = connections['default'].get_backend()
self.sq = connections['default'].get_query()
# Force indexing of the content.
self.smmi.update()
def tearDown(self):
connections['default']._index = self.old_ui
super(LiveElasticsearchSearchQueryTestCase, self).tearDown()
def test_get_spelling(self):
self.sq.add_filter(SQ(content='Indexy'))
self.assertEqual(self.sq.get_spelling_suggestion(), None)
self.assertEqual(self.sq.get_spelling_suggestion('indexy'), None)
def test_log_query(self):
from django.conf import settings
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Stow.
old_debug = settings.DEBUG
settings.DEBUG = False
len(self.sq.get_results())
self.assertEqual(len(connections['default'].queries), 0)
settings.DEBUG = True
# Redefine it to clear out the cached results.
self.sq = connections['default'].query()
self.sq.add_filter(SQ(name='bar'))
len(self.sq.get_results())
self.assertEqual(len(connections['default'].queries), 1)
self.assertEqual(connections['default'].queries[0]['query_string'], 'name:(bar)')
# And again, for good measure.
self.sq = connections['default'].query()
self.sq.add_filter(SQ(name='bar'))
self.sq.add_filter(SQ(text='moof'))
len(self.sq.get_results())
self.assertEqual(len(connections['default'].queries), 2)
self.assertEqual(connections['default'].queries[0]['query_string'], 'name:(bar)')
self.assertEqual(connections['default'].queries[1]['query_string'], u'(name:(bar) AND text:(moof))')
# Restore.
settings.DEBUG = old_debug
lssqstc_all_loaded = None
class LiveElasticsearchSearchQuerySetTestCase(TestCase):
"""Used to test actual implementation details of the SearchQuerySet."""
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveElasticsearchSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sqs = SearchQuerySet()
self.rsqs = RelatedSearchQuerySet()
# Ugly, but avoiding constant reindexing saves us almost 50% of the runtime.
global lssqstc_all_loaded
if lssqstc_all_loaded is None:
print 'Reloading data...'
lssqstc_all_loaded = True
# Wipe it clean.
clear_elasticsearch_index()
# Force indexing of the content.
self.smmi.update()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
settings.DEBUG = self.old_debug
super(LiveElasticsearchSearchQuerySetTestCase, self).tearDown()
def test_load_all(self):
sqs = self.sqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue(len(sqs) > 0)
self.assertEqual(sqs[0].object.foo, u'In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.')
def test_iter(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
sqs = self.sqs.all()
results = sorted([int(result.pk) for result in sqs])
self.assertEqual(results, range(1, 24))
self.assertEqual(len(connections['default'].queries), 3)
def test_slice(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.sqs.all()
self.assertEqual([int(result.pk) for result in results[1:11]], [7, 12, 17, 1, 6, 11, 16, 23, 5, 10])
self.assertEqual(len(connections['default'].queries), 1)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.sqs.all()
self.assertEqual(int(results[21].pk), 18)
self.assertEqual(len(connections['default'].queries), 1)
def test_count(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
sqs = self.sqs.all()
self.assertEqual(sqs.count(), 23)
self.assertEqual(sqs.count(), 23)
self.assertEqual(len(sqs), 23)
self.assertEqual(sqs.count(), 23)
# Should only execute one query to count the length of the result set.
self.assertEqual(len(connections['default'].queries), 1)
def test_manual_iter(self):
results = self.sqs.all()
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = [int(result.pk) for result in results._manual_iter()]
self.assertEqual(results, [2, 7, 12, 17, 1, 6, 11, 16, 23, 5, 10, 15, 22, 4, 9, 14, 19, 21, 3, 8, 13, 18, 20])
self.assertEqual(len(connections['default'].queries), 3)
def test_fill_cache(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.sqs.all()
self.assertEqual(len(results._result_cache), 0)
self.assertEqual(len(connections['default'].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
self.assertEqual(len(connections['default'].queries), 1)
results._fill_cache(10, 20)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
self.assertEqual(len(connections['default'].queries), 2)
def test_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
self.assertEqual(self.sqs._cache_is_full(), False)
results = self.sqs.all()
fire_the_iterator_and_fill_cache = [result for result in results]
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections['default'].queries), 3)
def test___and__(self):
sqs1 = self.sqs.filter(content='foo')
sqs2 = self.sqs.filter(content='bar')
sqs = sqs1 & sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
self.assertEqual(sqs.query.build_query(), u'((foo) AND (bar))')
# Now for something more complex...
sqs3 = self.sqs.exclude(title='moof').filter(SQ(content='foo') | SQ(content='baz'))
sqs4 = self.sqs.filter(content='bar')
sqs = sqs3 & sqs4
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 3)
self.assertEqual(sqs.query.build_query(), u'(NOT (title:(moof)) AND ((foo) OR (baz)) AND (bar))')
def test___or__(self):
sqs1 = self.sqs.filter(content='foo')
sqs2 = self.sqs.filter(content='bar')
sqs = sqs1 | sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
self.assertEqual(sqs.query.build_query(), u'((foo) OR (bar))')
# Now for something more complex...
sqs3 = self.sqs.exclude(title='moof').filter(SQ(content='foo') | SQ(content='baz'))
sqs4 = self.sqs.filter(content='bar').models(MockModel)
sqs = sqs3 | sqs4
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
self.assertEqual(sqs.query.build_query(), u'((NOT (title:(moof)) AND ((foo) OR (baz))) OR (bar))')
def test_auto_query(self):
# Ensure bits in exact matches get escaped properly as well.
# This will break horrifically if escaping isn't working.
sqs = self.sqs.auto_query('"pants:rule"')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">')
self.assertEqual(sqs.query.build_query(), u'("pants\\:rule")')
self.assertEqual(len(sqs), 0)
# Regressions
def test_regression_proper_start_offsets(self):
sqs = self.sqs.filter(text='index')
self.assertNotEqual(sqs.count(), 0)
id_counts = {}
for item in sqs:
if item.id in id_counts:
id_counts[item.id] += 1
else:
id_counts[item.id] = 1
for key, value in id_counts.items():
if value > 1:
self.fail("Result with id '%s' seen more than once in the results." % key)
def test_regression_raw_search_breaks_slicing(self):
sqs = self.sqs.raw_search('text:index')
page_1 = [result.pk for result in sqs[0:10]]
page_2 = [result.pk for result in sqs[10:20]]
for pk in page_2:
if pk in page_1:
self.fail("Result with id '%s' seen more than once in the results." % pk)
# RelatedSearchQuerySet Tests
def test_related_load_all(self):
sqs = self.rsqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue(len(sqs) > 0)
self.assertEqual(sqs[0].object.foo, u'In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.')
def test_related_load_all_queryset(self):
sqs = self.rsqs.load_all()
self.assertEqual(len(sqs._load_all_querysets), 0)
sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1))
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs._load_all_querysets), 1)
self.assertEqual(sorted([obj.object.id for obj in sqs]), range(2, 24))
sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=10))
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs._load_all_querysets), 1)
self.assertEqual([obj.object.id for obj in sqs], [12, 17, 11, 16, 23, 15, 22, 14, 19, 21, 13, 18, 20])
self.assertEqual([obj.object.id for obj in sqs[10:20]], [13, 18, 20])
def test_related_iter(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
sqs = self.rsqs.all()
results = [int(result.pk) for result in sqs]
self.assertEqual(results, [2, 7, 12, 17, 1, 6, 11, 16, 23, 5, 10, 15, 22, 4, 9, 14, 19, 21, 3, 8, 13, 18, 20])
self.assertEqual(len(connections['default'].queries), 4)
def test_related_slice(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.rsqs.all()
self.assertEqual([int(result.pk) for result in results[1:11]], [7, 12, 17, 1, 6, 11, 16, 23, 5, 10])
self.assertEqual(len(connections['default'].queries), 3)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.rsqs.all()
self.assertEqual(int(results[21].pk), 18)
self.assertEqual(len(connections['default'].queries), 4)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.rsqs.all()
self.assertEqual([int(result.pk) for result in results[20:30]], [13, 18, 20])
self.assertEqual(len(connections['default'].queries), 4)
def test_related_manual_iter(self):
results = self.rsqs.all()
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = sorted([int(result.pk) for result in results._manual_iter()])
self.assertEqual(results, range(1, 24))
self.assertEqual(len(connections['default'].queries), 4)
def test_related_fill_cache(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.rsqs.all()
self.assertEqual(len(results._result_cache), 0)
self.assertEqual(len(connections['default'].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
self.assertEqual(len(connections['default'].queries), 1)
results._fill_cache(10, 20)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
self.assertEqual(len(connections['default'].queries), 2)
def test_related_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
self.assertEqual(self.rsqs._cache_is_full(), False)
results = self.rsqs.all()
fire_the_iterator_and_fill_cache = [result for result in results]
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections['default'].queries), 5)
def test_quotes_regression(self):
sqs = self.sqs.auto_query(u"44°48'40''N 20°28'32''E")
# Should not have empty terms.
self.assertEqual(sqs.query.build_query(), u"(44\xb048'40''N 20\xb028'32''E)")
# Should not cause Elasticsearch to 500.
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('blazing')
self.assertEqual(sqs.query.build_query(), u'(blazing)')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('blazing saddles')
self.assertEqual(sqs.query.build_query(), u'(blazing saddles)')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('"blazing saddles')
self.assertEqual(sqs.query.build_query(), u'(\\"blazing saddles)')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('"blazing saddles"')
self.assertEqual(sqs.query.build_query(), u'("blazing saddles")')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing saddles"')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing saddles")')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing \'saddles"')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing \'saddles")')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing \'\'saddles"')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing \'\'saddles")')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing \'\'saddles"\'')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing \'\'saddles" \')')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing \'\'saddles"\'"')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing \'\'saddles" \'\\")')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('"blazing saddles" mel')
self.assertEqual(sqs.query.build_query(), u'("blazing saddles" mel)')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('"blazing saddles" mel brooks')
self.assertEqual(sqs.query.build_query(), u'("blazing saddles" mel brooks)')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing saddles" brooks')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing saddles" brooks)')
self.assertEqual(sqs.count(), 0)
sqs = self.sqs.auto_query('mel "blazing saddles" "brooks')
self.assertEqual(sqs.query.build_query(), u'(mel "blazing saddles" \\"brooks)')
self.assertEqual(sqs.count(), 0)
def test_query_generation(self):
sqs = self.sqs.filter(SQ(content=AutoQuery("hello world")) | SQ(title=AutoQuery("hello world")))
self.assertEqual(sqs.query.build_query(), u"((hello world) OR title:(hello world))")
def test_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
sqs = self.sqs.all()
self.assertTrue(isinstance(sqs[0], SearchResult))
# Custom class.
sqs = self.sqs.result_class(MockSearchResult).all()
self.assertTrue(isinstance(sqs[0], MockSearchResult))
# Reset to default.
sqs = self.sqs.result_class(None).all()
self.assertTrue(isinstance(sqs[0], SearchResult))
class LiveElasticsearchMoreLikeThisTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveElasticsearchMoreLikeThisTestCase, self).setUp()
# Wipe it clean.
clear_elasticsearch_index()
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchMockModelSearchIndex()
self.sammi = ElasticsearchAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.smmi, self.sammi])
connections['default']._index = self.ui
self.sqs = SearchQuerySet()
self.smmi.update()
self.sammi.update()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
super(LiveElasticsearchMoreLikeThisTestCase, self).tearDown()
def test_more_like_this(self):
mlt = self.sqs.more_like_this(MockModel.objects.get(pk=1))
self.assertEqual(mlt.count(), 4)
self.assertEqual([result.pk for result in mlt], [u'2', u'6', u'16', u'23'])
self.assertEqual(len([result.pk for result in mlt]), 4)
alt_mlt = self.sqs.filter(name='daniel3').more_like_this(MockModel.objects.get(pk=2))
self.assertEqual(alt_mlt.count(), 6)
self.assertEqual([result.pk for result in alt_mlt], [u'2', u'6', u'16', u'23', u'1', u'11'])
self.assertEqual(len([result.pk for result in alt_mlt]), 6)
alt_mlt_with_models = self.sqs.models(MockModel).more_like_this(MockModel.objects.get(pk=1))
self.assertEqual(alt_mlt_with_models.count(), 4)
self.assertEqual([result.pk for result in alt_mlt_with_models], [u'2', u'6', u'16', u'23'])
self.assertEqual(len([result.pk for result in alt_mlt_with_models]), 4)
if hasattr(MockModel.objects, 'defer'):
# Make sure MLT works with deferred bits.
mi = MockModel.objects.defer('foo').get(pk=1)
self.assertEqual(mi._deferred, True)
deferred = self.sqs.models(MockModel).more_like_this(mi)
self.assertEqual(deferred.count(), 0)
self.assertEqual([result.pk for result in deferred], [])
self.assertEqual(len([result.pk for result in deferred]), 0)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.sqs.result_class(MockSearchResult).more_like_this(MockModel.objects.get(pk=1))[0], MockSearchResult))
class LiveElasticsearchAutocompleteTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveElasticsearchAutocompleteTestCase, self).setUp()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchAutocompleteMockModelSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sqs = SearchQuerySet()
# Wipe it clean.
clear_elasticsearch_index()
# Reboot the schema.
self.sb = connections['default'].get_backend()
self.sb.setup()
self.smmi.update()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
super(LiveElasticsearchAutocompleteTestCase, self).tearDown()
def test_build_schema(self):
self.sb = connections['default'].get_backend()
content_name, mapping = self.sb.build_schema(self.ui.all_searchfields())
self.assertEqual(mapping, {
'name_auto': {
'index': 'analyzed',
'term_vector': 'with_positions_offsets',
'type': 'string',
'analyzer': 'edgengram_analyzer',
'boost': 1.0,
'store': 'yes'
},
'text': {
'index': 'analyzed',
'term_vector': 'with_positions_offsets',
'type': 'string',
'analyzer': 'snowball',
'boost': 1.0,
'store': 'yes'
},
'pub_date': {
'index': 'analyzed',
'boost': 1.0,
'store': 'yes',
'type': 'date'
},
'name': {
'index': 'analyzed',
'term_vector': 'with_positions_offsets',
'type': 'string',
'analyzer': 'snowball',
'boost': 1.0,
'store': 'yes'
},
'text_auto': {
'index': 'analyzed',
'term_vector': 'with_positions_offsets',
'type': 'string',
'analyzer': 'edgengram_analyzer',
'boost': 1.0,
'store': 'yes'
}
})
def test_autocomplete(self):
autocomplete = self.sqs.autocomplete(text_auto='mod')
self.assertEqual(autocomplete.count(), 5)
self.assertEqual([result.pk for result in autocomplete], [u'1', u'12', u'14', u'6', u'7'])
self.assertTrue('mod' in autocomplete[0].text.lower())
self.assertTrue('mod' in autocomplete[1].text.lower())
self.assertTrue('mod' in autocomplete[2].text.lower())
self.assertTrue('mod' in autocomplete[3].text.lower())
self.assertTrue('mod' in autocomplete[4].text.lower())
self.assertEqual(len([result.pk for result in autocomplete]), 5)
# Test multiple words.
autocomplete_2 = self.sqs.autocomplete(text_auto='your mod')
self.assertEqual(autocomplete_2.count(), 3)
self.assertEqual([result.pk for result in autocomplete_2], ['1', '14', '6'])
self.assertTrue('your' in autocomplete_2[0].text.lower())
self.assertTrue('mod' in autocomplete_2[0].text.lower())
self.assertTrue('your' in autocomplete_2[1].text.lower())
self.assertTrue('mod' in autocomplete_2[1].text.lower())
self.assertTrue('your' in autocomplete_2[2].text.lower())
self.assertTrue('mod' in autocomplete_2[2].text.lower())
self.assertEqual(len([result.pk for result in autocomplete_2]), 3)
# Test multiple fields.
autocomplete_3 = self.sqs.autocomplete(text_auto='Django', name_auto='dan')
self.assertEqual(autocomplete_3.count(), 4)
self.assertEqual([result.pk for result in autocomplete_3], ['12', '1', '14', '22'])
self.assertEqual(len([result.pk for result in autocomplete_3]), 4)
class LiveElasticsearchRoundTripTestCase(TestCase):
def setUp(self):
super(LiveElasticsearchRoundTripTestCase, self).setUp()
# Wipe it clean.
clear_elasticsearch_index()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.srtsi = ElasticsearchRoundTripSearchIndex()
self.ui.build(indexes=[self.srtsi])
connections['default']._index = self.ui
self.sb = connections['default'].get_backend()
self.sqs = SearchQuerySet()
# Fake indexing.
mock = MockModel()
mock.id = 1
self.sb.update(self.srtsi, [mock])
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
super(LiveElasticsearchRoundTripTestCase, self).tearDown()
def test_round_trip(self):
results = self.sqs.filter(id='core.mockmodel.1')
# Sanity check.
self.assertEqual(results.count(), 1)
# Check the individual fields.
result = results[0]
self.assertEqual(result.id, 'core.mockmodel.1')
self.assertEqual(result.text, 'This is some example text.')
self.assertEqual(result.name, 'Mister Pants')
self.assertEqual(result.is_active, True)
self.assertEqual(result.post_count, 25)
self.assertEqual(result.average_rating, 3.6)
self.assertEqual(result.price, u'24.99')
self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))
self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))
self.assertEqual(result.tags, ['staff', 'outdoor', 'activist', 'scientist'])
self.assertEqual(result.sites, [3, 5, 1])
if test_pickling:
class LiveElasticsearchPickleTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveElasticsearchPickleTestCase, self).setUp()
# Wipe it clean.
clear_elasticsearch_index()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchMockModelSearchIndex()
self.sammi = ElasticsearchAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.smmi, self.sammi])
connections['default']._index = self.ui
self.sqs = SearchQuerySet()
self.smmi.update()
self.sammi.update()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
super(LiveElasticsearchPickleTestCase, self).tearDown()
def test_pickling(self):
results = self.sqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
class ElasticsearchBoostBackendTestCase(TestCase):
def setUp(self):
super(ElasticsearchBoostBackendTestCase, self).setUp()
# Wipe it clean.
self.raw_es = pyelasticsearch.ElasticSearch(settings.HAYSTACK_CONNECTIONS['default']['URL'])
clear_elasticsearch_index()
# Stow.
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchBoostMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sb = connections['default'].get_backend()
self.sample_objs = []
for i in xrange(1, 5):
mock = AFourthMockModel()
mock.id = i
if i % 2:
mock.author = 'daniel'
mock.editor = 'david'
else:
mock.author = 'david'
mock.editor = 'daniel'
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections['default']._index = self.old_ui
super(ElasticsearchBoostBackendTestCase, self).tearDown()
def raw_search(self, query):
return self.raw_es.search('*:*', index=settings.HAYSTACK_CONNECTIONS['default']['INDEX_NAME'])
def test_boost(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search('*:*')['hits']['total'], 4)
results = SearchQuerySet().filter(SQ(author='daniel') | SQ(editor='daniel'))
self.assertEqual([result.id for result in results], [
'core.afourthmockmodel.3',
'core.afourthmockmodel.1',
'core.afourthmockmodel.2',
'core.afourthmockmodel.4'
])
| {
"content_hash": "01a9c079d248a1af9329aaf58f0e2b30",
"timestamp": "",
"source": "github",
"line_count": 1184,
"max_line_length": 475,
"avg_line_length": 41.880067567567565,
"alnum_prop": 0.609466381639979,
"repo_name": "ericholscher/django-haystack",
"id": "4353d3f36c08a4e1bae99eb571ff12da6c8c5ee6",
"size": "49612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/elasticsearch_tests/tests/elasticsearch_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "660429"
},
{
"name": "Shell",
"bytes": "836"
}
],
"symlink_target": ""
} |
"""
A skeleton python script which reads from an a flag
and parses command line arguments
"""
import sys
import argparse
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-f", "--foo",
help="path to the input file (read from stdin if omitted)")
args = parser.parse_args()
print("foo: %s" % args.foo)
if __name__ == "__main__":
main()
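# Usage sketch (hedged, assuming the file is saved as skeleton.py):
#   python skeleton.py --foo /path/to/input
# prints "foo: /path/to/input", and `python skeleton.py -h` shows the
# argparse-generated help text taken from the module docstring above.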
| {
"content_hash": "08c3bb2f1ad251d693540ec74d6c173a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.6207729468599034,
"repo_name": "issmirnov/dotfiles",
"id": "4dcd79055b36ccd4b39ecb4a5a44061e17937e54",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vim/templates/skeleton.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "106"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Perl",
"bytes": "43240"
},
{
"name": "PostScript",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "34552"
},
{
"name": "Ruby",
"bytes": "12418"
},
{
"name": "Shell",
"bytes": "108904"
},
{
"name": "Vim Script",
"bytes": "78771"
}
],
"symlink_target": ""
} |
"""A function to build a DetectionModel from configuration."""
import functools
import sys
from object_detection.builders import anchor_generator_builder
from object_detection.builders import box_coder_builder
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import losses_builder
from object_detection.builders import matcher_builder
from object_detection.builders import post_processing_builder
from object_detection.builders import region_similarity_calculator_builder as sim_calc
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import post_processing
from object_detection.core import target_assigner
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.meta_architectures import context_rcnn_meta_arch
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.predictors.heads import mask_head
from object_detection.protos import losses_pb2
from object_detection.protos import model_pb2
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import tf_version
## Feature Extractors for TF
## This section conditionally imports different feature extractors based on the
## Tensorflow version.
##
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.models import center_net_hourglass_feature_extractor
from object_detection.models import center_net_resnet_feature_extractor
from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras
from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras
from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor
from object_detection.predictors import rfcn_keras_box_predictor
if sys.version_info[0] >= 3:
from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn
if tf_version.is_tf1():
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2
from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn
from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor
from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor
from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetGPUFeatureExtractor
from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor
from object_detection.predictors import rfcn_box_predictor
# pylint: enable=g-import-not-at-top
if tf_version.is_tf2():
SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor,
'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor,
'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor,
'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor,
'ssd_resnet50_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor,
'ssd_resnet101_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor,
'ssd_resnet152_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor,
'ssd_efficientnet-b0_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB0BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b1_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB1BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b2_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB2BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b3_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB3BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b4_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB4BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b5_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB5BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b6_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB6BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b7_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB7BiFPNKerasFeatureExtractor,
}
FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
'faster_rcnn_resnet50_keras':
frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor,
'faster_rcnn_resnet101_keras':
frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor,
'faster_rcnn_resnet152_keras':
frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor,
'faster_rcnn_inception_resnet_v2_keras':
frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor,
'faster_rcnn_resnet50_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor,
'faster_rcnn_resnet101_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor,
'faster_rcnn_resnet152_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor,
}
CENTER_NET_EXTRACTOR_FUNCTION_MAP = {
'resnet_v2_50': center_net_resnet_feature_extractor.resnet_v2_50,
'resnet_v2_101': center_net_resnet_feature_extractor.resnet_v2_101,
'resnet_v1_50_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn,
'resnet_v1_101_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn,
'hourglass_104': center_net_hourglass_feature_extractor.hourglass_104,
}
FEATURE_EXTRACTOR_MAPS = [
CENTER_NET_EXTRACTOR_FUNCTION_MAP,
FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP,
SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
]
if tf_version.is_tf1():
SSD_FEATURE_EXTRACTOR_CLASS_MAP = {
'ssd_inception_v2':
SSDInceptionV2FeatureExtractor,
'ssd_inception_v3':
SSDInceptionV3FeatureExtractor,
'ssd_mobilenet_v1':
SSDMobileNetV1FeatureExtractor,
'ssd_mobilenet_v1_fpn':
SSDMobileNetV1FpnFeatureExtractor,
'ssd_mobilenet_v1_ppn':
SSDMobileNetV1PpnFeatureExtractor,
'ssd_mobilenet_v2':
SSDMobileNetV2FeatureExtractor,
'ssd_mobilenet_v2_fpn':
SSDMobileNetV2FpnFeatureExtractor,
'ssd_mobilenet_v2_mnasfpn':
SSDMobileNetV2MnasFPNFeatureExtractor,
'ssd_mobilenet_v3_large':
SSDMobileNetV3LargeFeatureExtractor,
'ssd_mobilenet_v3_small':
SSDMobileNetV3SmallFeatureExtractor,
'ssd_mobilenet_edgetpu':
SSDMobileNetEdgeTPUFeatureExtractor,
'ssd_resnet50_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor,
'ssd_resnet101_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor,
'ssd_resnet152_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor,
'ssd_resnet50_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor,
'ssd_resnet101_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor,
'ssd_resnet152_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor,
'embedded_ssd_mobilenet_v1':
EmbeddedSSDMobileNetV1FeatureExtractor,
'ssd_pnasnet':
SSDPNASNetFeatureExtractor,
'ssd_mobiledet_cpu':
SSDMobileDetCPUFeatureExtractor,
'ssd_mobiledet_dsp':
SSDMobileDetDSPFeatureExtractor,
'ssd_mobiledet_edgetpu':
SSDMobileDetEdgeTPUFeatureExtractor,
'ssd_mobiledet_gpu':
SSDMobileDetGPUFeatureExtractor,
}
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = {
'faster_rcnn_nas':
frcnn_nas.FasterRCNNNASFeatureExtractor,
'faster_rcnn_pnas':
frcnn_pnas.FasterRCNNPNASFeatureExtractor,
'faster_rcnn_inception_resnet_v2':
frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor,
'faster_rcnn_inception_v2':
frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor,
'faster_rcnn_resnet50':
frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
'faster_rcnn_resnet101':
frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
'faster_rcnn_resnet152':
frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor,
}
FEATURE_EXTRACTOR_MAPS = [
SSD_FEATURE_EXTRACTOR_CLASS_MAP,
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP
]
def _check_feature_extractor_exists(feature_extractor_type):
feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS)
if feature_extractor_type not in feature_extractors:
raise ValueError('{} is not supported. See `model_builder.py` for feature '
'extractors compatible with different versions of '
'Tensorflow'.format(feature_extractor_type))
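# Minimal illustration (hedged) of how the maps above feed this check: the
# function simply unions every entry of FEATURE_EXTRACTOR_MAPS and tests
# membership, e.g.
#   _check_feature_extractor_exists('faster_rcnn_resnet101')   # passes under TF1
#   _check_feature_extractor_exists('not_a_real_extractor')    # raises ValueError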
def _build_ssd_feature_extractor(feature_extractor_config,
is_training,
freeze_batchnorm,
reuse_weights=None):
"""Builds a ssd_meta_arch.SSDFeatureExtractor based on config.
Args:
feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto.
is_training: True if this feature extractor is being built for training.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
reuse_weights: if the feature extractor should reuse weights.
Returns:
ssd_meta_arch.SSDFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
feature_type = feature_extractor_config.type
depth_multiplier = feature_extractor_config.depth_multiplier
min_depth = feature_extractor_config.min_depth
pad_to_multiple = feature_extractor_config.pad_to_multiple
use_explicit_padding = feature_extractor_config.use_explicit_padding
use_depthwise = feature_extractor_config.use_depthwise
is_keras = tf_version.is_tf2()
if is_keras:
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
feature_extractor_config.conv_hyperparams)
else:
conv_hyperparams = hyperparams_builder.build(
feature_extractor_config.conv_hyperparams, is_training)
override_base_feature_extractor_hyperparams = (
feature_extractor_config.override_base_feature_extractor_hyperparams)
if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type))
if is_keras:
feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
else:
feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type]
kwargs = {
'is_training':
is_training,
'depth_multiplier':
depth_multiplier,
'min_depth':
min_depth,
'pad_to_multiple':
pad_to_multiple,
'use_explicit_padding':
use_explicit_padding,
'use_depthwise':
use_depthwise,
'override_base_feature_extractor_hyperparams':
override_base_feature_extractor_hyperparams
}
if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'):
kwargs.update({
'replace_preprocessor_with_placeholder':
feature_extractor_config.replace_preprocessor_with_placeholder
})
if feature_extractor_config.HasField('num_layers'):
kwargs.update({'num_layers': feature_extractor_config.num_layers})
if is_keras:
kwargs.update({
'conv_hyperparams': conv_hyperparams,
'inplace_batchnorm_update': False,
'freeze_batchnorm': freeze_batchnorm
})
else:
kwargs.update({
'conv_hyperparams_fn': conv_hyperparams,
'reuse_weights': reuse_weights,
})
if feature_extractor_config.HasField('fpn'):
kwargs.update({
'fpn_min_level':
feature_extractor_config.fpn.min_level,
'fpn_max_level':
feature_extractor_config.fpn.max_level,
'additional_layer_depth':
feature_extractor_config.fpn.additional_layer_depth,
})
if feature_extractor_config.HasField('bifpn'):
kwargs.update({
'bifpn_min_level': feature_extractor_config.bifpn.min_level,
'bifpn_max_level': feature_extractor_config.bifpn.max_level,
'bifpn_num_iterations': feature_extractor_config.bifpn.num_iterations,
'bifpn_num_filters': feature_extractor_config.bifpn.num_filters,
'bifpn_combine_method': feature_extractor_config.bifpn.combine_method,
})
return feature_extractor_class(**kwargs)
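# Usage sketch (hedged): _build_ssd_model below calls this builder with fields
# of a parsed ssd.proto config, roughly as
#   _build_ssd_feature_extractor(
#       feature_extractor_config=ssd_config.feature_extractor,
#       freeze_batchnorm=ssd_config.freeze_batchnorm,
#       is_training=is_training)
# shown here only for orientation; see the actual call site further down.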
def _build_ssd_model(ssd_config, is_training, add_summaries):
"""Builds an SSD detection model based on the model config.
Args:
ssd_config: A ssd.proto object containing the config for the desired
SSDMetaArch.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
SSDMetaArch based on the config.
Raises:
ValueError: If ssd_config.type is not recognized (i.e. not registered in
model_class_map).
"""
num_classes = ssd_config.num_classes
_check_feature_extractor_exists(ssd_config.feature_extractor.type)
# Feature extractor
feature_extractor = _build_ssd_feature_extractor(
feature_extractor_config=ssd_config.feature_extractor,
freeze_batchnorm=ssd_config.freeze_batchnorm,
is_training=is_training)
box_coder = box_coder_builder.build(ssd_config.box_coder)
matcher = matcher_builder.build(ssd_config.matcher)
region_similarity_calculator = sim_calc.build(
ssd_config.similarity_calculator)
encode_background_as_zeros = ssd_config.encode_background_as_zeros
negative_class_weight = ssd_config.negative_class_weight
anchor_generator = anchor_generator_builder.build(
ssd_config.anchor_generator)
if feature_extractor.is_keras_model:
ssd_box_predictor = box_predictor_builder.build_keras(
hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
freeze_batchnorm=ssd_config.freeze_batchnorm,
inplace_batchnorm_update=False,
num_predictions_per_location_list=anchor_generator
.num_anchors_per_location(),
box_predictor_config=ssd_config.box_predictor,
is_training=is_training,
num_classes=num_classes,
add_background_class=ssd_config.add_background_class)
else:
ssd_box_predictor = box_predictor_builder.build(
hyperparams_builder.build, ssd_config.box_predictor, is_training,
num_classes, ssd_config.add_background_class)
image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
ssd_config.post_processing)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler,
expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize
equalization_loss_config = ops.EqualizationLossConfig(
weight=ssd_config.loss.equalization_loss.weight,
exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
matcher,
box_coder,
negative_class_weight=negative_class_weight)
ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
kwargs = {}
return ssd_meta_arch_fn(
is_training=is_training,
anchor_generator=anchor_generator,
box_predictor=ssd_box_predictor,
box_coder=box_coder,
feature_extractor=feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_weight,
localization_loss_weight=localization_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=add_summaries,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=ssd_config.freeze_batchnorm,
inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
add_background_class=ssd_config.add_background_class,
explicit_background_class=ssd_config.explicit_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
use_confidences_as_targets=ssd_config.use_confidences_as_targets,
implicit_example_weight=ssd_config.implicit_example_weight,
equalization_loss_config=equalization_loss_config,
return_raw_detections_during_predict=(
ssd_config.return_raw_detections_during_predict),
**kwargs)
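# Orientation note (hedged): given a parsed model_pb2.DetectionModel proto whose
# `ssd` field is populated, the SSD branch would be constructed roughly as
#   detection_model = _build_ssd_model(model_config.ssd, is_training=True,
#                                      add_summaries=True)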
def _build_faster_rcnn_feature_extractor(
feature_extractor_config, is_training, reuse_weights=True,
inplace_batchnorm_update=False):
"""Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.
Args:
feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
faster_rcnn.proto.
is_training: True if this feature extractor is being built for training.
reuse_weights: if the feature extractor should reuse weights.
inplace_batchnorm_update: Whether to update batch_norm inplace during
training. This is required for batch norm to work correctly on TPUs. When
this is false, user must add a control dependency on
tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
norm moving average parameters.
Returns:
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
if inplace_batchnorm_update:
raise ValueError('inplace batchnorm updates not supported.')
feature_type = feature_extractor_config.type
first_stage_features_stride = (
feature_extractor_config.first_stage_features_stride)
batch_norm_trainable = feature_extractor_config.batch_norm_trainable
if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
feature_type))
feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
return feature_extractor_class(
is_training, first_stage_features_stride,
batch_norm_trainable, reuse_weights=reuse_weights)
def _build_faster_rcnn_keras_feature_extractor(
feature_extractor_config, is_training,
inplace_batchnorm_update=False):
"""Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config.
Args:
feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
faster_rcnn.proto.
is_training: True if this feature extractor is being built for training.
inplace_batchnorm_update: Whether to update batch_norm inplace during
training. This is required for batch norm to work correctly on TPUs. When
this is false, user must add a control dependency on
tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
norm moving average parameters.
Returns:
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
if inplace_batchnorm_update:
raise ValueError('inplace batchnorm updates not supported.')
feature_type = feature_extractor_config.type
first_stage_features_stride = (
feature_extractor_config.first_stage_features_stride)
batch_norm_trainable = feature_extractor_config.batch_norm_trainable
if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
feature_type))
feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
return feature_extractor_class(
is_training, first_stage_features_stride,
batch_norm_trainable)
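# Usage sketch (hedged): _build_faster_rcnn_model below picks between the two
# feature extractor builders based on the TensorFlow version, roughly as
#   if tf_version.is_tf2():
#       extractor = _build_faster_rcnn_keras_feature_extractor(
#           frcnn_config.feature_extractor, is_training,
#           inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
#   else:
#       extractor = _build_faster_rcnn_feature_extractor(...)  # same arguments
# see the actual call site in _build_faster_rcnn_model.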
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
"""Builds a Faster R-CNN or R-FCN detection model based on the model config.
Builds R-FCN model if the second_stage_box_predictor in the config is of type
`rfcn_box_predictor` else builds a Faster R-CNN model.
Args:
frcnn_config: A faster_rcnn.proto object containing the config for the
desired FasterRCNNMetaArch or RFCNMetaArch.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
FasterRCNNMetaArch based on the config.
Raises:
ValueError: If frcnn_config.type is not recognized (i.e. not registered in
model_class_map).
"""
num_classes = frcnn_config.num_classes
image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)
_check_feature_extractor_exists(frcnn_config.feature_extractor.type)
is_keras = tf_version.is_tf2()
if is_keras:
feature_extractor = _build_faster_rcnn_keras_feature_extractor(
frcnn_config.feature_extractor, is_training,
inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
else:
feature_extractor = _build_faster_rcnn_feature_extractor(
frcnn_config.feature_extractor, is_training,
inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
number_of_stages = frcnn_config.number_of_stages
first_stage_anchor_generator = anchor_generator_builder.build(
frcnn_config.first_stage_anchor_generator)
first_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'proposal',
use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
if is_keras:
first_stage_box_predictor_arg_scope_fn = (
hyperparams_builder.KerasLayerHyperparams(
frcnn_config.first_stage_box_predictor_conv_hyperparams))
else:
first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
first_stage_box_predictor_kernel_size = (
frcnn_config.first_stage_box_predictor_kernel_size)
first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
use_static_shapes = frcnn_config.use_static_shapes and (
frcnn_config.use_static_shapes_for_eval or is_training)
first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
is_static=(frcnn_config.use_static_balanced_label_sampler and
use_static_shapes))
first_stage_max_proposals = frcnn_config.first_stage_max_proposals
if (frcnn_config.first_stage_nms_iou_threshold < 0 or
frcnn_config.first_stage_nms_iou_threshold > 1.0):
raise ValueError('iou_threshold not in [0, 1.0].')
if (is_training and frcnn_config.second_stage_batch_size >
first_stage_max_proposals):
raise ValueError('second_stage_batch_size should be no greater than '
'first_stage_max_proposals.')
first_stage_non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=frcnn_config.first_stage_nms_score_threshold,
iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
max_size_per_class=frcnn_config.first_stage_max_proposals,
max_total_size=frcnn_config.first_stage_max_proposals,
use_static_shapes=use_static_shapes,
use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage,
use_combined_nms=frcnn_config.use_combined_nms_in_first_stage)
first_stage_loc_loss_weight = (
frcnn_config.first_stage_localization_loss_weight)
first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight
initial_crop_size = frcnn_config.initial_crop_size
maxpool_kernel_size = frcnn_config.maxpool_kernel_size
maxpool_stride = frcnn_config.maxpool_stride
second_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'detection',
use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
if is_keras:
second_stage_box_predictor = box_predictor_builder.build_keras(
hyperparams_builder.KerasLayerHyperparams,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[1],
box_predictor_config=frcnn_config.second_stage_box_predictor,
is_training=is_training,
num_classes=num_classes)
else:
second_stage_box_predictor = box_predictor_builder.build(
hyperparams_builder.build,
frcnn_config.second_stage_box_predictor,
is_training=is_training,
num_classes=num_classes)
second_stage_batch_size = frcnn_config.second_stage_batch_size
second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=frcnn_config.second_stage_balance_fraction,
is_static=(frcnn_config.use_static_balanced_label_sampler and
use_static_shapes))
(second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
second_stage_localization_loss_weight = (
frcnn_config.second_stage_localization_loss_weight)
second_stage_classification_loss = (
losses_builder.build_faster_rcnn_classification_loss(
frcnn_config.second_stage_classification_loss))
second_stage_classification_loss_weight = (
frcnn_config.second_stage_classification_loss_weight)
second_stage_mask_prediction_loss_weight = (
frcnn_config.second_stage_mask_prediction_loss_weight)
hard_example_miner = None
if frcnn_config.HasField('hard_example_miner'):
hard_example_miner = losses_builder.build_hard_example_miner(
frcnn_config.hard_example_miner,
second_stage_classification_loss_weight,
second_stage_localization_loss_weight)
crop_and_resize_fn = (
ops.matmul_crop_and_resize if frcnn_config.use_matmul_crop_and_resize
else ops.native_crop_and_resize)
clip_anchors_to_image = (
frcnn_config.clip_anchors_to_image)
common_kwargs = {
'is_training':
is_training,
'num_classes':
num_classes,
'image_resizer_fn':
image_resizer_fn,
'feature_extractor':
feature_extractor,
'number_of_stages':
number_of_stages,
'first_stage_anchor_generator':
first_stage_anchor_generator,
'first_stage_target_assigner':
first_stage_target_assigner,
'first_stage_atrous_rate':
first_stage_atrous_rate,
'first_stage_box_predictor_arg_scope_fn':
first_stage_box_predictor_arg_scope_fn,
'first_stage_box_predictor_kernel_size':
first_stage_box_predictor_kernel_size,
'first_stage_box_predictor_depth':
first_stage_box_predictor_depth,
'first_stage_minibatch_size':
first_stage_minibatch_size,
'first_stage_sampler':
first_stage_sampler,
'first_stage_non_max_suppression_fn':
first_stage_non_max_suppression_fn,
'first_stage_max_proposals':
first_stage_max_proposals,
'first_stage_localization_loss_weight':
first_stage_loc_loss_weight,
'first_stage_objectness_loss_weight':
first_stage_obj_loss_weight,
'second_stage_target_assigner':
second_stage_target_assigner,
'second_stage_batch_size':
second_stage_batch_size,
'second_stage_sampler':
second_stage_sampler,
'second_stage_non_max_suppression_fn':
second_stage_non_max_suppression_fn,
'second_stage_score_conversion_fn':
second_stage_score_conversion_fn,
'second_stage_localization_loss_weight':
second_stage_localization_loss_weight,
'second_stage_classification_loss':
second_stage_classification_loss,
'second_stage_classification_loss_weight':
second_stage_classification_loss_weight,
'hard_example_miner':
hard_example_miner,
'add_summaries':
add_summaries,
'crop_and_resize_fn':
crop_and_resize_fn,
'clip_anchors_to_image':
clip_anchors_to_image,
'use_static_shapes':
use_static_shapes,
'resize_masks':
frcnn_config.resize_masks,
'return_raw_detections_during_predict':
frcnn_config.return_raw_detections_during_predict,
'output_final_box_features':
frcnn_config.output_final_box_features
}
if ((not is_keras and isinstance(second_stage_box_predictor,
rfcn_box_predictor.RfcnBoxPredictor)) or
(is_keras and
isinstance(second_stage_box_predictor,
rfcn_keras_box_predictor.RfcnKerasBoxPredictor))):
return rfcn_meta_arch.RFCNMetaArch(
second_stage_rfcn_box_predictor=second_stage_box_predictor,
**common_kwargs)
elif frcnn_config.HasField('context_config'):
context_config = frcnn_config.context_config
common_kwargs.update({
'attention_bottleneck_dimension':
context_config.attention_bottleneck_dimension,
'attention_temperature':
context_config.attention_temperature
})
return context_rcnn_meta_arch.ContextRCNNMetaArch(
initial_crop_size=initial_crop_size,
maxpool_kernel_size=maxpool_kernel_size,
maxpool_stride=maxpool_stride,
second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
**common_kwargs)
else:
return faster_rcnn_meta_arch.FasterRCNNMetaArch(
initial_crop_size=initial_crop_size,
maxpool_kernel_size=maxpool_kernel_size,
maxpool_stride=maxpool_stride,
second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
**common_kwargs)
EXPERIMENTAL_META_ARCH_BUILDER_MAP = {
}
def _build_experimental_model(config, is_training, add_summaries=True):
return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name](
is_training, add_summaries)
# The class ID in the groundtruth/model architecture is usually 0-based while
# the ID in the label map is 1-based. The offset is used to convert between
# the two.
CLASS_ID_OFFSET = 1
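# Illustrative example (not from the original source): a label map entry with
# id 1 maps to class id 1 - CLASS_ID_OFFSET = 0 in the model's groundtruth
# tensors, as done in keypoint_proto_to_params below.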
KEYPOINT_STD_DEV_DEFAULT = 1.0
def keypoint_proto_to_params(kp_config, keypoint_map_dict):
"""Converts CenterNet.KeypointEstimation proto to parameter namedtuple."""
label_map_item = keypoint_map_dict[kp_config.keypoint_class_name]
classification_loss, localization_loss, _, _, _, _, _ = (
losses_builder.build(kp_config.loss))
keypoint_indices = [
keypoint.id for keypoint in label_map_item.keypoints
]
keypoint_labels = [
keypoint.label for keypoint in label_map_item.keypoints
]
keypoint_std_dev_dict = {
label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels
}
if kp_config.keypoint_label_to_std:
for label, value in kp_config.keypoint_label_to_std.items():
keypoint_std_dev_dict[label] = value
keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels]
return center_net_meta_arch.KeypointEstimationParams(
task_name=kp_config.task_name,
class_id=label_map_item.id - CLASS_ID_OFFSET,
keypoint_indices=keypoint_indices,
classification_loss=classification_loss,
localization_loss=localization_loss,
keypoint_labels=keypoint_labels,
keypoint_std_dev=keypoint_std_dev,
task_loss_weight=kp_config.task_loss_weight,
keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight,
keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight,
keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight,
heatmap_bias_init=kp_config.heatmap_bias_init,
keypoint_candidate_score_threshold=(
kp_config.keypoint_candidate_score_threshold),
num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint,
peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size,
unmatched_keypoint_score=kp_config.unmatched_keypoint_score,
box_scale=kp_config.box_scale,
candidate_search_scale=kp_config.candidate_search_scale,
candidate_ranking_mode=kp_config.candidate_ranking_mode,
offset_peak_radius=kp_config.offset_peak_radius,
per_keypoint_offset=kp_config.per_keypoint_offset)
def object_detection_proto_to_params(od_config):
"""Converts CenterNet.ObjectDetection proto to parameter namedtuple."""
loss = losses_pb2.Loss()
# Add a dummy classification loss to avoid the loss_builder throwing an error.
# TODO(yuhuic): update the loss builder to take the classification loss
# directly.
loss.classification_loss.weighted_sigmoid.CopyFrom(
losses_pb2.WeightedSigmoidClassificationLoss())
loss.localization_loss.CopyFrom(od_config.localization_loss)
_, localization_loss, _, _, _, _, _ = (losses_builder.build(loss))
return center_net_meta_arch.ObjectDetectionParams(
localization_loss=localization_loss,
scale_loss_weight=od_config.scale_loss_weight,
offset_loss_weight=od_config.offset_loss_weight,
task_loss_weight=od_config.task_loss_weight)
def object_center_proto_to_params(oc_config):
"""Converts CenterNet.ObjectCenter proto to parameter namedtuple."""
loss = losses_pb2.Loss()
# Add a dummy localization loss to avoid the loss_builder throwing an error.
# TODO(yuhuic): update the loss builder to take the localization loss
# directly.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(oc_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
return center_net_meta_arch.ObjectCenterParams(
classification_loss=classification_loss,
object_center_loss_weight=oc_config.object_center_loss_weight,
heatmap_bias_init=oc_config.heatmap_bias_init,
min_box_overlap_iou=oc_config.min_box_overlap_iou,
max_box_predictions=oc_config.max_box_predictions,
use_labeled_classes=oc_config.use_labeled_classes)
def mask_proto_to_params(mask_config):
"""Converts CenterNet.MaskEstimation proto to parameter namedtuple."""
loss = losses_pb2.Loss()
# Add a dummy localization loss to avoid the loss_builder throwing an error.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(mask_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
return center_net_meta_arch.MaskParams(
classification_loss=classification_loss,
task_loss_weight=mask_config.task_loss_weight,
mask_height=mask_config.mask_height,
mask_width=mask_config.mask_width,
score_threshold=mask_config.score_threshold,
heatmap_bias_init=mask_config.heatmap_bias_init)
def densepose_proto_to_params(densepose_config):
"""Converts CenterNet.DensePoseEstimation proto to parameter namedtuple."""
classification_loss, localization_loss, _, _, _, _, _ = (
losses_builder.build(densepose_config.loss))
return center_net_meta_arch.DensePoseParams(
class_id=densepose_config.class_id,
classification_loss=classification_loss,
localization_loss=localization_loss,
part_loss_weight=densepose_config.part_loss_weight,
coordinate_loss_weight=densepose_config.coordinate_loss_weight,
num_parts=densepose_config.num_parts,
task_loss_weight=densepose_config.task_loss_weight,
upsample_to_input_res=densepose_config.upsample_to_input_res,
heatmap_bias_init=densepose_config.heatmap_bias_init)
def _build_center_net_model(center_net_config, is_training, add_summaries):
"""Build a CenterNet detection model.
Args:
center_net_config: A CenterNet proto object with model configuration.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
CenterNetMetaArch based on the config.
"""
image_resizer_fn = image_resizer_builder.build(
center_net_config.image_resizer)
_check_feature_extractor_exists(center_net_config.feature_extractor.type)
feature_extractor = _build_center_net_feature_extractor(
center_net_config.feature_extractor)
object_center_params = object_center_proto_to_params(
center_net_config.object_center_params)
object_detection_params = None
if center_net_config.HasField('object_detection_task'):
object_detection_params = object_detection_proto_to_params(
center_net_config.object_detection_task)
keypoint_params_dict = None
if center_net_config.keypoint_estimation_task:
label_map_proto = label_map_util.load_labelmap(
center_net_config.keypoint_label_map_path)
keypoint_map_dict = {
item.name: item for item in label_map_proto.item if item.keypoints
}
keypoint_params_dict = {}
keypoint_class_id_set = set()
all_keypoint_indices = []
for task in center_net_config.keypoint_estimation_task:
kp_params = keypoint_proto_to_params(task, keypoint_map_dict)
keypoint_params_dict[task.task_name] = kp_params
all_keypoint_indices.extend(kp_params.keypoint_indices)
if kp_params.class_id in keypoint_class_id_set:
raise ValueError(('Multiple keypoint tasks mapping to the same class id is '
'not allowed: %d' % kp_params.class_id))
else:
keypoint_class_id_set.add(kp_params.class_id)
if len(all_keypoint_indices) > len(set(all_keypoint_indices)):
raise ValueError('Some keypoint indices are used more than once.')
mask_params = None
if center_net_config.HasField('mask_estimation_task'):
mask_params = mask_proto_to_params(center_net_config.mask_estimation_task)
densepose_params = None
if center_net_config.HasField('densepose_estimation_task'):
densepose_params = densepose_proto_to_params(
center_net_config.densepose_estimation_task)
return center_net_meta_arch.CenterNetMetaArch(
is_training=is_training,
add_summaries=add_summaries,
num_classes=center_net_config.num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=object_center_params,
object_detection_params=object_detection_params,
keypoint_params_dict=keypoint_params_dict,
mask_params=mask_params,
densepose_params=densepose_params)
def _build_center_net_feature_extractor(
feature_extractor_config):
"""Build a CenterNet feature extractor from the given config."""
if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP:
raise ValueError('\'{}\' is not a known CenterNet feature extractor type'
.format(feature_extractor_config.type))
return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type](
channel_means=list(feature_extractor_config.channel_means),
channel_stds=list(feature_extractor_config.channel_stds),
bgr_ordering=feature_extractor_config.bgr_ordering
)
META_ARCH_BUILDER_MAP = {
'ssd': _build_ssd_model,
'faster_rcnn': _build_faster_rcnn_model,
'experimental_model': _build_experimental_model,
'center_net': _build_center_net_model
}
def build(model_config, is_training, add_summaries=True):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tensorflow summaries in the model graph.
Returns:
DetectionModel based on the config.
Raises:
ValueError: On invalid meta architecture or model.
"""
if not isinstance(model_config, model_pb2.DetectionModel):
raise ValueError('model_config not of type model_pb2.DetectionModel.')
meta_architecture = model_config.WhichOneof('model')
if meta_architecture not in META_ARCH_BUILDER_MAP:
raise ValueError('Unknown meta architecture: {}'.format(meta_architecture))
else:
build_func = META_ARCH_BUILDER_MAP[meta_architecture]
return build_func(getattr(model_config, meta_architecture), is_training,
add_summaries)
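# Illustrative usage sketch; not part of the original module. The pipeline path
# is hypothetical, and config_util refers to the Object Detection API's config
# helper, which is assumed to be importable alongside this builder.
def _example_build_usage():
  from object_detection.utils import config_util
  configs = config_util.get_configs_from_pipeline_file('path/to/pipeline.config')
  return build(configs['model'], is_training=True)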
| {
"content_hash": "5b2205125b58a327c246be983e09bda3",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 121,
"avg_line_length": 44.55196770938446,
"alnum_prop": 0.7320105999864103,
"repo_name": "tombstone/models",
"id": "d69b7bc7c2546a6461a2d75d3576d725d748d409",
"size": "44841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/object_detection/builders/model_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
} |
"""This module contains the GradingProjectSurvey model.
"""
__authors__ = [
'"Daniel Diniz" <ajaksu@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.modules.gsoc.models.project_survey import ProjectSurvey
class GradingProjectSurvey(ProjectSurvey):
"""Survey for Mentors for each of their StudentProjects.
"""
def __init__(self, *args, **kwargs):
super(GradingProjectSurvey, self).__init__(*args, **kwargs)
self.taking_access = 'org'
| {
"content_hash": "b59bff1f6fead13001c800c0100932f4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 25,
"alnum_prop": 0.6947368421052632,
"repo_name": "MatthewWilkes/mw4068-packaging",
"id": "f3af672b8b7d94a35e4cdcd45e575111622e6e3f",
"size": "1085",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/melange/src/soc/modules/gsoc/models/grading_project_survey.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68827"
},
{
"name": "HTML",
"bytes": "586705"
},
{
"name": "JavaScript",
"bytes": "441502"
},
{
"name": "Python",
"bytes": "2136551"
},
{
"name": "Shell",
"bytes": "5667"
}
],
"symlink_target": ""
} |
from .fgir import *
from .optimize import FlowgraphOptimization
from .error import Warn
import asyncio
class PCodeOp(object):
'''A class interface for creating coroutines.
This helps us keep track of valid computational elements. Every coroutine in
a PCode object should be a method of PCodeOp.'''
@staticmethod
async def _node(in_qs, out_qs, func):
'''A helper function to create coroutines.
`in_qs`: an ordered list of asyncio.Queues() which hold the node's inputs.
`out_qs`: a list of asyncio.Queues() into which the function's output should go
`func`: the function to apply to the inputs which produces the output value'''
# hint: look at asyncio.gather
input_generator = (in_q.get() for in_q in in_qs)
inputs = await asyncio.gather(*input_generator)
# hint: the same return value of the function is put in every output queue
outputs = func(*inputs)
for out_q in out_qs:
await out_q.put(outputs)
@staticmethod
async def forward(in_qs, out_qs):
def f(input):
return input
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def libraryfunction(in_qs, out_qs, function_ref):
def f(*inputs):
return function_ref(*inputs)
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def librarymethod(in_qs, out_qs, method_ref):
def f(*inputs):
return method_ref.__get__(inputs[0])(*inputs[1:])
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def input(in_qs, out_qs):
def f(input):
return input
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def literal(out_qs, value_ref):
def f(*inputs):
return value_ref
await PCodeOp._node([], out_qs, f)  # a literal has no input queues
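# Illustrative wiring sketch; not part of the original module. A single
# `forward` op reads one value from its input queue and fans it out to two
# output queues.
async def _example_forward_op():
    in_q, out_a, out_b = asyncio.Queue(), asyncio.Queue(), asyncio.Queue()
    op = PCodeOp.forward([in_q], [out_a, out_b])
    await in_q.put(42)
    await op
    return await out_a.get(), await out_b.get()  # -> (42, 42)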
class PCode(object):
def __init__(self):
self.inputs = [] # ordered
self.outputs = [] # ordered
self.ops = [] # unordered
self.retvals = None
def add_op(self, pcode_op_coroutine):
self.ops.append( pcode_op_coroutine )
async def input_generator(self,input_args):
gen_coroutines = [q.put(i) for q,i in zip(self.inputs, input_args)]
await asyncio.gather(*gen_coroutines)
async def output_collector(self, future):
col_coroutines = [q.get() for q in self.outputs]
output_args = await asyncio.gather(*col_coroutines)
self.retvals = output_args
return output_args
async def driver(self, input_args, future):
_,value,*_ = await asyncio.gather(self.input_generator(input_args), self.output_collector(future), *self.ops)
future.set_result(value)
def run(self, *input_args):
return_future = asyncio.Future()
asyncio.ensure_future(self.driver(input_args, return_future))
loop = asyncio.get_event_loop()
loop.set_debug(True)
loop.run_until_complete(return_future)
return return_future.result()[0]
class PCodeGenerator(FlowgraphOptimization):
def __init__(self):
self.pcodes = {}
def visit(self, flowgraph):
pc = PCode()
# Create asyncio queues for every edge
# qs is indexed by tuples of the source and destination node ids
# for the inputs of a component, the source should be None
qs = {} # { (src,dst)=>asyncio.Queue(), ... }
# Populate qs by iterating over inputs of every node
# hint: destination nodes should be in flowgraph nodes
# hint: sources are their inputs
visited_nodes = set()
def pop_qs(dest_nodes):
src_nodes = set()
for dest_node in dest_nodes:
input_nodes = set(flowgraph.pre(dest_node))
for input_node in input_nodes:
qs[(input_node, dest_node)] = asyncio.Queue()
src_nodes = src_nodes.union(input_nodes)
visited_nodes.add(dest_node)
# Treat the source nodes as new destination nodes and call pop_qs recursively
new_dest_nodes = src_nodes.difference(visited_nodes)
if new_dest_nodes:
pop_qs(new_dest_nodes)
pop_qs(flowgraph.outputs)
# Add an extra input queue for each component input
component_inputs = []
for dst in flowgraph.inputs:
q = asyncio.Queue()
component_inputs.append(q)
qs[(None,dst)] = q
qs[(None,dst)]._endpoints = (None,dst)
pc.inputs = component_inputs
# Now create all the coroutines from the nodes.
for (node_id,node) in flowgraph.nodes.items():
node_in_qs = [qs[src_id,node_id] for src_id in node.inputs]
out_ids = [i for (i,n) in flowgraph.nodes.items() if node_id in n.inputs]
node_out_qs = [qs[node_id,dst_id] for dst_id in out_ids]
if node.type==FGNodeType.forward:
pc.add_op( PCodeOp.forward(node_in_qs, node_out_qs) )
elif node.type==FGNodeType.libraryfunction:
pc.add_op( PCodeOp.libraryfunction(node_in_qs, node_out_qs, node.ref) )
elif node.type==FGNodeType.librarymethod:
pc.add_op( PCodeOp.librarymethod(node_in_qs, node_out_qs, node.ref) )
elif node.type==FGNodeType.input:
# Add an extra input queue for each component input
node_in_q = qs[(None,node_id)]
pc.add_op( PCodeOp.input([node_in_q], node_out_qs) )
elif node.type==FGNodeType.output:
# Remove the output node and just use its input queues directly.
pc.outputs = node_in_qs
elif node.type==FGNodeType.literal:
pc.add_op( PCodeOp.literal(node_out_qs, node.ref) )
self.pcodes[flowgraph.name] = pc
self.queues = qs
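# End-to-end sketch; not part of the original module. `flowgraph` is assumed to
# be produced by the pype front end (see .fgir); this helper only illustrates
# the call order and does not construct one.
def _example_run_flowgraph(flowgraph, *inputs):
    gen = PCodeGenerator()
    gen.visit(flowgraph)                # builds the queues and coroutines
    pcode = gen.pcodes[flowgraph.name]
    return pcode.run(*inputs)           # drives the event loop to completion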
| {
"content_hash": "10aebf7a3719254d43312f765f551dd1",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 113,
"avg_line_length": 34.88235294117647,
"alnum_prop": 0.6616076447442384,
"repo_name": "cs207-project/TimeSeries",
"id": "4dda97de986be0dd5084786be9feb8dfbb87adfb",
"size": "5337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pype/pcode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "351769"
},
{
"name": "C++",
"bytes": "125053"
},
{
"name": "Jupyter Notebook",
"bytes": "145413"
},
{
"name": "Python",
"bytes": "261946"
},
{
"name": "Shell",
"bytes": "1437"
}
],
"symlink_target": ""
} |
"""The Python implementation of the GRPC helloworld.Greeter server."""
from concurrent import futures
import time
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
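# Illustrative client-side sketch; not part of the original server module. It
# mirrors the stock gRPC helloworld client and assumes the server above is
# listening on localhost:50051.
def example_say_hello(name='world'):
    channel = grpc.insecure_channel('localhost:50051')
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    return stub.SayHello(helloworld_pb2.HelloRequest(name=name)).message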
if __name__ == '__main__':
serve()
| {
"content_hash": "9e0456422bc510484e69bc6152af4c5a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 24.1875,
"alnum_prop": 0.6847545219638242,
"repo_name": "SolarisYan/grpc-hello-with-gateway",
"id": "1be5449bd3973f06cd88ce8f395fb8b182ee4bcb",
"size": "2302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/helloworld/greeter_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "938"
},
{
"name": "Protocol Buffer",
"bytes": "11025"
},
{
"name": "Python",
"bytes": "8106"
}
],
"symlink_target": ""
} |
import os
import string
from contextlib import contextmanager
from zope.interface import implementer
from twisted.plugin import IPlugin
from twisted.python import lockfile
from comet.icomet import IHandler, IHasOptions
import comet.log as log
# Used when building filenames to avoid over-writing.
FILENAME_PAD = "_"
def string_to_filename(input_string):
# Strip weird, confusing or special characters from input_string so that
# we can safely use it as a filename.
# Replace "/" and "\" with "_" for readability.
# Allow ".", but not as the first character.
if input_string[0] == ".":
input_string = input_string[1:]
return "".join(
x
for x in input_string.replace("/", "_").replace("\\", "_")
if x in string.digits + string.ascii_letters + "_."
)
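# Illustrative example (not from the original source):
# string_to_filename("ivo://example.org/test#1") -> "ivo__example.org_test1"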
@contextmanager
def event_file(ivoid, dirname=None):
# Return a file object into which we can write an event.
# If a directory is specified, write into that; otherwise, use the cwd.
# We use a lock to ensure we don't clobber other files with the same name.
if not dirname:
dirname = os.getcwd()
fname = os.path.join(dirname, string_to_filename(ivoid))
lock = lockfile.FilesystemLock(string_to_filename(ivoid) + "-lock")
lock.lock()
try:
while os.path.exists(fname):
fname += FILENAME_PAD
with open(fname, "w") as f:
yield f
finally:
lock.unlock()
# Event handlers must implement IPlugin and IHandler.
# Implementing IHasOptions enables us to use command line options.
@implementer(IPlugin, IHandler, IHasOptions)
class EventWriter(object):
# Simple example of an event handler plugin. This saves the events to
# disk.
# The name attribute enables the user to specify plugins they want on the
# command line.
name = "save-event"
def __init__(self):
self.directory = os.getcwd()
# When the handler is called, it is passed an instance of
# comet.utility.xml.xml_document.
def __call__(self, event):
"""
Save an event to disk.
"""
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with event_file(event.element.attrib["ivorn"], self.directory) as f:
log.debug("Writing to %s" % (f.name,))
f.write(event.raw_bytes.decode(event.encoding))
def get_options(self):
return [("directory", self.directory, "Directory in which to save events")]
def set_option(self, name, value):
if name == "directory":
self.directory = value
# This instance of the handler is what actually constitutes our plugin.
save_event = EventWriter()
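# Illustrative sketch; not part of the original plugin. Comet's option plumbing
# calls these hooks roughly as follows ("/tmp/voevents" is a hypothetical path):
#
#   save_event.set_option("directory", "/tmp/voevents")
#   save_event.get_options()
#   # -> [("directory", "/tmp/voevents", "Directory in which to save events")]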
| {
"content_hash": "d25aba045e132669e242db16d63187f1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 83,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.653219837157661,
"repo_name": "jdswinbank/Comet",
"id": "dd057bb584f3a0dff2b7ea83bac59a95191504bb",
"size": "2776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comet/plugins/eventwriter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "175811"
},
{
"name": "Shell",
"bytes": "108"
}
],
"symlink_target": ""
} |
from civictechprojects.models import Event, EventProject, Project
def get_event_legacy_projects(event):
legacy_slugs = event.event_legacy_organization.slugs()
if legacy_slugs:
return Project.objects.filter(project_organization__name__in=legacy_slugs, is_searchable=True)
def migrate_event_projects(*args):
# TODO: Migrate existing video events to link_video
events = Event.objects.all()
for event in events:
print('Migrating projects for event: ' + event.__str__())
# Get list of legacy projects for event
event_projects = get_event_legacy_projects(event)
if event_projects:
for project in event_projects:
# Check to see if event project is already created
event_project = EventProject.get(event.id, project.id)
if not event_project:
print('Migrating project: ' + project.__str__())
# Create Event Project from Project
event_project = EventProject.create(project.project_creator, event, project)
event_project.save()
# Remove event tag from Project
project.project_organization.remove(event.event_legacy_organization)
else:
print('{project} has already been migrated'.format(project=project.__str__()))
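# Sketch of how this helper is typically wired into a Django migration module
# (illustrative only; the dependency name is hypothetical and the real
# migration lives elsewhere in this package):
#
#   from django.db import migrations
#
#   class Migration(migrations.Migration):
#       dependencies = [('civictechprojects', '0001_initial')]
#       operations = [migrations.RunPython(migrate_event_projects)]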
| {
"content_hash": "670a533c4bae8bc2f78010cde81f365c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 102,
"avg_line_length": 46.13333333333333,
"alnum_prop": 0.6221098265895953,
"repo_name": "DemocracyLab/CivicTechExchange",
"id": "05bd7a960653ccdde23a11426bb8da18102467b7",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "civictechprojects/migrations/data_migrations/migrate_event_projects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5441"
},
{
"name": "HTML",
"bytes": "27002"
},
{
"name": "JavaScript",
"bytes": "868669"
},
{
"name": "Procfile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "493545"
},
{
"name": "SCSS",
"bytes": "106029"
},
{
"name": "Shell",
"bytes": "20355"
}
],
"symlink_target": ""
} |
"""This file provides tests to benchmark performance sqlite/file queue
on specific hardware. User can easily evaluate the performance by running this
file directly via `python run_benchmark.py`
"""
from persistqueue import SQLiteQueue
from persistqueue import Queue
import tempfile
import time
BENCHMARK_COUNT = 100
def time_it(func):
def _exec(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print(
"\t{} => time used: {:.4f} seconds.".format(
func.__doc__,
(end - start)))
return _exec
class FileQueueBench(object):
"""Benchmark File queue performance."""
def __init__(self, prefix=None):
self.path = prefix
@time_it
def benchmark_file_write(self):
"""Writing <BENCHMARK_COUNT> items."""
self.path = tempfile.mkdtemp('b_file_10000')
q = Queue(self.path)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
assert q.qsize() == BENCHMARK_COUNT
@time_it
def benchmark_file_read_write_false(self):
"""Writing and reading <BENCHMARK_COUNT> items(1 task_done)."""
self.path = tempfile.mkdtemp('b_file_10000')
q = Queue(self.path)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
for i in range(BENCHMARK_COUNT):
q.get()
q.task_done()
assert q.qsize() == 0
@time_it
def benchmark_file_read_write_autosave(self):
"""Writing and reading <BENCHMARK_COUNT> items(autosave)."""
self.path = tempfile.mkdtemp('b_file_10000')
q = Queue(self.path, autosave=True)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
for i in range(BENCHMARK_COUNT):
q.get()
assert q.qsize() == 0
@time_it
def benchmark_file_read_write_true(self):
"""Writing and reading <BENCHMARK_COUNT> items(many task_done)."""
self.path = tempfile.mkdtemp('b_file_10000')
q = Queue(self.path)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
for i in range(BENCHMARK_COUNT):
q.get()
q.task_done()
assert q.qsize() == 0
@classmethod
def run(cls):
print(cls.__doc__)
ins = cls()
for name in sorted(cls.__dict__):
if name.startswith('benchmark'):
func = getattr(ins, name)
func()
class Sqlite3QueueBench(object):
"""Benchmark Sqlite3 queue performance."""
@time_it
def benchmark_sqlite_write(self):
"""Writing <BENCHMARK_COUNT> items."""
self.path = tempfile.mkdtemp('b_sql_10000')
q = SQLiteQueue(self.path, auto_commit=False)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
assert q.qsize() == BENCHMARK_COUNT
@time_it
def benchmark_sqlite_read_write_false(self):
"""Writing and reading <BENCHMARK_COUNT> items(1 task_done)."""
self.path = tempfile.mkdtemp('b_sql_10000')
q = SQLiteQueue(self.path, auto_commit=False)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
for i in range(BENCHMARK_COUNT):
q.get()
q.task_done()
assert q.qsize() == 0
@time_it
def benchmark_sqlite_read_write_true(self):
"""Writing and reading <BENCHMARK_COUNT> items(many task_done)."""
self.path = tempfile.mkdtemp('b_sql_10000')
q = SQLiteQueue(self.path, auto_commit=True)
for i in range(BENCHMARK_COUNT):
q.put('bench%d' % i)
for i in range(BENCHMARK_COUNT):
q.get()
q.task_done()
assert q.qsize() == 0
@classmethod
def run(cls):
print(cls.__doc__)
ins = cls()
for name in sorted(cls.__dict__):
if name.startswith('benchmark'):
func = getattr(ins, name)
func()
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
BENCHMARK_COUNT = int(sys.argv[1])
print("<BENCHMARK_COUNT> = {}".format(BENCHMARK_COUNT))
file_bench = FileQueueBench()
file_bench.run()
sql_bench = Sqlite3QueueBench()
sql_bench.run()
| {
"content_hash": "e9b9dd39f414e61d7c1d1dc0bc2e67b4",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 78,
"avg_line_length": 28.164473684210527,
"alnum_prop": 0.5631861714552675,
"repo_name": "peter-wangxu/persist-queue",
"id": "9ee228a9799ddbd507f154ae85735058cdfb138f",
"size": "4281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark/run_benchmark.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "112045"
},
{
"name": "Shell",
"bytes": "362"
}
],
"symlink_target": ""
} |
""" Test for EncoderDecoder model architecture.
First of all, we define possible types of encoders, embeddings and decoders.
Later every combination of encoder, embedding, decoder is combined into one model and we initialize it.
"""
# pylint: disable=import-error, no-name-in-module
# pylint: disable=redefined-outer-name
import pytest
from batchflow.models.tf import EncoderDecoder, VariationalAutoEncoder
from batchflow.models.tf import ResNet, MobileNet, DenseNet, Inception_v4
MODELS = [
EncoderDecoder,
VariationalAutoEncoder
]
ENCODERS = [
{'num_stages': 2},
{'base': ResNet, 'num_blocks': [2]*3, 'filters': [13]*3, 'downsample': [[], [0], [0], [0]]},
{'base': DenseNet, 'num_layers': [2]*3, 'growth_rate': 13},
{'num_stages': 2, 'blocks': {'base': ResNet.block, 'filters':[13]*2}},
]
EMBEDDINGS = [
{},
{'base': MobileNet.block, 'width_factor': 2},
{'base': Inception_v4.inception_c_block, 'filters': [1, 1, 1, 1]}
]
DECODERS = [
{},
{'num_stages': 2, 'factor': 9, 'skip': False, 'upsample': {'layout': 'X'}},
{'num_stages': 4, 'blocks': {'layout': 'cnacna', 'filters': [23]*4}},
{'num_stages': 4, 'blocks': {'base': DenseNet.block, 'num_layers': [2]*4, 'growth_rate': 23}},
]
@pytest.fixture()
def base_config():
""" Fixture to hold default configuration. """
config = {
'inputs': {'images': {'shape': (16, 16, 1)},
'masks': {'name': 'targets', 'shape': (16, 16, 1)}},
'initial_block': {'inputs': 'images'},
'loss': 'mse'
}
return config
@pytest.mark.slow
@pytest.mark.parametrize('model', MODELS)
@pytest.mark.parametrize('decoder', DECODERS)
@pytest.mark.parametrize('embedding', EMBEDDINGS)
@pytest.mark.parametrize('encoder', ENCODERS)
def test_first(base_config, model, encoder, embedding, decoder):
""" Create encoder-decoder architecture from every possible combination
of encoder, embedding, decoder, listed in global variables defined above.
"""
base_config.update({'body/encoder': encoder,
'body/embedding': embedding,
'body/decoder': decoder})
_ = model(base_config)
| {
"content_hash": "a80a97ec5a907a768711ab9f68004ed6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 103,
"avg_line_length": 32.52238805970149,
"alnum_prop": 0.6273519963285911,
"repo_name": "analysiscenter/dataset",
"id": "ed754db699813b213f4068203ba8ca06524d849e",
"size": "2179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batchflow/tests/encoder_decoder_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "711078"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
from echomesh.pattern.Pattern import Pattern
class Choose(Pattern):
HELP = """Choose one of several patterns to display."""
SETTINGS = {
'choose': {
'default': 0,
'help': 'Selects which specific pattern to display',
},
}
def _evaluate(self):
length = len(self._patterns)
def restrict(size):
return int(max(0, min(length - 1, size)))
choose = self.get('choose')
return self._patterns[restrict(choose)].evaluate()
| {
"content_hash": "9c3ba5b97163bd8c2e1a72e1ae112bd5",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 27.59090909090909,
"alnum_prop": 0.6194398682042833,
"repo_name": "rec/echomesh",
"id": "66cfb63ae7c5bdb9b2c9febb5be03c707e97796a",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/python/echomesh/pattern/Choose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5723427"
},
{
"name": "C++",
"bytes": "10326191"
},
{
"name": "CSS",
"bytes": "2048"
},
{
"name": "HTML",
"bytes": "22355"
},
{
"name": "Java",
"bytes": "25383"
},
{
"name": "M4",
"bytes": "32321"
},
{
"name": "Makefile",
"bytes": "215120"
},
{
"name": "Objective-C",
"bytes": "93003"
},
{
"name": "Objective-C++",
"bytes": "394207"
},
{
"name": "Python",
"bytes": "1117634"
},
{
"name": "Shell",
"bytes": "735767"
}
],
"symlink_target": ""
} |
""" Defines an addon mixin for classes """
import projex
from projex import errors
class AddonMixin(object):
@classmethod
def _initAddons(cls, recurse=True):
"""
Initializes the addons for this manager.
"""
for addon_module in cls.addonModules(recurse):
projex.importmodules(addon_module)
@classmethod
def addons(cls, recurse=True):
"""
Returns a dictionary containing all the available addons
for this mixin class. If the optional recurse flag is set to True,
then all the base classes will be searched for the given addon as well.
:param recurse | <bool>
:return {<str> name: <variant> addon, ..}
"""
cls.initAddons()
prop = '_{0}__addons'.format(cls.__name__)
out = {}
# lookup base classes
if recurse:
for base in cls.__bases__:
if issubclass(base, AddonManager):
out.update(base.addons(recurse))
# always use the highest level for any given key
out.update(getattr(cls, prop, {}))
return out
@classmethod
def addonModules(cls, recurse=True):
"""
Returns all the modules that this addon class uses to load plugins
from.
:param recurse | <bool>
:return [<str> || <module>, ..]
"""
prop = '_{0}__addon_modules'.format(cls.__name__)
out = set()
# lookup base classes
if recurse:
for base in cls.__bases__:
if issubclass(base, AddonManager):
out.update(base.addonModules(recurse))
# always use the highest level for any given key
out.update(getattr(cls, prop, set()))
return out
@classmethod
def addonName(cls):
return getattr(cls, '_{0}__addonName'.format(cls.__name__), '')
@classmethod
def byName(cls, name, recurse=True, default=None):
"""
Returns the addon whose name matches the inputted name. If
the optional recurse flag is set to True, then all the base classes
will be searched for the given addon as well. If no addon is found,
the default is returned.
:param name | <str>
recurse | <bool>
default | <variant>
"""
cls.initAddons()
prop = '_{0}__addons'.format(cls.__name__)
try:
return getattr(cls, prop, {})[name]
except KeyError:
if recurse:
for base in cls.__bases__:
if issubclass(base, AddonManager):
return base.byName(name, recurse)
return default
@classmethod
def initAddons(cls, recurse=True):
"""
Loads different addon modules for this class. This method
should not be overloaded in a subclass as it also manages the loaded
state to avoid duplicate loads. Instead, you can re-implement the
_initAddons method for custom loading.
:param recurse | <bool>
"""
key = '_{0}__addons_loaded'.format(cls.__name__)
if getattr(cls, key, False):
return
cls._initAddons(recurse)
setattr(cls, key, True)
@classmethod
def registerAddon(cls, name, addon, force=False):
"""
Registers the inputted addon to the class.
:param name | <str>
addon | <variant>
"""
prop = '_{0}__addons'.format(cls.__name__)
cmds = getattr(cls, prop, {})
if name in cmds and not force:
raise errors.AddonAlreadyExists(cls, name, addon)
cmds[name] = addon
try:
if issubclass(addon, cls):
setattr(addon, '_{0}__addonName'.format(addon.__name__), name)
except StandardError:
pass
setattr(cls, prop, cmds)
@classmethod
def registerAddonModule(cls, module):
"""
Registers a module to use to import addon subclasses from.
:param module | <str> || <module>
"""
prop = '_{0}__addon_modules'.format(cls.__name__)
mods = getattr(cls, prop, set())
mods.add(module)
setattr(cls, prop, mods)
@classmethod
def unregisterAddon(cls, name):
"""
Unregisters the addon defined by the given name from the class.
:param name | <str>
"""
prop = '_{0}__addons'.format(cls.__name__)
cmds = getattr(cls, prop, {})
cmds.pop(name, None)
@classmethod
def unregisterAddonModule(cls, module):
"""
Unregisters the module to use to import addon subclasses from.
:param module | <str> || <module>
"""
prop = '_{0}__addon_modules'.format(cls.__name__)
mods = getattr(cls, prop, set())
try:
mods.remove(module)
except KeyError:
pass
# backward compatibility support
AddonManager = AddonMixin
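# Minimal usage sketch; not part of the original module. The class names are
# purely illustrative.
def _example_addon_usage():
    class Exporter(AddonManager):
        pass

    class CsvExporter(Exporter):
        pass

    Exporter.registerAddon('csv', CsvExporter)
    return Exporter.byName('csv'), CsvExporter.addonName()  # (CsvExporter, 'csv')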
| {
"content_hash": "08208ccb618bffc44d830a779c3e0d4b",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 30.485207100591715,
"alnum_prop": 0.5382375776397516,
"repo_name": "bitesofcode/projex",
"id": "69687a278f5e49f5780ed18bcc97c74e413f521f",
"size": "5152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "projex/addon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "336382"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pis_system.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "86dd2a60157983e94961acfed22ef532",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.70995670995671,
"repo_name": "rumz/pis-system",
"id": "5b55631f7d54717440bd95b3aa55a1a3836f3153",
"size": "253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pis_system/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "954"
},
{
"name": "HTML",
"bytes": "13171"
},
{
"name": "JavaScript",
"bytes": "6045"
},
{
"name": "Python",
"bytes": "54395"
}
],
"symlink_target": ""
} |
"""XNLI: The Cross-Lingual NLI Corpus."""
import collections
import csv
import os
from etils import epath
import six
import tensorflow_datasets.public_api as tfds
_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_DATA_URL = 'https://cims.nyu.edu/~sbowman/xnli/XNLI-1.0.zip'
_LANGUAGES = ('ar', 'bg', 'de', 'el', 'en', 'es', 'fr', 'hi', 'ru', 'sw', 'th',
'tr', 'ur', 'vi', 'zh')
class Xnli(tfds.core.GeneratorBasedBuilder):
"""XNLI: The Cross-Lingual NLI Corpus. Version 1.0."""
VERSION = tfds.core.Version('1.1.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'premise':
tfds.features.Translation(languages=_LANGUAGES,),
'hypothesis':
tfds.features.TranslationVariableLanguages(
languages=_LANGUAGES,),
'label':
tfds.features.ClassLabel(
names=['entailment', 'neutral', 'contradiction']),
}),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage='https://www.nyu.edu/projects/bowman/xnli/',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
data_dir = os.path.join(dl_dir, 'XNLI-1.0')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={'filepath': os.path.join(data_dir, 'xnli.test.tsv')}),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={'filepath': os.path.join(data_dir, 'xnli.dev.tsv')}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with epath.Path(filepath).open() as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row['pairID']].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row['language']: row['sentence1'] for row in rows}
hypothesis = {row['language']: row['sentence2'] for row in rows}
yield rows[0]['pairID'], {
'premise': premise,
'hypothesis': hypothesis,
'label': rows[0]['gold_label'],
}
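# Typical consumption sketch; not part of the original module. It goes through
# the public tfds API rather than instantiating the builder class directly.
def _example_load_xnli():
  ds = tfds.load('xnli', split='validation')
  for example in ds.take(1):
    return example['label'], example['hypothesis']['language']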
| {
"content_hash": "e46252ae169d0c10f303638d7dc79b88",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 35.072916666666664,
"alnum_prop": 0.6171666171666171,
"repo_name": "tensorflow/datasets",
"id": "2e345746a1fbd9638f1e29df6add196877ad98d1",
"size": "3979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/text/xnli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from paddle.fluid.tests.unittests.test_conv3d_op import (
TestCase1,
TestConv3DOp,
TestWith1x1,
TestWithGroup1,
TestWithGroup2,
TestWithInput1x1Filter1x1,
)
class TestMKLDNN(TestConv3DOp):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestMKLDNNCase1(TestCase1):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestMKLDNNGroup1(TestWithGroup1):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestMKLDNNGroup2(TestWithGroup2):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestMKLDNNWith1x1(TestWith1x1):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestConv3DOp_AsyPadding_MKLDNN(TestConv3DOp):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
def init_paddings(self):
self.pad = [1, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestConv3DOp_Same_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN):
def init_paddings(self):
self.pad = [0, 0, 0]
self.padding_algorithm = "SAME"
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
class TestConv3DOp_Valid_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN):
def init_paddings(self):
self.pad = [1, 1, 1]
self.padding_algorithm = "VALID"
def init_kernel_type(self):
self.use_mkldnn = True
self.data_format = "NCHW"
self.dtype = np.float32
if __name__ == '__main__':
from paddle import enable_static
enable_static()
unittest.main()
| {
"content_hash": "44c0888b5f20a46c36361da8552b4b2f",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 65,
"avg_line_length": 23.893617021276597,
"alnum_prop": 0.6375779162956366,
"repo_name": "PaddlePaddle/Paddle",
"id": "df6c099e1377a5c9d5694151c77ddd12317ee375",
"size": "2857",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
"""
Python ICE-CASCADE combined glacial-fluvial-hillslope landscape evolution model
"""
import argparse
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
import sys
import logging
from . import __version__ as py_ice_cascade_version
class main_model():
"""
Composite landscape evolution model. Integrates glacial, fluvial, and
hillslope model components and handles input-output.
Arguments:
hillslope: initialized hillslope model component, expect child of
py_ice_cascade.hillslope.base_model class
uplift: initialized uplift model component, expect child of
py_ice_cascade.uplift.base_model class
x: numpy vector, x-coordinate, [m]
y: numpy vector, y-coordinate, [m]
z_rx: grid, initial bedrock elevation, [m]
time_start: scalar, starting time, [a]
time_step: scalar, topographic model time step, [a]
num_steps: scalar, total steps in simulation, i.e. duration, [1]
out_steps: list, step numbers to write output, 0 is initial state, [1]
"""
def __init__(self, hillslope, uplift,
x, y, z_rx, time_start, time_step, num_steps, out_steps):
# setup logger
self._logger = logging.getLogger(__name__)
self._logger.info('Setting model parameters')
# user-defined parameters
self._model_hill = hillslope
self._model_uplift = uplift
self._x = np.copy(x)
self._y = np.copy(y)
self._z_rx = np.copy(z_rx)
self._time_start = time_start
self._time_step = time_step
self._num_steps = num_steps
self._out_steps = np.copy(out_steps)
# automatic parameters
self._delta = None
self._time = None
self._step = None
# TODO: test for a regular grid here
def _create_netcdf(self, file_name, clobber):
"""
Create new (empty) netCDF for model state and parameters
Arguments:
file_name: String, path to which file should be saved
clobber: Boolean, enable/disable overwriting output file
Model components are responsible for initializing their own output
variables, using the expected .init_netcdf method.
"""
self._logger.info("Creating input file: {}".format(file_name))
# compression/chunking parameters for time-dependent grid vars
zlib = False
complevel = 1 # 1->fastest, 9->best
shuffle = True
chunksizes = (1, self._y.size, self._x.size)
# create file
nc = netCDF4.Dataset(file_name, "w", format="NETCDF4", clobber=clobber)
# global attributes
nc.version = py_ice_cascade_version
nc.time_start = self._time_start
nc.time_step = self._time_step
nc.num_steps = self._num_steps
nc.out_steps = "see step variable"
# create dimensions
nc.createDimension('x', size=self._x.size)
nc.createDimension('y', size=self._y.size)
nc.createDimension('time', size=self._out_steps.size)
# create variables, populate constants
nc.createVariable('x', np.double, dimensions=('x'))
nc['x'].long_name = 'x coordinate'
nc['x'].units = 'm'
nc['x'][:] = self._x
nc.createVariable('y', np.double, dimensions=('y'))
nc['y'].long_name = 'y coordinate'
nc['y'].units = 'm'
nc['y'][:] = self._y
nc.createVariable('time', np.double, dimensions=('time'))
nc['time'].long_name = 'time coordinate'
nc['time'].units = 'a'
nc['time'].start = self._time_start
nc['time'].step = self._time_step
nc.createVariable('step', np.int64, dimensions=('time'))
nc['step'].long_name = 'model time step'
nc['step'].units = '1'
nc['step'].num_steps = self._num_steps
nc['step'].out_steps = self._out_steps
nc.createVariable('z_rx', np.double, dimensions=('time', 'y', 'x'),
zlib=zlib, complevel=complevel, shuffle=shuffle, chunksizes=chunksizes)
nc['z_rx'].long_name = 'bedrock surface elevation'
nc['z_rx'].units = 'm'
# initialize output for component models
self._model_hill.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
self._model_uplift.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
# finalize
nc.close()
def _to_netcdf(self, file_name):
"""
Append model state and parameters to netCDF file
Arguments:
file_name: String, path to which file should be saved
"""
if self._step in self._out_steps:
self._logger.info('Write output for time={:.2f}, step={}'.format(
self._time, self._step))
ii = list(self._out_steps).index(self._step)
nc = netCDF4.Dataset(file_name, "a")
nc['time'][ii] = self._time
nc['step'][ii] = self._step
nc['z_rx'][ii,:,:] = self._z_rx
# write data for model components
self._model_hill.to_netcdf(nc, ii)
self._model_uplift.to_netcdf(nc, ii)
# finalize
nc.close()
def run(self, file_name, clobber=False):
"""
Run model simulation, save results to file
Arguments:
file_name: String, path to which results should be saved
clobber: Boolean, allow overwriting output file
"""
self._logger.info('Initialize simulation')
# init automatic parameters
self._delta = np.abs(self._x[1]-self._x[0])
self._time_end = self._time_start+self._time_step*(self._num_steps-1)
# init model integration loop
self._time = self._time_start
self._step = 0
self._create_netcdf(file_name, clobber)
self._to_netcdf(file_name)
while self._step < self._num_steps:
# synchronize model components
self._model_hill.set_height(self._z_rx)
self._model_uplift.set_height(self._z_rx)
# run climate component simulations
# gather climate component results
# run erosion-deposition component simulations
self._model_hill.run(self._time_step)
self._model_uplift.run(self._time, self._time+self._time_step)
# gather erosion-deposition-uplift component results
dzdt = (self._model_hill.get_height()
+ self._model_uplift.get_height()
- 2*self._z_rx)
# run isostasy component simulations
# gather isostasy results
# advance time step
self._z_rx += dzdt
self._time += self._time_step
self._step += 1
# write output and/or display model state
self._to_netcdf(file_name)
self._logger.info('Simulation complete')
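# Minimal driver sketch; not part of the original module. `hillslope` and
# `uplift` stand for initialized component models (py_ice_cascade.hillslope /
# py_ice_cascade.uplift), which are assumed to be constructed elsewhere.
def _example_run(hillslope, uplift):
    x = np.linspace(0.0, 1000.0, 101)
    y = np.linspace(0.0, 1000.0, 101)
    z_rx = np.zeros((y.size, x.size))
    model = main_model(hillslope, uplift, x, y, z_rx, time_start=0.0,
                       time_step=10.0, num_steps=11, out_steps=[0, 5, 10])
    model.run('results.nc', clobber=True)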
| {
"content_hash": "c3030a23b62acf153e7c6d45956af5ea",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 83,
"avg_line_length": 34.68316831683168,
"alnum_prop": 0.5779332001141878,
"repo_name": "keithfma/py_ice_cascade",
"id": "f331f6932ec9c5e3773987868a485cfcbaa27a52",
"size": "7006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_ice_cascade/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "659"
},
{
"name": "Python",
"bytes": "58223"
}
],
"symlink_target": ""
} |
from pycadf import cadftype
from pycadf import identifier
# Metric types can appear outside a cadf:Event record context; in these cases
# a typeURI may be used to identify the cadf:Metric data type.
TYPE_URI_METRIC = cadftype.CADF_VERSION_1_0_0 + 'metric'
METRIC_KEYNAME_METRICID = "metricId"
METRIC_KEYNAME_UNIT = "unit"
METRIC_KEYNAME_NAME = "name"
#METRIC_KEYNAME_ANNOTATIONS = "annotations"
METRIC_KEYNAMES = [METRIC_KEYNAME_METRICID,
METRIC_KEYNAME_UNIT,
METRIC_KEYNAME_NAME
#METRIC_KEYNAME_ANNOTATIONS
]
class Metric(cadftype.CADFAbstractType):
metricId = cadftype.ValidatorDescriptor(METRIC_KEYNAME_METRICID,
lambda x: identifier.is_valid(x))
unit = cadftype.ValidatorDescriptor(METRIC_KEYNAME_UNIT,
lambda x: isinstance(x, basestring))
name = cadftype.ValidatorDescriptor(METRIC_KEYNAME_NAME,
lambda x: isinstance(x, basestring))
def __init__(self, metricId=None, unit=None, name=None):
# Metric.id
setattr(self, METRIC_KEYNAME_METRICID,
metricId or identifier.generate_uuid())
# Metric.unit
if unit is not None:
setattr(self, METRIC_KEYNAME_UNIT, unit)
# Metric.name
if name is not None:
setattr(self, METRIC_KEYNAME_NAME, name)
# TODO(mrutkows): add a mechanism for annotations; OpenStack may choose not
# to support this "extension mechanism", as it is not required (and not
# critical in many audit contexts)
def set_annotations(self, value):
raise NotImplementedError()
# setattr(self, METRIC_KEYNAME_ANNOTATIONS, value)
# self validate cadf:Metric type against schema
def is_valid(self):
# Existence test: the id and unit attributes must both exist
return (
hasattr(self, METRIC_KEYNAME_METRICID) and
hasattr(self, METRIC_KEYNAME_UNIT)
)
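# Minimal usage sketch; not part of the original module. The unit and name
# values are purely illustrative.
def _example_metric_usage():
    metric = Metric(unit='ms', name='api.response.time')
    return metric.is_valid()  # True: metricId is auto-generated and unit is set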
| {
"content_hash": "e1c8d9b4f0f48fb1d6e5308c40524e2e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 37.21818181818182,
"alnum_prop": 0.6297020029311187,
"repo_name": "saschpe/pycadf",
"id": "190c4909661a8e1833ba72a70f4ad532eca82d1a",
"size": "2694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycadf/metric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import pickle
import subprocess
import uuid
from .voc_eval import voc_eval
from model.config import cfg
class pascal_voc(imdb):
def __init__(self, image_set, year, devkit_path=None):
imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(cfg.TRAIN.CACHE_PATH, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
folder_name = os.path.dirname(os.path.abspath(filename))
      if not os.path.exists(folder_name):
        os.makedirs(folder_name)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
          if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
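    # With the 2007 ("07") metric, voc_eval averages precision sampled at the
    # eleven recall points 0.0, 0.1, ..., 1.0; from 2010 on it integrates the
    # area under the full precision/recall curve instead.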
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.pascal_voc import pascal_voc
d = pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed;
embed()
| {
"content_hash": "04871d50eb1947d6c4c4066743e3df66",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 85,
"avg_line_length": 36.28852459016394,
"alnum_prop": 0.5734550054210336,
"repo_name": "junranhe/tf-faster-rcnn",
"id": "1d538551b5d338282ea9b9e0814afb409ed1b46f",
"size": "11332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/datasets/pascal_voc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "146"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "94"
},
{
"name": "Matlab",
"bytes": "1821"
},
{
"name": "Python",
"bytes": "232492"
},
{
"name": "Roff",
"bytes": "1195"
},
{
"name": "Shell",
"bytes": "10039"
}
],
"symlink_target": ""
} |
import atdlib
from atdlib import *
import logging
import unittest
# ------------------------------------
global host, user, pswd, file, srcip, md5s, jobid, taskid, jobs, sjobs, tasks, stasks, ssl
# ----- Please verify the following test data -----
# ---- before running to get meaningful results ---
ssl = True # Use ssl connection. Can be True or False
host = '169.254.254.100' # IP-address or hostname of your ATD box
user = 'atduser' # Your ATD user with REST-API access
pswd = 'atdpassword' # The respective password
file = 'sample.exe' # Sample file for upload, located in the working dir
srcip = '10.10.10.10' # Any valid IP address for enriching sample context
md5s = '10e4a1d2132ccb5c6759f038cdb6f3c9' # MD5 hash sum of the sample file
jobid = 39 # The id of an existing job on the ATD box
taskid = 62 # The id of an existing task on the ATD box
jobs = (39, 40, 41) # The tuple of existing jobs' ids on the ATD box
sjobs = ('39', '40', '41') # Same as above (string values)
tasks = (62, 63, 64) # The tuple of existing tasks' ids on the ATD box
stasks = ('62', '63', '64') # Same as above (string values)
# --------------------------------------------------
# ---- Constructor Test Case ---------
class TestATDConstructor(unittest.TestCase):
def setUp(self):
self.atd = None
def tearDown(self):
del self.atd
def test_atdsession_sslstr(self):
with self.assertRaises(TypeError):
self.atd = atdsession(ssl='abc')
def test_atdsession_uagnum(self):
with self.assertRaises(TypeError):
self.atd = atdsession(uag=123)
# ------------------------------------
# ---- Open Method Test Case ---------
class TestATDOpenMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
def tearDown(self):
self.atd.reset()
del self.atd
def test_open(self):
self.assertEqual(self.atd.open(host, user, pswd), True)
def test_open_dup(self):
self.atd.open(host, user, pswd)
with self.assertRaises(ATDStateError):
self.atd.open(host, user, pswd)
def test_open_numhname(self):
with self.assertRaises(TypeError):
self.atd.open(123, user, pswd)
def test_open_numuname(self):
with self.assertRaises(TypeError):
self.atd.open(host, 123, pswd)
def test_open_emptyuname(self):
with self.assertRaises(ATDAuthError):
self.atd.open(host, '', pswd)
def test_open_nonexistuname(self):
with self.assertRaises(ATDClientError):
self.atd.open(host, 'definitelynotauser', pswd)
def test_open_numpasswd(self):
with self.assertRaises(TypeError):
self.atd.open(host, user, 123)
def test_open_emptypasswd(self):
with self.assertRaises(ATDAuthError):
self.atd.open(host, user, '')
def test_open_wrongpasswd(self):
with self.assertRaises(ATDAuthError):
self.atd.open(host, user, 'McAfee123')
# ------------------------------------
# ---- Close Method Test Case ---------
class TestATDCloseMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
def tearDown(self):
self.atd.reset()
del self.atd
def test_close(self):
self.assertEqual(self.atd.close(), True)
def test_close_dup(self):
self.atd.close()
with self.assertRaises(ATDStateError):
self.atd.close()
# ------------------------------------
# ---- Fileup Method Test Case ---------
class TestATDFileupMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
def tearDown(self):
self.atd.close()
del self.atd
def test_fileup_ip_nore(self):
self.assertGreater(self.atd.fileup(file, srcip, False), 0)
def test_fileup_noip_nore(self):
self.assertGreater(self.atd.fileup(file, '', False), 0)
def test_fileup_ip_re(self):
self.assertGreater(self.atd.fileup(file, srcip, True), 0)
def test_fileup_noip_re(self):
self.assertGreater(self.atd.fileup(file, '', True), 0)
def test_fileup_num_fname(self):
with self.assertRaises(TypeError):
self.atd.fileup(123, srcip, True)
def test_fileup_empty_fname(self):
with self.assertRaises(IOError):
self.atd.fileup('', srcip, True)
def test_fileup_num_srcip(self):
with self.assertRaises(TypeError):
self.atd.fileup(file, 123, True)
def test_fileup_str_reanalyze(self):
with self.assertRaises(TypeError):
self.atd.fileup(file, srcip, '')
# ------------------------------------
# ---- _MD5Log Method Test Case ---------
class TestATD_MD5LogMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test__md5log(self):
self.ret = self.atd._md5log(md5s)
self.assertIsInstance(self.ret, dict)
self.assertIn('success', self.ret)
self.assertEqual(self.ret['success'], True)
#print self.ret
def test__md5log_nonexist(self):
self.ret = self.atd._md5log('00000000000000000000000000000000')
self.assertIsInstance(self.ret, dict)
self.assertIn('success', self.ret)
self.assertEqual(self.ret['success'], True)
#print self.ret
def test__md5log_invalid_md5(self):
with self.assertRaises(ValueError):
self.ret = self.atd._md5log('abcdefghijklmnopqrstuvwxyz')
def test__md5log_num_md5(self):
with self.assertRaises(TypeError):
self.ret = self.atd._md5log(123)
# ------------------------------------
# ---- MD5Status Method Test Case ---------
class TestATDMD5StatusMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_md5status(self):
self.ret = self.atd.md5status(md5s)
self.assertIsInstance(self.ret, dict)
self.assertIn('status', self.ret)
self.assertIn('severity', self.ret)
#print self.ret
def test_md5status_nonexist(self):
self.ret = self.atd.md5status('00000000000000000000000000000000')
self.assertIsInstance(self.ret, dict)
self.assertEqual(self.ret['status'], 0)
self.assertEqual(self.ret['severity'], -6)
#print self.ret
def test_md5status_invalid_md5(self):
with self.assertRaises(ValueError):
self.ret = self.atd.md5status('abcdefghijklmnopqrstuvwxyz')
def test_md5status_num_md5(self):
with self.assertRaises(TypeError):
self.ret = self.atd.md5status(123)
# ------------------------------------
# ---- JobStatus Method Test Case ---------
class TestATDJobStatusMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_jobstatus_int(self):
self.ret = self.atd.jobstatus(jobid)
self.assertIsInstance(self.ret, dict)
self.assertIn('status', self.ret)
self.assertIn('severity', self.ret)
#print self.ret
def test_jobstatus_intstr(self):
self.ret = self.atd.jobstatus(str(jobid))
self.assertIsInstance(self.ret, dict)
self.assertIn('status', self.ret)
self.assertIn('severity', self.ret)
#print self.ret
def test_jobstatus_nonexist(self):
# current API implementation returns ATDError
with self.assertRaises((ATDClientError, ATDError)):
self.ret = self.atd.jobstatus(65535)
def test_jobstatus_str(self):
with self.assertRaises(ValueError):
self.ret = self.atd.jobstatus("justastring")
def test_jobstatus_emptystr(self):
with self.assertRaises(ValueError):
self.ret = self.atd.jobstatus("")
# ------------------------------------
# ---- TaskStatus Method Test Case ---------
class TestATDTaskStatusMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_taskstatus_int(self):
self.ret = self.atd.taskstatus(taskid)
self.assertIsInstance(self.ret, int)
self.assertIn(self.ret, [1,2,3,4,-1])
#print self.ret
def test_taskstatus_intstr(self):
self.ret = self.atd.taskstatus(str(taskid))
self.assertIsInstance(self.ret, int)
self.assertIn(self.ret, [1,2,3,4,-1])
#print self.ret
def test_taskstatus_nonexist(self):
# current API implementation returns ATDClientError
with self.assertRaises((ATDClientError, ATDError)):
self.ret = self.atd.taskstatus(65535)
def test_taskstatus_str(self):
with self.assertRaises(ValueError):
self.ret = self.atd.taskstatus("justastring")
def test_taskstatus_emptystr(self):
with self.assertRaises(ValueError):
self.ret = self.atd.taskstatus("")
# ------------------------------------
# ---- JobTasks Method Test Case ---------
class TestATDJobTasksMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_jobtasks_int(self):
self.ret = self.atd.jobtasks(jobid)
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_jobtasks_intstr(self):
self.ret = self.atd.jobtasks(str(jobid))
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_jobtasks_nonexist(self):
# current API implementation returns empty list
self.ret = self.atd.jobtasks(65535)
self.assertIsInstance(self.ret, list)
self.assertEqual(len(self.ret), 0)
#print self.ret
def test_jobtasks_str(self):
with self.assertRaises(ValueError):
self.ret = self.atd.jobtasks("justastring")
def test_jobtasks_emptystr(self):
with self.assertRaises(ValueError):
self.ret = self.atd.jobtasks("")
# ------------------------------------
# ---- BulkStatus Method Test Case ---------
class TestATDBulkStatusMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
def tearDown(self):
self.atd.close()
del self.atd
def test_bulkstatus_tasks(self):
self.ret = self.atd.bulkstatus(tasks=tasks)
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_bulkstatus_stasks(self):
self.ret = self.atd.bulkstatus(tasks=stasks)
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_bulkstatus_jobs(self):
self.ret = self.atd.bulkstatus(jobs=jobs)
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_bulkstatus_sjobs(self):
self.ret = self.atd.bulkstatus(jobs=sjobs)
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_bulkstatus_tasksjobs(self):
self.ret = self.atd.bulkstatus(tasks=tasks, jobs=jobs)
self.assertIsInstance(self.ret, list)
self.assertGreater(len(self.ret), 0)
#print self.ret
def test_bulkstatus_nonexisttasks(self):
#current API returns the list with {taskid=<id>, status=-1, severity=-1} items
self.ret = self.atd.bulkstatus(tasks=[65535,65534])
self.assertIsInstance(self.ret, list)
self.assertEqual(len(self.ret), 2)
#print self.ret
def test_bulkstatus_nonexistjobs(self):
#current API returns the list with {jobid=<id>, status=-1, severity=-1} items
self.ret = self.atd.bulkstatus(jobs=[65535,65534])
self.assertIsInstance(self.ret, list)
self.assertEqual(len(self.ret), 2)
#print self.ret
def test_bulkstatus_notasksnojobs(self):
with self.assertRaises(ValueError):
self.ret = self.atd.bulkstatus()
def test_bulkstatus_num_tasks(self):
with self.assertRaises(TypeError):
self.ret = self.atd.bulkstatus(tasks=123)
def test_bulkstatus_num_jobs(self):
with self.assertRaises(TypeError):
self.ret = self.atd.bulkstatus(jobs=123)
def test_bulkstatus_str_tasks(self):
with self.assertRaises(ValueError):
self.ret = self.atd.bulkstatus(tasks="thisisjustastring")
def test_bulkstatus_str_jobs(self):
with self.assertRaises(ValueError):
self.ret = self.atd.bulkstatus(jobs="thisisjustastring")
# ------------------------------------
# ---- TaskReport Method Test Case ---------
class TestATDTaskReportMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_taskreport_int(self):
self.ret = self.atd.taskreport(taskid)
self.assertGreater(len(self.ret), 0)
def test_taskreport_intstr(self):
self.ret = self.atd.taskreport(str(taskid))
self.assertGreater(len(self.ret), 0)
def test_taskreport_list(self):
with self.assertRaises(TypeError):
self.ret = self.atd.taskreport(list(taskid))
def test_taskreport_nonexist(self):
# current API implementation returns ATDClientError
with self.assertRaises(ATDClientError):
self.ret = self.atd.taskreport(65535)
def test_taskreport_invalidreptype(self):
with self.assertRaises(ValueError):
self.ret = self.atd.taskreport(taskid=taskid, type='nosuchtype')
# ------------------------------------
# ---- JobReport Method Test Case ---------
class TestATDJobReportMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_jobreport_int(self):
self.ret = self.atd.jobreport(jobid)
self.assertGreater(len(self.ret), 0)
def test_jobreport_intstr(self):
self.ret = self.atd.jobreport(str(jobid))
self.assertGreater(len(self.ret), 0)
def test_jobreport_list(self):
with self.assertRaises(TypeError):
self.ret = self.atd.jobreport(list(jobid))
def test_jobreport_nonexist(self):
# current API implementation returns ATDClientError
with self.assertRaises(ATDClientError):
self.ret = self.atd.jobreport(65535)
def test_jobreport_invalidreptype(self):
with self.assertRaises(ValueError):
self.ret = self.atd.jobreport(jobid=jobid, type='nosuchtype')
# ------------------------------------
# ---- MD5Report Method Test Case ---------
class TestATDMD5ReportMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_md5report(self):
self.ret = self.atd.md5report(md5s)
self.assertGreater(len(self.ret), 0)
def test_md5report_nonexist(self):
# current API implementation returns ATDServerError 'submission not found'
with self.assertRaises(ATDServerError):
self.ret = self.atd.md5report('00000000000000000000000000000000')
def test_md5report_invalid(self):
with self.assertRaises(ValueError):
self.ret = self.atd.md5report('abcdefghijklmnopqrstuvwxyz')
def test_md5report_numeric(self):
with self.assertRaises(TypeError):
self.ret = self.atd.md5report(123)
# ------------------------------------
# ---- ListLookup Method Test Case ---------
class TestATDListLookupMethod(unittest.TestCase):
def setUp(self):
self.atd = atdsession(ssl=ssl)
self.atd.open(host, user, pswd)
self.ret = None
def tearDown(self):
self.atd.close()
del self.atd
del self.ret
def test_listlookup(self):
self.ret = self.atd.listlookup(md5s)
self.assertIn(self.ret, ('0', 'w', 'b'))
def test_listlookup_nonexist(self):
self.ret = self.atd.listlookup('00000000000000000000000000000000')
self.assertEquals(self.ret, '0')
def test_listlookup_invalid(self):
with self.assertRaises(ValueError):
self.ret = self.atd.listlookup('abcdefghijklmnopqrstuvwxyz')
def test_listlookup_numeric(self):
with self.assertRaises(TypeError):
self.ret = self.atd.listlookup(123)
# ------------------------------------
# ----------- Main Unit Test -------------
if __name__ == '__main__':
logFormat = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logHandler = logging.FileHandler('atdlib14.log')
logHandler.setFormatter(logFormat)
mylog = logging.getLogger('atdlib')
mylog.addHandler(logHandler)
mylog.setLevel(logging.INFO)
t = {}
t['suiteCreate'] = unittest.TestLoader().loadTestsFromTestCase(TestATDConstructor)
t['suiteOpen'] = unittest.TestLoader().loadTestsFromTestCase(TestATDOpenMethod)
t['suiteClose'] = unittest.TestLoader().loadTestsFromTestCase(TestATDCloseMethod)
t['suiteFileup'] = unittest.TestLoader().loadTestsFromTestCase(TestATDFileupMethod)
t['suiteMD5Status'] = unittest.TestLoader().loadTestsFromTestCase(TestATDMD5StatusMethod)
t['suite_MD5Log'] = unittest.TestLoader().loadTestsFromTestCase(TestATD_MD5LogMethod)
t['suiteJobStatus'] = unittest.TestLoader().loadTestsFromTestCase(TestATDJobStatusMethod)
t['suiteTaskStatus'] = unittest.TestLoader().loadTestsFromTestCase(TestATDTaskStatusMethod)
t['suiteJobTasks'] = unittest.TestLoader().loadTestsFromTestCase(TestATDJobTasksMethod)
t['suiteBulkStatus'] = unittest.TestLoader().loadTestsFromTestCase(TestATDBulkStatusMethod)
t['suiteTaskReport'] = unittest.TestLoader().loadTestsFromTestCase(TestATDTaskReportMethod)
t['suiteJobReport'] = unittest.TestLoader().loadTestsFromTestCase(TestATDJobReportMethod)
t['suiteMD5Report'] = unittest.TestLoader().loadTestsFromTestCase(TestATDMD5ReportMethod)
t['suiteListLookup'] = unittest.TestLoader().loadTestsFromTestCase(TestATDListLookupMethod)
alltests = unittest.TestSuite(t.values())
#alltests = unittest.TestSuite(t['suiteListLookup'])
unittest.TextTestRunner(verbosity=5).run(alltests)
| {
"content_hash": "17d05cc7360bc58cce14bced83aa4e2a",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 92,
"avg_line_length": 31.073504273504273,
"alnum_prop": 0.668610408185719,
"repo_name": "passimens/atdlib",
"id": "6eee3749524fc6943a05b3557a412e2ca6f4d064",
"size": "18178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44033"
}
],
"symlink_target": ""
} |
import os
import sys
import Targets
import datetime
import logging
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
GRAY = '\033[90m'
ENDC = '\033[0m'
ERROR = '\033[91m'
else:
GRAY = ''
ENDC = ''
ERROR = ''
OKLVL = 22
logger = logging.getLogger("Engine")
def create_lockfile(exploit_name):
# TODO: This is backwards - the exception should be if the
# file *does* exist!
try:
open('.lock')
return False
except IOError:
# lockfile does not yet exist, create it
fd = file('.lock', 'w')
fd.write(exploit_name)
fd.close()
return True
def remove_lockfile():
os.remove('.lock')
return
def get_running():
try:
fd = file('.lock', 'r')
running = fd.readline().rstrip('\n')
fd.close()
return running
    except IOError:
        # lockfile does not exist - there is apparently nothing running
        return None
class Engine:
class StartUpException(Exception):
pass
class ShutDownException(Exception):
pass
def __init__(self, exploit, config):
self.chroot_dirs = config.chroot_dirs
self.live_systems_dir = config.live_systems_dir
self.xdebug_dir = config.xdebug_dir
self.traces_dir = config.traces_dir
self.exploit = exploit
try:
Target = Targets.get_target_class(self.exploit.attributes['Target'])
except Targets.TargetModuleNotFound as e:
logger.error("No module found for target application \"%s\"", e.value)
exit(-1)
self.target_app = Target()
self.chroot_environment = self.target_app.chroot_environment
self.exploitname = self.exploit.attributes['Name'].replace(' ', '_').replace('.', '_')
# this is to not break existing exploits, a better convention could be had
self.target_system_dir = "%s/%s" % (self.live_systems_dir, self.exploitname)
self.application_dir_mapping = self.target_app.application_dir_mapping
self.application_dir = self.target_app.application_dir
if 'Plugin' in self.exploit.attributes:
try:
pname = self.exploit.attributes['Plugin']
self.target_app.set_plugin(pname)
except Targets.TargetPluginNotFound as e:
logger.error("Plugin \"%s\" not found for target application \"%s\"",
pname,
self.target_app.name)
exit(-1)
def startup(self):
logger.info("Running application startup for exploit %s", self.exploitname)
# Check to make sure that the System's Apache server isn't running
# before we try to start ours.
# TODO: Architecturally, this isn't the best place - but it fits best for now...
retvalue = os.system("sudo service apache2 status")
if retvalue == 0:
logger.error("There is already a web server running! You need to stop your web server.")
logger.error("This program will not stop your existing webserver, because that is a Very Bad Idea (TM)")
raise self.StartUpException("Problem starting application")
if not get_running():
create_lockfile(self.exploitname)
start_script = ["mkdir %s" % (self.target_system_dir,),
"mount --bind %s/%s %s" % (self.chroot_dirs,
self.chroot_environment,
self.target_system_dir),
"mount --bind /dev %s/dev" % (self.target_system_dir,),
"mount --bind /dev/pts %s/dev/pts" % (self.target_system_dir,),
"mount --bind /proc %s/proc" % (self.target_system_dir,),
"mkdir %s%s/%s" % (self.target_system_dir,
self.application_dir_mapping[1],
self.application_dir),
# "mount --bind %s %s%s/%s" % (self.application_dir_mapping[0],
"cp -pR %s/* %s%s/%s" % (self.application_dir_mapping[0],
self.target_system_dir,
self.application_dir_mapping[1],
self.application_dir)]
start_script += self.target_app.get_start_service_script(self.target_system_dir)
self.execute_commands(start_script)
logger.info("Running exploit setup")
self.exploit.setup(self.target_system_dir)
else:
logger.error("There is already a system running under %s", self.live_systems_dir)
raise self.StartUpException("Problem starting application")
def test(self):
return self.exploit.test()
def shutdown(self):
if get_running() == self.exploitname:
if self.check_chroot_in_use():
logger.error("Shutdown failed: one or more processes is using a resource in %s", self.target_system_dir)
exit(-1)
stop_script = self.target_app.get_stop_service_script(self.target_system_dir)
stop_script += ["umount %s/proc" % (self.target_system_dir,),
"umount %s/dev/pts" % (self.target_system_dir,),
"umount %s/dev" % (self.target_system_dir,),
"rm -rf %s%s/%s" % (self.target_system_dir,
self.application_dir_mapping[1],
self.application_dir),
"umount %s" % (self.target_system_dir,),
"[ \"$(ls -A %s)\" ] "
"&& echo \"Directory not empty!\" "
"&& exit 1 "
"|| rm -rf %s" % (self.target_system_dir,
self.target_system_dir),
"rm -rf .tmpbuff"]
self.execute_commands(stop_script)
remove_lockfile()
else:
logger.error("attempting to shutdown a system that is not running.")
raise self.ShutDownException("Problem during application shutdown")
    def exploit(self):
self.exploit.exploit()
if not self.exploit.verify():
logger.error("Verify failed: exploit did not succeed")
return
def check_chroot_in_use(self):
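        # Shell one-liner: lsof -Fcp lists every process holding an open file
        # under the chroot tree as "p<pid>" / "c<command>" fields; the sed
        # calls reformat each pair as "command(pid)" and strip apache2
        # entries, and the surrounding test exits non-zero if anything else
        # remains, i.e. the chroot is still in use.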
checkcmd = "if [ ! -z `lsof -Fcp +D %s | tr '\\n' ' ' | " \
"sed -e 's/p\\([0-9]\\+\\) c\\([^ ]\\+\\)/\\2(\\1) /g' " \
"-e 's/apache2.* //g'` ]; then exit 1; fi" % (self.target_system_dir,)
return not (self.execute_command(checkcmd) == os.EX_OK)
def xdebug_autotrace_on(self):
if get_running():
autotrace_on_script = ["sed -i 's/xdebug\.auto_trace=0/xdebug\.auto_trace=1/' "
"%s/etc/php5/mods-available/xdebug.ini" % (self.target_system_dir),
"chroot %s /etc/init.d/apache2 restart" % (self.target_system_dir,)]
self.execute_commands(autotrace_on_script)
else:
logger.error("attempting to turn on autotrace for a system that is not running")
def xdebug_autotrace_off(self):
if get_running():
datestr = datetime.datetime.now().strftime('%Y_%m_%d')
movetodir = "%s/%s_%s" % (self.traces_dir, self.exploitname, datestr)
autotrace_on_script = ["mkdir -p %s" % (movetodir,),
"mv %s/tmp/traces/* %s" % (self.target_system_dir, movetodir,),
"sed -i 's/xdebug\.auto_trace=1/xdebug\.auto_trace=0/' "
"%s/etc/php5/mods-available/xdebug.ini" % (self.target_system_dir),
"chroot %s /etc/init.d/apache2 restart" % (self.target_system_dir,)]
self.execute_commands(autotrace_on_script)
else:
            logger.error("attempting to turn off autotrace for a system that is not running")
def execute_command(self, command):
logger.info("EXEC: %s%s%s", GRAY, command, ENDC)
return os.system(command)
def execute_commands(self, cmdlist):
for cmd in cmdlist:
ret = self.execute_command(cmd)
if ret != os.EX_OK:
logger.error("Nonzero exit status %s", str(ret))
exit(-1)
return
| {
"content_hash": "72ae9e76918ed1e2399d3b69e56b7edd",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 120,
"avg_line_length": 42.37327188940092,
"alnum_prop": 0.49603045133224577,
"repo_name": "UMD-SEAM/bugbox",
"id": "cd868957a2577522a1dea5a6d0190fda8603d0a0",
"size": "9367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/engine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "33053"
},
{
"name": "CSS",
"bytes": "3304865"
},
{
"name": "Elixir",
"bytes": "5199"
},
{
"name": "Java",
"bytes": "22054"
},
{
"name": "JavaScript",
"bytes": "5143660"
},
{
"name": "PHP",
"bytes": "47086650"
},
{
"name": "Perl",
"bytes": "5111"
},
{
"name": "Python",
"bytes": "228074"
},
{
"name": "Ruby",
"bytes": "15397"
},
{
"name": "Shell",
"bytes": "126456"
},
{
"name": "XSLT",
"bytes": "345743"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer.testing import attr
if cuda.available:
cuda.init()
class TestReshape(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (2, 2, 6)).astype(numpy.float32)
def check_forward(self, x_data):
shape = self.gy.shape
x = chainer.Variable(x_data)
y = functions.reshape(x, shape)
self.assertEqual(y.data.dtype, numpy.float32)
self.assertTrue((self.x.reshape(shape) == cuda.to_cpu(y.data)).all())
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = functions.reshape(x, self.gy.shape)
y.grad = y_grad
y.backward()
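        # Reshape only rearranges elements, so the gradient w.r.t. x is just
        # gy reshaped back to x's shape.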
        shape = self.x.shape
self.assertTrue((self.gy.reshape(shape) == cuda.to_cpu(x.grad)).all())
| {
"content_hash": "0369273961d2ba6634eb8036b0911ac2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.6317204301075269,
"repo_name": "tereka114/chainer",
"id": "580f20d1c7e47996b6465c0c74371e7988901d7b",
"size": "1116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functions_tests/test_reshape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "515026"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import sys
import os
def hive(name=None):
import params
if name == 'hiveserver2':
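    # HdfsDirectory calls with action="create_delayed" only queue the
    # directories; the HdfsDirectory(None, action="create") call below is
    # what actually creates the queued batch.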
params.HdfsDirectory(params.hive_apps_whs_dir,
action="create_delayed",
owner=params.hive_user,
mode=0777
)
params.HdfsDirectory(params.hive_hdfs_user_dir,
action="create_delayed",
owner=params.hive_user,
mode=params.hive_hdfs_user_mode
)
params.HdfsDirectory(None, action="create")
# We should change configurations for client as well as for server.
# The reason is that stale-configs are service-level, not component.
for conf_dir in params.hive_conf_dirs_list:
fill_conf_dir(conf_dir)
XmlConfig("hive-site.xml",
conf_dir=params.hive_config_dir,
configurations=params.config['configurations']['hive-site'],
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
File(format("{hive_config_dir}/hive-env.sh"),
owner=params.hive_user,
group=params.user_group,
content=InlineTemplate(params.hive_env_sh_template)
)
if name == 'metastore' or name == 'hiveserver2':
jdbc_connector()
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" "
"--retry 5 "
"{jdk_location}/{check_db_connection_jar_name} "
"-o {check_db_connection_jar_name}'")
Execute(cmd,
            not_if=format("[ -f {check_db_connection_jar_name} ]"),
environment = environment)
if name == 'metastore':
File(params.start_metastore_path,
mode=0755,
content=StaticFile('startMetastore.sh')
)
if params.init_metastore_schema:
create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_bin}/schematool -initSchema "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p}")
check_schema_created_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_bin}/schematool -info "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p}")
Execute(create_schema_cmd,
not_if = check_schema_created_cmd
)
elif name == 'hiveserver2':
File(params.start_hiveserver2_path,
mode=0755,
content=Template(format('{start_hiveserver2_script}'))
)
if name != "client":
crt_directory(params.hive_pid_dir)
crt_directory(params.hive_log_dir)
crt_directory(params.hive_var_lib)
def fill_conf_dir(component_conf_dir):
import params
Directory(component_conf_dir,
owner=params.hive_user,
group=params.user_group,
create_parents = True
)
XmlConfig("mapred-site.xml",
conf_dir=component_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
crt_file(format("{component_conf_dir}/hive-default.xml.template"))
crt_file(format("{component_conf_dir}/hive-env.sh.template"))
log4j_exec_filename = 'hive-exec-log4j.properties'
if (params.log4j_exec_props != None):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=params.log4j_exec_props
)
  elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
)
log4j_filename = 'hive-log4j.properties'
if (params.log4j_props != None):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=params.log4j_props
)
  elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
)
def crt_directory(name):
import params
Directory(name,
create_parents = True,
owner=params.hive_user,
group=params.user_group,
mode=0755)
def crt_file(name):
import params
File(name,
owner=params.hive_user,
group=params.user_group
)
def jdbc_connector():
import params
if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
Execute(cmd,
not_if=format("test -f {target}"),
creates=params.target,
environment= {'PATH' : params.execute_path },
path=["/bin", "/usr/bin/"])
elif params.hive_jdbc_driver == "org.postgresql.Driver":
cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
Execute(cmd,
not_if=format("test -f {target}"),
creates=params.target,
environment= {'PATH' : params.execute_path },
            path=["/bin", "/usr/bin/"])
elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
cmd = format(
"mkdir -p {artifact_dir} ; "
"curl -kf -x \"\" --retry 10 {driver_curl_source} -o {driver_curl_target} && "
"cp {driver_curl_target} {target}")
Execute(cmd,
not_if=format("test -f {target}"),
path=["/bin", "/usr/bin/"],
environment=environment)
| {
"content_hash": "c3d8c9d3be0aeb6d2c609c36c9435c7a",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 94,
"avg_line_length": 33.96279069767442,
"alnum_prop": 0.6038071761161325,
"repo_name": "arenadata/ambari",
"id": "504a6cbebcc43cb0fae9621437a7b02cc9613da5",
"size": "7324",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include
from djanban.apps.fetch.views import fetch
urlpatterns = [
url(r'^fetch$', fetch, name="fetch_boards"),
] | {
"content_hash": "21f9aa6babf45bd365139a3ce8703c94",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.7189542483660131,
"repo_name": "diegojromerolopez/djanban",
"id": "fb00df45c4a541ea633d89717557747056b0b3c7",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/djanban/apps/fetch/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79709"
},
{
"name": "HTML",
"bytes": "660275"
},
{
"name": "JavaScript",
"bytes": "634320"
},
{
"name": "Python",
"bytes": "993818"
},
{
"name": "Shell",
"bytes": "1732"
},
{
"name": "TypeScript",
"bytes": "71578"
}
],
"symlink_target": ""
} |
__license__ = "MIT"
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
HTMLAwareEntitySubstitution,
nonwhitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
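# Illustrative sketch (not part of the original source): a typical lookup
# passes one or more of the feature constants defined above and instantiates
# the winning class, e.g.
#
#   builder_class = builder_registry.lookup(HTML, FAST)
#   if builder_class is not None:
#       builder = builder_class()
#
# lookup() returns None when nothing suitable has been registered.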
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def initialize_soup(self, soup):
"""The BeautifulSoup object has been initialized and is now
being associated with the TreeBuilder.
"""
self.soup = soup
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = nonwhitespace_re.findall(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags
empty_element_tags = set([
# These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
# These are from earlier versions of HTML and are removed in HTML5.
'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
])
# The HTML standard defines these as block-level elements. Beautiful
# Soup does not treat these elements differently from other elements,
# but it may do so eventually, and this information is available if
# you need to use it.
block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
    cdata_list_attributes = {
        "*" : ['class', 'accesskey', 'dropzone'],
        "a" : ['rel', 'rev'],
        "link" : ['rel', 'rev'],
        "td" : ["headers"],
        "th" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
| {
"content_hash": "3258fe3ee3ba201ad7f04e449c211e05",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 317,
"avg_line_length": 35.9156976744186,
"alnum_prop": 0.6043707001214084,
"repo_name": "deanishe/alfred-duden",
"id": "42077503f7ce0d850d682b42c5aeed6378e37ee3",
"size": "12413",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/lib/bs4/builder/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "65314"
},
{
"name": "Python",
"bytes": "1199858"
}
],
"symlink_target": ""
} |
from .encoder import encode_packet
from .decoder import decode_packet
from .helpers import create_packet, contains_complete_packet
| {
"content_hash": "edd7896b7538c56c8b3586e2d0167a13",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 60,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.8320610687022901,
"repo_name": "edvinerikson/frostbite-rcon-utils-python",
"id": "4f5fa3650f41ac76282f479f92b91d209ccf17ca",
"size": "131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "frostbite_rcon_utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6189"
}
],
"symlink_target": ""
} |
"""Tests for QueueRunner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
class QueueRunnerTest(tf.test.TestCase):
def testBasic(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
qr = tf.train.QueueRunner(queue, [count_up_to])
threads = qr.create_threads(sess)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, var.eval())
def testTwoOps(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var0 = tf.Variable(zero64)
count_up_to_3 = var0.count_up_to(3)
var1 = tf.Variable(zero64)
count_up_to_30 = var1.count_up_to(30)
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, [count_up_to_3, count_up_to_30])
threads = qr.create_threads(sess)
tf.initialize_all_variables().run()
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
self.assertEqual(3, var0.eval())
self.assertEqual(30, var1.eval())
def testExceptionsCaptured(self):
with self.test_session() as sess:
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, ["i fail", "so fail"])
threads = qr.create_threads(sess)
tf.initialize_all_variables().run()
for t in threads:
t.start()
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(2, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
self.assertTrue("Operation not in the graph" in str(exceptions[1]))
def testRealDequeueEnqueue(self):
with self.test_session() as sess:
q0 = tf.FIFOQueue(3, tf.float32)
enqueue0 = q0.enqueue((10.0,))
close0 = q0.close()
q1 = tf.FIFOQueue(30, tf.float32)
enqueue1 = q1.enqueue((q0.dequeue(),))
dequeue1 = q1.dequeue()
qr = tf.train.QueueRunner(q1, [enqueue1])
threads = qr.create_threads(sess)
for t in threads:
t.start()
# Enqueue 2 values, then close queue0.
enqueue0.run()
enqueue0.run()
close0.run()
# Wait for the queue runner to terminate.
for t in threads:
t.join()
# It should have terminated cleanly.
self.assertEqual(0, len(qr.exceptions_raised))
# The 2 values should be in queue1.
self.assertEqual(10.0, dequeue1.eval())
self.assertEqual(10.0, dequeue1.eval())
# And queue1 should now be closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed"):
dequeue1.eval()
def testRespectCoordShouldStop(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
qr = tf.train.QueueRunner(queue, [count_up_to])
      # Ask the coordinator to stop. The queue runner should
# finish immediately.
coord = tf.train.Coordinator()
coord.request_stop()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
coord.join(threads)
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 0.
self.assertEqual(0, var.eval())
def testRequestStopOnException(self):
with self.test_session() as sess:
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, ["not an op"])
coord = tf.train.Coordinator()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
# The exception should be re-raised when joining.
with self.assertRaisesRegexp(ValueError, "Operation not in the graph"):
coord.join(threads)
def testGracePeriod(self):
with self.test_session() as sess:
# The enqueue will quickly block.
queue = tf.FIFOQueue(2, tf.float32)
enqueue = queue.enqueue((10.0,))
dequeue = queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue])
coord = tf.train.Coordinator()
threads = qr.create_threads(sess, coord, start=True)
# Dequeue one element and then request stop.
dequeue.op.run()
time.sleep(0.02)
coord.request_stop()
# We should be able to join because the RequestStop() will cause
# the queue to be closed and the enqueue to terminate.
coord.join(threads, stop_grace_period_secs=0.05)
def testNoMultiThreads(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
qr = tf.train.QueueRunner(queue, [count_up_to])
threads = []
threads.extend(qr.create_threads(sess, coord=coord))
with self.assertRaisesRegexp(
RuntimeError,
"Threads are already running"):
threads.extend(qr.create_threads(sess, coord=coord))
coord.request_stop()
coord.join(threads, stop_grace_period_secs=0.5)
def testThreads(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
qr = tf.train.QueueRunner(queue, [count_up_to, "bad op"])
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
def testName(self):
with tf.name_scope("scope"):
queue = tf.FIFOQueue(10, tf.float32, name="queue")
qr = tf.train.QueueRunner(queue, [tf.no_op()])
self.assertEqual("scope/queue", qr.name)
tf.train.add_queue_runner(qr)
self.assertEqual(1, len(tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS,
"scope")))
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "f8bf48a9b08a085fec67caedcd67b8c2",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 77,
"avg_line_length": 36.5969387755102,
"alnum_prop": 0.6340443329150983,
"repo_name": "panmari/tensorflow",
"id": "ef938d1984a608ef80ee202abd256befe05b9c3f",
"size": "7851",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/queue_runner_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "153226"
},
{
"name": "C++",
"bytes": "7360924"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "683163"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771416"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "102168"
},
{
"name": "Python",
"bytes": "4526515"
},
{
"name": "Shell",
"bytes": "117381"
},
{
"name": "TypeScript",
"bytes": "340911"
}
],
"symlink_target": ""
} |
import unittest2 as unittest
from tinycu import *
class SumTests(unittest.TestCase):
def test_full(self):
class MySumTest(TinyCu):
def __call__(self, *args):
return sum([1,2,3,4])
b = MySumTest()
result = b.intercept_call()
self.assertEqual(result, [10.0])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "b9d50833a95a3ca70b1b667472f88e3f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 40,
"avg_line_length": 21.055555555555557,
"alnum_prop": 0.5514511873350924,
"repo_name": "richardxia/asp-multilevel-debug",
"id": "9e39a876911ebe6923b36ef474e2d22583f1ec4e",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specializers/tinycu/tests/sum_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "102604"
},
{
"name": "Objective-C",
"bytes": "6902"
},
{
"name": "Python",
"bytes": "551438"
},
{
"name": "Shell",
"bytes": "7110"
}
],
"symlink_target": ""
} |
from flask import Flask, url_for
import os
app = Flask(__name__)
env = os.environ.get('FLASK_ENV', 'development')
app.config['ENV'] = env
# Function to easily find your assets
# In your template use <link rel=stylesheet href="{{ static('filename') }}">
app.jinja_env.globals['static'] = (
lambda filename: url_for('static', filename = filename)
)
# setup assets
from flask.ext.assets import Environment, Bundle
from .app import app
assets = Environment(app)
assets.url_expire = False
assets.debug = app.config['ENV'] == 'development'
assets.load_path = ['%s/assets' % app.config.root_path]
assets.register('css',
Bundle(
'stylesheets/**/*.css',
Bundle(
'stylesheets/*.scss',
filters='pyscss',
output='stylesheets/app.%(version)s.css'),
output='stylesheets/all.%(version)s.css'))
assets.register('js', Bundle(
'js/**/*.js',
output='js/app.%(version)s.js'))
from . import views
| {
"content_hash": "f4cfe5fbe9f422d86c32303feed7cb8c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 76,
"avg_line_length": 25.62162162162162,
"alnum_prop": 0.6518987341772152,
"repo_name": "Code4SA/flask-template",
"id": "4310d98900cea1b3b7860ac6b944d6ab175872ad",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "1180"
}
],
"symlink_target": ""
} |
import filecmp
import os
import tempfile
import unittest
import isomedia
TESTDATA = os.path.join(os.path.dirname(__file__), 'testdata')
class TestSanity(unittest.TestCase):
def test_sanity(self):
mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4')
mp4file = open(mp4filename, 'rb')
isofile = isomedia.load(mp4file)
root = isofile.root
moov_atom = [atom for atom in root.children if atom.type() == 'moov']
self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.')
def test_lossless_write(self):
mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4')
infile = open(mp4filename, 'rb')
isofile = isomedia.load(infile)
outfile = tempfile.NamedTemporaryFile(delete=False)
isofile.write(outfile)
infile.close()
outfile.close()
self.assertTrue(filecmp.cmp(infile.name, outfile.name))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d2b1a190ac4c3c58719a6a4c3dc3bc17",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 28.441176470588236,
"alnum_prop": 0.6452947259565667,
"repo_name": "0/isomedia",
"id": "66cb83128ca3c8b554d07927e2843867472b7c26",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sanity_tests.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Tests for the Task Cache information collector."""
import unittest
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from dfwinreg import registry as dfwinreg_registry
from winregrc import task_cache
from tests import test_lib
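# DynamicInfo value data used by the tests below; ParseDynamicInfo extracts the
# last registered and launch timestamps from these blobs.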
_DYNAMIC_INFO_DATA = bytes(bytearray([
0x03, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x7d, 0x12, 0x3f, 0x04, 0xca, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]))
_DYNAMIC_INFO2_DATA = bytes(bytearray([
0x03, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x7d, 0x12, 0x3f, 0x04, 0xca, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x7d, 0x12, 0x3f, 0x04, 0xca, 0x01]))
class TaskCacheDataParserTest(test_lib.BaseTestCase):
"""Tests for the Task Cache data parser."""
def testParseDynamicInfo(self):
"""Tests the ParseDynamicInfo function."""
data_parser = task_cache.TaskCacheDataParser()
cached_task = task_cache.CachedTask()
data_parser.ParseDynamicInfo(_DYNAMIC_INFO_DATA, cached_task)
# TODO: compare date time value.
self.assertIsNotNone(cached_task.last_registered_time)
# TODO: compare date time value.
self.assertIsNotNone(cached_task.launch_time)
cached_task = task_cache.CachedTask()
data_parser.ParseDynamicInfo(_DYNAMIC_INFO2_DATA, cached_task)
# TODO: compare date time value.
self.assertIsNotNone(cached_task.last_registered_time)
# TODO: compare date time value.
self.assertIsNotNone(cached_task.launch_time)
class TaskCacheCollectorTest(test_lib.BaseTestCase):
"""Tests for the Task Cache information collector."""
_GUID1 = '{8905ECD8-016F-4DC2-90E6-A5F1FA6A841A}'
_GUID2 = '{F93C7104-998A-4A38-B935-775A3138B3C3}'
_GUID3 = '{FE7B674F-2430-40A1-9162-AFC3727E3DC3}'
_NAME1 = 'AD RMS Rights Policy Template Management (Automated)'
_NAME2 = 'Notifications'
_NAME3 = 'AutoWake'
_PATH = (
'\\Microsoft\\Windows\\Active Directory Rights Management Services '
'Client\\AD RMS Rights Policy Template Management (Automated)')
def _CreateTestRegistry(self):
"""Creates Registry keys and values for testing.
Returns:
dfwinreg.WinRegistry: Windows Registry for testing.
"""
key_path_prefix = 'HKEY_LOCAL_MACHINE\\Software'
registry_file = dfwinreg_fake.FakeWinRegistryFile(
key_path_prefix=key_path_prefix)
registry_key = dfwinreg_fake.FakeWinRegistryKey(self._GUID1)
registry_file.AddKeyByPath(
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache\\Tasks',
registry_key)
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'DynamicInfo', data=_DYNAMIC_INFO_DATA,
data_type=dfwinreg_definitions.REG_BINARY)
registry_key.AddValue(registry_value)
value_data = self._PATH.encode('utf-16-le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'Path', data=value_data, data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
registry_key = dfwinreg_fake.FakeWinRegistryKey(self._NAME1)
registry_file.AddKeyByPath((
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache\\Tree\\'
'Microsoft\\Windows\\Active Directory Rights Management Services '
'Client'), registry_key)
value_data = '{8905ECD8-016F-4DC2-90E6-A5F1FA6A841A}\x00'.encode(
'utf-16-le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'Id', data=value_data, data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
registry_key = dfwinreg_fake.FakeWinRegistryKey(self._GUID2)
registry_file.AddKeyByPath(
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache\\Tasks',
registry_key)
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'DynamicInfo', data=_DYNAMIC_INFO2_DATA,
data_type=dfwinreg_definitions.REG_BINARY)
registry_key.AddValue(registry_value)
registry_key = dfwinreg_fake.FakeWinRegistryKey(self._NAME2)
registry_file.AddKeyByPath((
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache\\Tree\\'
'Microsoft\\Windows\\Location'), registry_key)
value_data = '{F93C7104-998A-4A38-B935-775A3138B3C3}\x00'.encode(
'utf-16-le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'Id', data=value_data, data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
registry_key = dfwinreg_fake.FakeWinRegistryKey(self._GUID3)
registry_file.AddKeyByPath(
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache\\Tasks',
registry_key)
registry_key = dfwinreg_fake.FakeWinRegistryKey(self._NAME3)
registry_file.AddKeyByPath((
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache\\Tree\\'
'Microsoft\\Windows\\SideShow'), registry_key)
value_data = '{FE7B674F-2430-40A1-9162-AFC3727E3DC3}\x00'.encode(
'utf-16-le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'Id', data=value_data, data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
registry_file.Open(None)
registry = dfwinreg_registry.WinRegistry()
registry.MapFile(key_path_prefix, registry_file)
return registry
def _CreateTestRegistryEmpty(self):
"""Creates Registry keys and values for testing.
Returns:
dfwinreg.WinRegistry: Windows Registry for testing.
"""
key_path_prefix = 'HKEY_LOCAL_MACHINE\\Software'
registry_file = dfwinreg_fake.FakeWinRegistryFile(
key_path_prefix=key_path_prefix)
registry_key = dfwinreg_fake.FakeWinRegistryKey('Tasks')
registry_file.AddKeyByPath(
'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache',
registry_key)
registry_file.Open(None)
registry = dfwinreg_registry.WinRegistry()
registry.MapFile(key_path_prefix, registry_file)
return registry
def testCollect(self):
"""Tests the Collect function."""
registry = self._CreateTestRegistry()
test_output_writer = test_lib.TestOutputWriter()
collector_object = task_cache.TaskCacheCollector(
output_writer=test_output_writer)
result = collector_object.Collect(registry)
self.assertTrue(result)
test_output_writer.Close()
self.assertEqual(len(collector_object.cached_tasks), 2)
cached_tasks = sorted(
collector_object.cached_tasks, key=lambda task: task.identifier)
cached_task = cached_tasks[0]
self.assertIsNotNone(cached_task)
self.assertEqual(cached_task.identifier, self._GUID1)
# TODO: fix test
# self.assertEqual(cached_task.name, self._NAME1)
cached_task = cached_tasks[1]
self.assertIsNotNone(cached_task)
self.assertEqual(cached_task.identifier, self._GUID2)
# TODO: fix test
# self.assertEqual(cached_task.name, self._NAME2)
def testCollectEmpty(self):
"""Tests the Collect function on an empty Registry."""
registry = dfwinreg_registry.WinRegistry()
test_output_writer = test_lib.TestOutputWriter()
collector_object = task_cache.TaskCacheCollector(
output_writer=test_output_writer)
result = collector_object.Collect(registry)
self.assertFalse(result)
test_output_writer.Close()
self.assertEqual(len(collector_object.cached_tasks), 0)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "73739faaed2281cc62a5a60b03942be1",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 78,
"avg_line_length": 34.574074074074076,
"alnum_prop": 0.7054097482592394,
"repo_name": "libyal/winreg-kb",
"id": "c2cc106ba774c5b8965bfac1a49f17576adf561b",
"size": "7514",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/task_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "403051"
},
{
"name": "Shell",
"bytes": "1186"
}
],
"symlink_target": ""
} |
import random
def MakeRandomVarificationCode(length = 50):
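    # Map each random number in [0, 61] onto the 62-character alphabet 0-9, A-Z, a-z.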
digits = []
for i in range(length):
digit = random.randint(0,10+26+26-1)
if digit < 10:
digits.append(str(digit))
continue
digit -= 10
if digit < 26:
digits.append(chr(digit+ord('A')))
continue
digit -= 26
digits.append(chr(digit+ord('a')))
code = ""
for ch in digits:
code += ch
return code
| {
"content_hash": "8e8a9a932b6b3e6d870004c941ece29d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 44,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.6465968586387435,
"repo_name": "bardia73/Graph",
"id": "8dcfdde78c8783b9d74707bcb3bd1063acf5522b",
"size": "382",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/random.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "207229"
},
{
"name": "JavaScript",
"bytes": "127478"
},
{
"name": "Python",
"bytes": "38261"
}
],
"symlink_target": ""
} |
import test_framework.loginit
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv messages, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
In BU we request a maximum of 256 blocks at a time with 32 from a single peer
'''
MAX_REQUESTS = 256
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [8, 16, 128, 256]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
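                # send in batches of at most 50000 entries per inv message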
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
print ("requesting " + str(len(current_invs)) + " blocks")
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
if total_requests < numBlocksToGenerate[count]:
raise AssertionError("Error, not enough blocks (%d) requested" % total_requests)
print("Round %d: success (total requests: %d)" % (count, total_requests))
self.blockReqCounts = {}
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def __init__(self):
self.num_nodes = 1
self.extra_args = [["-whitelist=127.0.0.1", "-debug=net"]]
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="Binary to test max block requests behavior")
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| {
"content_hash": "45c8ff17f503ba0d9c7c70ff1f8dbf5d",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 110,
"avg_line_length": 39.51063829787234,
"alnum_prop": 0.6149703823371029,
"repo_name": "Justaphf/BitcoinUnlimited",
"id": "56b5cbc8881a70e0fb269f01d6ec6688b2cbbe8b",
"size": "3928",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "qa/rpc-tests/maxblocksinflight.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "Batchfile",
"bytes": "30639"
},
{
"name": "C",
"bytes": "784243"
},
{
"name": "C++",
"bytes": "7177983"
},
{
"name": "CMake",
"bytes": "4434"
},
{
"name": "GDB",
"bytes": "455"
},
{
"name": "HTML",
"bytes": "20970"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "257068"
},
{
"name": "Makefile",
"bytes": "120981"
},
{
"name": "Objective-C",
"bytes": "92442"
},
{
"name": "Objective-C++",
"bytes": "7214"
},
{
"name": "Python",
"bytes": "1557435"
},
{
"name": "QMake",
"bytes": "2067"
},
{
"name": "Roff",
"bytes": "3821"
},
{
"name": "Shell",
"bytes": "86191"
}
],
"symlink_target": ""
} |
# toontown.quest.QuestPoster
from panda3d.core import NodePath, Point3, TextNode, Vec3, Vec4
from direct.gui.DirectGui import *
import Quests
from toontown.toon import NPCToons
from toontown.toon import ToonHead
from toontown.toon import ToonDNA
from toontown.suit import SuitDNA
from toontown.suit import Suit
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import string, types, random
from toontown.toon import LaffMeter
from toontown.toonbase import ToontownBattleGlobals
from toontown.toonbase.ToontownBattleGlobals import AvPropsNew
from direct.directnotify import DirectNotifyGlobal
from toontown.toontowngui import TTDialog
from otp.otpbase import OTPLocalizer
from toontown.building import DistributedBuildingQueryMgr
IMAGE_SCALE_LARGE = 0.2
IMAGE_SCALE_SMALL = 0.15
POSTER_WIDTH = 0.7
TEXT_SCALE = TTLocalizer.QPtextScale
TEXT_WORDWRAP = TTLocalizer.QPtextWordwrap
class QuestPoster(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('QuestPoster')
colors = {'white': (1,
1,
1,
1),
'blue': (0.45,
0.45,
0.8,
1),
'lightBlue': (0.42,
0.671,
1.0,
1.0),
'green': (0.45,
0.8,
0.45,
1),
'lightGreen': (0.784,
1,
0.863,
1),
'red': (0.8,
0.45,
0.45,
1),
'rewardRed': (0.8,
0.3,
0.3,
1),
'brightRed': (1.0,
0.16,
0.16,
1.0),
'brown': (0.52,
0.42,
0.22,
1)}
normalTextColor = (0.3,
0.25,
0.2,
1)
confirmDeleteButtonEvent = 'confirmDeleteButtonEvent'
def __init__(self, parent = aspect2d, **kw):
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
circleModel = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_nameShop').find('**/tt_t_gui_mat_namePanelCircle')
questCard = bookModel.find('**/questCard')
optiondefs = (('relief', None, None),
('image', questCard, None),
('image_scale', (0.8, 1.0, 0.58), None),
('state', DGG.NORMAL, None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(QuestPoster)
self._deleteCallback = None
self.questFrame = DirectFrame(parent=self, relief=None)
self.headline = DirectLabel(parent=self.questFrame, relief=None, text='', text_font=ToontownGlobals.getMinnieFont(), text_fg=self.normalTextColor, text_scale=0.05, text_align=TextNode.ACenter, text_wordwrap=12.0, textMayChange=1, pos=(0, 0, 0.23))
self.questInfo = DirectLabel(parent=self.questFrame, relief=None, text='', text_fg=self.normalTextColor, text_scale=TEXT_SCALE, text_align=TextNode.ACenter, text_wordwrap=TEXT_WORDWRAP, textMayChange=1, pos=(0, 0, -0.0625))
self.rewardText = DirectLabel(parent=self.questFrame, relief=None, text='', text_fg=self.colors['rewardRed'], text_scale=0.0425, text_align=TextNode.ALeft, text_wordwrap=17.0, textMayChange=1, pos=(-0.36, 0, -0.26))
self.rewardText.hide()
self.lPictureFrame = DirectFrame(parent=self.questFrame, relief=None, image=bookModel.find('**/questPictureFrame'), image_scale=IMAGE_SCALE_SMALL, text='', text_pos=(0, -0.11), text_fg=self.normalTextColor, text_scale=TEXT_SCALE, text_align=TextNode.ACenter, text_wordwrap=11.0, textMayChange=1)
self.lPictureFrame.hide()
self.rPictureFrame = DirectFrame(parent=self.questFrame, relief=None, image=bookModel.find('**/questPictureFrame'), image_scale=IMAGE_SCALE_SMALL, text='', text_pos=(0, -0.11), text_fg=self.normalTextColor, text_scale=TEXT_SCALE, text_align=TextNode.ACenter, text_wordwrap=11.0, textMayChange=1, pos=(0.18, 0, 0.13))
self.rPictureFrame.hide()
self.lQuestIcon = DirectFrame(parent=self.lPictureFrame, relief=None, text=' ', text_font=ToontownGlobals.getSuitFont(), text_pos=(0, -0.03), text_fg=self.normalTextColor, text_scale=0.13, text_align=TextNode.ACenter, text_wordwrap=13.0, textMayChange=1)
self.rQuestIcon = DirectFrame(parent=self.rPictureFrame, relief=None, text=' ', text_font=ToontownGlobals.getSuitFont(), text_pos=(0, -0.03), text_fg=self.normalTextColor, text_scale=0.13, text_align=TextNode.ACenter, text_wordwrap=13.0, textMayChange=1)
self.auxText = DirectLabel(parent=self.questFrame, relief=None, text='', text_scale=TTLocalizer.QPauxText, text_fg=self.normalTextColor, text_align=TextNode.ACenter, textMayChange=1)
self.auxText.hide()
self.questProgress = DirectWaitBar(parent=self.questFrame, relief=DGG.SUNKEN, frameSize=(-0.95, 0.95, -0.1, 0.12), borderWidth=(0.025, 0.025), scale=0.2, frameColor=(0.945, 0.875, 0.706, 1.0), barColor=(0.5, 0.7, 0.5, 1), text='0/0', text_scale=0.19, text_fg=(0.05, 0.14, 0.4, 1), text_align=TextNode.ACenter, text_pos=(0, -0.04), pos=(0, 0, -0.195))
self.questProgress.hide()
self.funQuest = DirectLabel(parent=self.questFrame, relief=None, text=TTLocalizer.QuestPosterFun, text_fg=(0.0, 0.44, 1.0, 1.0), text_shadow=(0, 0, 0, 1), pos=(-0.28, 0, 0.2), scale=0.03)
self.funQuest.setR(-30)
self.funQuest.hide()
self.teleportButton = DirectButton(parent=self.questFrame, relief=None, image=circleModel, text=TTLocalizer.TeleportButton, text_scale=0.035, text_pos=(-0.0025, -0.015), pos=(0.175, 0, 0.125), scale=0.75)
self.teleportButton.hide()
self.laffMeter = None
self.dialog = None
return
def destroy(self):
self._deleteGeoms()
self.destroyDialog()
DirectFrame.destroy(self)
def destroyDialog(self, extra = None):
if self.dialog:
self.dialog.destroy()
self.dialog = None
base.cr.playGame.getPlace().setState('walk')
return
def _deleteGeoms(self):
for icon in (self.lQuestIcon, self.rQuestIcon):
geom = icon['geom']
if geom:
if hasattr(geom, 'delete'):
geom.delete()
def mouseEnterPoster(self, event):
self.reparentTo(self.getParent())
sc = Vec3(self.initImageScale)
sc.setZ(sc[2] + 0.07)
self['image_scale'] = sc
self.questFrame.setZ(0.03)
self.headline.setZ(0.26)
self.lPictureFrame.setZ(0.16)
self.rPictureFrame.setZ(0.16)
self.questInfo.setZ(-0.0325)
self.questProgress.setZ(-0.165)
self.auxText.setZ(0.15)
self.rewardText.setZ(-0.23)
self.funQuest.setZ(0.23)
self.rewardText.show()
def mouseExitPoster(self, event):
self['image_scale'] = self.initImageScale
self.questFrame.setZ(0)
self.headline.setZ(0.23)
self.lPictureFrame.setZ(0.13)
self.rPictureFrame.setZ(0.13)
self.questInfo.setZ(-0.0625)
self.questProgress.setZ(-0.195)
self.auxText.setZ(0.12)
self.rewardText.setZ(-0.26)
self.funQuest.setZ(0.2)
self.rewardText.hide()
def createNpcToonHead(self, toNpcId):
npcInfo = NPCToons.NPCToonDict[toNpcId]
dnaList = npcInfo[2]
gender = npcInfo[3]
if dnaList == 'r':
dnaList = NPCToons.getRandomDNA(toNpcId, gender)
dna = ToonDNA.ToonDNA()
dna.newToonFromProperties(*dnaList)
head = ToonHead.ToonHead()
head.setupHead(dna, forGui=1)
self.fitGeometry(head, fFlip=1)
return head
def createLaffMeter(self, hp):
lm = LaffMeter.LaffMeter(base.localAvatar.style, hp, hp)
lm.adjustText()
return lm
def createSuitHead(self, suitName):
suitDNA = SuitDNA.SuitDNA()
suitDNA.newSuit(suitName)
suit = Suit.Suit()
suit.setDNA(suitDNA)
headParts = suit.getHeadParts()
head = hidden.attachNewNode('head')
for part in headParts:
copyPart = part.copyTo(head)
copyPart.setDepthTest(1)
copyPart.setDepthWrite(1)
self.fitGeometry(head, fFlip=1)
suit.delete()
suit = None
return head
def loadElevator(self, building, numFloors):
elevatorNodePath = hidden.attachNewNode('elevatorNodePath')
elevatorModel = loader.loadModel('phase_4/models/modules/elevator')
floorIndicator = [None] * 5
npc = elevatorModel.findAllMatches('**/floor_light_?;+s')
for i in xrange(npc.getNumPaths()):
np = npc.getPath(i)
floor = int(np.getName()[-1:]) - 1
floorIndicator[floor] = np
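            # grey-light the indicators for floors this building actually has; hide the rest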
if floor < numFloors:
np.setColor(Vec4(0.5, 0.5, 0.5, 1.0))
else:
np.hide()
elevatorModel.reparentTo(elevatorNodePath)
suitDoorOrigin = building.find('**/*_door_origin')
if 'tech' in building.getName():
building.find('**/*_front').setScale(1, 1, 1.01)
building.find('**/*_front').setPos(0, 0, -0.14)
suitDoorOrigin.setPos(14.3, -4, 0)
elevatorNodePath.reparentTo(suitDoorOrigin)
elevatorNodePath.setPosHpr(0, 0, 0, 0, 0, 0)
return
def teleportToShop(self, npcId):
if base.cr.playGame.getPlace().getState() not in ('walk', 'stickerBook'):
return
else:
npcZone = None
zoneId = base.localAvatar.getZoneId()
npcHood = ZoneUtil.getHoodId(zoneId)
if npcId == Quests.ToonHQ:
if not ZoneUtil.isDynamicZone(zoneId) and not ZoneUtil.isCogHQZone(zoneId):
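                    # map the current playground (hood id) to the zone of its HQ interior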
npcZone = {2000: 2520,
1000: 1507,
3000: 3508,
4000: 4504,
5000: 5502,
7000: 7511,
9000: 9505}.get(npcHood)
elif npcId == Quests.ToonTailor:
if not ZoneUtil.isDynamicZone(zoneId) and not ZoneUtil.isCogHQZone(zoneId):
npcZone = {2000: 2521,
1000: 1508,
3000: 3509,
4000: 4506,
5000: 5503,
7000: 7510,
9000: 9506}.get(npcHood)
else:
npcZone = NPCToons.getNPCZone(npcId)
if npcZone and npcZone != -1:
npcHood = ZoneUtil.getHoodId(npcZone)
self.destroyDialog()
base.cr.playGame.getPlace().setState('stopped')
if not npcZone or npcZone == -1:
self.dialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=TTLocalizer.TeleportButtonUnavailable, command=self.destroyDialog)
return
cost = ToontownGlobals.getTeleportButtonCost(npcHood)
if base.localAvatar.getBankMoney() < cost:
self.dialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=TTLocalizer.TeleportButtonNoMoney % cost, command=self.destroyDialog)
else:
self.dialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.TeleportButtonConfirm % cost, command=lambda value: self.teleportToShopConfirm(npcZone, cost, value))
self.dialog.show()
return
def teleportToShopConfirm(self, npcZone, cost, value):
self.destroyDialog()
if value > 0:
base.cr.buildingQueryMgr.d_isSuit([npcZone], lambda response: self.teleportToShopCallback(npcZone, cost, response))
def teleportToShopCallback(self, npcZone, cost, response):
if npcZone in dict(response).keys():
base.cr.playGame.getPlace().setState('stopped')
self.dialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=TTLocalizer.TeleportButtonTakenOver, command=self.destroyDialog)
self.dialog.show()
return
base.localAvatar.takeMoney(cost, True)
base.cr.playGame.getPlace().requestTeleport(ZoneUtil.getHoodId(npcZone), npcZone, base.localAvatar.defaultShard, -1)
def fitGeometry(self, geom, fFlip = 0, dimension = 0.8):
p1 = Point3()
p2 = Point3()
geom.calcTightBounds(p1, p2)
if fFlip:
t = p1[0]
p1.setX(-p2[0])
p2.setX(-t)
d = p2 - p1
biggest = max(d[0], d[2])
s = dimension / biggest
mid = (p1 + d / 2.0) * s
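        # Reparent the children under a transform that recenters the geometry at the
        # origin (offset one unit along Y), turns it 180 degrees, and applies the scale.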
geomXform = hidden.attachNewNode('geomXform')
for child in geom.getChildren():
child.reparentTo(geomXform)
geomXform.setPosHprScale(-mid[0], -mid[1] + 1, -mid[2], 180, 0, 0, s, s, s)
geomXform.reparentTo(geom)
def clear(self):
self['image_color'] = Vec4(*self.colors['white'])
self.headline['text'] = ''
self.headline['text_fg'] = self.normalTextColor
self.questInfo['text'] = ''
self.questInfo['text_fg'] = self.normalTextColor
self.rewardText['text'] = ''
self.auxText['text'] = ''
self.auxText['text_fg'] = self.normalTextColor
self.funQuest.hide()
self.lPictureFrame.hide()
self.rPictureFrame.hide()
self.questProgress.hide()
self.teleportButton.hide()
self.destroyDialog()
if hasattr(self, 'chooseButton'):
self.chooseButton.destroy()
del self.chooseButton
if hasattr(self, 'deleteButton'):
self.deleteButton.destroy()
del self.deleteButton
self.ignore(self.confirmDeleteButtonEvent)
if hasattr(self, 'confirmDeleteButton'):
self.confirmDeleteButton.cleanup()
del self.confirmDeleteButton
if self.laffMeter != None:
self.laffMeter.reparentTo(hidden)
self.laffMeter.destroy()
self.laffMeter = None
return
def showChoicePoster(self, questId, fromNpcId, toNpcId, rewardId, callback):
self.update((questId,
fromNpcId,
toNpcId,
rewardId,
0))
quest = Quests.getQuest(questId)
self.rewardText.show()
self.rewardText.setZ(-0.205)
self.questProgress.hide()
if not hasattr(self, 'chooseButton'):
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.chooseButton = DirectButton(parent=self.questFrame, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(0.7, 1, 1), text=TTLocalizer.QuestPageChoose, text_scale=0.06, text_pos=(0, -0.02), pos=(0.285, 0, 0.245), scale=0.65)
guiButton.removeNode()
npcZone = NPCToons.getNPCZone(toNpcId)
hoodId = ZoneUtil.getHoodId(npcZone)
self.chooseButton['command'] = callback
self.chooseButton['extraArgs'] = [questId]
self.unbind(DGG.WITHIN)
self.unbind(DGG.WITHOUT)
return
def update(self, questDesc):
questId, fromNpcId, toNpcId, rewardId, toonProgress = questDesc
quest = Quests.getQuest(questId)
self.teleportButton['command'] = self.teleportToShop
self.teleportButton['extraArgs'] = [toNpcId]
if quest == None:
self.notify.warning('Tried to display poster for unknown quest %s' % questId)
return
else:
if rewardId == Quests.NA:
finalReward = Quests.getFinalRewardId(questId, fAll=1)
transformedReward = Quests.transformReward(finalReward, base.localAvatar)
reward = Quests.getReward(transformedReward)
else:
reward = Quests.getReward(rewardId)
if reward and questId not in Quests.NoRewardTierZeroQuests:
rewardString = reward.getPosterString()
else:
rewardString = ''
self.rewardText['text'] = rewardString
self.fitLabel(self.rewardText)
if Quests.isQuestJustForFun(questId, rewardId):
self.funQuest.show()
else:
self.funQuest.hide()
if self._deleteCallback:
self.showDeleteButton(questDesc)
else:
self.hideDeleteButton()
fComplete = quest.getCompletionStatus(base.localAvatar, questDesc) == Quests.COMPLETE
if toNpcId == Quests.ToonHQ:
toNpcName = TTLocalizer.QuestPosterHQOfficer
toNpcBuildingName = TTLocalizer.QuestPosterHQBuildingName
toNpcStreetName = TTLocalizer.QuestPosterHQStreetName
toNpcLocationName = TTLocalizer.QuestPosterHQLocationName
elif toNpcId == Quests.ToonTailor:
toNpcName = TTLocalizer.QuestPosterTailor
toNpcBuildingName = TTLocalizer.QuestPosterTailorBuildingName
toNpcStreetName = TTLocalizer.QuestPosterTailorStreetName
toNpcLocationName = TTLocalizer.QuestPosterTailorLocationName
else:
toNpcName = NPCToons.getNPCName(toNpcId)
toNpcZone = NPCToons.getNPCZone(toNpcId)
toNpcHoodId = ZoneUtil.getHoodId(toNpcZone)
toNpcLocationName = base.cr.hoodMgr.getFullnameFromId(toNpcHoodId)
toNpcBuildingName = NPCToons.getBuildingTitle(toNpcZone)
toNpcBranchId = ZoneUtil.getBranchZone(toNpcZone)
toNpcStreetName = ZoneUtil.getStreetName(toNpcBranchId)
lPos = Vec3(0, 0, 0.13)
lIconGeom = None
lIconGeomScale = 1
rIconGeom = None
rIconGeomScale = 1
infoText = ''
infoZ = TTLocalizer.QPinfoZ
auxText = None
auxTextPos = Vec3(0, 0, 0.12)
headlineString = quest.getHeadlineString()
objectiveStrings = quest.getObjectiveStrings()
captions = map(string.capwords, quest.getObjectiveStrings())
imageColor = Vec4(*self.colors['white'])
self.teleportButton.hide()
if base.localAvatar.tutorialAck and (fComplete or quest.getType() in (Quests.DeliverGagQuest,
Quests.DeliverItemQuest,
Quests.VisitQuest,
Quests.TrackChoiceQuest)):
self.teleportButton.show()
self.teleportButton.setPos(0.3, 0, -0.15)
if isinstance(quest, Quests.TexturedQuest) and quest.hasFrame():
frame = quest.getFrame()
frameBgColor = frame[1]
lIconGeom = frame[0]
lIconGeomScale = 0.13
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.DeliverGagQuest or quest.getType() == Quests.DeliverItemQuest:
frameBgColor = 'red'
if quest.getType() == Quests.DeliverGagQuest:
invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
track, item = quest.getGagType()
lIconGeom = invModel.find('**/' + AvPropsNew[track][item])
invModel.removeNode()
else:
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/package')
lIconGeomScale = 0.12
bookModel.removeNode()
if not fComplete:
captions.append(toNpcName)
auxText = TTLocalizer.QuestPosterAuxTo
auxTextPos.setZ(0.12)
lPos.setX(-0.18)
infoText = TTLocalizer.QuestPageDestination % (toNpcBuildingName, toNpcStreetName, toNpcLocationName)
rIconGeom = self.createNpcToonHead(toNpcId)
rIconGeomScale = IMAGE_SCALE_SMALL
elif quest.getType() == Quests.RecoverItemQuest:
frameBgColor = 'green'
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/package')
lIconGeomScale = 0.12
bookModel.removeNode()
if not fComplete:
rIconGeomScale = IMAGE_SCALE_SMALL
holder = quest.getHolder()
holderType = quest.getHolderType()
if holder == Quests.Any:
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
rIconGeom = cogIcons.find('**/cog')
cogIcons.removeNode()
lPos.setX(-0.18)
auxText = TTLocalizer.QuestPosterAuxFrom
elif holder == Quests.AnyFish:
headlineString = TTLocalizer.QuestPosterFishing
auxText = TTLocalizer.QuestPosterAuxFor
auxTextPos.setX(-0.18)
captions = captions[:1]
else:
if holderType == 'track':
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
if holder in SuitDNA.suitDeptModelPaths:
icon = cogIcons.find(SuitDNA.suitDeptModelPaths[holder])
rIconGeom = icon.copyTo(hidden)
rIconGeom.setColor(Suit.Suit.medallionColors[holder])
rIconGeomScale = 0.12
cogIcons.removeNode()
elif holderType == 'level':
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
rIconGeom = cogIcons.find('**/cog')
rIconGeomScale = IMAGE_SCALE_SMALL
cogIcons.removeNode()
else:
rIconGeom = self.createSuitHead(holder)
lPos.setX(-0.18)
auxText = TTLocalizer.QuestPosterAuxFrom
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.VisitQuest:
frameBgColor = 'brown'
captions[0] = '%s' % toNpcName
lIconGeom = self.createNpcToonHead(toNpcId)
lIconGeomScale = IMAGE_SCALE_SMALL
if not fComplete:
infoText = TTLocalizer.QuestPageDestination % (toNpcBuildingName, toNpcStreetName, toNpcLocationName)
elif quest.getType() == Quests.TrackChoiceQuest:
frameBgColor = 'green'
invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
track = random.randint(0, ToontownBattleGlobals.MAX_TRACK)
lIconGeom = invModel.find('**/' + AvPropsNew[track][1])
infoZ = -0.0225
if not fComplete:
infoText = TTLocalizer.QuestPageNameAndDestination % (toNpcName,
toNpcBuildingName,
toNpcStreetName,
toNpcLocationName)
invModel.removeNode()
elif quest.getType() == Quests.BuildingQuest:
frameBgColor = 'blue'
track = quest.getBuildingTrack()
numFloors = quest.getNumFloors()
if track == 'c':
lIconGeom = loader.loadModel('phase_4/models/modules/suit_landmark_corp')
elif track == 'l':
lIconGeom = loader.loadModel('phase_4/models/modules/suit_landmark_legal')
elif track == 'm':
lIconGeom = loader.loadModel('phase_4/models/modules/suit_landmark_money')
elif track == 's':
lIconGeom = loader.loadModel('phase_4/models/modules/suit_landmark_sales')
elif track == 't':
lIconGeom = loader.loadModel('phase_4/models/modules/suit_landmark_tech')
else:
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/COG_building')
bookModel.removeNode()
if lIconGeom and track != Quests.Any:
self.loadElevator(lIconGeom, numFloors)
lIconGeom.setH(180)
self.fitGeometry(lIconGeom, fFlip=0)
lIconGeomScale = IMAGE_SCALE_SMALL
else:
lIconGeomScale = 0.13
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.FactoryQuest:
frameBgColor = 'blue'
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/factoryIcon2')
bookModel.removeNode()
lIconGeomScale = 0.13
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.MintQuest:
frameBgColor = 'blue'
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/CashBotMint')
bookModel.removeNode()
lIconGeomScale = 0.13
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.CogPartQuest:
frameBgColor = 'green'
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/CogArmIcon2')
bookModel.removeNode()
lIconGeomScale = 0.13
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.ForemanQuest or quest.getType() == Quests.SupervisorQuest:
frameBgColor = 'blue'
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/skelecog5')
bookModel.removeNode()
lIconGeomScale = 0.13
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.RescueQuest:
frameBgColor = 'blue'
lIconGeom = self.createNpcToonHead(random.choice(NPCToons.HQnpcFriends.keys()))
lIconGeomScale = 0.13
infoText = quest.getLocationName().strip()
elif quest.getType() == Quests.FriendQuest:
frameBgColor = 'brown'
gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
lIconGeom = gui.find('**/FriendsBox_Closed')
lIconGeomScale = 0.45
gui.removeNode()
infoText = TTLocalizer.QuestPosterAnywhere
elif quest.getType() == Quests.TrolleyQuest:
frameBgColor = 'lightBlue'
gui = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = gui.find('**/trolley')
lIconGeomScale = 0.13
gui.removeNode()
infoText = TTLocalizer.QuestPosterPlayground
elif quest.getType() == Quests.MailboxQuest:
frameBgColor = 'lightBlue'
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = bookModel.find('**/package')
lIconGeomScale = 0.12
bookModel.removeNode()
infoText = TTLocalizer.QuestPosterAtHome
elif quest.getType() == Quests.PhoneQuest:
frameBgColor = 'lightBlue'
lIconGeom = self.createNpcToonHead(7028)
lIconGeomScale = IMAGE_SCALE_SMALL
infoText = TTLocalizer.QuestPosterOnPhone
else:
frameBgColor = 'blue'
if quest.getType() == Quests.CogTrackQuest:
dept = quest.getCogTrack()
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
lIconGeomScale = 0.13
if dept in SuitDNA.suitDeptModelPaths:
icon = cogIcons.find(SuitDNA.suitDeptModelPaths[dept])
lIconGeom = icon.copyTo(hidden)
lIconGeom.setColor(Suit.Suit.medallionColors[dept])
cogIcons.removeNode()
elif quest.getType() == Quests.CogQuest:
if quest.getCogType() != Quests.Any:
lIconGeom = self.createSuitHead(quest.getCogType())
lIconGeomScale = IMAGE_SCALE_SMALL
else:
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
lIconGeom = cogIcons.find('**/cog')
lIconGeomScale = IMAGE_SCALE_SMALL
cogIcons.removeNode()
elif quest.getType() == Quests.CogLevelQuest:
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
lIconGeom = cogIcons.find('**/cog')
lIconGeomScale = IMAGE_SCALE_SMALL
cogIcons.removeNode()
elif quest.getType() == Quests.SkelecogTrackQuest:
dept = quest.getCogTrack()
cogIcons = loader.loadModel('phase_3/models/gui/cog_icons')
lIconGeomScale = 0.13
if dept in SuitDNA.suitDeptModelPaths:
icon = cogIcons.find(SuitDNA.suitDeptModelPaths[dept])
lIconGeom = icon.copyTo(hidden)
lIconGeom.setColor(Suit.Suit.medallionColors[dept])
cogIcons.removeNode()
elif quest.getType() == Quests.SkelecogQuest:
cogIcons = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = cogIcons.find('**/skelecog5')
lIconGeomScale = IMAGE_SCALE_SMALL
cogIcons.removeNode()
elif quest.getType() == Quests.SkelecogLevelQuest:
cogIcons = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = cogIcons.find('**/skelecog5')
lIconGeomScale = IMAGE_SCALE_SMALL
cogIcons.removeNode()
elif quest.getType() == Quests.SkeleReviveQuest:
cogIcons = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
lIconGeom = cogIcons.find('**/skelecog5')
lIconGeomScale = IMAGE_SCALE_SMALL
cogIcons.removeNode()
if not fComplete:
infoText = quest.getLocationName()
if infoText == '':
infoText = TTLocalizer.QuestPosterAnywhere
if fComplete:
textColor = (0, 0.3, 0, 1)
imageColor = Vec4(*self.colors['lightGreen'])
lPos.setX(-0.18)
rIconGeom = self.createNpcToonHead(toNpcId)
rIconGeomScale = IMAGE_SCALE_SMALL
captions = captions[:1]
captions.append(toNpcName)
auxText = TTLocalizer.QuestPosterAuxReturnTo
headlineString = TTLocalizer.QuestPosterComplete
infoText = TTLocalizer.QuestPageDestination % (toNpcBuildingName, toNpcStreetName, toNpcLocationName)
if self.laffMeter != None:
self.laffMeter.reparentTo(hidden)
self.laffMeter.destroy()
self.laffMeter = None
else:
textColor = self.normalTextColor
self.show()
self['image_color'] = imageColor
self.headline['text_fg'] = textColor
self.headline['text'] = headlineString
self.lPictureFrame.show()
self.lPictureFrame.setPos(lPos)
self.lPictureFrame['text_scale'] = TEXT_SCALE
if lPos[0] != 0:
self.lPictureFrame['text_scale'] = 0.0325
self.lPictureFrame['text'] = captions[0]
self.lPictureFrame['image_color'] = Vec4(*self.colors[frameBgColor])
if len(captions) > 1:
self.rPictureFrame.show()
self.rPictureFrame['text'] = captions[1]
self.rPictureFrame['text_scale'] = 0.0325
self.rPictureFrame['image_color'] = Vec4(*self.colors[frameBgColor])
else:
self.rPictureFrame.hide()
self._deleteGeoms()
self.lQuestIcon['geom'] = lIconGeom
self.lQuestIcon['geom_pos'] = (0, 10, 0)
if lIconGeom:
try:
self.lQuestIcon['geom_scale'] = lIconGeomScale
except:
pass
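            # Note: lIconGeomPos / lIconGeomHpr do not appear to be set anywhere in this
            # method, so the two guarded assignments below fall through to their excepts.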
try:
self.lQuestIcon['geom_pos'] = Point3(lIconGeomPos[0], lIconGeomPos[1], lIconGeomPos[2])
except:
pass
try:
self.lQuestIcon['geom_hpr'] = Point3(lIconGeomHpr[0], lIconGeomHpr[1], lIconGeomHpr[2])
except:
pass
if self.laffMeter != None:
self.laffMeter.reparentTo(self.lQuestIcon)
self.rQuestIcon['geom'] = rIconGeom
self.rQuestIcon['geom_pos'] = (0, 10, 0)
if rIconGeom:
self.rQuestIcon['geom_scale'] = rIconGeomScale
if auxText:
self.auxText.show()
self.auxText['text'] = auxText
self.auxText.setPos(auxTextPos)
else:
self.auxText.hide()
self.bind(DGG.WITHIN, self.mouseEnterPoster)
self.bind(DGG.WITHOUT, self.mouseExitPoster)
numQuestItems = quest.getNumQuestItems()
if fComplete or numQuestItems <= 1:
self.questProgress.hide()
else:
self.questProgress.show()
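            # '&' binds looser than '-', so this keeps only the low 16 bits of toonProgress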
self.questProgress['value'] = toonProgress & pow(2, 16) - 1
self.questProgress['range'] = numQuestItems
self.questProgress['text'] = quest.getProgressString(base.localAvatar, questDesc)
self.questInfo['text'] = infoText
self.questInfo.setZ(infoZ)
self.fitLabel(self.questInfo)
return
def unbindMouseEnter(self):
self.unbind(DGG.WITHIN)
def showDeleteButton(self, questDesc):
self.hideDeleteButton()
self.deleteButton = DirectButton(parent=self.questFrame, image=Preloaded['trashcan'], text=('', TTLocalizer.QuestPosterDeleteBtn, TTLocalizer.QuestPosterDeleteBtn), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.18, text_pos=(0, -0.12), relief=None, pos=(0.3, 0, 0.145), scale=0.3, command=self.onPressedDeleteButton, extraArgs=[questDesc])
return
def hideDeleteButton(self):
if hasattr(self, 'deleteButton'):
self.deleteButton.destroy()
del self.deleteButton
def setDeleteCallback(self, callback):
self._deleteCallback = callback
def onPressedDeleteButton(self, questDesc):
self.deleteButton['state'] = DGG.DISABLED
self.accept(self.confirmDeleteButtonEvent, self.confirmedDeleteButton)
self.confirmDeleteButton = TTDialog.TTGlobalDialog(doneEvent=self.confirmDeleteButtonEvent, message=TTLocalizer.QuestPosterConfirmDelete, style=TTDialog.YesNo, okButtonText=TTLocalizer.QuestPosterDialogYes, cancelButtonText=TTLocalizer.QuestPosterDialogNo)
self.confirmDeleteButton.quest = questDesc
self.confirmDeleteButton.doneStatus = ''
self.confirmDeleteButton.show()
def confirmedDeleteButton(self):
questDesc = self.confirmDeleteButton.quest
self.ignore(self.confirmDeleteButtonEvent)
if self.confirmDeleteButton.doneStatus == 'ok':
if self._deleteCallback:
self._deleteCallback(questDesc)
else:
self.deleteButton['state'] = DGG.NORMAL
self.confirmDeleteButton.cleanup()
del self.confirmDeleteButton
def fitLabel(self, label, lineNo = 0):
text = label['text']
label['text_scale'] = TEXT_SCALE
label['text_wordwrap'] = TEXT_WORDWRAP
if len(text) > 0:
lines = text.split('\n')
lineWidth = label.component('text0').textNode.calcWidth(lines[lineNo])
if lineWidth > 0:
textScale = POSTER_WIDTH / lineWidth
label['text_scale'] = min(TEXT_SCALE, textScale)
label['text_wordwrap'] = max(TEXT_WORDWRAP, lineWidth + 0.05) | {
"content_hash": "6d2f9a87e634a9d727ac002e96ec6181",
"timestamp": "",
"source": "github",
"line_count": 779,
"max_line_length": 362,
"avg_line_length": 49.716302952503206,
"alnum_prop": 0.5590126261974231,
"repo_name": "DedMemez/ODS-August-2017",
"id": "ae45e4df118e925fd4404d83e7de8ef7236037c5",
"size": "38729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quest/QuestPoster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
from datetime import datetime
import glob
import json
import logging
import os
import sys
# The time format used in the JSON
time_fmt = "%Y-%m-%d %H:%M:%S"
# The Unix epoch
epoch = datetime.utcfromtimestamp(0)
# Differentiate between start and stop events
START = 1
STOP = 2
logging.basicConfig(level=logging.INFO,
format='%(levelname)-9s%(message)s',)
def abort(msg):
logging.critical("abort: " + msg)
exit(1)
def get_args():
""" Process input arguments """
if len(sys.argv) != 3:
abort("usage: <root directory> <output file>")
root_dir = sys.argv[1]
output_file = sys.argv[2]
return (root_dir, output_file)
def get_rundirs(root_dir):
""" Fill in rundirs, omitting miscellaneous files """
rundirs = []
entries = glob.glob(root_dir+"/*")
for entry in entries:
if entry.endswith(".bak"):
continue
if os.path.isdir(entry):
rundirs.append(entry)
return rundirs
def date2secs(date_string):
"""Convert date string from JSON to floating-point seconds"""
tokens = date_string.split(".")
prefix = tokens[0]
suffix = tokens[1]
d = datetime.strptime(prefix, time_fmt)
secs = (d - epoch).total_seconds()
microsecs = int(suffix)
return secs + microsecs/1000000.0
def get_jsons(rundir):
""" Find the JSON files for the given rundir """
output = find_output(rundir)
# restrict to run.*.json files
json_files = glob.glob(output+"/run.*.json")
results = []
for json_file in json_files:
size = os.path.getsize(json_file)
if size == 0:
logging.warning("file size is 0: %s", json_file)
continue
with open(json_file, "r") as fp:
try:
J = json.load(fp)
except ValueError as e:
abort("Error loading: %s\n%s" % (json_file,str(e)))
results.append(J)
return results
# subdir may be "output" or "save", depending on which script wrote it
# We use this global to remember which one worked last time
subdir = "output"
def find_output(rundir):
""" Helper for get_jsons(): look for directories output|save """
global subdir
output = rundir+"/"+subdir
if not os.path.isdir(output):
if subdir == "save":
subdir = "output"
elif subdir == "output":
subdir = "save"
else:
abort("invalid subdir="+subdir)
output = rundir+"/"+subdir
if not os.path.isdir(output):
abort("could not find either:\n" +
"\t " + rundir+"/output" + " or\n" +
"\t " + rundir+"/save")
return output
def file2tokens(filename):
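    """Return the non-blank lines of filename with '#' comments and whitespace stripped."""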
result = []
with open(filename, "r") as fp:
for line in fp:
# Strip comments
splits = line.split("#")
line = splits[0]
# Ignore whitespace
line = line.strip()
# Omit blank lines
if len(line) == 0:
continue
result.append(line)
return result
| {
"content_hash": "2fb309b6716d89e2077ac982936b3d74",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 70,
"avg_line_length": 28.28440366972477,
"alnum_prop": 0.5728186831008758,
"repo_name": "ECP-CANDLE/Database",
"id": "569b2710a4ca4a7ee9010442d78028b9f82c3202",
"size": "3151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plots/plottools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18417"
},
{
"name": "Shell",
"bytes": "922"
}
],
"symlink_target": ""
} |
"""
SET string and integer scalars (SNMPv2c)
++++++++++++++++++++++++++++++++++++++++
Perform SNMP SET operation with the following options:
* with SNMPv2c, community 'public'
* over IPv4/UDP
* to an Agent at demo.snmplabs.com:161
* for OIDs in string form and values in form of pyasn1 objects
This script performs similar to the following Net-SNMP command:
| $ snmpset -v2c -c public -ObentU demo.snmplabs.com 1.3.6.1.2.1.1.9.1.3.1 s 'New description' 1.3.6.1.2.1.1.9.1.4.1 t 12
"""#
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp
from pyasn1.codec.ber import encoder, decoder
from pysnmp.proto import api
from time import time
# Protocol version to use
# pMod = api.protoModules[api.protoVersion1]
pMod = api.PROTOCOL_MODULES[api.SNMP_VERSION_2C]
# Build PDU
reqPDU = pMod.SetRequestPDU()
pMod.apiPDU.setDefaults(reqPDU)
pMod.apiPDU.setVarBinds(
reqPDU,
# A list of Var-Binds to SET
(('1.3.6.1.2.1.1.9.1.3.1', pMod.OctetString('New system description')),
('1.3.6.1.2.1.1.9.1.4.1', pMod.TimeTicks(12)))
)
# Build message
reqMsg = pMod.Message()
pMod.apiMessage.setDefaults(reqMsg)
pMod.apiMessage.setCommunity(reqMsg, 'public')
pMod.apiMessage.setPDU(reqMsg, reqPDU)
startedAt = time()
def cbTimerFun(timeNow):
if timeNow - startedAt > 3:
raise Exception("Request timed out")
# noinspection PyUnusedLocal,PyUnusedLocal
def cbRecvFun(transportDispatcher, transportDomain, transportAddress,
wholeMsg, reqPDU=reqPDU):
while wholeMsg:
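        # a single datagram may carry several BER-encoded messages; decode until exhausted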
rspMsg, wholeMsg = decoder.decode(wholeMsg, asn1Spec=pMod.Message())
rspPDU = pMod.apiMessage.getPDU(rspMsg)
# Match response to request
if pMod.apiPDU.getRequestID(reqPDU) == pMod.apiPDU.getRequestID(rspPDU):
# Check for SNMP errors reported
errorStatus = pMod.apiPDU.getErrorStatus(rspPDU)
if errorStatus:
print(errorStatus.prettyPrint())
else:
for oid, val in pMod.apiPDU.getVarBinds(rspPDU):
print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
transportDispatcher.jobFinished(1)
return wholeMsg
transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbRecvFun)
transportDispatcher.registerTimerCbFun(cbTimerFun)
# UDP/IPv4
transportDispatcher.registerTransport(
udp.DOMAIN_NAME, udp.UdpSocketTransport().openClientMode()
)
# Pass message to dispatcher
transportDispatcher.sendMessage(
encoder.encode(reqMsg), udp.DOMAIN_NAME, ('demo.snmplabs.com', 161)
)
transportDispatcher.jobStarted(1)
# Dispatcher will finish as job#1 counter reaches zero
transportDispatcher.runDispatcher()
transportDispatcher.closeDispatcher()
| {
"content_hash": "4f979ffcfeb3180a8d2c667b1d647e8c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 121,
"avg_line_length": 29.242105263157896,
"alnum_prop": 0.7102231821454283,
"repo_name": "etingof/pysnmp",
"id": "60fb251862c889a60ec23dfb477e4d5ceab883ae",
"size": "2778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/v1arch/asyncore/manager/cmdgen/v2c-set.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1453555"
},
{
"name": "Shell",
"bytes": "1312"
}
],
"symlink_target": ""
} |
from .Optimizer import Optimizer
from ..utils import create_param_state_as
class Nesterov(Optimizer):
"""
Implementation of "Nesterov's Accelerated Gradient" (NAG) which is explained
in further detail in
"On the importance of initialization and momentum in deep learning"
But the equation for NAG has been reshuffled by Nicolas Boulanger in
https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
for easier implementation in Theano. The updates are:
v_{e+1} = mom * v_e - lr * ∇p_e
p_{e+1} = p_e + mom * v_{e+1} - lr * ∇p_e
"""
def __init__(self, lr, momentum):
Optimizer.__init__(self, lr=lr, momentum=momentum)
def get_updates(self, params, grads, lr, momentum):
updates = []
for param, grad in zip(params, grads):
param_mom = create_param_state_as(param)
v = momentum * param_mom - lr * grad
updates.append((param_mom, v))
updates.append((param, param + momentum * v - lr * grad))
return updates
| {
"content_hash": "2f9215007800eed0695518078e4550dd",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 31.147058823529413,
"alnum_prop": 0.6241737488196412,
"repo_name": "Pandoro/DeepFried2",
"id": "4b94810bd007fbbffcf00892f3f1c9187ac48206",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DeepFried2/optimizers/Nesterov.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46707"
}
],
"symlink_target": ""
} |
"""
salt._logging
~~~~~~~~~~~~~
This is salt's new logging setup.
As the name suggests, this is considered an internal API which can change without notice,
although, as a best effort, we'll try not to break code relying on it.
The ``salt._logging`` package should be imported as soon as possible since salt tweaks
Python's logging system.
"""
from salt._logging.impl import (
DFLT_LOG_DATEFMT,
DFLT_LOG_DATEFMT_LOGFILE,
DFLT_LOG_FMT_CONSOLE,
DFLT_LOG_FMT_JID,
DFLT_LOG_FMT_LOGFILE,
LOG_COLORS,
LOG_LEVELS,
LOG_VALUES_TO_LEVELS,
SORTED_LEVEL_NAMES,
freeze_logging_options_dict,
get_console_handler,
get_extended_logging_handlers,
get_log_record_factory,
get_logfile_handler,
get_logging_level_from_string,
get_logging_options_dict,
get_lowest_log_level,
get_temp_handler,
in_mainprocess,
is_console_handler_configured,
is_extended_logging_configured,
is_logfile_handler_configured,
is_temp_handler_configured,
set_log_record_factory,
set_logging_options_dict,
set_lowest_log_level,
set_lowest_log_level_by_opts,
setup_console_handler,
setup_extended_logging,
setup_log_granular_levels,
setup_logfile_handler,
setup_logging,
setup_temp_handler,
shutdown_console_handler,
shutdown_extended_logging,
shutdown_logfile_handler,
shutdown_logging,
shutdown_temp_handler,
)
| {
"content_hash": "1a6927a6800d82f239a0a02e5fb9c233",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 93,
"avg_line_length": 28.41176470588235,
"alnum_prop": 0.6866804692891649,
"repo_name": "saltstack/salt",
"id": "43bcbac3aca9931cc50604dbb23fc5ea357de803",
"size": "1449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/_logging/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../'))
import iterplus
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.graphviz', 'sphinx.ext.intersphinx']
# 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'iterplus'
copyright = u'2011, Juho Vepsäläinen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = iterplus.__version__
# The full version, including alpha/beta/rc tags.
release = iterplus.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Iterplusdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Iterplus.tex', u'Iterplus Documentation',
u'Juho Vepsäläinen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "2bbc00d9f740a5d813fdc6ae42c3b6ce",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 80,
"avg_line_length": 32.904761904761905,
"alnum_prop": 0.7120115774240231,
"repo_name": "bebraw/iterplus",
"id": "77816d2236afdec0853ff3156647f78a9d9c1e6e",
"size": "6638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10217"
}
],
"symlink_target": ""
} |
import os
import sys
import textwrap
import warnings
from pingpp import error, util
# - Requests is the preferred HTTP library
# - Google App Engine has urlfetch
# - Use Pycurl if it's there (at least it verifies SSL certs)
# - Fall back to urllib2 with a warning if needed
try:
import urllib2
except ImportError:
pass
try:
import pycurl
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
else:
try:
# Require version 0.8.8, but don't want to depend on distutils
version = requests.__version__
major, minor, patch = [int(i) for i in version.split('.')]
except Exception:
# Probably some new-fangled version, so it should support verify
pass
else:
if (major, minor, patch) < (0, 8, 8):
sys.stderr.write(
'Warning: the Ping++ library requires that your Python '
'"requests" library be newer than version 0.8.8, but your '
'"requests" library is version %s. Ping++ will fall back to '
'an alternate HTTP library so everything should work. We '
'recommend upgrading your "requests" library. If you have any '
'questions, please contact support@pingplusplus.com. (HINT: running '
'"pip install -U requests" should upgrade your requests '
'library to the latest version.)' % (version,))
requests = None
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
def new_default_http_client(*args, **kwargs):
if urlfetch:
impl = UrlFetchClient
elif requests:
impl = RequestsClient
elif pycurl:
impl = PycurlClient
else:
impl = Urllib2Client
warnings.warn(
"Warning: the Ping++ library is falling back to urllib2/urllib "
"because neither requests nor pycurl are installed. "
"urllib2's SSL implementation doesn't verify server "
"certificates. For improved security, we suggest installing "
"requests.")
return impl(*args, **kwargs)
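# Usage sketch: pick the best available client and issue a request. The URL and
# header values below are placeholders; the call signature matches
# HTTPClient.request as defined in this module:
#
#   client = new_default_http_client(verify_ssl_certs=True)
#   body, status = client.request('get', 'https://example.com/v1/ping',
#                                 headers={'Accept': 'application/json'})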
class HTTPClient(object):
def __init__(self, verify_ssl_certs=True):
self._verify_ssl_certs = verify_ssl_certs
def request(self, method, url, headers, post_data=None):
raise NotImplementedError(
'HTTPClient subclasses must implement `request`')
class RequestsClient(HTTPClient):
name = 'requests'
def request(self, method, url, headers, post_data=None):
kwargs = {}
if self._verify_ssl_certs:
kwargs['verify'] = os.path.join(
os.path.dirname(__file__), 'data/ca-certificates.crt')
else:
kwargs['verify'] = False
try:
try:
result = requests.request(method,
url,
headers=headers,
data=post_data,
timeout=80,
**kwargs)
except TypeError, e:
raise TypeError(
'Warning: It looks like your installed version of the '
'"requests" library is not compatible with Ping++\'s '
'usage thereof. (HINT: The most likely cause is that '
'your "requests" library is out of date. You can fix '
'that by running "pip install -U requests".) The '
'underlying error was: %s' % (e,))
# This causes the content to actually be read, which could cause
            # e.g. a socket timeout. The other fetch methods are likely
            # susceptible to the same and should be updated.
content = result.content
status_code = result.status_code
except Exception, e:
# Would catch just requests.exceptions.RequestException, but can
# also raise ValueError, RuntimeError, etc.
self._handle_request_error(e)
return content, status_code
def _handle_request_error(self, e):
if isinstance(e, requests.exceptions.RequestException):
msg = ("Unexpected error communicating with Ping++. "
"If this problem persists, let us know at "
"support@pingplusplus.com.")
err = "%s: %s" % (type(e).__name__, str(e))
else:
msg = ("Unexpected error communicating with Ping++. "
"It looks like there's probably a configuration "
"issue locally. If this problem persists, let us "
"know at support@pingplusplus.com.")
err = "A %s was raised" % (type(e).__name__,)
if str(e):
err += " with error message %s" % (str(e),)
else:
err += " with no error message"
msg = textwrap.fill(msg) + "\n\n(Network error: %s)" % (err,)
raise error.APIConnectionError(msg)
class UrlFetchClient(HTTPClient):
name = 'urlfetch'
def request(self, method, url, headers, post_data=None):
try:
result = urlfetch.fetch(
url=url,
method=method,
headers=headers,
# Google App Engine doesn't let us specify our own cert bundle.
# However, that's ok because the CA bundle they use recognizes
# api.pingplusplus.com.
validate_certificate=self._verify_ssl_certs,
# GAE requests time out after 60 seconds, so make sure we leave
# some time for the application to handle a slow Ping++
deadline=55,
payload=post_data
)
except urlfetch.Error, e:
self._handle_request_error(e, url)
return result.content, result.status_code
def _handle_request_error(self, e, url):
if isinstance(e, urlfetch.InvalidURLError):
msg = ("The Ping++ library attempted to fetch an "
"invalid URL (%r). This is likely due to a bug "
"in the Ping++ Python bindings. Please let us know "
"at support@pingplusplus.com." % (url,))
elif isinstance(e, urlfetch.DownloadError):
msg = "There was a problem retrieving data from Ping++."
elif isinstance(e, urlfetch.ResponseTooLargeError):
msg = ("There was a problem receiving all of your data from "
"Ping++. This is likely due to a bug in Ping++. "
"Please let us know at support@pingplusplus.com.")
else:
msg = ("Unexpected error communicating with Ping++. If this "
"problem persists, let us know at support@pingplusplus.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
class PycurlClient(HTTPClient):
name = 'pycurl'
def request(self, method, url, headers, post_data=None):
s = util.StringIO.StringIO()
curl = pycurl.Curl()
if method == 'get':
curl.setopt(pycurl.HTTPGET, 1)
elif method == 'post':
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.POSTFIELDS, post_data)
else:
curl.setopt(pycurl.CUSTOMREQUEST, method.upper())
# pycurl doesn't like unicode URLs
curl.setopt(pycurl.URL, util.utf8(url))
curl.setopt(pycurl.WRITEFUNCTION, s.write)
curl.setopt(pycurl.NOSIGNAL, 1)
curl.setopt(pycurl.CONNECTTIMEOUT, 30)
curl.setopt(pycurl.TIMEOUT, 80)
curl.setopt(pycurl.HTTPHEADER, ['%s: %s' % (k, v)
for k, v in headers.iteritems()])
if self._verify_ssl_certs:
curl.setopt(pycurl.CAINFO, os.path.join(
os.path.dirname(__file__), 'data/ca-certificates.crt'))
else:
curl.setopt(pycurl.SSL_VERIFYHOST, False)
try:
curl.perform()
except pycurl.error, e:
self._handle_request_error(e)
rbody = s.getvalue()
rcode = curl.getinfo(pycurl.RESPONSE_CODE)
return rbody, rcode
def _handle_request_error(self, e):
if e[0] in [pycurl.E_COULDNT_CONNECT,
pycurl.E_COULDNT_RESOLVE_HOST,
pycurl.E_OPERATION_TIMEOUTED]:
msg = ("Could not connect to Ping++. Please check your "
"internet connection and try again. If this problem "
"persists, you should check Ping++'s service status at "
"https://pingplusplus.com or let us know at "
"support@pingplusplus.com.")
elif (e[0] in [pycurl.E_SSL_CACERT,
pycurl.E_SSL_PEER_CERTIFICATE]):
msg = ("Could not verify Ping++'s SSL certificate. Please make "
"sure that your network is not intercepting certificates. "
"If this problem persists, let us know at "
"support@pingplusplus.com.")
else:
msg = ("Unexpected error communicating with Ping++. If this "
"problem persists, let us know at support@pingplusplus.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + e[1] + ")"
raise error.APIConnectionError(msg)
class Urllib2Client(HTTPClient):
if sys.version_info >= (3, 0):
name = 'urllib.request'
else:
name = 'urllib2'
def request(self, method, url, headers, post_data=None):
if sys.version_info >= (3, 0) and isinstance(post_data, basestring):
post_data = post_data.encode('utf-8')
req = urllib2.Request(url, post_data, headers)
if method not in ('get', 'post'):
req.get_method = lambda: method.upper()
try:
response = urllib2.urlopen(req)
rbody = response.read()
rcode = response.code
except urllib2.HTTPError, e:
rcode = e.code
rbody = e.read()
except (urllib2.URLError, ValueError), e:
self._handle_request_error(e)
return rbody, rcode
def _handle_request_error(self, e):
msg = ("Unexpected error communicating with Ping++. "
"If this problem persists, let us know at support@pingplusplus.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
| {
"content_hash": "cd204a288b21a4a1424f10af1a531c69",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 85,
"avg_line_length": 38.20938628158845,
"alnum_prop": 0.5610355253212396,
"repo_name": "hujiaweibujidao/XingShan",
"id": "2b1bf4547de71cb3e6723d37dd87521d8b46f08e",
"size": "10584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shanshan-django/5/pingpp/http_client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "32038"
},
{
"name": "Java",
"bytes": "76363"
},
{
"name": "JavaScript",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "61693"
}
],
"symlink_target": ""
} |
import functools
from multiprocessing import Pool
import multiprocessing
import threading
from threading import Thread
import string
import time
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from sqlalchemy.orm import scoped_session
from sqlalchemy.exc import IntegrityError, ProgrammingError
from pubsub import pub
from odm2api.models import Base, TimeSeriesResultValues, TimeSeriesResults, Units, setSchema
from odm2api.ODMconnection import dbconnection
from yodatools.excelparser.sessionWorker import SessionWorker
mute_x = multiprocessing.Lock()
print_lock = threading.Lock()
def update_output_text(message):
"""
Updates the Textctrl output window on the summary page
:param message:
:return:
"""
message += '\n'
pub.sendMessage('controller.update_output_text', message=message)
def commit_tsrvs(session, tsrvs):
"""
commits TimeSeriesResultValues to database
:param session: an instance of `sqlalchemy.orm.Session`
:param tsrvs: a list of TimeSeriesResultValues
:return: None
"""
session.add_all(tsrvs)
try:
session.commit()
except (IntegrityError, ProgrammingError):
session.rollback()
for i in xrange(0, len(tsrvs)):
tsrv = tsrvs[i]
session.add(tsrv)
try:
session.commit()
except (IntegrityError, ProgrammingError) as e:
session.rollback()
mute_x.acquire()
print(e)
mute_x.release()
update_output_text('Error: %s' % e.message)
def p_target(queue, conn, thread_count):  # type: (multiprocessing.JoinableQueue, str, int) -> None
session_factory = dbconnection.createConnectionFromString(conn)
engine = session_factory.engine
setSchema(engine)
Base.metadata.create_all(engine)
scoped_session_ = session_factory.Session # type: scoped_session
while True:
args = queue.get()
if args:
# create worker threads
workers = [None] * thread_count
tsrvs_split = np.array_split(args, thread_count)
for i in range(len(tsrvs_split)):
worker = SessionWorker(scoped_session_, print_lock, mute_x, target=commit_tsrvs, args=tsrvs_split[i].tolist())
worker.daemon = True
worker.start()
workers[i] = worker
# it's probably best to wait for these threads to finish before moving on...
for w in workers:
w.join()
queue.task_done()
def start_procs(conn, processes=1, threads=1): # type: (str, int, int) -> multiprocessing.Queue
"""
Starts background processes and returns a queue
:param conn: connection string to create database connections for each process
:param processes: the number of processes to create
:param threads: the number of threads per process to create
:return: a queue object used to send work to each process
"""
q = multiprocessing.JoinableQueue()
# create processes
procs = [None] * processes
for i in range(0, processes):
p = multiprocessing.Process(target=p_target, args=(q, conn, threads), name=string.letters[i])
p.daemon = True
procs[i] = p
# start each process
for p in procs:
p.start()
return q
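# Producer-side sketch: start the worker pool, feed it batches, then wait for
# them to be committed. The connection string and the list of
# TimeSeriesResultValues objects are placeholders; only the queue protocol is
# defined in this module:
#
#   q = start_procs("sqlite:///odm2.sqlite", processes=2, threads=4)
#   q.put(time_series_result_values)  # a list; each process splits it across its threads
#   q.join()                          # block until every queued batch is committed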
| {
"content_hash": "ad23feaa12a7688a6dd2a0cc49d393a8",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 126,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6500147797812592,
"repo_name": "ODM2/YODA-Tools",
"id": "256e43e92478d832e3b342e7f800cf575accf972",
"size": "3383",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yodatools/excelparser/excelParserProcess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "287065"
}
],
"symlink_target": ""
} |
import email.utils
import functools
import imaplib
import socket
import time
from datetime import datetime, timedelta
from poplib import error_proto
from typing import List
import frappe
from frappe import _, are_emails_muted, safe_encode
from frappe.desk.form import assign_to
from frappe.email.receive import EmailServer, InboundMail, SentEmailInInboxError
from frappe.email.smtp import SMTPServer
from frappe.email.utils import get_port
from frappe.model.document import Document
from frappe.utils import cint, comma_or, cstr, parse_addr, validate_email_address
from frappe.utils.background_jobs import enqueue, get_jobs
from frappe.utils.error import raise_error_on_no_output
from frappe.utils.jinja import render_template
from frappe.utils.user import get_system_managers
OUTGOING_EMAIL_ACCOUNT_MISSING = _("Please setup default Email Account from Setup > Email > Email Account")
class SentEmailInInbox(Exception):
pass
def cache_email_account(cache_name):
def decorator_cache_email_account(func):
@functools.wraps(func)
def wrapper_cache_email_account(*args, **kwargs):
if not hasattr(frappe.local, cache_name):
setattr(frappe.local, cache_name, {})
cached_accounts = getattr(frappe.local, cache_name)
match_by = list(kwargs.values()) + ['default']
matched_accounts = list(filter(None, [cached_accounts.get(key) for key in match_by]))
if matched_accounts:
return matched_accounts[0]
matched_accounts = func(*args, **kwargs)
cached_accounts.update(matched_accounts or {})
return matched_accounts and list(matched_accounts.values())[0]
return wrapper_cache_email_account
return decorator_cache_email_account
class EmailAccount(Document):
DOCTYPE = 'Email Account'
def autoname(self):
"""Set name as `email_account_name` or make title from Email Address."""
if not self.email_account_name:
self.email_account_name = self.email_id.split("@", 1)[0]\
.replace("_", " ").replace(".", " ").replace("-", " ").title()
self.name = self.email_account_name
def validate(self):
"""Validate Email Address and check POP3/IMAP and SMTP connections is enabled."""
if self.email_id:
validate_email_address(self.email_id, True)
if self.login_id_is_different:
if not self.login_id:
frappe.throw(_("Login Id is required"))
else:
self.login_id = None
# validate the imap settings
if self.enable_incoming and self.use_imap and len(self.imap_folder) <= 0:
frappe.throw(_("You need to set one IMAP folder for {0}").format(frappe.bold(self.email_id)))
duplicate_email_account = frappe.get_all("Email Account", filters={
"email_id": self.email_id,
"name": ("!=", self.name)
})
if duplicate_email_account:
frappe.throw(_("Email ID must be unique, Email Account already exists for {0}") \
.format(frappe.bold(self.email_id)))
if frappe.local.flags.in_patch or frappe.local.flags.in_test:
return
if (not self.awaiting_password and not frappe.local.flags.in_install
and not frappe.local.flags.in_patch):
if self.password or self.smtp_server in ('127.0.0.1', 'localhost'):
if self.enable_incoming:
self.get_incoming_server()
self.no_failed = 0
if self.enable_outgoing:
self.validate_smtp_conn()
else:
if self.enable_incoming or (self.enable_outgoing and not self.no_smtp_authentication):
frappe.throw(_("Password is required or select Awaiting Password"))
if self.notify_if_unreplied:
if not self.send_notification_to:
frappe.throw(_("{0} is mandatory").format(self.meta.get_label("send_notification_to")))
for e in self.get_unreplied_notification_emails():
validate_email_address(e, True)
for folder in self.imap_folder:
if self.enable_incoming and folder.append_to:
valid_doctypes = [d[0] for d in get_append_to()]
if folder.append_to not in valid_doctypes:
frappe.throw(_("Append To can be one of {0}").format(comma_or(valid_doctypes)))
def validate_smtp_conn(self):
if not self.smtp_server:
frappe.throw(_("SMTP Server is required"))
server = self.get_smtp_server()
return server.session
def before_save(self):
messages = []
as_list = 1
if not self.enable_incoming and self.default_incoming:
self.default_incoming = False
messages.append(_("{} has been disabled. It can only be enabled if {} is checked.")
.format(
frappe.bold(_('Default Incoming')),
frappe.bold(_('Enable Incoming'))
)
)
if not self.enable_outgoing and self.default_outgoing:
self.default_outgoing = False
messages.append(_("{} has been disabled. It can only be enabled if {} is checked.")
.format(
frappe.bold(_('Default Outgoing')),
frappe.bold(_('Enable Outgoing'))
)
)
if messages:
if len(messages) == 1: (as_list, messages) = (0, messages[0])
frappe.msgprint(messages, as_list= as_list, indicator='orange', title=_("Defaults Updated"))
def on_update(self):
"""Check there is only one default of each type."""
self.check_automatic_linking_email_account()
self.there_must_be_only_one_default()
setup_user_email_inbox(email_account=self.name, awaiting_password=self.awaiting_password,
email_id=self.email_id, enable_outgoing=self.enable_outgoing)
def there_must_be_only_one_default(self):
"""If current Email Account is default, un-default all other accounts."""
for field in ("default_incoming", "default_outgoing"):
if not self.get(field):
continue
for email_account in frappe.get_all("Email Account", filters={ field: 1 }):
if email_account.name==self.name:
continue
email_account = frappe.get_doc("Email Account", email_account.name)
email_account.set(field, 0)
email_account.save()
@frappe.whitelist()
def get_domain(self, email_id):
"""look-up the domain and then full"""
try:
domain = email_id.split("@")
fields = [
"name as domain", "use_imap", "email_server",
"use_ssl", "smtp_server", "use_tls",
"smtp_port", "incoming_port", "append_emails_to_sent_folder",
"use_ssl_for_outgoing"
]
return frappe.db.get_value("Email Domain", domain[1], fields, as_dict=True)
except Exception:
pass
def get_incoming_server(self, in_receive=False, email_sync_rule="UNSEEN"):
"""Returns logged in POP3/IMAP connection object."""
if frappe.cache().get_value("workers:no-internet") == True:
return None
args = frappe._dict({
"email_account_name": self.email_account_name,
"email_account": self.name,
"host": self.email_server,
"use_ssl": self.use_ssl,
"username": getattr(self, "login_id", None) or self.email_id,
"use_imap": self.use_imap,
"email_sync_rule": email_sync_rule,
"incoming_port": get_port(self),
"initial_sync_count": self.initial_sync_count or 100
})
if self.password:
args.password = self.get_password()
if not args.get("host"):
frappe.throw(_("{0} is required").format("Email Server"))
email_server = EmailServer(frappe._dict(args))
self.check_email_server_connection(email_server, in_receive)
if not in_receive and self.use_imap:
email_server.imap.logout()
# reset failed attempts count
self.set_failed_attempts_count(0)
return email_server
def check_email_server_connection(self, email_server, in_receive):
# tries to connect to email server and handles failure
try:
email_server.connect()
except (error_proto, imaplib.IMAP4.error) as e:
message = cstr(e).lower().replace(" ","")
auth_error_codes = [
'authenticationfailed',
'loginfailed',
]
other_error_codes = [
'err[auth]',
'errtemporaryerror',
'loginviayourwebbrowser'
]
all_error_codes = auth_error_codes + other_error_codes
if in_receive and any(map(lambda t: t in message, all_error_codes)):
# if called via self.receive and it leads to authentication error,
# disable incoming and send email to System Manager
error_message = _("Authentication failed while receiving emails from Email Account: {0}.").format(self.name)
error_message += "<br>" + _("Message from server: {0}").format(cstr(e))
self.handle_incoming_connect_error(description=error_message)
return None
elif not in_receive and any(map(lambda t: t in message, auth_error_codes)):
SMTPServer.throw_invalid_credentials_exception()
else:
frappe.throw(cstr(e))
except socket.error:
if in_receive:
# timeout while connecting, see receive.py connect method
description = frappe.message_log.pop() if frappe.message_log else "Socket Error"
if test_internet():
self.db_set("no_failed", self.no_failed + 1)
if self.no_failed > 2:
self.handle_incoming_connect_error(description=description)
else:
frappe.cache().set_value("workers:no-internet", True)
return None
else:
raise
@property
def _password(self):
raise_exception = not (self.no_smtp_authentication or frappe.flags.in_test)
return self.get_password(raise_exception=raise_exception)
@property
def default_sender(self):
return email.utils.formataddr((self.name, self.get("email_id")))
def is_exists_in_db(self):
"""Some of the Email Accounts we create from configs and those doesn't exists in DB.
This is is to check the specific email account exists in DB or not.
"""
return self.find_one_by_filters(name=self.name)
@classmethod
def from_record(cls, record):
email_account = frappe.new_doc(cls.DOCTYPE)
email_account.update(record)
return email_account
@classmethod
def find(cls, name):
return frappe.get_doc(cls.DOCTYPE, name)
@classmethod
def find_one_by_filters(cls, **kwargs):
name = frappe.db.get_value(cls.DOCTYPE, kwargs)
return cls.find(name) if name else None
@classmethod
def find_from_config(cls):
config = cls.get_account_details_from_site_config()
return cls.from_record(config) if config else None
@classmethod
def create_dummy(cls):
return cls.from_record({"sender": "notifications@example.com"})
@classmethod
@raise_error_on_no_output(
keep_quiet = lambda: not cint(frappe.get_system_settings('setup_complete')),
error_message = OUTGOING_EMAIL_ACCOUNT_MISSING, error_type = frappe.OutgoingEmailError) # noqa
@cache_email_account('outgoing_email_account')
def find_outgoing(cls, match_by_email=None, match_by_doctype=None, _raise_error=False):
"""Find the outgoing Email account to use.
:param match_by_email: Find account using emailID
:param match_by_doctype: Find account by matching `Append To` doctype
:param _raise_error: This is used by raise_error_on_no_output decorator to raise error.
"""
if match_by_email:
match_by_email = parse_addr(match_by_email)[1]
doc = cls.find_one_by_filters(enable_outgoing=1, email_id=match_by_email)
if doc:
return {match_by_email: doc}
if match_by_doctype:
doc = cls.find_one_by_filters(enable_outgoing=1, enable_incoming=1, append_to=match_by_doctype)
if doc:
return {match_by_doctype: doc}
doc = cls.find_default_outgoing()
if doc:
return {'default': doc}
@classmethod
def find_default_outgoing(cls):
""" Find default outgoing account.
"""
doc = cls.find_one_by_filters(enable_outgoing=1, default_outgoing=1)
doc = doc or cls.find_from_config()
return doc or (are_emails_muted() and cls.create_dummy())
@classmethod
def find_incoming(cls, match_by_email=None, match_by_doctype=None):
"""Find the incoming Email account to use.
:param match_by_email: Find account using emailID
:param match_by_doctype: Find account by matching `Append To` doctype
"""
doc = cls.find_one_by_filters(enable_incoming=1, email_id=match_by_email)
if doc:
return doc
doc = cls.find_one_by_filters(enable_incoming=1, append_to=match_by_doctype)
if doc:
return doc
doc = cls.find_default_incoming()
return doc
@classmethod
def find_default_incoming(cls):
doc = cls.find_one_by_filters(enable_incoming=1, default_incoming=1)
return doc
@classmethod
def get_account_details_from_site_config(cls):
if not frappe.conf.get("mail_server"):
return {}
field_to_conf_name_map = {
'smtp_server': {'conf_names': ('mail_server',)},
'smtp_port': {'conf_names': ('mail_port',)},
'use_tls': {'conf_names': ('use_tls', 'mail_login')},
'login_id': {'conf_names': ('mail_login',)},
'email_id': {'conf_names': ('auto_email_id', 'mail_login'), 'default': 'notifications@example.com'},
'password': {'conf_names': ('mail_password',)},
'always_use_account_email_id_as_sender':
{'conf_names': ('always_use_account_email_id_as_sender',), 'default': 0},
'always_use_account_name_as_sender_name':
{'conf_names': ('always_use_account_name_as_sender_name',), 'default': 0},
'name': {'conf_names': ('email_sender_name',), 'default': 'Frappe'},
'from_site_config': {'default': True}
}
account_details = {}
for doc_field_name, d in field_to_conf_name_map.items():
conf_names, default = d.get('conf_names') or [], d.get('default')
value = [frappe.conf.get(k) for k in conf_names if frappe.conf.get(k)]
account_details[doc_field_name] = (value and value[0]) or default
return account_details
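	# Illustrative site_config.json fragment consumed by the mapping above
	# (keys follow conf_names; the values shown are placeholders):
	#   {
	#     "mail_server": "smtp.example.com",
	#     "mail_port": 587,
	#     "use_tls": 1,
	#     "mail_login": "notifications@example.com",
	#     "mail_password": "secret",
	#     "email_sender_name": "Frappe"
	#   }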
def sendmail_config(self):
return {
'server': self.smtp_server,
'port': cint(self.smtp_port),
'login': getattr(self, "login_id", None) or self.email_id,
'password': self._password,
'use_ssl': cint(self.use_ssl_for_outgoing),
'use_tls': cint(self.use_tls)
}
def get_smtp_server(self):
config = self.sendmail_config()
return SMTPServer(**config)
def handle_incoming_connect_error(self, description):
if test_internet():
if self.get_failed_attempts_count() > 2:
self.db_set("enable_incoming", 0)
for user in get_system_managers(only_name=True):
try:
assign_to.add({
'assign_to': user,
'doctype': self.doctype,
'name': self.name,
'description': description,
'priority': 'High',
'notify': 1
})
except assign_to.DuplicateToDoError:
frappe.message_log.pop()
pass
else:
self.set_failed_attempts_count(self.get_failed_attempts_count() + 1)
else:
frappe.cache().set_value("workers:no-internet", True)
def set_failed_attempts_count(self, value):
frappe.cache().set('{0}:email-account-failed-attempts'.format(self.name), value)
def get_failed_attempts_count(self):
return cint(frappe.cache().get('{0}:email-account-failed-attempts'.format(self.name)))
def receive(self):
"""Called by scheduler to receive emails from this EMail account using POP3/IMAP."""
exceptions = []
inbound_mails = self.get_inbound_mails()
for mail in inbound_mails:
try:
communication = mail.process()
frappe.db.commit()
# If email already exists in the system
# then do not send notifications for the same email.
if communication and mail.flags.is_new_communication:
# notify all participants of this thread
if self.enable_auto_reply:
self.send_auto_reply(communication, mail)
communication.send_email(is_inbound_mail_communcation=True)
except SentEmailInInboxError:
frappe.db.rollback()
except Exception:
frappe.db.rollback()
frappe.log_error(title="EmailAccount.receive")
if self.use_imap:
self.handle_bad_emails(mail.uid, mail.raw_message, frappe.get_traceback())
exceptions.append(frappe.get_traceback())
else:
frappe.db.commit()
#notify if user is linked to account
if len(inbound_mails)>0 and not frappe.local.flags.in_test:
frappe.publish_realtime('new_email',
{"account":self.email_account_name, "number":len(inbound_mails)}
)
if exceptions:
raise Exception(frappe.as_json(exceptions))
def get_inbound_mails(self) -> List[InboundMail]:
"""retrive and return inbound mails.
"""
mails = []
def process_mail(messages, append_to=None):
for index, message in enumerate(messages.get("latest_messages", [])):
uid = messages['uid_list'][index] if messages.get('uid_list') else None
seen_status = messages.get('seen_status', {}).get(uid)
if self.email_sync_option != 'UNSEEN' or seen_status != "SEEN":
# only append the emails with status != 'SEEN' if sync option is set to 'UNSEEN'
mails.append(InboundMail(message, self, uid, seen_status, append_to))
if not self.enable_incoming:
return []
email_sync_rule = self.build_email_sync_rule()
try:
email_server = self.get_incoming_server(in_receive=True, email_sync_rule=email_sync_rule)
if self.use_imap:
# process all given imap folder
for folder in self.imap_folder:
if email_server.select_imap_folder(folder.folder_name):
email_server.settings['uid_validity'] = folder.uidvalidity
messages = email_server.get_messages(folder=f'"{folder.folder_name}"') or {}
process_mail(messages, folder.append_to)
else:
# process the pop3 account
messages = email_server.get_messages() or {}
process_mail(messages)
# close connection to mailserver
email_server.logout()
except Exception:
frappe.log_error(title=_("Error while connecting to email account {0}").format(self.name))
return []
return mails
def handle_bad_emails(self, uid, raw, reason):
if cint(self.use_imap):
import email
try:
if isinstance(raw, bytes):
mail = email.message_from_bytes(raw)
else:
mail = email.message_from_string(raw)
message_id = mail.get('Message-ID')
except Exception:
message_id = "can't be parsed"
unhandled_email = frappe.get_doc({
"raw": raw,
"uid": uid,
"reason":reason,
"message_id": message_id,
"doctype": "Unhandled Email",
"email_account": self.name
})
unhandled_email.insert(ignore_permissions=True)
frappe.db.commit()
def send_auto_reply(self, communication, email):
"""Send auto reply if set."""
from frappe.core.doctype.communication.email import set_incoming_outgoing_accounts
if self.enable_auto_reply:
set_incoming_outgoing_accounts(communication)
unsubscribe_message = (self.send_unsubscribe_message and _("Leave this conversation")) or ""
frappe.sendmail(recipients = [email.from_email],
sender = self.email_id,
reply_to = communication.incoming_email_account,
subject = " ".join([_("Re:"), communication.subject]),
content = render_template(self.auto_reply_message or "", communication.as_dict()) or \
frappe.get_template("templates/emails/auto_reply.html").render(communication.as_dict()),
reference_doctype = communication.reference_doctype,
reference_name = communication.reference_name,
in_reply_to = email.mail.get("Message-Id"), # send back the Message-Id as In-Reply-To
unsubscribe_message = unsubscribe_message)
def get_unreplied_notification_emails(self):
"""Return list of emails listed"""
self.send_notification_to = self.send_notification_to.replace(",", "\n")
out = [e.strip() for e in self.send_notification_to.split("\n") if e.strip()]
return out
def on_trash(self):
"""Clear communications where email account is linked"""
Communication = frappe.qb.DocType("Communication")
frappe.qb.update(Communication) \
.set(Communication.email_account, "") \
.where(Communication.email_account == self.name).run()
remove_user_email_inbox(email_account=self.name)
def after_rename(self, old, new, merge=False):
frappe.db.set_value("Email Account", new, "email_account_name", new)
def build_email_sync_rule(self):
if not self.use_imap:
return "UNSEEN"
if self.email_sync_option == "ALL":
max_uid = get_max_email_uid(self.name)
last_uid = max_uid + int(self.initial_sync_count or 100) if max_uid == 1 else "*"
return "UID {}:{}".format(max_uid, last_uid)
else:
return self.email_sync_option or "UNSEEN"
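	# Illustrative sync rules produced by build_email_sync_rule (derived from the
	# logic above, not stored anywhere):
	#   POP3 account                          -> "UNSEEN"
	#   IMAP, sync option "ALL", empty inbox  -> "UID 1:101" (initial_sync_count defaults to 100)
	#   IMAP, sync option "ALL", max uid 532  -> "UID 532:*"
	#   IMAP, any other sync option           -> that option string, e.g. "UNSEEN"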
def mark_emails_as_read_unread(self, email_server=None, folder_name="INBOX"):
""" mark Email Flag Queue of self.email_account mails as read"""
if not self.use_imap:
return
EmailFlagQ = frappe.qb.DocType("Email Flag Queue")
flags = (
frappe.qb.from_(EmailFlagQ)
.select(EmailFlagQ.name, EmailFlagQ.communication, EmailFlagQ.uid, EmailFlagQ.action)
.where(EmailFlagQ.is_completed == 0)
			.where(EmailFlagQ.email_account == self.name)
).run(as_dict=True)
uid_list = { flag.get("uid", None): flag.get("action", "Read") for flag in flags }
if flags and uid_list:
if not email_server:
email_server = self.get_incoming_server()
if not email_server:
return
email_server.update_flag(folder_name, uid_list=uid_list)
			# mark communication as read
			read_docnames = [flag.get("communication") for flag in flags
				if flag.get("action") == "Read"]
			self.set_communication_seen_status(read_docnames, seen=1)
			# mark communication as unread
			unread_docnames = [flag.get("communication") for flag in flags
				if flag.get("action") == "Unread"]
			self.set_communication_seen_status(unread_docnames, seen=0)
			flag_names = [flag.get("name") for flag in flags]
			EmailFlagQueue = frappe.qb.DocType("Email Flag Queue")
			frappe.qb.update(EmailFlagQueue) \
				.set(EmailFlagQueue.is_completed, 1) \
				.where(EmailFlagQueue.name.isin(flag_names)).run()
	def set_communication_seen_status(self, docnames, seen=0):
		"""Set the seen status on the given Communication documents."""
		if not docnames:
			return
		Communication = frappe.qb.DocType("Communication")
		frappe.qb.update(Communication) \
			.set(Communication.seen, seen) \
			.where(Communication.name.isin(docnames)).run()
def check_automatic_linking_email_account(self):
if self.enable_automatic_linking:
if not self.enable_incoming:
frappe.throw(_("Automatic Linking can be activated only if Incoming is enabled."))
if frappe.db.exists("Email Account", {"enable_automatic_linking": 1, "name": ('!=', self.name)}):
frappe.throw(_("Automatic Linking can be activated only for one Email Account."))
def append_email_to_sent_folder(self, message):
email_server = None
try:
email_server = self.get_incoming_server(in_receive=True)
except Exception:
frappe.log_error(title=_("Error while connecting to email account {0}").format(self.name))
if not email_server:
return
email_server.connect()
if email_server.imap:
try:
message = safe_encode(message)
email_server.imap.append("Sent", "\\Seen", imaplib.Time2Internaldate(time.time()), message)
except Exception:
frappe.log_error(title="EmailAccount.append_email_to_sent_folder")
@frappe.whitelist()
def get_append_to(doctype=None, txt=None, searchfield=None, start=None, page_len=None, filters=None):
txt = txt if txt else ""
email_append_to_list = []
# Set Email Append To DocTypes via DocType
filters = {"istable": 0, "issingle": 0, "email_append_to": 1}
for dt in frappe.get_all("DocType", filters=filters, fields=["name", "email_append_to"]):
email_append_to_list.append(dt.name)
# Set Email Append To DocTypes set via Customize Form
for dt in frappe.get_list("Property Setter", filters={"property": "email_append_to", "value": 1}, fields=["doc_type"]):
email_append_to_list.append(dt.doc_type)
email_append_to = [[d] for d in set(email_append_to_list) if txt in d]
return email_append_to
def test_internet(host="8.8.8.8", port=53, timeout=3):
"""Returns True if internet is connected
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except Exception as ex:
		print(ex)
return False
def notify_unreplied():
"""Sends email notifications if there are unreplied Communications
and `notify_if_unreplied` is set as true."""
for email_account in frappe.get_all("Email Account", "name", filters={"enable_incoming": 1, "notify_if_unreplied": 1}):
email_account = frappe.get_doc("Email Account", email_account.name)
if email_account.use_imap:
append_to = [folder.get("append_to") for folder in email_account.imap_folder]
else:
append_to = email_account.append_to
if append_to:
# get open communications younger than x mins, for given doctype
for comm in frappe.get_all("Communication", "name", filters=[
{"sent_or_received": "Received"},
{"reference_doctype": ("in", append_to)},
{"unread_notification_sent": 0},
{"email_account":email_account.name},
{"creation": ("<", datetime.now() - timedelta(seconds = (email_account.unreplied_for_mins or 30) * 60))},
{"creation": (">", datetime.now() - timedelta(seconds = (email_account.unreplied_for_mins or 30) * 60 * 3))}
]):
comm = frappe.get_doc("Communication", comm.name)
if frappe.db.get_value(comm.reference_doctype, comm.reference_name, "status")=="Open":
# if status is still open
frappe.sendmail(recipients=email_account.get_unreplied_notification_emails(),
content=comm.content, subject=comm.subject, doctype= comm.reference_doctype,
name=comm.reference_name)
# update flag
comm.db_set("unread_notification_sent", 1)
def pull(now=False):
"""Will be called via scheduler, pull emails from all enabled Email accounts."""
if frappe.cache().get_value("workers:no-internet") == True:
if test_internet():
frappe.cache().set_value("workers:no-internet", False)
else:
return
queued_jobs = get_jobs(site=frappe.local.site, key='job_name')[frappe.local.site]
for email_account in frappe.get_list("Email Account",
filters={"enable_incoming": 1, "awaiting_password": 0}):
if now:
pull_from_email_account(email_account.name)
else:
# job_name is used to prevent duplicates in queue
job_name = 'pull_from_email_account|{0}'.format(email_account.name)
if job_name not in queued_jobs:
enqueue(pull_from_email_account, 'short', event='all', job_name=job_name,
email_account=email_account.name)
def pull_from_email_account(email_account):
'''Runs within a worker process'''
email_account = frappe.get_doc("Email Account", email_account)
email_account.receive()
def get_max_email_uid(email_account):
# get maximum uid of emails
max_uid = 1
result = frappe.db.get_all("Communication", filters={
"communication_medium": "Email",
"sent_or_received": "Received",
"email_account": email_account
}, fields=["max(uid) as uid"])
if not result:
return 1
else:
max_uid = cint(result[0].get("uid", 0)) + 1
return max_uid
def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing):
""" setup email inbox for user """
from frappe.core.doctype.user.user import ask_pass_update
def add_user_email(user):
user = frappe.get_doc("User", user)
row = user.append("user_emails", {})
row.email_id = email_id
row.email_account = email_account
row.awaiting_password = awaiting_password or 0
row.enable_outgoing = enable_outgoing or 0
user.save(ignore_permissions=True)
update_user_email_settings = False
if not all([email_account, email_id]):
return
user_names = frappe.db.get_values("User", {"email": email_id}, as_dict=True)
if not user_names:
return
for user in user_names:
user_name = user.get("name")
		# check if inbox is already configured
user_inbox = frappe.db.get_value("User Email", {
"email_account": email_account,
"parent": user_name
}, ["name"]) or None
if not user_inbox:
add_user_email(user_name)
else:
# update awaiting password for email account
update_user_email_settings = True
if update_user_email_settings:
UserEmail = frappe.qb.DocType("User Email")
frappe.qb.update(UserEmail) \
.set(UserEmail.awaiting_password, (awaiting_password or 0)) \
.set(UserEmail.enable_outgoing, enable_outgoing) \
.where(UserEmail.email_account == email_account).run()
else:
users = " and ".join([frappe.bold(user.get("name")) for user in user_names])
frappe.msgprint(_("Enabled email inbox for user {0}").format(users))
ask_pass_update()
def remove_user_email_inbox(email_account):
""" remove user email inbox settings if email account is deleted """
if not email_account:
return
users = frappe.get_all("User Email", filters={
"email_account": email_account
}, fields=["parent as name"])
for user in users:
doc = frappe.get_doc("User", user.get("name"))
to_remove = [row for row in doc.user_emails if row.email_account == email_account]
[doc.remove(row) for row in to_remove]
doc.save(ignore_permissions=True)
@frappe.whitelist(allow_guest=False)
def set_email_password(email_account, user, password):
account = frappe.get_doc("Email Account", email_account)
if account.awaiting_password:
account.awaiting_password = 0
account.password = password
try:
account.save(ignore_permissions=True)
except Exception:
frappe.db.rollback()
return False
return True
| {
"content_hash": "5afa79637894748b73ae692f50e58f5f",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 120,
"avg_line_length": 34.63164251207729,
"alnum_prop": 0.6947863993025284,
"repo_name": "mhbu50/frappe",
"id": "3a1b68339895f0f5a2e029a6670e785f50bf74ac",
"size": "28773",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/email/doctype/email_account/email_account.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os
from ...utils.filemanip import split_filename
from ..base import (traits, TraitedSpec, File,
CommandLine, CommandLineInputSpec, isdefined)
class ConmatInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='-inputfile %s', mandatory=True,
desc='Streamlines as generated by the Track interface')
target_file = File(exists=True, argstr='-targetfile %s', mandatory=True,
desc='An image containing targets, as used in ProcStreamlines interface.')
scalar_file = File(exists=True, argstr='-scalarfile %s',
desc=('Optional scalar file for computing tract-based statistics. '
'Must be in the same space as the target file.'),
requires=['tract_stat'])
targetname_file = File(exists=True, argstr='-targetnamefile %s',
desc=('Optional names of targets. This file should contain one entry per line, '
'with the target intensity followed by the name, separated by white space. '
'For example: '
' 1 some_brain_region '
' 2 some_other_region '
'These names will be used in the output. The names themselves should not '
'contain spaces or commas. The labels may be in any order but the output '
'matrices will be ordered by label intensity.'))
tract_stat = traits.Enum("mean", "min", "max", "sum", "median", "var", argstr='-tractstat %s', units='NA',
desc=("Tract statistic to use. See TractStats for other options."),
requires=['scalar_file'], xor=['tract_prop'])
tract_prop = traits.Enum("length", "endpointsep", argstr='-tractstat %s',
units='NA', xor=['tract_stat'],
desc=('Tract property average to compute in the connectivity matrix. '
'See TractStats for details.'))
output_root = File(argstr='-outputroot %s', genfile=True,
desc=('filename root prepended onto the names of the output files. '
'The extension will be determined from the input.'))
class ConmatOutputSpec(TraitedSpec):
conmat_sc = File(exists=True, desc='Connectivity matrix in CSV file.')
conmat_ts = File(desc='Tract statistics in CSV file.')
class Conmat(CommandLine):
"""
Creates a connectivity matrix using a 3D label image (the target image)
and a set of streamlines. The connectivity matrix records how many stream-
lines connect each pair of targets, and optionally the mean tractwise
statistic (eg tract-averaged FA, or length).
The output is a comma separated variable file or files. The first row of
the output matrix is label names. Label names may be defined by the user,
otherwise they are assigned based on label intensity.
Starting from the seed point, we move along the streamline until we find
a point in a labeled region. This is done in both directions from the seed
point. Streamlines are counted if they connect two target regions, one on
either side of the seed point. Only the labeled region closest to the seed
is counted, for example if the input contains two streamlines: ::
1: A-----B------SEED---C
2: A--------SEED-----------
then the output would be ::
A,B,C
0,0,0
0,0,1
0,1,0
There are zero connections to A because in streamline 1, the connection
to B is closer to the seed than the connection to A, and in streamline 2
there is no region reached in the other direction.
The connected target regions can have the same label, as long as the seed
point is outside of the labeled region and both ends connect to the same
label (which may be in different locations). Therefore this is allowed: ::
A------SEED-------A
Such fibers will add to the diagonal elements of the matrix. To remove
these entries, run procstreamlines with -endpointfile before running conmat.
    If the seed point is inside a labeled region, it counts as one end of the
connection. So ::
----[SEED inside A]---------B
counts as a connection between A and B, while ::
C----[SEED inside A]---------B
counts as a connection between A and C, because C is closer to the seed point.
In all cases, distance to the seed point is defined along the streamline path.
Example 1
---------
To create a standard connectivity matrix based on streamline counts.
>>> import nipype.interfaces.camino as cam
>>> conmat = cam.Conmat()
>>> conmat.inputs.in_file = 'tracts.Bdouble'
>>> conmat.inputs.target_file = 'atlas.nii.gz'
>>> conmat.run() # doctest: +SKIP
    Example 2
---------
To create a standard connectivity matrix and mean tractwise FA statistics.
>>> import nipype.interfaces.camino as cam
>>> conmat = cam.Conmat()
>>> conmat.inputs.in_file = 'tracts.Bdouble'
>>> conmat.inputs.target_file = 'atlas.nii.gz'
>>> conmat.inputs.scalar_file = 'fa.nii.gz'
>>> conmat.tract_stat = 'mean'
>>> conmat.run() # doctest: +SKIP
"""
_cmd = 'conmat'
input_spec = ConmatInputSpec
output_spec = ConmatOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
output_root = self._gen_outputroot()
outputs['conmat_sc'] = os.path.abspath(output_root + "sc.csv")
outputs['conmat_ts'] = os.path.abspath(output_root + "ts.csv")
return outputs
def _gen_outfilename(self):
return self._gen_outputroot()
def _gen_outputroot(self):
output_root = self.inputs.output_root
if not isdefined(output_root):
output_root = self._gen_filename('output_root')
return output_root
def _gen_filename(self, name):
if name == 'output_root':
_, filename, _ = split_filename(self.inputs.in_file)
filename = filename + "_"
return filename
| {
"content_hash": "b8b648d52023f6142c46f090158804b1",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 110,
"avg_line_length": 42.3312101910828,
"alnum_prop": 0.6092386397833284,
"repo_name": "mick-d/nipype",
"id": "654d71b1f1253b2946fda5c5eca21dec9e98e941",
"size": "6670",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "nipype/interfaces/camino/connectivity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.automl.v1beta1",
manifest={
"NormalizedVertex",
"BoundingPoly",
},
)
class NormalizedVertex(proto.Message):
r"""A vertex represents a 2D point in the image.
The normalized vertex coordinates are between 0 to 1 fractions
relative to the original plane (image, video). E.g. if the plane
(e.g. whole image) would have size 10 x 20 then a point with
normalized coordinates (0.1, 0.3) would be at the position (1,
6) on that plane.
Attributes:
x (float):
Required. Horizontal coordinate.
y (float):
Required. Vertical coordinate.
"""
x = proto.Field(
proto.FLOAT,
number=1,
)
y = proto.Field(
proto.FLOAT,
number=2,
)
class BoundingPoly(proto.Message):
r"""A bounding polygon of a detected object on a plane. On output both
vertices and normalized_vertices are provided. The polygon is formed
by connecting vertices in the order they are listed.
Attributes:
normalized_vertices (Sequence[google.cloud.automl_v1beta1.types.NormalizedVertex]):
            Output only. The bounding polygon normalized
vertices.
"""
normalized_vertices = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="NormalizedVertex",
)
__all__ = tuple(sorted(__protobuf__.manifest))
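# Minimal construction sketch for these messages (coordinate values are
# illustrative; the import path follows this file's package location):
#
#   from google.cloud.automl_v1beta1.types import BoundingPoly, NormalizedVertex
#   poly = BoundingPoly(normalized_vertices=[
#       NormalizedVertex(x=0.1, y=0.3),
#       NormalizedVertex(x=0.9, y=0.8),
#   ])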
| {
"content_hash": "655cb88f52c74336d0302b89135f7ed7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 91,
"avg_line_length": 26.178571428571427,
"alnum_prop": 0.6343792633015006,
"repo_name": "googleapis/python-automl",
"id": "586d4adbca779d2c256e5adab75f8f66686d04c3",
"size": "2066",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/automl_v1beta1/types/geometry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2347989"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from flask import jsonify, abort
from flask_restful import reqparse, Resource
from config import db
from data.user import UserData
from datetime import datetime, timedelta
from data.user_session import UserSessionData
parser = reqparse.RequestParser()
parser.add_argument('username')
parser.add_argument('password')
class Login(Resource):
def post(self):
request_json = parser.parse_args()
username = request_json['username']
password = request_json['password']
user = UserData.query.filter(
UserData.username == username,
UserData.password == password,
UserData.status == 'active'
).first()
if not user:
return abort(400)
# create new user session and return session_id
user_session = UserSessionData(user.id, datetime.now() + timedelta(days=10))
db.session.add(user_session)
db.session.commit()
return jsonify(user_session.serialize())
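# Illustration only (hypothetical client call, not part of this module): the
# resource above expects 'username' and 'password' in the POST body and replies
# with the serialized session on success, or HTTP 400 for unknown or inactive
# users. The '/login' mount point and host are assumptions.
#
#     import requests
#     resp = requests.post("http://localhost:5000/login",
#                          data={"username": "alice", "password": "secret"})
#     if resp.status_code == 200:
#         session = resp.json()  # contains the new session id and expiry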
| {
"content_hash": "2e457dfdf753fffa701cf7b91cbacfd4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 32.733333333333334,
"alnum_prop": 0.6680244399185336,
"repo_name": "ONSdigital/nsdc",
"id": "b07d0af1d5f8010ec7eddc7c33bb9c1c0be29019",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/api/login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6947"
},
{
"name": "HTML",
"bytes": "37985"
},
{
"name": "JavaScript",
"bytes": "26546"
},
{
"name": "Python",
"bytes": "45285"
},
{
"name": "TypeScript",
"bytes": "131957"
}
],
"symlink_target": ""
} |
import os
import sys
from base import Daemon
from shuttl import app
from shuttl.Models.Queue import Queue
from shuttl.Models.Website import Website
## A worker daemon that clears out the publish Queue. This checks the Queue to
# see if there are any publish messages in it; if there are, it publishes them,
# otherwise it ignores them.
# \note Right now this works by constantly polling the DB to see if there is a
# task. It would be great if we could redesign this around a monitor or
# semaphore of some type (a sketch of such a variant follows the class below).
class PublishWorker(Daemon):
## This is continuously running. This is what clears out the queue. This
# constantly checks to see if there is something in the database
def run(self):
print("Publisher Has started")
with app.app_context():
while True:
kwargs = {}
if not Queue.Empty():
obj = Queue.Pop()
try:
print ("publishing the websites")
if type(obj) == Website:
kwargs = {"website": obj}
obj.publish()
pass
else:
kwargs = {"fileObject": obj}
site = obj.website
site.publish(obj)
pass
pass
except Exception as e:
import logging
args = [
type(obj),
obj.id,
obj.name
]
logging.exception(
"An Error prevented publishing! the type is {0}, the id {1} and the name is {2}".format(*args)
)
entry = Queue.Push(**kwargs)
entry.recoverable = False
entry.save()
pass
pass
pass
pass
pass
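## Illustration only: a hedged sketch of the monitor/semaphore redesign hinted at
# in the note above the class. It assumes a hypothetical blocking call
# Queue.wait_for_item(timeout=...) that sleeps until work arrives instead of
# polling; no such method exists on the current Queue model, and the publish
# handling is reduced to the two cases above for brevity.
#
#     class BlockingPublishWorker(Daemon):
#         def run(self):
#             with app.app_context():
#                 while True:
#                     obj = Queue.wait_for_item(timeout=30)  # hypothetical blocking call
#                     if obj is None:
#                         continue  # timed out, loop again
#                     if type(obj) == Website:
#                         obj.publish()
#                     else:
#                         obj.website.publish(obj)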
## \cond
if __name__ == "__main__":
daemon = PublishWorker(
os.path.join(app.config["PID_PATH"], "publish.pid"),
stdout=os.path.join(app.config["LOG_PATH"], "publish.log"),
stderr=os.path.join(app.config["LOG_PATH"], "publish.error")
)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print( "Unknown command")
sys.exit(2)
sys.exit(0)
else:
print ("usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
## \endcond | {
"content_hash": "ce0e5ffff531590a8276369eeb032dc8",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 122,
"avg_line_length": 36.08860759493671,
"alnum_prop": 0.4535250789196773,
"repo_name": "shuttl-io/shuttl",
"id": "c466f93a1a8c8a93da45a18ea2ca34cc5faf356d",
"size": "2851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemons/PublishWorker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44727"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "27869"
},
{
"name": "JavaScript",
"bytes": "29584"
},
{
"name": "Python",
"bytes": "286624"
},
{
"name": "Shell",
"bytes": "1496"
},
{
"name": "Vue",
"bytes": "44133"
}
],
"symlink_target": ""
} |
import socket
import threading
import time
import zlib
import httplib
import urllib2
import urlparse
try:
import cPickle as pickle
except ImportError:
import pickle
from . import UniformRetryStrategy
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
__all__ = ['WebCache']
class WebCache (object):
"""
Allows for thread-safe cached downloads, honors last-modified.
WebCache can also write and read the cache to and from disk.
"""
# default retry parameters
retry_times = 3
retry_lower = 5
retry_upper = 15
# default user string
user_string = "Mozilla/5.0"
def __init__ (self, cache_file=None, sleeper=None):
"""
        Initializes an instance from an optional cache_file and an optional
        sleeper.
The cache_file parameter may either be a filename or an open file-like
object.
If the cache_file parameter is not given the cache is initialized to be
empty.
The sleeper parameter must be a function that takes the number of
seconds to sleep (as a floating point number).
If the sleeper parameter is not given it is initialized as time.sleep.
"""
if cache_file is None:
self.cache = {}
else:
self.load(cache_file)
        if sleeper is None:
            self.sleeper = time.sleep
        else:
            self.sleeper = sleeper
self.cache_lock = threading.Lock()
self.set_online_mode()
def download (self, url, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bypass_cache=False):
"""
        Downloads the contents from the URL. If something goes wrong, it
        registers the exception with the retrier and asks for a sleep time.
"""
retry = 0.0
retrier = UniformRetryStrategy (
self.retry_times,
self.retry_lower,
self.retry_upper
)
while retry is not None:
if retry:
logger.debug('sleeping on %s for %s seconds', url, retry)
self.sleeper(retry)
try:
return self.downloader(url, timeout=timeout, bypass_cache=bypass_cache)
except Exception as e:
logger.debug('got on %s exception %s', url, e)
retrier.register_error(e)
retry = retrier.seconds()
return ''
def download_offline (self, url, timeout=None, bypass_cache=False):
"""
Simulates downloading contents from URL while only looking it up in the
cache.
"""
contents = None
key = self.url_to_key(url)
if bypass_cache:
raise ValueError ('Cache bypass doesn\'t make sense in offline mode.')
if self.has_key(key):
_, contents = self.get_values(key)
return zlib.decompress(contents)
raise urllib2.URLError(OSError('not in cache'))
def download_online (self, url, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bypass_cache=False):
"""
Downloads contents from the URL, using the internal cache if applicable.
"""
contents = None
key = self.url_to_key(url)
request = urllib2.Request(url)
request.add_header('User-agent', self.user_string)
if not bypass_cache and self.has_key(key):
lastmodified, contents = self.get_values(key)
request.add_header('if-modified-since', lastmodified)
try:
connection = urllib2.urlopen(request, timeout=timeout)
lastmodified = connection.headers.get('last-modified')
contents = connection.read()
connection.close()
except urllib2.HTTPError as e:
if e.code == 304:
logger.debug (
'cache hit %r not modified since %s',
key, lastmodified
)
return zlib.decompress(contents)
raise
if not bypass_cache:
self.set_values(key, lastmodified, zlib.compress(contents))
return contents
def dump (self, outfile):
"""
Writes internal cache to outfile.
outfile may be a filename or an open file-like object.
"""
if isinstance(outfile, str):
outfile = open(outfile, 'wb')
pickle.dump(self.cache, outfile, protocol=-1)
def get_values (self, key):
"""
Returns the values referred to by key in a thread-safe manner.
"""
with self.cache_lock:
logger.debug('getting %r from cache', key)
return self.cache[key]
def has_key (self, key):
"""
Returns if the cache contains entries for key.
"""
logger.debug('looking for %r in cache', key)
return key in self.cache
def keys (self):
"""
Makes a copy of the list of keys and returns it.
"""
return self.cache.keys()
def load (self, infile):
"""
Loads internal cache from infile.
infile may be a filename or an open file-like object.
"""
try:
if isinstance(infile, str):
infile = open(infile, 'rb')
self.cache = pickle.load(infile)
except IOError:
self.cache = {}
def url_to_key (self, url):
"""
Takes an url and returns a key for use in the cache.
"""
return urlparse.urlparse(url).path
def remove_key (self, key):
"""
Removes an entry from the cache, not thread-safe.
"""
del self.cache[key]
def set_offline_mode (self):
"""
Sets offline mode for the webcache.
"""
self.downloader = self.download_offline
def set_online_mode (self):
"""
Sets online mode for the webcache.
"""
self.downloader = self.download_online
def set_values (self, key, *args):
"""
Sets values in a thread-safe manner.
"""
with self.cache_lock:
logger.debug('storing %r in cache', key)
self.cache[key] = args
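# Illustration only (assumed usage, not part of the class above): a typical
# round trip with the methods defined in WebCache -- load a persisted cache,
# download through it, then write it back to disk. The file name and URL are
# placeholders.
#
#     cache = WebCache(cache_file='cache.pkl')
#     body = cache.download('https://example.org/page.html')
#     cache.dump('cache.pkl')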
| {
"content_hash": "ae1f645dc620b4a2b4c35c201204763c",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 96,
"avg_line_length": 28.43778801843318,
"alnum_prop": 0.5733268514017177,
"repo_name": "crypt3lx2k/Imageboard-Web-Interface",
"id": "2b4eed09450b95d1dec500b4b628f0c43b24b085",
"size": "6171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "iwi/web/WebCache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45615"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Environment, EnvironmentProject
from sentry.testutils import APITestCase
class ProjectEnvironmentsTest(APITestCase):
def test_get(self):
project = self.create_project()
environment = Environment.objects.create(
project_id=project.id, organization_id=project.organization_id, name="production"
)
environment.add_project(project)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-environment-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"environment": "production",
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data == {
"id": u"{}".format(
EnvironmentProject.objects.get(environment__name="production", project=project).id
),
"name": "production",
"isHidden": False,
}
assert (
self.client.get(
reverse(
"sentry-api-0-project-environment-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"environment": "invalid",
},
),
format="json",
).status_code
== 404
)
def test_put(self):
project = self.create_project()
environment = Environment.objects.create(
project_id=project.id, organization_id=project.organization_id, name="production"
)
environment.add_project(project)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-environment-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"environment": "production",
},
)
response = self.client.put(url, {"isHidden": True}, format="json")
assert response.status_code == 200, response.content
assert (
EnvironmentProject.objects.get(project=project, environment=environment).is_hidden
is True
)
assert (
self.client.put(
reverse(
"sentry-api-0-project-environment-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"environment": "invalid",
},
),
{"isHidden": True},
format="json",
).status_code
== 404
)
| {
"content_hash": "495fde5bc46bf08a50986bdeeac24513",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 98,
"avg_line_length": 32.43010752688172,
"alnum_prop": 0.5185676392572944,
"repo_name": "mvaled/sentry",
"id": "a3bee0c4d3cbee6fa8b1e261b2d5bad67bd6dd8f",
"size": "3016",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_project_environment_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import contextlib
import uuid
from django.db.utils import IntegrityError, InternalError
from django.test import TestCase, TransactionTestCase
from unittest.mock import patch
from casexml.apps.case.mock import CaseBlock, CaseFactory, CaseStructure
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.products.models import SQLProduct
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.exceptions import CaseNotFound, XFormNotFound
from corehq.form_processor.interfaces.dbaccessors import LedgerAccessors
from corehq.form_processor.models import CommCareCase, XFormInstance
from corehq.form_processor.reprocess import (
reprocess_form,
reprocess_unfinished_stub,
reprocess_xform_error,
)
from corehq.form_processor.signals import sql_case_post_save
from corehq.form_processor.tests.utils import (
FormProcessorTestUtils,
sharded,
)
from corehq.util.context_managers import catch_signal
from couchforms.models import UnfinishedSubmissionStub
from couchforms.signals import successful_form_received
@sharded
class ReprocessXFormErrorsTest(TestCase):
@classmethod
def setUpClass(cls):
super(ReprocessXFormErrorsTest, cls).setUpClass()
cls.domain = uuid.uuid4().hex
@classmethod
def tearDownClass(cls):
FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain)
super(ReprocessXFormErrorsTest, cls).tearDownClass()
def test_reprocess_xform_error(self):
case_id = uuid.uuid4().hex
parent_case_id = uuid.uuid4().hex
case = CaseBlock(
create=True,
case_id=case_id,
user_id='user1',
owner_id='user1',
case_type='demo',
case_name='child',
index={'parent': ('parent_type', parent_case_id)}
)
submit_case_blocks([case.as_text()], domain=self.domain)
get_forms_by_type = XFormInstance.objects.get_forms_by_type
error_forms = get_forms_by_type(self.domain, 'XFormError', 10)
self.assertEqual(1, len(error_forms))
form = error_forms[0]
reprocess_xform_error(form)
error_forms = get_forms_by_type(self.domain, 'XFormError', 10)
self.assertEqual(1, len(error_forms))
case = CaseBlock(
create=True,
case_id=parent_case_id,
user_id='user1',
owner_id='user1',
case_type='parent_type',
case_name='parent',
)
submit_case_blocks([case.as_text()], domain=self.domain)
reprocess_xform_error(XFormInstance.objects.get_form(form.form_id))
form = XFormInstance.objects.get_form(form.form_id)
# self.assertTrue(form.initial_processing_complete) Can't change this with SQL forms at the moment
self.assertTrue(form.is_normal)
self.assertIsNone(form.problem)
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertEqual(1, len(case.indices))
self.assertEqual(case.indices[0].referenced_id, parent_case_id)
self._validate_case(case)
def _validate_case(self, case):
self.assertEqual(1, len(case.transactions))
self.assertTrue(case.transactions[0].is_form_transaction)
self.assertTrue(case.transactions[0].is_case_create)
self.assertTrue(case.transactions[0].is_case_index)
self.assertFalse(case.transactions[0].revoked)
@sharded
class ReprocessSubmissionStubTests(TestCase):
@classmethod
def setUpClass(cls):
super(ReprocessSubmissionStubTests, cls).setUpClass()
cls.domain = uuid.uuid4().hex
cls.product = SQLProduct.objects.create(domain=cls.domain, product_id='product1', name='product1')
@classmethod
def tearDownClass(cls):
cls.product.delete()
super(ReprocessSubmissionStubTests, cls).tearDownClass()
def setUp(self):
super(ReprocessSubmissionStubTests, self).setUp()
self.factory = CaseFactory(domain=self.domain)
self.formdb = XFormInstance.objects
self.ledgerdb = LedgerAccessors(self.domain)
def tearDown(self):
FormProcessorTestUtils.delete_all_cases_forms_ledgers(self.domain)
super(ReprocessSubmissionStubTests, self).tearDown()
def test_reprocess_unfinished_submission_case_create(self):
case_id = uuid.uuid4().hex
with _patch_save_to_raise_error(self):
self.factory.create_or_update_cases([
CaseStructure(case_id=case_id, attrs={'case_type': 'parent', 'create': True})
])
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
# form that was saved before case error raised
normal_form_ids = XFormInstance.objects.get_form_ids_in_domain(self.domain, 'XFormInstance')
self.assertEqual(0, len(normal_form_ids))
# shows error form (duplicate of form that was saved before case error)
        # this is saved because the saving was assumed to be atomic so if there was any error it's assumed
# the form didn't get saved
# we don't really care about this form in this test
error_forms = XFormInstance.objects.get_forms_by_type(self.domain, 'XFormError', 10)
self.assertEqual(1, len(error_forms))
self.assertIsNone(error_forms[0].orig_id)
self.assertEqual(error_forms[0].form_id, stubs[0].xform_id)
self.assertEqual(0, len(CommCareCase.objects.get_case_ids_in_domain(self.domain)))
result = reprocess_unfinished_stub(stubs[0])
self.assertEqual(1, len(result.cases))
case_ids = CommCareCase.objects.get_case_ids_in_domain(self.domain)
self.assertEqual(1, len(case_ids))
self.assertEqual(case_id, case_ids[0])
with self.assertRaises(UnfinishedSubmissionStub.DoesNotExist):
UnfinishedSubmissionStub.objects.get(pk=stubs[0].pk)
def test_reprocess_unfinished_submission_case_update(self):
case_id = uuid.uuid4().hex
form_ids = []
form_ids.append(submit_case_blocks(
CaseBlock(case_id=case_id, create=True, case_type='box').as_text(),
self.domain
)[0].form_id)
with _patch_save_to_raise_error(self):
submit_case_blocks(
CaseBlock(case_id=case_id, update={'prop': 'a'}).as_text(),
self.domain
)
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
form_ids.append(stubs[0].xform_id)
# submit second form with case update
form_ids.append(submit_case_blocks(
CaseBlock(case_id=case_id, update={'prop': 'b'}).as_text(),
self.domain
)[0].form_id)
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertEqual(2, len(case.xform_ids))
self.assertEqual('b', case.get_case_property('prop'))
result = reprocess_unfinished_stub(stubs[0])
self.assertEqual(1, len(result.cases))
self.assertEqual(0, len(result.ledgers))
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertEqual('b', case.get_case_property('prop')) # should be property value from most recent form
self.assertEqual(3, len(case.xform_ids))
self.assertEqual(form_ids, case.xform_ids)
with self.assertRaises(UnfinishedSubmissionStub.DoesNotExist):
UnfinishedSubmissionStub.objects.get(pk=stubs[0].pk)
def test_reprocess_unfinished_submission_ledger_create(self):
from corehq.apps.commtrack.tests.util import get_single_balance_block
case_id = uuid.uuid4().hex
self.factory.create_or_update_cases([
CaseStructure(case_id=case_id, attrs={'case_type': 'parent', 'create': True})
])
with _patch_save_to_raise_error(self):
submit_case_blocks(
get_single_balance_block(case_id, 'product1', 100),
self.domain
)
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
ledgers = self.ledgerdb.get_ledger_values_for_case(case_id)
self.assertEqual(0, len(ledgers))
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertEqual(1, len(case.xform_ids))
ledger_transactions = self.ledgerdb.get_ledger_transactions_for_case(case_id)
self.assertEqual(0, len(ledger_transactions))
result = reprocess_unfinished_stub(stubs[0])
self.assertEqual(1, len(result.cases))
self.assertEqual(1, len(result.ledgers))
ledgers = self.ledgerdb.get_ledger_values_for_case(case_id)
self.assertEqual(1, len(ledgers))
ledger_transactions = self.ledgerdb.get_ledger_transactions_for_case(case_id)
self.assertEqual(1, len(ledger_transactions))
# case still only has 2 transactions
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertEqual(2, len(case.xform_ids))
self.assertTrue(case.actions[1].is_ledger_transaction)
def test_reprocess_unfinished_submission_ledger_rebuild(self):
from corehq.apps.commtrack.tests.util import get_single_balance_block
case_id = uuid.uuid4().hex
form_ids = []
form_ids.append(submit_case_blocks(
[
CaseBlock(case_id=case_id, create=True, case_type='shop').as_text(),
get_single_balance_block(case_id, 'product1', 100),
],
self.domain
)[0].form_id)
with _patch_save_to_raise_error(self):
submit_case_blocks(
get_single_balance_block(case_id, 'product1', 50),
self.domain
)
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
form_ids.append(stubs[0].xform_id)
# submit another form afterwards
form_ids.append(submit_case_blocks(
get_single_balance_block(case_id, 'product1', 25),
self.domain
)[0].form_id)
ledgers = self.ledgerdb.get_ledger_values_for_case(case_id)
self.assertEqual(1, len(ledgers))
self.assertEqual(25, ledgers[0].balance)
ledger_transactions = self.ledgerdb.get_ledger_transactions_for_case(case_id)
self.assertEqual(2, len(ledger_transactions))
# should rebuild ledger transactions
result = reprocess_unfinished_stub(stubs[0])
self.assertEqual(1, len(result.cases))
self.assertEqual(1, len(result.ledgers))
ledgers = self.ledgerdb.get_ledger_values_for_case(case_id)
self.assertEqual(1, len(ledgers)) # still only 1
self.assertEqual(25, ledgers[0].balance)
ledger_transactions = self.ledgerdb.get_ledger_transactions_for_case(case_id)
self.assertEqual(3, len(ledger_transactions))
# make sure transactions are in correct order
self.assertEqual(form_ids, [trans.form_id for trans in ledger_transactions])
self.assertEqual(100, ledger_transactions[0].updated_balance)
self.assertEqual(100, ledger_transactions[0].delta)
self.assertEqual(50, ledger_transactions[1].updated_balance)
self.assertEqual(-50, ledger_transactions[1].delta)
self.assertEqual(25, ledger_transactions[2].updated_balance)
self.assertEqual(-25, ledger_transactions[2].delta)
def test_fire_signals(self):
from corehq.apps.receiverwrapper.tests.test_submit_errors import failing_signal_handler
case_id = uuid.uuid4().hex
form_id = uuid.uuid4().hex
with failing_signal_handler('signal death'):
submit_case_blocks(
CaseBlock(case_id=case_id, create=True, case_type='box').as_text(),
self.domain,
form_id=form_id
)
form = self.formdb.get_form(form_id)
with catch_signal(successful_form_received) as form_handler, \
catch_signal(sql_case_post_save) as case_handler:
submit_form_locally(
instance=form.get_xml(),
domain=self.domain,
)
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertEqual(form, form_handler.call_args[1]['xform'])
self.assertEqual(case, case_handler.call_args[1]['case'])
def test_reprocess_normal_form(self):
case_id = uuid.uuid4().hex
form, cases = submit_case_blocks(
CaseBlock(case_id=case_id, create=True, case_type='box').as_text(),
self.domain
)
self.assertTrue(form.is_normal)
result = reprocess_form(form, save=True, lock_form=False)
self.assertIsNone(result.error)
case = CommCareCase.objects.get_case(case_id, self.domain)
transactions = case.actions
self.assertEqual([trans.form_id for trans in transactions], [form.form_id])
def test_processing_skipped_when_migrations_are_in_progress(self):
case_id = uuid.uuid4().hex
with _patch_save_to_raise_error(self):
self.factory.create_or_update_cases([
CaseStructure(case_id=case_id, attrs={'case_type': 'parent', 'create': True})
])
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
with patch('corehq.form_processor.reprocess.any_migrations_in_progress', return_value=True):
result = reprocess_unfinished_stub(stubs[0])
self.assertIsNone(result)
result = reprocess_unfinished_stub(stubs[0])
self.assertEqual(1, len(result.cases))
    def test_processing_returns_error_for_missing_form(self):
case_id = uuid.uuid4().hex
with _patch_save_to_raise_error(self):
self.factory.create_or_update_cases([
CaseStructure(case_id=case_id, attrs={'case_type': 'parent', 'create': True})
])
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
FormProcessorTestUtils.delete_all_cases_forms_ledgers(self.domain)
with self.assertRaises(XFormNotFound):
self.formdb.get_form(stubs[0].xform_id)
result = reprocess_unfinished_stub(stubs[0])
self.assertIsNotNone(result.error)
@sharded
class TestReprocessDuringSubmission(TestCase):
@classmethod
def setUpClass(cls):
super(TestReprocessDuringSubmission, cls).setUpClass()
cls.domain = uuid.uuid4().hex
def setUp(self):
super(TestReprocessDuringSubmission, self).setUp()
self.factory = CaseFactory(domain=self.domain)
self.formdb = XFormInstance.objects
self.ledgerdb = LedgerAccessors(self.domain)
def tearDown(self):
FormProcessorTestUtils.delete_all_cases_forms_ledgers(self.domain)
super(TestReprocessDuringSubmission, self).tearDown()
def test_error_saving(self):
case_id = uuid.uuid4().hex
form_id = uuid.uuid4().hex
with _patch_save_to_raise_error(self):
submit_case_blocks(
CaseBlock(case_id=case_id, create=True, case_type='box').as_text(),
self.domain,
form_id=form_id
)
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(1, len(stubs))
form = self.formdb.get_form(form_id)
self.assertTrue(form.is_error)
with self.assertRaises(CaseNotFound):
CommCareCase.objects.get_case(case_id, self.domain)
result = submit_form_locally(
instance=form.get_xml(),
domain=self.domain,
)
duplicate_form = result.xform
self.assertTrue(duplicate_form.is_duplicate)
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertIsNotNone(case)
form = self.formdb.get_form(form_id)
self.assertTrue(form.is_normal)
self.assertIsNone(getattr(form, 'problem', None))
self.assertEqual(duplicate_form.orig_id, form.form_id)
def test_processing_error(self):
case_id = uuid.uuid4().hex
parent_case_id = uuid.uuid4().hex
form_id = uuid.uuid4().hex
form, _ = submit_case_blocks(
CaseBlock(
case_id=case_id, create=True, case_type='box',
index={'cupboard': ('cupboard', parent_case_id)},
).as_text(),
self.domain,
form_id=form_id
)
self.assertTrue(form.is_error)
self.assertTrue('InvalidCaseIndex' in form.problem)
self.assertEqual(form.form_id, form_id)
with self.assertRaises(CaseNotFound):
CommCareCase.objects.get_case(case_id, self.domain)
stubs = UnfinishedSubmissionStub.objects.filter(domain=self.domain, saved=False).all()
self.assertEqual(0, len(stubs))
# create parent case
submit_case_blocks(
CaseBlock(case_id=parent_case_id, create=True, case_type='cupboard').as_text(),
self.domain,
)
# re-submit form
result = submit_form_locally(
instance=form.get_xml(),
domain=self.domain,
)
duplicate_form = result.xform
self.assertTrue(duplicate_form.is_duplicate)
case = CommCareCase.objects.get_case(case_id, self.domain)
self.assertIsNotNone(case)
form = self.formdb.get_form(form_id)
self.assertTrue(form.is_normal)
self.assertIsNone(getattr(form, 'problem', None))
self.assertEqual(duplicate_form.orig_id, form.form_id)
@sharded
class TestTransactionErrors(TransactionTestCase):
domain = uuid.uuid4().hex
def tearDown(self):
FormProcessorTestUtils.delete_all_cases_forms_ledgers(self.domain)
super().tearDown()
def test_error_saving_case(self):
form_id = uuid.uuid4().hex
case_id = uuid.uuid4().hex
error_on_save = patch.object(CommCareCase, 'save', side_effect=IntegrityError)
with error_on_save, self.assertRaises(IntegrityError):
submit_case_blocks(
[CaseBlock(case_id=case_id, update={'a': "2"}).as_text()],
self.domain,
form_id=form_id
)
form = XFormInstance.objects.get_form(form_id)
self.assertTrue(form.is_error)
self.assertIsNotNone(form.get_xml())
def test_error_saving_case_during_edit(self):
form_id = uuid.uuid4().hex
case_id = uuid.uuid4().hex
submit_case_blocks(
[CaseBlock(case_id=case_id, update={'a': "1"}).as_text()],
self.domain,
form_id=form_id
)
error_on_save = patch.object(CommCareCase, 'save', side_effect=IntegrityError)
with error_on_save, self.assertRaises(IntegrityError):
submit_case_blocks(
[CaseBlock(case_id=case_id, update={'a': "2"}).as_text()],
self.domain,
form_id=form_id
)
[error_form_id] = XFormInstance.objects.get_form_ids_in_domain(self.domain, 'XFormError')
self.assertNotEqual(error_form_id, form_id)
form = XFormInstance.objects.get_form(error_form_id)
self.assertTrue(form.is_error)
self.assertIsNotNone(form.get_xml())
def test_error_reprocessing_ledgers_after_borked_save(self):
from corehq.apps.commtrack.tests.util import get_single_balance_block
form_id, case_id, product_id = uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex
# setup by creating the case
submit_case_blocks([CaseBlock(case_id=case_id, create=True).as_text()], self.domain)
# submit a form that updates the case and ledger
submit_case_blocks(
[
CaseBlock(case_id=case_id, update={'a': "1"}).as_text(),
get_single_balance_block(case_id, product_id, 100),
],
self.domain,
form_id=form_id
)
# simulate an error by deleting the form XML
form = XFormInstance.objects.get_form(form_id)
form.get_attachment_meta('form.xml').delete()
# re-submit the form again
submit_case_blocks(
[
CaseBlock(case_id=case_id, update={'a': "1"}).as_text(),
get_single_balance_block(case_id, product_id, 100),
],
self.domain,
form_id=form_id
)
form = XFormInstance.objects.get_form(form_id)
self.assertTrue(form.is_normal)
@contextlib.contextmanager
def _patch_save_to_raise_error(test_class):
sql_patch = patch(
'corehq.form_processor.backends.sql.processor.FormProcessorSQL.save_processed_models',
side_effect=InternalError
)
with sql_patch, test_class.assertRaises(InternalError):
yield
| {
"content_hash": "937644fcf9cf08abcab1674498c004d3",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 111,
"avg_line_length": 38.34719710669078,
"alnum_prop": 0.6375082523814015,
"repo_name": "dimagi/commcare-hq",
"id": "fc04cea6349a9bb9847e7eb0db50fed0e9e30c8a",
"size": "21206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/form_processor/tests/test_reprocess_errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
""" Art Gatherer class module """
from collections import namedtuple
from datetime import datetime
from urllib import request
import numpy as np
from mtgsdk import Card, Set
import cv2
from colorpie.image_processing import ImageProcessing
MagicCard = namedtuple('MagicCard', [
'card_id',
'name',
'set_code',
'set_name',
'color_identity',
'image',
'artwork'
])
class ArtGatherer:
""" Wrapper class to fetch card information from mtgsdk
"""
def __init__(self):
pass
@staticmethod
def _color_to_identity(colors):
""" Returns the color identity based on a list of card colors
        Useful for naming colorless and multicolor cards
"""
if not colors:
return 'Colorless'
if len(colors) > 1:
return 'Multicolor'
return colors[0]
@staticmethod
def _url_to_image(image_url='goo.gl/QBqtqE'):
""" Returns a cv2 image object from a gatherer image_id
"""
resp = request.urlopen(image_url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
@staticmethod
def _get_artwork(card, image):
""" Crops image to fit artwork only."""
return ImageProcessing.crop_image(image)
@classmethod
def _build_magic_card(cls, card):
identity = cls._color_to_identity(card.colors)
image = cls._url_to_image(card.image_url)
artwork = cls._get_artwork(card, image)
return MagicCard(
card.multiverse_id,
card.name,
card.set,
card.set_name,
identity,
image,
artwork
)
@classmethod
def get_card_info(cls, card_id=40545):
""" Returns a cv2 image object from a multiverse id of a specific card
"""
card = Card.find(card_id)
return cls._build_magic_card(card)
@staticmethod
def print_cardset_names():
all_sets = Set.all()
for mtgset in all_sets:
print(mtgset.code, mtgset.name)
@classmethod
def get_full_set(cls, code=None):
""" Returns a card list for a full set based on set codename
"""
card_set = list()
if code is None:
return card_set
print('[INFO]\tSearching for cards...')
fullset = Card.where(set=code).all()
print('[INFO]\tBuilding card set...')
for card in fullset:
# Skip lands. Too basic
if 'Land' in card.types:
continue
magic_card = cls._build_magic_card(card)
card_set.append((
magic_card.image,
magic_card.color_identity,
magic_card.artwork))
return card_set
@classmethod
def get_card_list(cls, card_id_list=None):
""" Returns card list based on a list of multiverse ids
"""
cardlist = list()
for cid in card_id_list:
cardlist.append(cls.get_card_info(cid))
return cardlist
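# Illustration only (assumed usage, not part of the module): fetching a single
# card and a full set with the classmethods above. The multiverse id matches the
# default shown in get_card_info; 'RAV' is a placeholder set code.
#
#     card = ArtGatherer.get_card_info(card_id=40545)
#     print(card.name, card.color_identity)
#     cards = ArtGatherer.get_full_set(code='RAV')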
| {
"content_hash": "018e9c07c9a0fc5d04a567b2fee3ab3c",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 27.972972972972972,
"alnum_prop": 0.5748792270531401,
"repo_name": "valenc3x/colorpie",
"id": "7b34df2811bea5e7f26152a6967a80c36f3b4f27",
"size": "3105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colorpie/art_gatherer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11216"
}
],
"symlink_target": ""
} |
from abc import abstractproperty
import requests
class PreparedMoodleRequest(requests.PreparedRequest):
def __init__(self):
super().__init__()
self.function = None
def prepare(self, data=None, **kwargs):
if 'wsfunction' in data:
self.function = data['wsfunction']
super().prepare(data=data, **kwargs)
def prepare_body(self, data, files, json=None):
super().prepare_body(data, files, json)
def prepare_url(self, url, params):
"""Prepares the given HTTP URL.
Mostly copied from requests lib, removed python2 checks and added checks for https"""
from urllib3.util import parse_url
from urllib3.exceptions import LocationParseError
from urllib.parse import urlunparse
from requests.exceptions import InvalidURL
from requests.utils import requote_uri
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
# normally an error is thrown, we assume https
scheme = 'https'
elif scheme != 'https':
raise InvalidURL('Invalid URL %r: must be https' % url)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if isinstance(params, (str, bytes)):
params = requests.utils.to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
class TokenRequest(PreparedMoodleRequest):
endpoint = '/login/token.php'
class UploadFileRequest(PreparedMoodleRequest):
endpoint = '/webservice/upload.php'
class DownloadFileRequest(PreparedMoodleRequest):
endpoint = '/webservice/pluginfile.php'
class WebServiceRequest(PreparedMoodleRequest):
endpoint = '/webservice/rest/server.php'
@abstractproperty
def function(self): pass
class SaveGradeRequest(WebServiceRequest):
function = 'mod_assign_save_grade'
def __init__(self):
super().__init__()
| {
"content_hash": "1bc1d14b01433b44d12a455f6ab89dc7",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 93,
"avg_line_length": 29.990825688073393,
"alnum_prop": 0.5992658305292138,
"repo_name": "manly-man/moodle-destroyer-tools",
"id": "7eba482f6e558fa5df701603147f0129eb08073b",
"size": "3269",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "playground/requests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189961"
},
{
"name": "Shell",
"bytes": "566"
}
],
"symlink_target": ""
} |
"""Defines plots for clustering models built with scikit-learn."""
from warnings import simplefilter
import pandas as pd
import sklearn
import wandb
from wandb.sklearn import utils
from wandb.sklearn import calculate
# ignore all future warnings
simplefilter(action="ignore", category=FutureWarning)
def clusterer(model, X_train, cluster_labels, labels=None, model_name="Clusterer"):
"""Generates all sklearn clusterer plots supported by W&B.
The following plots are generated:
elbow curve, silhouette plot.
Should only be called with a fitted clusterer (otherwise an error is thrown).
Arguments:
model: (clusterer) Takes in a fitted clusterer.
X_train: (arr) Training set features.
cluster_labels: (list) Names for cluster labels. Makes plots easier to read
by replacing cluster indexes with corresponding names.
        labels: (list) Named labels for target variable (y). Makes plots easier to
read by replacing target values with corresponding index.
For example if `labels=['dog', 'cat', 'owl']` all 0s are
replaced by dog, 1s by cat.
model_name: (str) Model name. Defaults to 'Clusterer'
Returns:
None: To see plots, go to your W&B run page then expand the 'media' tab
under 'auto visualizations'.
Example:
```python
wandb.sklearn.plot_clusterer(kmeans, X, cluster_labels, labels, 'KMeans')
```
"""
wandb.termlog("\nPlotting %s." % model_name)
if isinstance(model, sklearn.cluster.KMeans):
elbow_curve(model, X_train)
wandb.termlog("Logged elbow curve.")
silhouette(model, X_train, cluster_labels, labels=labels, kmeans=True)
else:
silhouette(model, X_train, cluster_labels, kmeans=False)
wandb.termlog("Logged silhouette plot.")
def elbow_curve(
clusterer=None, X=None, cluster_ranges=None, n_jobs=1, show_cluster_time=True
):
"""Measures and plots variance explained as a function of the number of clusters.
Useful in picking the optimal number of clusters.
Should only be called with a fitted clusterer (otherwise an error is thrown).
Please note this function fits the model on the training set when called.
Arguments:
        clusterer: (clusterer) Takes in a fitted clusterer.
X: (arr) Training set features.
Returns:
None: To see plots, go to your W&B run page then expand the 'media' tab
under 'auto visualizations'.
Example:
```python
wandb.sklearn.plot_elbow_curve(model, X_train)
```
"""
if not hasattr(clusterer, "n_clusters"):
wandb.termlog(
"n_clusters attribute not in classifier. Cannot plot elbow method."
)
return
not_missing = utils.test_missing(clusterer=clusterer)
    correct_types = utils.test_types(clusterer=clusterer)
is_fitted = utils.test_fitted(clusterer)
if not_missing and correct_types and is_fitted:
elbow_curve_chart = calculate.elbow_curve(
clusterer, X, cluster_ranges, n_jobs, show_cluster_time
)
wandb.log({"elbow_curve": elbow_curve_chart})
def silhouette(
clusterer=None,
X=None,
cluster_labels=None,
labels=None,
metric="euclidean",
kmeans=True,
):
"""Measures & plots silhouette coefficients.
Silhouette coefficients near +1 indicate that the sample is far away from
the neighboring clusters. A value near 0 indicates that the sample is on or
    very close to the decision boundary between two neighboring clusters, and
    negative values indicate that the samples might have been assigned to the
    wrong cluster.
Should only be called with a fitted clusterer (otherwise an error is thrown).
Please note this function fits the model on the training set when called.
Arguments:
        clusterer: (clusterer) Takes in a fitted clusterer.
X: (arr) Training set features.
cluster_labels: (list) Names for cluster labels. Makes plots easier to read
by replacing cluster indexes with corresponding names.
Returns:
None: To see plots, go to your W&B run page then expand the 'media' tab
under 'auto visualizations'.
Example:
```python
wandb.sklearn.plot_silhouette(model, X_train, ['spam', 'not spam'])
```
"""
not_missing = utils.test_missing(clusterer=clusterer)
correct_types = utils.test_types(clusterer=clusterer)
is_fitted = utils.test_fitted(clusterer)
if not_missing and correct_types and is_fitted:
if isinstance(X, (pd.DataFrame)):
X = X.values
silhouette_chart = calculate.silhouette(
clusterer, X, cluster_labels, labels, metric, kmeans
)
wandb.log({"silhouette_plot": silhouette_chart})
| {
"content_hash": "fd0d2d3068393e3540ec70c3ef8dfcfa",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 92,
"avg_line_length": 33.710344827586205,
"alnum_prop": 0.6612111292962357,
"repo_name": "wandb/client",
"id": "8704f17099473a6ae77eccc06711be861ae40539",
"size": "4888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wandb/sklearn/plot/clusterer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ThreadedComment.email_hash'
db.delete_column('generic_threadedcomment', 'email_hash')
def backwards(self, orm):
# Adding field 'ThreadedComment.email_hash'
db.add_column('generic_threadedcomment', 'email_hash', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 18, 17, 51, 43, 560694)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 18, 17, 51, 43, 560579)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'generic.threadedcomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['generic']
| {
"content_hash": "33fdb83d674baf2bbe4efe6c47aaadc0",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 182,
"avg_line_length": 72.05454545454545,
"alnum_prop": 0.554882664647994,
"repo_name": "eRestin/MezzGIS",
"id": "a94c77297f7c047717ecc781be485c2ad6a3a9ae",
"size": "7944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/generic/migrations/0009_auto__del_field_threadedcomment_email_hash.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "359014"
},
{
"name": "HTML",
"bytes": "153323"
},
{
"name": "JavaScript",
"bytes": "525988"
},
{
"name": "Nginx",
"bytes": "3644"
},
{
"name": "Perl",
"bytes": "271341"
},
{
"name": "Python",
"bytes": "1130497"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
} |
import logging
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from kitsune.questions.models import Question, QuestionMappingType
from kitsune.search.es_utils import ES_EXCEPTIONS, get_documents
from kitsune.search.tasks import index_task
log = logging.getLogger('k.cron')
class Command(BaseCommand):
help = "Archive all questions that were created over 180 days ago."
def handle(self, **options):
# Set up logging so it doesn't send Ricky email.
logging.basicConfig(level=logging.ERROR)
# Get a list of ids of questions we're going to go change. We need
# a list of ids so that we can feed it to the update, but then
# also know what we need to update in the index.
days_180 = datetime.now() - timedelta(days=180)
q_ids = list(
Question.objects.filter(is_archived=False)
.filter(created__lte=days_180)
.values_list('id', flat=True))
if q_ids:
log.info('Updating %d questions', len(q_ids))
sql = """
UPDATE questions_question
SET is_archived = 1
WHERE id IN (%s)
""" % ','.join(map(str, q_ids))
cursor = connection.cursor()
cursor.execute(sql)
if not transaction.get_connection().in_atomic_block:
transaction.commit()
if settings.ES_LIVE_INDEXING:
try:
# So... the first time this runs, it'll handle 160K
# questions or so which stresses everything. Thus we
# do it in chunks because otherwise this won't work.
#
# After we've done this for the first time, we can nix
# the chunking code.
from kitsune.search.utils import chunked
for chunk in chunked(q_ids, 100):
# Fetch all the documents we need to update.
es_docs = get_documents(QuestionMappingType, chunk)
log.info('Updating %d index documents', len(es_docs))
documents = []
# For each document, update the data and stick it
# back in the index.
for doc in es_docs:
doc[u'question_is_archived'] = True
doc[u'indexed_on'] = int(time.time())
documents.append(doc)
QuestionMappingType.bulk_index(documents)
except ES_EXCEPTIONS:
# Something happened with ES, so let's push index
# updating into an index_task which retries when it
# fails because of ES issues.
index_task.delay(QuestionMappingType, q_ids)
| {
"content_hash": "1ea321de78534e3ac90e0926b3e36936",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 38.717948717948715,
"alnum_prop": 0.5529801324503312,
"repo_name": "anushbmx/kitsune",
"id": "06c374e93e7a0ab23986e63e213b5e85ab4addfe",
"size": "3020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitsune/questions/management/commands/auto_archive_old_questions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "335184"
},
{
"name": "Dockerfile",
"bytes": "3547"
},
{
"name": "Groovy",
"bytes": "4221"
},
{
"name": "HTML",
"bytes": "628447"
},
{
"name": "JavaScript",
"bytes": "802494"
},
{
"name": "Makefile",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "2994910"
},
{
"name": "Shell",
"bytes": "19325"
},
{
"name": "TSQL",
"bytes": "1035"
}
],
"symlink_target": ""
} |
"""Tests for the reconstruction of non-debugger-decorated GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase):
_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF,
min_graph_nodes=-1)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def setUp(self):
super(ReconstructNonDebugGraphTest, self).setUp()
self._dump_dir = tempfile.mkdtemp()
self._debug_url = "file://" + self._dump_dir
ops.reset_default_graph()
def tearDown(self):
file_io.delete_recursively(self._dump_dir)
super(ReconstructNonDebugGraphTest, self).tearDown()
def _graphDefWithoutBlacklistedNodes(self, graph_def):
output_graph_def = graph_pb2.GraphDef()
for node in graph_def.node:
if node.op not in self._OP_TYPE_BLACKLIST:
new_node = output_graph_def.node.add()
new_node.CopyFrom(node)
if new_node.op == "Enter":
# The debugger sets parallel_iterations attribute of while-loop Enter
# nodes to 1 for debugging.
for attr_key in new_node.attr:
if attr_key == "parallel_iterations":
new_node.attr[attr_key].i = 1
elif new_node.op == "Switch":
# We don't check the inputs to Switch ops as their inputs may be
# Send/Recv nodes.
del new_node.input[:]
return output_graph_def
def _compareOriginalAndReconstructedGraphDefs(self,
sess,
fetches,
feed_dict=None,
expected_output=None):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
output = sess.run(fetches, feed_dict=feed_dict, options=run_options,
run_metadata=run_metadata)
if expected_output is not None:
self.assertAllClose(expected_output, output)
non_debug_graph_defs = run_metadata.partition_graphs
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_url)
run_metadata = config_pb2.RunMetadata()
output = sess.run(fetches, feed_dict=feed_dict, options=run_options,
run_metadata=run_metadata)
if expected_output is not None:
self.assertAllClose(expected_output, output)
dump = debug_data.DebugDumpDir(
self._dump_dir, partition_graphs=run_metadata.partition_graphs,
validate=True)
reconstructed = dump.reconstructed_non_debug_partition_graphs()
self.assertEqual(len(non_debug_graph_defs), len(reconstructed))
for i, non_debug_graph_def in enumerate(non_debug_graph_defs):
device_name = debug_graphs._infer_device_name(non_debug_graph_def)
test_util.assert_equal_graph_def(
self._graphDefWithoutBlacklistedNodes(reconstructed[device_name]),
self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))
# Test debug_graphs.reconstruct_non_debug_graph_def.
reconstructed_again = (
debug_graphs.reconstruct_non_debug_graph_def(
run_metadata.partition_graphs[i]))
test_util.assert_equal_graph_def(
self._graphDefWithoutBlacklistedNodes(reconstructed_again),
self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))
def testReconstructSimpleGraph(self):
with session.Session() as sess:
u = variables.Variable([12.0], name="u")
v = variables.Variable([30.0], name="v")
w = math_ops.add(u, v, name="w")
self.evaluate(u.initializer)
self.evaluate(v.initializer)
self._compareOriginalAndReconstructedGraphDefs(
sess, w, expected_output=[42.0])
def testReconstructGraphWithControlEdge(self):
with session.Session() as sess:
a = variables.Variable(10.0, name="a")
with ops.control_dependencies([a]):
b = math_ops.add(a, a, name="b")
with ops.control_dependencies([a, b]):
c = math_ops.multiply(b, b, name="c")
self.evaluate(a.initializer)
self._compareOriginalAndReconstructedGraphDefs(
sess, c, expected_output=400.0)
def testReconstructGraphWithCond(self):
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = variables.Variable(10.0, name="x")
y = variables.Variable(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
self.evaluate(x.initializer)
self.evaluate(y.initializer)
self._compareOriginalAndReconstructedGraphDefs(
sess, cond, expected_output=21.0)
def testReconstructGraphWithWhileLoop(self):
with session.Session(config=self._no_rewrite_session_config()) as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
self._compareOriginalAndReconstructedGraphDefs(sess, loop)
def testReconstructGraphWithGradients(self):
with session.Session(config=self._no_rewrite_session_config()) as sess:
u = variables.Variable(12.0, name="u")
v = variables.Variable(30.0, name="v")
x = constant_op.constant(1.1, name="x")
toy_loss = x * (u - v)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
self.evaluate(u.initializer)
self.evaluate(v.initializer)
self._compareOriginalAndReconstructedGraphDefs(sess, train_op)
if __name__ == "__main__":
test.main()
| {
"content_hash": "a9620b175cd4e6c5de4716afbd12a563",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 40.928994082840234,
"alnum_prop": 0.6774613271649559,
"repo_name": "renyi533/tensorflow",
"id": "fb722efab4ef8a8e78ce14f6980c1aa93c95208a",
"size": "7606",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/debug/lib/debug_graph_reconstruction_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
# '-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that, so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
'./include',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
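# A minimal illustration (the path below is purely hypothetical -- point it at
# whatever directory in your own build tree actually contains the generated
# compile_commands.json file):
# compilation_database_folder = '/path/to/your/project/build'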
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| {
"content_hash": "ae60a343f3096a1154cff47141c2d963",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 80,
"avg_line_length": 31.089171974522294,
"alnum_prop": 0.6805982380659701,
"repo_name": "darthdeus/dotfiles",
"id": "158acae1d4a2aba7665d924022d0fc8c62dcc811",
"size": "6281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpp_ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "253"
},
{
"name": "Emacs Lisp",
"bytes": "83954"
},
{
"name": "Lua",
"bytes": "32992"
},
{
"name": "Makefile",
"bytes": "1080"
},
{
"name": "Nix",
"bytes": "17751"
},
{
"name": "Perl",
"bytes": "4505"
},
{
"name": "Python",
"bytes": "37249"
},
{
"name": "Shell",
"bytes": "300298"
},
{
"name": "Vim Script",
"bytes": "256030"
},
{
"name": "Vim Snippet",
"bytes": "2141"
},
{
"name": "YASnippet",
"bytes": "150"
}
],
"symlink_target": ""
} |
import unittest
from quickbooks import QuickBooks
from quickbooks.objects.purchaseorder import PurchaseOrder
class PurchaseOrderTests(unittest.TestCase):
def test_unicode(self):
purchase_order = PurchaseOrder()
purchase_order.TotalAmt = 1000
self.assertEquals(str(purchase_order), '1000')
def test_valid_object_name(self):
obj = PurchaseOrder()
client = QuickBooks()
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
| {
"content_hash": "a323564bab466393b5982449b9fed02a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 27.263157894736842,
"alnum_prop": 0.7027027027027027,
"repo_name": "ZachGoldberg/python-quickbooks",
"id": "102ae393d5fe9435c459eb56d81375526ec213f9",
"size": "518",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/objects/test_purchaseorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "222448"
}
],
"symlink_target": ""
} |
import sys
if sys.platform != 'android':
from panda3d.ode import OdeBody, OdeBoxGeom, OdeMass, OdePlaneGeom, OdeSphereGeom, OdeUtil
from panda3d.core import BitMask32, NodePath, Quat, Vec4
from direct.directnotify import DirectNotifyGlobal
from toontown.minigame import DistributedMinigamePhysicsWorld
from toontown.minigame import IceGameGlobals
from toontown.golf import BuildGeometry
MetersToFeet = 3.2808399
FeetToMeters = 1.0 / MetersToFeet
class DistributedIceWorld(DistributedMinigamePhysicsWorld.DistributedMinigamePhysicsWorld):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMinigamePhysicsWorld')
floorCollideId = 1
floorMask = BitMask32(floorCollideId)
wallCollideId = 2
wallMask = BitMask32(wallCollideId)
obstacleCollideId = 4
obstacleMask = BitMask32(obstacleCollideId)
tireCollideIds = [256,
512,
1024,
2048]
tire0Mask = BitMask32(tireCollideIds[0])
tire1Mask = BitMask32(tireCollideIds[1])
tire2Mask = BitMask32(tireCollideIds[2])
tire3Mask = BitMask32(tireCollideIds[3])
allTiresMask = tire0Mask | tire1Mask | tire2Mask | tire3Mask
tireMasks = (tire0Mask,
tire1Mask,
tire2Mask,
tire3Mask)
tireDensity = 1
tireSurfaceType = 0
iceSurfaceType = 1
fenceSurfaceType = 2
def __init__(self, cr):
DistributedMinigamePhysicsWorld.DistributedMinigamePhysicsWorld.__init__(self, cr)
def delete(self):
DistributedMinigamePhysicsWorld.DistributedMinigamePhysicsWorld.delete(self)
if hasattr(self, 'floor'):
self.floor = None
return
def setupSimulation(self):
DistributedMinigamePhysicsWorld.DistributedMinigamePhysicsWorld.setupSimulation(self)
self.world.setGravity(0, 0, -32.174)
self.world.setAutoDisableFlag(1)
self.world.setAutoDisableLinearThreshold(0.5 * MetersToFeet)
self.world.setAutoDisableAngularThreshold(OdeUtil.getInfinity())
self.world.setAutoDisableSteps(10)
self.world.setCfm(1e-05 * MetersToFeet)
self.world.initSurfaceTable(3)
self.world.setSurfaceEntry(0, 1, 0.2, 0, 0, 0, 0, 0, 0.1)
self.world.setSurfaceEntry(0, 0, 0.1, 0.9, 0.1, 0, 0, 0, 0)
self.world.setSurfaceEntry(0, 2, 0.9, 0.9, 0.1, 0, 0, 0, 0)
self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
self.floor.setCollideBits(self.allTiresMask)
self.floor.setCategoryBits(self.floorMask)
self.westWall = OdePlaneGeom(self.space, Vec4(1.0, 0.0, 0.0, IceGameGlobals.MinWall[0]))
self.westWall.setCollideBits(self.allTiresMask)
self.westWall.setCategoryBits(self.wallMask)
self.space.setSurfaceType(self.westWall, self.fenceSurfaceType)
self.space.setCollideId(self.westWall, self.wallCollideId)
self.eastWall = OdePlaneGeom(self.space, Vec4(-1.0, 0.0, 0.0, -IceGameGlobals.MaxWall[0]))
self.eastWall.setCollideBits(self.allTiresMask)
self.eastWall.setCategoryBits(self.wallMask)
self.space.setSurfaceType(self.eastWall, self.fenceSurfaceType)
self.space.setCollideId(self.eastWall, self.wallCollideId)
self.southWall = OdePlaneGeom(self.space, Vec4(0.0, 1.0, 0.0, IceGameGlobals.MinWall[1]))
self.southWall.setCollideBits(self.allTiresMask)
self.southWall.setCategoryBits(self.wallMask)
self.space.setSurfaceType(self.southWall, self.fenceSurfaceType)
self.space.setCollideId(self.southWall, self.wallCollideId)
self.northWall = OdePlaneGeom(self.space, Vec4(0.0, -1.0, 0.0, -IceGameGlobals.MaxWall[1]))
self.northWall.setCollideBits(self.allTiresMask)
self.northWall.setCategoryBits(self.wallMask)
self.space.setSurfaceType(self.northWall, self.fenceSurfaceType)
self.space.setCollideId(self.northWall, self.wallCollideId)
self.floorTemp = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, 0.0))
self.floorTemp.setCollideBits(self.allTiresMask)
self.floorTemp.setCategoryBits(self.floorMask)
self.space.setSurfaceType(self.floorTemp, self.iceSurfaceType)
self.space.setCollideId(self.floorTemp, self.floorCollideId)
self.space.setAutoCollideWorld(self.world)
self.space.setAutoCollideJointGroup(self.contactgroup)
self.totalPhysicsSteps = 0
def createTire(self, tireIndex):
if tireIndex < 0 or tireIndex >= len(self.tireMasks):
self.notify.error('invalid tireIndex %s' % tireIndex)
self.notify.debug('create tireindex %s' % tireIndex)
zOffset = 0
body = OdeBody(self.world)
mass = OdeMass()
mass.setSphere(self.tireDensity, IceGameGlobals.TireRadius)
body.setMass(mass)
body.setPosition(IceGameGlobals.StartingPositions[tireIndex][0], IceGameGlobals.StartingPositions[tireIndex][1], IceGameGlobals.StartingPositions[tireIndex][2])
body.setAutoDisableDefaults()
geom = OdeSphereGeom(self.space, IceGameGlobals.TireRadius)
self.space.setSurfaceType(geom, self.tireSurfaceType)
self.space.setCollideId(geom, self.tireCollideIds[tireIndex])
self.massList.append(mass)
self.geomList.append(geom)
geom.setCollideBits(self.allTiresMask | self.wallMask | self.floorMask | self.obstacleMask)
geom.setCategoryBits(self.tireMasks[tireIndex])
geom.setBody(body)
if self.notify.getDebug():
self.notify.debug('tire geom id')
geom.write()
self.notify.debug(' -')
if self.canRender:
testTire = render.attachNewNode('tire holder %d' % tireIndex)
smileyModel = NodePath()
if not smileyModel.isEmpty():
smileyModel.setScale(IceGameGlobals.TireRadius)
smileyModel.reparentTo(testTire)
smileyModel.setAlphaScale(0.5)
smileyModel.setTransparency(1)
testTire.setPos(IceGameGlobals.StartingPositions[tireIndex])
tireModel = loader.loadModel('phase_4/models/minigames/ice_game_tire')
tireHeight = 1
tireModel.setZ(-IceGameGlobals.TireRadius + 0.01)
tireModel.reparentTo(testTire)
self.odePandaRelationList.append((testTire, body))
else:
testTire = None
self.bodyList.append((None, body))
return (testTire, body, geom)
def placeBodies(self):
for pair in self.odePandaRelationList:
pandaNodePathGeom = pair[0]
odeBody = pair[1]
if pandaNodePathGeom:
pandaNodePathGeom.setPos(odeBody.getPosition())
pandaNodePathGeom.setQuat(Quat(odeBody.getQuaternion()[0], odeBody.getQuaternion()[1], odeBody.getQuaternion()[2], odeBody.getQuaternion()[3]))
pandaNodePathGeom.setP(0)
pandaNodePathGeom.setR(0)
newQuat = pandaNodePathGeom.getQuat()
odeBody.setQuaternion(newQuat)
def postStep(self):
DistributedMinigamePhysicsWorld.DistributedMinigamePhysicsWorld.postStep(self)
self.placeBodies()
self.totalPhysicsSteps += 1
def createObstacle(self, pos, obstacleIndex, cubicObstacle):
if cubicObstacle:
return self.createCubicObstacle(pos, obstacleIndex)
else:
return self.createCircularObstacle(pos, obstacleIndex)
def createCircularObstacle(self, pos, obstacleIndex):
self.notify.debug('create obstacleindex %s' % obstacleIndex)
geom = OdeSphereGeom(self.space, IceGameGlobals.TireRadius)
geom.setCollideBits(self.allTiresMask)
geom.setCategoryBits(self.obstacleMask)
self.space.setCollideId(geom, self.obstacleCollideId)
tireModel = loader.loadModel('phase_4/models/minigames/ice_game_tirestack')
tireHeight = 1
tireModel.setPos(pos)
tireModel.reparentTo(render)
geom.setPosition(tireModel.getPos())
tireModel.setZ(0)
return tireModel
def createCubicObstacle(self, pos, obstacleIndex):
self.notify.debug('create obstacleindex %s' % obstacleIndex)
sideLength = IceGameGlobals.TireRadius * 2
geom = OdeBoxGeom(self.space, sideLength, sideLength, sideLength)
geom.setCollideBits(self.allTiresMask)
geom.setCategoryBits(self.obstacleMask)
self.space.setCollideId(geom, self.obstacleCollideId)
tireModel = loader.loadModel('phase_4/models/minigames/ice_game_crate')
tireModel.setPos(pos)
tireModel.reparentTo(render)
geom.setPosition(tireModel.getPos())
tireModel.setZ(0)
return tireModel | {
"content_hash": "594f736e070bfd835428c8f00d7598cd",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 168,
"avg_line_length": 48.752747252747255,
"alnum_prop": 0.6767722303617717,
"repo_name": "DedMemez/ODS-August-2017",
"id": "d70572361cc4079bf4fae902ba2dbd2d0c96fcc5",
"size": "8971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minigame/DistributedIceWorld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
'''
Created on Mar 17, 2017
@author: tonyq
'''
import codecs
import csv
import pandas as pd
import numpy as np
import re, sys, os
import time
from bs4 import BeautifulSoup
import logging
from keras.preprocessing.sequence import pad_sequences
from tqdm._tqdm import tqdm
from numpy import array, zeros
import operator
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
logger = logging.getLogger(__name__)
uri_re = r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))'
def stripTagsAndUris(x):
if x:
# BeautifulSoup on content
soup = BeautifulSoup(x, "html.parser")
# Stripping all <code> tags with their content if any
if soup.code:
soup.code.decompose()
# Get all the text out of the html
text = soup.get_text()
# Returning text stripping out all uris
return re.sub(uri_re, "", text)
else:
return ""
def get_words(text):
# word_split = re.compile('[^a-zA-Z0-9_\\+\\-]')
# return [word.strip().lower() for word in word_split.split(text)]
text = str(text)
# text = text.replace('’s', ' ’s')
# text = text.replace('…', ' ')
# text = text.replace('”', ' ')
# text = text.replace('“', ' ')
# text = text.replace('‘', ' ')
# text = text.replace('’', ' ')
# text = text.replace('"', ' ')
# text = text.replace("'", " ")
# text = text.replace('-', ' ')
# text = text.replace('/', ' ')
# text = text.replace("\\", " ")
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!'.+-=%]", " ", text)
text = text.replace("what's", "what is ")
text = text.replace("'s", " ")
text = text.replace("'ve", " have ")
text = text.replace("can't", "cannot ")
text = text.replace("n't", " not ")
text = text.replace("i'm", "i am ")
text = text.replace("'re", " are ")
text = text.replace("'d", " would ")
text = text.replace("'ll", " will ")
text = text.replace(",", " , ")
text = text.replace(".", " . ")
text = text.replace("!", " ! ")
# text = text.replace("/", " ")
# text = text.replace("^", " ^ ")
# text = text.replace("+", " + ")
# text = text.replace("-", " - ")
# text = text.replace("=", " = ")
text = text.replace("'", " ")
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = text.replace(":", " : ")
# text = text.replace(" e g ", " eg ")
# text = text.replace(" b g ", " bg ")
# text = text.replace(" u s ", " american ")
# text = re.sub(r"\0s", "0", text)
text = text.replace(" 9 11 ", "911")
text = text.replace("e - mail", "email")
# text = text.replace("j k", "jk")
text = re.sub(r"\s{2,}", " ", text)
# text = text.replace("\\", " ")
text = text.replace('"', ' ')
return text.strip()
# return word_tokenize(text)
# The function "text_to_wordlist" is from
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally, remove stop words
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"j k", "jk", text)
text = re.sub(r"\s{2,}", " ", text)
# Optionally, shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return (text)
def csv_processing(path, test=False):
texts_1 = []
texts_2 = []
labels = []
test_ids = []
with codecs.open(path, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
if test == False:
for values in tqdm(reader):
texts_1.append(text_to_wordlist(values[3]))
texts_2.append(text_to_wordlist(values[4]))
labels.append(int(values[5]))
return texts_1, texts_2, labels
else:
for values in tqdm(reader):
texts_1.append(text_to_wordlist(values[1]))
texts_2.append(text_to_wordlist(values[2]))
test_ids.append(values[0])
return texts_1, texts_2, test_ids
def get_pdTable(path, notag=False):
logger.info(' Processing pandas csv ')
pdtable = pd.read_csv(path)
if notag:
try:
return pdtable.test_id, pdtable.question1, pdtable.question2
except AttributeError:
return pdtable.id, pdtable.question1, pdtable.question2
else:
return pdtable.id, pdtable.question1, pdtable.question2, pdtable.is_duplicate
def text_cleaner(table):
textTable = []
maxLen = 0
for text in tqdm(table, file=sys.stdout):
text = get_words(text)
textTable.append(text)
if len(text) > maxLen:
maxLen = len(text)
return textTable, maxLen
def tokenizeIt(table, clean=False, addHead=None):
tokenizedTable = []
maxLen = 0
for text in tqdm(table, file=sys.stdout):
if clean:
# text = stripTagsAndUris(text)
text = word_tokenize(get_words(text))
if not addHead is None:
text = [addHead] + text
tokenizedTable.append(text)
if len(text) > maxLen:
maxLen = len(text)
else:
text = str(text).split(' ')
if not addHead is None:
text = [addHead] + text
tokenizedTable.append(text)
if len(text) > maxLen:
maxLen = len(text)
return tokenizedTable, maxLen
def createVocab(tableList, min_count=1, reservedList=['<pad>', '<EOF>', '<unk>']):
logger.info(' Creating vocabulary ')
contentList = []
for list1 in tableList:
contentList.extend(list1)
wdFrq = {}
total_words = 0
for line in contentList:
for wd in line:
try:
wdFrq[wd] += 1
except KeyError:
wdFrq[wd] = 1
total_words += 1
logger.info(' %i total words, %i unique words ' % (total_words, len(wdFrq)))
sorted_word_freqs = sorted(wdFrq.items(), key=operator.itemgetter(1), reverse=True)
vocab_size = 0
for _, freq in sorted_word_freqs:
if freq >= min_count:
vocab_size += 1
vocabDict = {}
vocabReverseDict = []
idx = 0
for item1 in reservedList:
vocabDict[item1] = idx
vocabReverseDict.append(item1)
idx += 1
for word, _ in sorted_word_freqs[:vocab_size]:
vocabDict[word] = idx
vocabReverseDict.append(word)
idx += 1
logger.info(' vocab size %i ' % len(vocabReverseDict))
return vocabDict, vocabReverseDict
def word2num(contentTable, vocab, unk, maxLen, padding=None, eof=None):
unk_hit = 0
totalword = 0
data = []
for line in contentTable:
w2num = []
for word in line:
if word in vocab:
w2num.append(vocab[word])
else:
if not type(unk) is type(None):
w2num.append(vocab[unk])
unk_hit += 1
totalword += 1
if not type(eof) is type(None):
w2num.append(vocab[eof])
data.append(w2num)
logger.info(' total %i tokens processed, %i unk hit ' % (totalword, unk_hit))
# pad to np array
if not type(padding) is type(None):
logger.info(' padding data to width %d by %s padding' % (maxLen, padding))
np_ary = pad_sequences(data, maxlen=maxLen, padding=padding)
else:
np_ary = array(data)
return np_ary
def to_categorical2D(y, nb_classes=None):
if not nb_classes:
nb_classes = y.max()
return (np.arange(nb_classes) == y[:,:,None]).astype(int)
def to_categoricalAll(y, nb_classes):
categorical = zeros((len(y),nb_classes))
line_idx = 0
for line in y:
for elem in line:
categorical[line_idx][elem] = 1
line_idx += 1
return categorical
def categorical_toary(y, round01=False):
(length, nb_classes) = y.shape
if round01:
y = np.around(y)
y_ary = []
for i in range(length):
y_ary.append(np.argwhere(y[i,:] == 1).ravel().tolist())
return y_ary
def prob_top_n(y, top=5):
(length, nb_classes) = y.shape
y_ary = []
for i in range(length):
idx_prob = list(zip(list(range(nb_classes)), y[i,:]))
sorted_idx_prob = sorted(idx_prob, key=operator.itemgetter(1), reverse=True)[:top]
idx_round = np.around(sorted_idx_prob).astype(int)
idx_pos = []
for (idx, prob) in idx_round:
if prob == 1:
idx_pos.append(idx)
y_ary.append(idx_pos)
return y_ary
def embdReader(embd_path, embd_dim, word_index, max_nb_words, fasttext_source='', ft_dim=0,
ft_home='/data2/tonyq/fastText/fasttext', output_dir='/data2/tonyq/quora-output/', skip_header=False,
initializer='glorot'):
########################################
## index word vectors
########################################
if not embd_path == '':
logger.info('Indexing word vectors...')
embeddings_index = {}
with open(embd_path, 'r', encoding='utf8') as f:
if skip_header or embd_path.endswith('.vec'):
next(f)
for line in tqdm(f):
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
logger.info('Found %d word vectors in embedding file.' % len(embeddings_index))
########################################
## prepare fasttext
########################################
if not fasttext_source == '':
from gensim.models.wrappers.fasttext import FastText as FT_wrapper
if fasttext_source.endswith('.bin'):
loaded_model = FT_wrapper.load(fasttext_source)
print(loaded_model)
else:
_, train_question1, train_question2 = get_pdTable(fasttext_source, notag=True)
train_question1, train_maxLen1 = text_cleaner(train_question1)
train_question2, train_maxLen2 = text_cleaner(train_question2)
train_data = train_question1 + train_question2
print('Train data lines %d' % len(train_data))
with open(output_dir + 'questions_file.txt', 'w') as fw:
for line in train_data:
fw.write(line + '\n')
print('Text saved to %s' % (output_dir + 'questions_file.txt'))
# train the model
print('Training wrapper fasttext model...')
tstart = time.time()
model_wrapper = FT_wrapper.train(ft_home, output_dir + 'questions_file.txt', size=ft_dim)
tend = time.time()
print('Time elapsed for training wrapper model %.2f' % (tend - tstart))
print(model_wrapper)
# saving a model trained via fastText wrapper
print('Loading fasttext wrapper model...')
model_wrapper.save(output_dir + 'saved_model_wrapper.bin')
########################################
## prepare embeddings
########################################
logger.info('Preparing embedding matrix based on given word list...')
nb_words = min(max_nb_words, len(word_index))+1
w2v_oov = 0
ft_oov = []
if initializer == 'zero':
# zero initialization of embedding matrix
embedding_matrix = np.zeros((nb_words, embd_dim+ft_dim))
elif initializer == 'glorot':
# glorot uniform initialization of embedding matrix
scale = 1 / nb_words # fan_in
# scale = 1 / (embd_dim + ft_dim) # fan_out
limit = np.sqrt(3. * scale)
embedding_matrix = np.random.uniform(low=-limit, high=limit, size=(nb_words, embd_dim+ft_dim))
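        # Added note, not in the original: for a Uniform(-limit, limit) draw the
        # per-component variance is limit**2 / 3 = scale, i.e. roughly 1/fan_in
        # here, which matches the scaling intent stated in the comments above.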
else:
raise NotImplementedError
reverseDict = ['']*nb_words
for word, i in tqdm(word_index.items()):
if not embd_path == '':
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i][:embd_dim] = embedding_vector
reverseDict[i] = word
else:
reverseDict[i] = '<' + word + '>'
w2v_oov += 1
if not fasttext_source == '':
try:
embedding_matrix[i][embd_dim:] = model_wrapper[word]
reverseDict[i] = word
except KeyError:
reverseDict[i] = '<' + word + '>'
ft_oov.append(word)
logger.info('Word embeddings shape: %r (%d+%d)' % (embedding_matrix.shape, embd_dim, ft_dim))
if not embd_path == '':
logger.info('Word2Vec null embeddings: %d' % w2v_oov)
if not fasttext_source == '':
logger.info('FastText null embeddings: %d' % len(ft_oov))
logger.info('FastText OOV: %r' % ft_oov)
return embedding_matrix, reverseDict
def w2vEmbdReader(embd_path, reVocab, embd_dim):
logger.info(' getting pre-trained embedding from file... ')
logger.info(' embedding length: %i dim: %i ' % (len(reVocab), embd_dim))
embd_matrix = np.zeros( (len(reVocab), embd_dim) )
with open(embd_path, 'r', encoding='utf8') as fhd:
        idx = 1 # leave the 1st (padding) row as all zeros
for line in tqdm(fhd, total=len(reVocab)):
elem = line.strip().split(' ')
assert len(elem) == embd_dim + 1, 'Incorrect Embedding Dimension, expect %d but got %d ' % (embd_dim, len(elem)-1)
w2vec = np.asarray(elem[1:], dtype='float32')
embd_matrix[idx] = w2vec
idx += 1
return embd_matrix
| {
"content_hash": "1ce015bf7e71fc34ff908a089fbda9b1",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 194,
"avg_line_length": 36.10550458715596,
"alnum_prop": 0.5266802185236946,
"repo_name": "tonyqtian/quora-simi",
"id": "cbd56200d377181530c0fbae35367d524948abbf",
"size": "15766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/data_processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "192523"
}
],
"symlink_target": ""
} |
"""Script to flash new firmware to the tactile device.
Use: python3 flash_firmware.py [port] firmware.ino.zip
This script uses the nordicsemi.dfu Python library to flash a new ino.zip
firmware binary to a connected tactile device.
Instructions:
1. Install adafruit-nrfutil with `pip3 install --user adafruit-nrfutil` or for
other options see the installation instructions at
https://github.com/adafruit/Adafruit_nRF52_nrfutil#installation
2. Connect the tactile device to the computer with a USB cable.
3. Turn on the device. We assume the device has a USB bootloader (Adafruit
Feather variant), but this might not always be the case.
4. Run the script like
$ python3 flash_firmware.py firmware.ino.zip
The script auto-detects the port where the tactile device is connected. It
looks for a device with "nrf52" in its name, otherwise it attempts to
flash to the first available port. If this port selection fails for some
reason, you can specify the port by running the script like
$ python3 flash_firmware.py /dev/tty1 firmware.ino.zip
The script prints "Device programmed" when firmware flashed successfully.
"""
import logging
import os.path
import sys
from typing import Any, List, Sequence
from nordicsemi.dfu.dfu import Dfu
from nordicsemi.dfu.dfu_transport import DfuEvent
from nordicsemi.dfu.dfu_transport_serial import DfuTransportSerial
import serial.tools.list_ports
def list_ports() -> List[Any]:
"""Get a list of available ports."""
available_ports = [tuple(p) for p in list(serial.tools.list_ports.comports())]
if not available_ports:
print('No available ports found.\n')
print('Please make sure the device is connected and turned on.')
sys.exit(1)
print('Available ports:')
for port in available_ports:
print('%-20s %s' % port[:2])
print('')
return available_ports
def select_port(available_ports: List[Any]) -> str:
"""Simple heuristic to select an available port."""
selected_port = available_ports[0][0] # Default to selecting first port.
for port in available_ports:
# If there is an nRF52 device, pick that instead.
if 'nrf52' in port[1].lower():
selected_port = port[0]
break
return selected_port
def main(argv: Sequence[str]) -> int:
# Set verbose logging level.
logging.basicConfig(format='%(message)s', level=logging.INFO)
if len(argv) == 2: # Script called with only the package arg.
_, package = argv
port = None
elif len(argv) == 3: # Called with both port and package args.
_, port, package = argv
else:
print('Use: python3 flash_firmware.py [port] firmware.ino.zip')
return 1
if not os.path.exists(package): # Fail early if package zip isn't found.
print(f'File "{package}" does not exist.')
return 1
available_ports = list_ports()
if port is None:
port = select_port(available_ports)
elif not any(port == p[0] for p in available_ports):
print(f'Port "{port}" is not available.')
return 1
baudrate = 115200
flowcontrol = False
singlebank = False
touch = 1200
serial_transport = DfuTransportSerial(port, baudrate, flowcontrol, singlebank,
touch)
def update_progress(progress=0, done=False, log_message=''):
del log_message
if done:
print('\nDone.')
elif progress:
print('#', flush=True, end='' if progress % 40 else '\n')
serial_transport.register_events_callback(DfuEvent.PROGRESS_EVENT,
update_progress)
dfu = Dfu(package, dfu_transport=serial_transport)
print(f'Upgrading target on {port} with DFU package {package}.')
dfu.dfu_send_images() # Perform the DFU.
print('Device programmed.')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "860a57268e827be77104e3a69032c988",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 80,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.6940711462450593,
"repo_name": "google/audio-to-tactile",
"id": "ea6cb91ed3112966468cad3adcc23920e458fecb",
"size": "4371",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "extras/tools/flash_firmware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2034760"
},
{
"name": "C++",
"bytes": "320571"
},
{
"name": "CMake",
"bytes": "992"
},
{
"name": "CSS",
"bytes": "5330"
},
{
"name": "HTML",
"bytes": "108204"
},
{
"name": "JavaScript",
"bytes": "202111"
},
{
"name": "Jupyter Notebook",
"bytes": "110253"
},
{
"name": "Kotlin",
"bytes": "173194"
},
{
"name": "Makefile",
"bytes": "18821"
},
{
"name": "Python",
"bytes": "295644"
},
{
"name": "Starlark",
"bytes": "39654"
}
],
"symlink_target": ""
} |
__version__ = '$Revision: 1.3 $'[11:-2]
from twisted.trial import unittest
from twisted.protocols import htb
class DummyClock:
time = 0
def set(self, when):
self.time = when
def __call__(self):
return self.time
class SomeBucket(htb.Bucket):
maxburst = 100
rate = 2
class TestBucketBase(unittest.TestCase):
def setUp(self):
self._realTimeFunc = htb.time
self.clock = DummyClock()
htb.time = self.clock
def tearDown(self):
htb.time = self._realTimeFunc
class TestBucket(TestBucketBase):
def testBucketSize(self):
"""Testing the size of the bucket."""
b = SomeBucket()
fit = b.add(1000)
self.assertEqual(100, fit)
def testBucketDrain(self):
"""Testing the bucket's drain rate."""
b = SomeBucket()
fit = b.add(1000)
self.clock.set(10)
fit = b.add(1000)
self.assertEqual(20, fit)
def test_bucketEmpty(self):
"""
L{htb.Bucket.drip} returns C{True} if the bucket is empty after that drip.
"""
b = SomeBucket()
b.add(20)
self.clock.set(9)
empty = b.drip()
self.assertFalse(empty)
self.clock.set(10)
empty = b.drip()
self.assertTrue(empty)
class TestBucketNesting(TestBucketBase):
def setUp(self):
TestBucketBase.setUp(self)
self.parent = SomeBucket()
self.child1 = SomeBucket(self.parent)
self.child2 = SomeBucket(self.parent)
def testBucketParentSize(self):
# Use up most of the parent bucket.
self.child1.add(90)
fit = self.child2.add(90)
self.assertEqual(10, fit)
def testBucketParentRate(self):
# Make the parent bucket drain slower.
self.parent.rate = 1
# Fill both child1 and parent.
self.child1.add(100)
self.clock.set(10)
fit = self.child1.add(100)
# How much room was there? The child bucket would have had 20,
# but the parent bucket only ten (so no, it wouldn't make too much
# sense to have a child bucket draining faster than its parent in a real
# application.)
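        # Spelled-out numbers (my reading of the setup above, not part of the
        # original test): child1.add(100) fills both child1 and the parent to
        # their 100-unit maxburst; after 10 ticks child1 drains 2 * 10 = 20 but
        # the parent only 1 * 10 = 10, so the parent's 10 units of room win.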
self.assertEqual(10, fit)
# TODO: Test the Transport stuff?
from test_pcp import DummyConsumer
class ConsumerShaperTest(TestBucketBase):
def setUp(self):
TestBucketBase.setUp(self)
self.underlying = DummyConsumer()
self.bucket = SomeBucket()
self.shaped = htb.ShapedConsumer(self.underlying, self.bucket)
def testRate(self):
        # Start off with a full bucket, so the burst-size doesn't factor in
# to the calculations.
delta_t = 10
self.bucket.add(100)
self.shaped.write("x" * 100)
self.clock.set(delta_t)
self.shaped.resumeProducing()
self.assertEqual(len(self.underlying.getvalue()),
delta_t * self.bucket.rate)
def testBucketRefs(self):
self.assertEqual(self.bucket._refcount, 1)
self.shaped.stopProducing()
self.assertEqual(self.bucket._refcount, 0)
| {
"content_hash": "b52eb27eeec60715f812e60547745e0c",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 82,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.5882899628252788,
"repo_name": "timkrentz/SunTracker",
"id": "7f5ef873decf55ae3d7fe3d721c7407221457f15",
"size": "3246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/test/test_htb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from systempay import models
class SystemPayTransactionAdmin(admin.ModelAdmin):
list_display = ['mode', 'operation_type', 'amount', 'currency',
'order_number', 'trans_id', 'trans_date', 'date_created']
readonly_fields = [
'operation_type',
'mode',
'amount',
'currency',
'order_number',
'result',
'auth_result',
'trans_id',
'trans_date',
'error_message',
'raw_request',
'date_created',
'computed_signature',
'request'
]
admin.site.register(models.SystemPayTransaction, SystemPayTransactionAdmin)
| {
"content_hash": "e7b6fd35bfcfd3364e577bc7d96c5b50",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 26,
"alnum_prop": 0.5784023668639053,
"repo_name": "bastien34/django-oscar-systempay",
"id": "feed586a137efae6b054f058aab4ea2c50da357c",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "systempay/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8239"
},
{
"name": "Python",
"bytes": "51960"
}
],
"symlink_target": ""
} |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='managed_user_shared_setting_specifics.proto',
package='sync_pb',
serialized_pb='\n+managed_user_shared_setting_specifics.proto\x12\x07sync_pb\"k\n!ManagedUserSharedSettingSpecifics\x12\r\n\x05mu_id\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x1b\n\x0c\x61\x63knowledged\x18\x04 \x01(\x08:\x05\x66\x61lseB\x04H\x03`\x01')
_MANAGEDUSERSHAREDSETTINGSPECIFICS = _descriptor.Descriptor(
name='ManagedUserSharedSettingSpecifics',
full_name='sync_pb.ManagedUserSharedSettingSpecifics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mu_id', full_name='sync_pb.ManagedUserSharedSettingSpecifics.mu_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key', full_name='sync_pb.ManagedUserSharedSettingSpecifics.key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='sync_pb.ManagedUserSharedSettingSpecifics.value', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='acknowledged', full_name='sync_pb.ManagedUserSharedSettingSpecifics.acknowledged', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=56,
serialized_end=163,
)
DESCRIPTOR.message_types_by_name['ManagedUserSharedSettingSpecifics'] = _MANAGEDUSERSHAREDSETTINGSPECIFICS
class ManagedUserSharedSettingSpecifics(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MANAGEDUSERSHAREDSETTINGSPECIFICS
# @@protoc_insertion_point(class_scope:sync_pb.ManagedUserSharedSettingSpecifics)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003`\001')
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "baec38099795d57d432a013a04a433cd",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 299,
"avg_line_length": 39.66233766233766,
"alnum_prop": 0.733464309102816,
"repo_name": "smartdj/chrome-sync-server",
"id": "eb0f761a8ab84e8789f55c50ef5c1ff053d9d977",
"size": "3168",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "protocol/managed_user_shared_setting_specifics_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4023"
},
{
"name": "C",
"bytes": "6623"
},
{
"name": "C++",
"bytes": "3562442"
},
{
"name": "Java",
"bytes": "1337225"
},
{
"name": "Protocol Buffer",
"bytes": "410915"
},
{
"name": "Python",
"bytes": "1493999"
},
{
"name": "Shell",
"bytes": "6362"
}
],
"symlink_target": ""
} |
import mraa
IN = mraa.DIR_IN
OUT = mraa.DIR_OUT
class gpio:
def __init__(self, pin, direction):
self.gpio = mraa.Gpio(pin)
self.gpio.dir(direction)
def input(self):
return self.gpio.read()
def output(self, value):
self.gpio.write(value)
def on(self):
self.gpio.write(1)
def off(self):
self.gpio.write(0)
| {
"content_hash": "0e14eed482d9afc6c56993a3873162c2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 39,
"avg_line_length": 18,
"alnum_prop": 0.5767195767195767,
"repo_name": "fjacob21/pycon2015",
"id": "79b45e70b1cbccba78983c2c5ce404acc45e77eb",
"size": "1573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elpiwear/Edison/gpio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167709"
}
],
"symlink_target": ""
} |
from fs.tests import FSTestCases, ThreadingTestCases
import unittest
import os
import sys
import shutil
import tempfile
import subprocess
import time
from os.path import abspath
import urllib
from six import PY3
try:
from pyftpdlib import ftpserver
except ImportError:
if not PY3:
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
from fs.path import *
from fs import ftpfs
ftp_port = 30000
class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
__test__ = not PY3
def setUp(self):
global ftp_port
ftp_port += 1
use_port = str(ftp_port)
#ftp_port = 10000
self.temp_dir = tempfile.mkdtemp(u"ftpfstests")
file_path = __file__
if ':' not in file_path:
file_path = abspath(file_path)
# Apparently Windows requires values from default environment, so copy the exisiting os.environ
env = os.environ.copy()
env['PYTHONPATH'] = os.getcwd() + os.pathsep + env.get('PYTHONPATH', '')
self.ftp_server = subprocess.Popen([sys.executable,
file_path,
self.temp_dir,
use_port],
stdout=subprocess.PIPE,
env=env)
# Block until the server writes a line to stdout
self.ftp_server.stdout.readline()
# Poll until a connection can be made
start_time = time.time()
while time.time() - start_time < 5:
try:
ftpurl = urllib.urlopen('ftp://127.0.0.1:%s' % use_port)
except IOError:
time.sleep(0)
else:
ftpurl.read()
ftpurl.close()
break
else:
# Avoid a possible infinite loop
raise Exception("Unable to connect to ftp server")
self.fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', dircache=True, port=use_port, timeout=5.0)
self.fs.cache_hint(True)
def tearDown(self):
#self.ftp_server.terminate()
if sys.platform == 'win32':
os.popen('TASKKILL /PID '+str(self.ftp_server.pid)+' /F')
else:
os.system('kill '+str(self.ftp_server.pid))
shutil.rmtree(self.temp_dir)
self.fs.close()
def check(self, p):
check_path = self.temp_dir.rstrip(os.sep) + os.sep + p
return os.path.exists(check_path.encode('utf-8'))
if __name__ == "__main__":
# Run an ftp server that exposes a given directory
import sys
authorizer = ftpserver.DummyAuthorizer()
authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
authorizer.add_anonymous(sys.argv[1])
def nolog(*args):
pass
ftpserver.log = nolog
ftpserver.logline = nolog
handler = ftpserver.FTPHandler
handler.authorizer = authorizer
address = ("127.0.0.1", int(sys.argv[2]))
#print address
ftpd = ftpserver.FTPServer(address, handler)
sys.stdout.write('serving\n')
sys.stdout.flush()
ftpd.serve_forever()
| {
"content_hash": "4224b35a1702e5d7896720a74bd95eda",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 103,
"avg_line_length": 29.61467889908257,
"alnum_prop": 0.5641263940520446,
"repo_name": "wylieswanson/agilepyfs",
"id": "588367f7eddc9d7e65f90907310bfe390111997d",
"size": "3250",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fs/tests/test_ftpfs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1067599"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
from logic.notification_request import CONTENT_TYPE_JSON
from models import Employee
class Subscription(ndb.Model):
"""Models a webhook subscription."""
request_method = ndb.StringProperty(required=True, default='post')
request_format = ndb.StringProperty(required=True, default=CONTENT_TYPE_JSON)
request_url = ndb.StringProperty(required=True)
active = ndb.BooleanProperty(required=True, default=False)
event = ndb.StringProperty(required=True)
secret = ndb.StringProperty(required=True)
timestamp = ndb.DateTimeProperty(auto_now_add=True)
owner_key = ndb.KeyProperty(kind=Employee)
@classmethod
def create_from_dict(cls, d, persist=True):
new_subscription = cls()
new_subscription.owner_key = Employee.get_current_employee().key
new_subscription.request_url = d['request_url']
new_subscription.active = d['active']
new_subscription.event = d['event']
new_subscription.secret = d['secret']
if persist is True:
new_subscription.put()
return new_subscription
@classmethod
def all_active_for_event(cls, event):
return cls.query(
cls.active == True, # noqa
cls.event == event,
)
| {
"content_hash": "1a4d8e8878c38e13811bcffbcddec0dd",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 81,
"avg_line_length": 34.86486486486486,
"alnum_prop": 0.6759689922480621,
"repo_name": "Yelp/love",
"id": "c27732583855c7ad8b41e01353e3783243c8882e",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/subscription.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5902"
},
{
"name": "HTML",
"bytes": "43114"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Makefile",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "118945"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from traits.has_traits import Interface
# ============= standard library imports ========================
# ============= local library imports ==========================
class IDVCSource(Interface):
def get_irradiation_import_spec(self, name):
pass
def connect(self):
pass
def get_irradiation_names(self):
pass
# ============= EOF =============================================
| {
"content_hash": "297285b7333e9efd50e5bd989044fcf4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 65,
"avg_line_length": 23.1,
"alnum_prop": 0.474025974025974,
"repo_name": "NMGRL/pychron",
"id": "8a6792cf623c5251db451d45fad6e11032e96ee4",
"size": "1191",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/data_mapper/sources/idvc_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
from types import SimpleNamespace
from typing import Dict
from unittest.mock import MagicMock, patch
from django.http import HttpRequest
from django.http.response import HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.actions import do_rename_stream
from zerver.lib.exceptions import InvalidJSONError, JsonableError
from zerver.lib.send_email import FromAddress
from zerver.lib.test_classes import WebhookTestCase, ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.users import get_api_key
from zerver.lib.webhooks.common import (
INVALID_JSON_MESSAGE,
MISSING_EVENT_HEADER_MESSAGE,
MissingHTTPEventHeader,
get_fixture_http_headers,
standardize_headers,
validate_extract_webhook_http_header,
)
from zerver.models import UserProfile, get_realm, get_user
class WebhooksCommonTestCase(ZulipTestCase):
def test_webhook_http_header_header_exists(self) -> None:
webhook_bot = get_user("webhook-bot@zulip.com", get_realm("zulip"))
request = HostRequestMock()
request.META["HTTP_X_CUSTOM_HEADER"] = "custom_value"
request.user = webhook_bot
header_value = validate_extract_webhook_http_header(
request, "X_CUSTOM_HEADER", "test_webhook"
)
self.assertEqual(header_value, "custom_value")
def test_webhook_http_header_header_does_not_exist(self) -> None:
realm = get_realm("zulip")
webhook_bot = get_user("webhook-bot@zulip.com", realm)
webhook_bot.last_reminder = None
notification_bot = self.notification_bot(realm)
request = HostRequestMock()
request.user = webhook_bot
request.path = "some/random/path"
exception_msg = "Missing the HTTP event header 'X_CUSTOM_HEADER'"
with self.assertRaisesRegex(MissingHTTPEventHeader, exception_msg):
validate_extract_webhook_http_header(request, "X_CUSTOM_HEADER", "test_webhook")
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path=request.path,
header_name="X_CUSTOM_HEADER",
integration_name="test_webhook",
support_email=FromAddress.SUPPORT,
).rstrip()
self.assertEqual(msg.sender.id, notification_bot.id)
self.assertEqual(msg.content, expected_message)
def test_notify_bot_owner_on_invalid_json(self) -> None:
@webhook_view("ClientName", notify_bot_owner_on_invalid_json=False)
def my_webhook_no_notify(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
raise InvalidJSONError("Malformed JSON")
@webhook_view("ClientName", notify_bot_owner_on_invalid_json=True)
def my_webhook_notify(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
raise InvalidJSONError("Malformed JSON")
webhook_bot_email = "webhook-bot@zulip.com"
webhook_bot_realm = get_realm("zulip")
webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
webhook_bot_api_key = get_api_key(webhook_bot)
request = HostRequestMock()
request.POST["api_key"] = webhook_bot_api_key
request.host = "zulip.testserver"
expected_msg = INVALID_JSON_MESSAGE.format(webhook_name="ClientName")
last_message_id = self.get_last_message().id
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_no_notify(request)
# First verify that without the setting, it doesn't send a PM to bot owner.
msg = self.get_last_message()
self.assertEqual(msg.id, last_message_id)
self.assertNotEqual(msg.content, expected_msg.strip())
# Then verify that with the setting, it does send such a message.
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_notify(request)
msg = self.get_last_message()
self.assertNotEqual(msg.id, last_message_id)
self.assertEqual(msg.sender.id, self.notification_bot(webhook_bot_realm).id)
self.assertEqual(msg.content, expected_msg.strip())
@patch("zerver.lib.webhooks.common.importlib.import_module")
def test_get_fixture_http_headers_for_success(self, import_module_mock: MagicMock) -> None:
def fixture_to_headers(fixture_name: str) -> Dict[str, str]:
# A sample function which would normally perform some
# extra operations before returning a dictionary
# corresponding to the fixture name passed. For this test,
# we just return a fixed dictionary.
return {"key": "value"}
fake_module = SimpleNamespace(fixture_to_headers=fixture_to_headers)
import_module_mock.return_value = fake_module
headers = get_fixture_http_headers("some_integration", "complex_fixture")
self.assertEqual(headers, {"key": "value"})
def test_get_fixture_http_headers_for_non_existant_integration(self) -> None:
headers = get_fixture_http_headers("some_random_nonexistant_integration", "fixture_name")
self.assertEqual(headers, {})
@patch("zerver.lib.webhooks.common.importlib.import_module")
def test_get_fixture_http_headers_with_no_fixtures_to_headers_function(
self,
import_module_mock: MagicMock,
) -> None:
fake_module = SimpleNamespace()
import_module_mock.return_value = fake_module
self.assertEqual(
get_fixture_http_headers("some_integration", "simple_fixture"),
{},
)
def test_standardize_headers(self) -> None:
self.assertEqual(standardize_headers({}), {})
raw_headers = {"Content-Type": "text/plain", "X-Event-Type": "ping"}
djangoified_headers = standardize_headers(raw_headers)
expected_djangoified_headers = {"CONTENT_TYPE": "text/plain", "HTTP_X_EVENT_TYPE": "ping"}
self.assertEqual(djangoified_headers, expected_djangoified_headers)
class WebhookURLConfigurationTestCase(WebhookTestCase):
STREAM_NAME = "helloworld"
WEBHOOK_DIR_NAME = "helloworld"
URL_TEMPLATE = "/api/v1/external/helloworld?stream={stream}&api_key={api_key}"
def setUp(self) -> None:
super().setUp()
stream = self.subscribe(self.test_user, self.STREAM_NAME)
# In actual webhook tests, we will not need to use stream id.
# We assign stream id to STREAM_NAME for testing URL configuration only.
self.STREAM_NAME = str(stream.id)
do_rename_stream(stream, "helloworld_renamed", self.test_user)
self.url = self.build_webhook_url()
def test_trigger_stream_message_by_id(self) -> None:
# check_webhook cannot be used here as it
# subscribes the test user to self.STREAM_NAME
payload = self.get_body("hello")
self.send_webhook_payload(
self.test_user, self.url, payload, content_type="application/json"
)
expected_topic = "Hello World"
expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Marilyn Monroe](https://en.wikipedia.org/wiki/Marilyn_Monroe)**"
msg = self.get_last_message()
self.assert_stream_message(
message=msg,
stream_name="helloworld_renamed",
topic_name=expected_topic,
content=expected_message,
)
class MissingEventHeaderTestCase(WebhookTestCase):
STREAM_NAME = "groove"
URL_TEMPLATE = "/api/v1/external/groove?stream={stream}&api_key={api_key}"
# This tests the validate_extract_webhook_http_header function with
# an actual webhook, instead of just making a mock
def test_missing_event_header(self) -> None:
self.subscribe(self.test_user, self.STREAM_NAME)
result = self.client_post(
self.url,
self.get_body("ticket_state_changed"),
content_type="application/x-www-form-urlencoded",
)
self.assert_json_error(result, "Missing the HTTP event header 'X_GROOVE_EVENT'")
realm = get_realm("zulip")
webhook_bot = get_user("webhook-bot@zulip.com", realm)
webhook_bot.last_reminder = None
notification_bot = self.notification_bot(realm)
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path="/api/v1/external/groove",
header_name="X_GROOVE_EVENT",
integration_name="Groove",
support_email=FromAddress.SUPPORT,
).rstrip()
if msg.sender.id != notification_bot.id: # nocoverage
# This block seems to fire occasionally; debug output:
print(msg)
print(msg.content)
self.assertEqual(msg.sender.id, notification_bot.id)
self.assertEqual(msg.content, expected_message)
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("groove", fixture_name, file_type="json")
| {
"content_hash": "86fcd07af6acb830a8c9c2bd16bd4a61",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 178,
"avg_line_length": 42.905660377358494,
"alnum_prop": 0.6655672823218998,
"repo_name": "eeshangarg/zulip",
"id": "97940e7c094f52d67847a5f4b00282b446363323",
"size": "9096",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/tests/test_webhooks_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
import argparse
import string
class TestGroup:
def __init__(self, name, parent = None):
self.parent = parent
self.name = name
self.testGroups = {}
self.testCases = {}
if parent:
assert not name in parent.testGroups
parent.testGroups[name] = self
def getName (self):
return self.name
def getPath (self):
if self.parent:
return self.parent.getPath() + "." + self.name
else:
return self.name
def hasGroup(self, groupName):
return groupName in self.testGroups
def getGroup(self, groupName):
return self.testGroups[groupName]
def hasTest(self, testName):
return testName in self.testCases
def getTest(self, testName):
return self.testCases[testName]
def hasTestCases(self):
return len(self.testCases) != 0
def hasTestGroups(self):
return len(self.testGroups) != 0
def getTestCases(self):
return self.testCases.values()
def getTestGroups(self):
return self.testGroups.values()
class TestCase:
def __init__(self, name, parent):
self.name = name
self.parent = parent
assert not name in self.parent.testCases
self.parent.testCases[name] = self
def getPath (self):
return self.parent.getPath() + "." + self.name
def getName(self):
return self.name
def addGroupToHierarchy(rootGroup, path):
pathComponents = string.split(path, ".")
currentGroup = rootGroup
assert pathComponents[0] == rootGroup.getName()
for i in range(1, len(pathComponents)):
component = pathComponents[i]
if currentGroup.hasGroup(component):
currentGroup = currentGroup.getGroup(component)
else:
currentGroup = TestGroup(component, parent=currentGroup)
def addTestToHierarchy(rootGroup, path):
pathComponents = string.split(path, ".")
currentGroup = rootGroup
assert pathComponents[0] == rootGroup.getName()
for i in range(1, len(pathComponents)):
component = pathComponents[i]
if i == len(pathComponents) - 1:
TestCase(component, currentGroup)
else:
if currentGroup.hasGroup(component):
currentGroup = currentGroup.getGroup(component)
else:
currentGroup = TestGroup(component, parent=currentGroup)
def loadTestHierarchy (input):
line = input.readline()
rootGroup = None
if line.startswith("GROUP: "):
groupName = line[len("GROUP: "):-1]
rootGroup = TestGroup(groupName)
else:
assert False
for line in input:
if line.startswith("GROUP: "):
groupPath = line[len("GROUP: "):-1]
addGroupToHierarchy(rootGroup, groupPath)
elif line.startswith("TEST: "):
testPath = line[len("TEST: "):-1]
addTestToHierarchy(rootGroup, testPath)
else:
assert False
return rootGroup
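# For reference, the hierarchy file parsed above is expected to look like the
# following (a hypothetical excerpt, inferred from the prefixes handled in
# loadTestHierarchy; the first line must name the root group and every later
# path is rooted at it):
#
#   GROUP: dEQP-GLES3
#   GROUP: dEQP-GLES3.info
#   TEST: dEQP-GLES3.info.vendor
#   TEST: dEQP-GLES3.info.renderer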
def hasFilteredCases(group, includeTests):
for child in group.getTestCases():
if child.getPath() in includeTests:
return True
for child in group.getTestGroups():
if hasFilteredCases(child, includeTests):
return True
return False
def addFilteredTest(parent, group, includeTests):
for child in group.getTestGroups():
if hasFilteredCases(child, includeTests):
newChild = TestGroup(child.getName(), parent)
addFilteredTest(newChild, child, includeTests)
for child in group.getTestCases():
if child.getPath() in includeTests:
TestCase(child.getName(), parent)
def filterTests(includeTests, group):
root = TestGroup(group.getName())
addFilteredTest(root, group, includeTests)
return root
def writeAndroidCTSTest(test, output):
output.write('<Test name="%s" />\n' % test.getName())
def writeAndroidCTSTestCase(group, output):
assert group.hasTestCases()
assert not group.hasTestGroups()
output.write('<TestCase name="%s">\n' % group.getName())
for testCase in group.getTestCases():
writeAndroidCTSTest(testCase, output)
output.write('</TestCase>\n')
def writeAndroidCTSTestSuite(group, output):
output.write('<TestSuite name="%s">\n' % group.getName())
for childGroup in group.getTestGroups():
if childGroup.hasTestCases():
assert not childGroup.hasTestGroups()
writeAndroidCTSTestCase(childGroup, output)
elif childGroup.hasTestGroups():
writeAndroidCTSTestSuite(childGroup, output)
# \note Skips groups without testcases or child groups
output.write('</TestSuite>\n')
def writeAndroidCTSFile(rootGroup, output, mustpass, name="dEQP-GLES3", appPackageName="com.drawelements.deqp.gles3"):
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
output.write('<TestPackage name="%s" appPackageName="%s" testType="deqpTest">\n' % (name, appPackageName))
writeAndroidCTSTestSuite(filterTests(mustpass, rootGroup), output)
output.write('</TestPackage>\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input', type=argparse.FileType('r'), help="Input dEQP test hierarchy in txt format.")
parser.add_argument('output', type=argparse.FileType('w'), help="Output file for Android CTS test file.")
parser.add_argument('--name', dest="name", type=str, required=True, help="Name of the test package")
parser.add_argument('--package', dest="package", type=str, required=True, help="Name of the app package")
parser.add_argument('--must-pass', dest="mustpass", type=argparse.FileType('r'), required=True, help="Must pass file")
args = parser.parse_args()
rootGroup = loadTestHierarchy(args.input)
writeAndroidCTSFile(rootGroup, args.output, name=args.name, appPackageName=args.package, mustpass=set(map(lambda x : x.rstrip(), args.mustpass.readlines())))
| {
"content_hash": "5b25da2f2dc06afbbbacb91fcc26e602",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 158,
"avg_line_length": 29.021164021164022,
"alnum_prop": 0.7132178669097539,
"repo_name": "geekboxzone/lollipop_external_deqp",
"id": "1f23a2c857bc201fdcec694d7711224a785146f2",
"size": "5485",
"binary": false,
"copies": "7",
"ref": "refs/heads/geekbox",
"path": "android/scripts/GenAndroidCTSXML.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "320"
},
{
"name": "C",
"bytes": "479104"
},
{
"name": "C++",
"bytes": "19006248"
},
{
"name": "CMake",
"bytes": "172740"
},
{
"name": "HTML",
"bytes": "55742"
},
{
"name": "Java",
"bytes": "22702"
},
{
"name": "Makefile",
"bytes": "25178"
},
{
"name": "Objective-C",
"bytes": "16593"
},
{
"name": "Objective-C++",
"bytes": "17364"
},
{
"name": "Python",
"bytes": "287686"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
import json
import os
from django.contrib import admin
from django.db import models
from .templates import *
from .utils import get_fieldclass_by_name
class BaseSettings(object):
def template(self, *args):
settings_str = self.tpl.format(*args)
return json.loads(settings_str)
@property
def to_settings(self):
raise NotImplementedError(
'You should implement the to_settings property'
)
class DBSettings(BaseSettings):
engine = ''
name = ''
tpl = db_tpl
def __init__(self, base_dir, *args, **kwargs):
super(DBSettings, self).__init__(*args, **kwargs)
self.base_dir = base_dir
self.engine = kwargs.get('db_engine', DEFAULT_DB_ENGINE)
self.name = os.path.join(
self.base_dir,
kwargs.get('db_name', DEFAULT_DB_NAME)
)
@property
def to_settings(self):
return self.template(self.engine, self.name)
class TemplateSettings(BaseSettings):
backend = ''
app_dirs = True
base_dir = ''
tpl = templates_tpl
def __init__(self, base_dir, *args, **kwargs):
super(TemplateSettings, self).__init__(*args, **kwargs)
self.backend = kwargs.get('backend', DEFAULT_TEMPLATE_BACKEND)
self.app_dirs = kwargs.get('app_dirs', DEFAULT_APP_DIRS)
self.base_dir = base_dir
@property
def to_settings(self):
return self.template(self.backend, self.base_dir, str(self.app_dirs).lower())
class MiddlewareSettings(BaseSettings):
def __init__(self, *args, **kwargs):
super(MiddlewareSettings, self).__init__(*args, **kwargs)
@property
def to_settings(self):
return {
'MIDDLEWARE_CLASSES': base_middlewares
}
class StaticSettings(BaseSettings):
tpl = statics_tpl
def __init__(self, base_dir, *args, **kwargs):
super(StaticSettings, self).__init__(*args, **kwargs)
self.base_dir = base_dir
@property
def to_settings(self):
return self.template('/static/', os.path.join(self.base_dir, 'static'))
class ModelFactory(object):
@classmethod
def create(cls, app_label, module, *args, **kwargs):
try:
name = kwargs.pop('name')
kwargs['attrs'] = kwargs.pop('attrs', {})
has_admin = kwargs.pop('admin', False)
META = type('Meta', (object,), {'app_label': app_label})
kwargs['attrs']['Meta'] = META
kwargs['attrs']['__module__'] = module
fields = kwargs.pop('fields', {})
for arg, opts in fields.iteritems():
field_obj = None
if isinstance(opts, list):
field_class = get_fieldclass_by_name(opts[0])
field_obj = field_class(**opts[1])
elif isinstance(opts, dict):
field_obj = opts
kwargs['attrs'][arg] = field_obj
model = type(name, (models.Model,), kwargs['attrs'])
if has_admin:
admin.site.register(model)
return model
except KeyError:
return None
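# A minimal usage sketch for ModelFactory.create; the model name, app label and
# field spec below are hypothetical, and the accepted field-name strings depend
# on get_fieldclass_by_name (defined in .utils, not shown here):
#
#   Article = ModelFactory.create(
#       'blog', __name__,
#       name='Article',
#       admin=True,
#       fields={
#           'title': ['CharField', {'max_length': 100}],  # [field class name, field kwargs]
#           'body': ['TextField', {}],
#       },
#   )
#
# List values are resolved to field classes and instantiated; dict values are
# attached to the generated model's attributes unchanged.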
class Config(object):
def __init__(self, *args, **kwargs):
# keep the registered settings on the instance so separate Config
# objects do not share state through a mutable class attribute
self._data = []
@property
def settings(self):
res = {}
for conf in self._data:
res.update(conf.to_settings)
return res
def register(self, conf):
self._data.append(conf)
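# A rough wiring sketch (assumes a BASE_DIR string; Config merges each
# registered object's to_settings fragment into one settings dict):
#
#   config = Config()
#   config.register(DBSettings(BASE_DIR))
#   config.register(TemplateSettings(BASE_DIR))
#   config.register(MiddlewareSettings())
#   config.register(StaticSettings(BASE_DIR))
#   settings_dict = config.settings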
| {
"content_hash": "e224b58c7c972a287c5787d376830294",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 85,
"avg_line_length": 26.976744186046513,
"alnum_prop": 0.5658045977011494,
"repo_name": "fmarco/django-lite",
"id": "5722f70b2d32aedcd8d43f875e4aa5fa06ad5fb2",
"size": "3503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_lite/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "308"
},
{
"name": "Python",
"bytes": "20112"
}
],
"symlink_target": ""
} |
from isserviceup.services.models.statuspage import StatusPagePlugin
class HashiCorp(StatusPagePlugin):
name = 'HashiCorp'
status_url = 'https://status.hashicorp.com/'
icon_url = '/images/icons/hashicorp.png'
| {
"content_hash": "c39d9f8f0004abaa568b0f210fd65b9d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 67,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.7477477477477478,
"repo_name": "marcopaz/is-service-up",
"id": "1333ccb3674daca54717768ecfcae785cb9e9371",
"size": "222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isserviceup/services/hashicorp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4372"
},
{
"name": "HTML",
"bytes": "1758"
},
{
"name": "JavaScript",
"bytes": "20583"
},
{
"name": "Python",
"bytes": "52615"
},
{
"name": "Vue",
"bytes": "13608"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class abstractclassmethod(classmethod): # NOQA: N801
"""Backport from Python 3.2
Once we are only on Python 3.3+, `abstractmethod` should be sufficient.
"""
__slots__ = ()
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super(abstractclassmethod, self).__init__(callable)
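# Hypothetical usage sketch (Python 2 style, mirroring the abstractclassmethod
# example from the Python 3.2 docs this backports; assumes abc is imported):
#
#   class Loader(object):
#       __metaclass__ = abc.ABCMeta
#
#       @abstractclassmethod
#       def from_config(cls, config):
#           ...
#
# Subclasses that fail to override from_config cannot be instantiated.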
| {
"content_hash": "e95b38e8b8590b0d667d3f1412a81892",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 27.941176470588236,
"alnum_prop": 0.6589473684210526,
"repo_name": "epage/nixnet-python",
"id": "1fbabae7efa4de07ed634d0dead646cb1a4b1ab3",
"size": "477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nixnet/_py2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "696993"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
readme = ""
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, "README.rst")
if os.path.exists(readme_path):
with open(readme_path, "rb") as stream:
readme = stream.read().decode("utf8")
setup(
long_description=readme,
name="lightbus",
version="1.2.0a4",
description="RPC & event framework for Python 3",
python_requires=">=3.7",
project_urls={
"documentation": "https://lightbus.org",
"homepage": "https://lightbus.org",
"repository": "https://github.com/adamcharnock/lightbus/",
},
author="Adam Charnock",
author_email="adam@adamcharnock.com",
keywords="python messaging redis bus queue",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: AsyncIO",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Topic :: System :: Networking",
"Topic :: Communications",
],
entry_points={
"console_scripts": ["lightbus = lightbus.commands:lightbus_entry_point"],
"lightbus_event_transports": [
"debug = lightbus:DebugEventTransport",
"redis = lightbus:RedisEventTransport",
],
"lightbus_plugins": [
"internal_metrics = lightbus.plugins.metrics:MetricsPlugin",
"internal_state = lightbus.plugins.state:StatePlugin",
],
"lightbus_result_transports": [
"debug = lightbus:DebugResultTransport",
"redis = lightbus:RedisResultTransport",
],
"lightbus_rpc_transports": [
"debug = lightbus:DebugRpcTransport",
"redis = lightbus:RedisRpcTransport",
],
"lightbus_schema_transports": [
"debug = lightbus:DebugSchemaTransport",
"redis = lightbus:RedisSchemaTransport",
],
},
packages=[
"lightbus",
"lightbus.client",
"lightbus.client.docks",
"lightbus.client.internal_messaging",
"lightbus.client.subclients",
"lightbus.commands",
"lightbus.config",
"lightbus.plugins",
"lightbus.schema",
"lightbus.serializers",
"lightbus.transports",
"lightbus.transports.redis",
"lightbus.utilities",
],
package_dir={"": "."},
package_data={},
install_requires=["aioredis>=1.2.0", "jsonschema>=3.2", "pyyaml>=3.12"],
)
| {
"content_hash": "dec14041966b524d9a4acb0625ac82d8",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 81,
"avg_line_length": 32.583333333333336,
"alnum_prop": 0.5933503836317136,
"repo_name": "adamcharnock/lightbus",
"id": "1d26e026f89c7acf328898793ab8d737a2bf664b",
"size": "2874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "710699"
}
],
"symlink_target": ""
} |
import json
from database import db_models
from logic import db_to_api
R = db_models.Rep
def main():
rep_datas = []
for db_rep in R.query:
rep_datas.append([
db_rep.rep_id,
db_rep.first_name,
db_rep.last_name,
db_rep.state_code,
db_rep.state_name(),
db_rep.district_code,
db_rep.district_number,
db_rep.district_ordinal(),
db_to_api.DB_CHAMBER_TO_TITLE.get(db_rep.chamber),
])
print json.dumps(rep_datas)
if __name__ == '__main__':
from tools import db_utils
with db_utils.request_context():
main()
| {
"content_hash": "88c024feb78e05f7c9825cdbe5e7bb45",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 24.40740740740741,
"alnum_prop": 0.5417298937784522,
"repo_name": "jlgoldman/writetogov",
"id": "0b692ead3311dfe7f5240513ee58f9ffdddb8783",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database/generate_rep_autocomplete_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7583"
},
{
"name": "HTML",
"bytes": "50704"
},
{
"name": "JavaScript",
"bytes": "23391"
},
{
"name": "Python",
"bytes": "132147"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
} |
from genologics.entities import Project
from config import RUN_PROCESSES, RAW_DIR, PROCESSED_DIR, NEXTCLOUD_HOST,NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_RAW_DIR,NEXTCLOUD_PROCESSED_DIR,NEXTCLOUD_MANUAL_DIR,MAIL_SENDER, NEXTCLOUD_USER, NEXTCLOUD_PW
from os.path import expanduser, exists
from texttable import Texttable
import datetime
import os
import multiprocessing
import subprocess
from modules.useq_illumina_parsers import parseConversionStats, parseRunParameters
from modules.useq_nextcloud import NextcloudUtil
from modules.useq_mail import sendMail
from modules.useq_template import TEMPLATE_PATH,TEMPLATE_ENVIRONMENT,renderTemplate
import sys
import tarfile
GPG_DIR = expanduser("~/.gnupg/")
def zipRun( dir, dir_info=None):
run_name = os.path.basename(dir)
zip_name = None
if dir_info:
zip_name = "-".join(dir_info['projects'].keys())
else:
zip_name = os.path.basename(dir)
run_zip = "{0}/{1}.tar.gz".format(dir,zip_name)
with tarfile.open(run_zip, "w:gz", dereference=True) as tar:
tar.add(dir, arcname=run_name)
return run_zip
def encryptRun( run_zip ,client_mail):
run_encrypted = "{0}.gpg".format(run_zip)
if os.path.isfile(run_encrypted):
os.remove(run_encrypted)
#Wanted to use gnupg module for this, but it doesn't support encrypting 'large' files
try:
subprocess.check_output("gpg --encrypt --output {0} --recipient '{1}' {2}".format(run_encrypted,client_mail, run_zip), shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return e.output
return run_encrypted
def shareManual(email,dir):
name = multiprocessing.current_process().name
print "{0}\tStarting".format(name)
print "{0}\tRunning compression".format(name)
run_zip = zipRun(dir)
if not os.path.isfile(run_zip):
print "{0}\tError : {1}/{2}.tar.gz was not properly created!".format(name,dir,os.path.basename(dir))
return
print "{0}\tRunning encryption".format(name)
run_encrypted = encryptRun(run_zip, email)
if not os.path.isfile(run_encrypted):
print "{0}\tError : Something went wrong during encryption of {1}/{2}.tar.gz with error message:\n\t{3}".format(name,dir,os.path.basename(dir), run_encrypted)
return
print "{0}\tRunning upload to NextCloud".format(name)
upload_response = nextcloud_util.upload(run_encrypted)
if "ERROR" in upload_response:
print "{0}\tError : Failed to upload {1} with message:\n\t{2}".format(name, run_encrypted, upload_response["ERROR"])
return
print "{0}\tSharing run {1} with {2}".format(name, dir, email)
share_response = nextcloud_util.share(run_encrypted, email)
if "ERROR" in share_response:
print "{0}\tError : Failed to share {1} with message:\n\t{2}".format(name, run_encrypted, share_response["ERROR"])
return
else:
share_id = share_response["SUCCES"]
template_data = {
'dir' : os.path.basename(dir),
'nextcloud_host' : NEXTCLOUD_HOST,
'share_id' : share_id
}
mail_content = renderTemplate('share_manual_template.html', template_data)
mail_subject = "USEQ has shared a file with you."
sendMail(mail_subject,mail_content, MAIL_SENDER ,email)
os.remove(run_zip)
os.remove(run_encrypted)
return
def shareProcessed(dir,dir_info):
name = multiprocessing.current_process().name
print "{0}\tStarting".format(name)
print "{0}\tRunning compression".format(name)
run_zip = zipRun( dir, dir_info )
if not os.path.isfile(run_zip):
print "{0}\tError : {1}/{2}.tar.gz was not properly created!".format(name,dir,dir_info['projects'].keys()[0])
return
print "{0}\tRunning encryption".format(name)
run_encrypted = encryptRun(run_zip, dir_info['researcher_email'])
if not os.path.isfile(run_encrypted):
print "{0}\tError : Something went wrong during encryption of {1}/{2}.tar.gz with error message:\n\t{3}".format(name,dir,dir_info['projects'].keys()[0], run_encrypted)
return
print "{0}\tRunning upload to NextCloud".format(name)
upload_response = nextcloud_util.upload(run_encrypted)
if "ERROR" in upload_response:
print "{0}\tError : Failed to upload {1} with message:\n\t{2}".format(name, run_encrypted, upload_response["ERROR"])
return
print "{0}\tSharing run {1} with {2}".format(name, dir, dir_info['researcher_email'])
share_response = nextcloud_util.share(run_encrypted, dir_info['researcher_email'])
if "ERROR" in share_response:
print "{0}\tError : Failed to share {1} with message:\n\t{2}".format(name, run_encrypted, share_response["ERROR"])
return
else:
share_id = share_response["SUCCES"]
template_data = {
'project_ids' : ",".join(dir_info['projects'].keys()),
'nextcloud_host' : NEXTCLOUD_HOST,
'share_id' : share_id
}
mail_content = renderTemplate('share_processed_template.html', template_data)
mail_subject = "UBEC analysis of sequencing-run ID(s) {0} finished".format(",".join(dir_info['projects'].keys()))
sendMail(mail_subject,mail_content, MAIL_SENDER ,dir_info['researcher_email'])
os.remove(run_zip)
os.remove(run_encrypted)
return
def shareRaw(dir,dir_info):
name = multiprocessing.current_process().name
print "{0}\tStarting".format(name)
conversion_stats = parseConversionStats( "{0}/Data/Intensities/BaseCalls/Stats/ConversionStats.xml".format(dir) )
if not conversion_stats:
print "{0}\tError : No ConversionStats.xml file could be found in {1}/Data/Intensities/BaseCalls/Stats/!".format(name,dir)
return
expected_yield = parseRunParameters( "{0}/RunParameters.xml".format(dir) )
if not expected_yield:
print "{0}\tError : No RunParameters.xml file could be found in {1}!".format(name,dir)
return
print "{0}\tRunning compression".format(name)
run_zip = zipRun( dir, dir_info )
if not os.path.isfile(run_zip):
print "{0}\tError : {1}/{2}.tar.gz was not properly created!".format(name,dir,dir_info['projects'].keys()[0])
return
print "{0}\tRunning encryption".format(name)
run_encrypted = encryptRun(run_zip, dir_info['researcher_email'])
if not os.path.isfile(run_encrypted):
print "{0}\tError : Something went wrong during encryption of {1}/{2}.tar.gz with error message:\n\t{3}".format(name,dir,dir_info['projects'].keys()[0], run_encrypted)
return
print "{0}\tRunning upload to NextCloud".format(name)
upload_response = nextcloud_util.upload(run_encrypted)
if "ERROR" in upload_response:
print "{0}\tError : Failed to upload {1} with message:\n\t{2}".format(name, run_encrypted, upload_response["ERROR"])
return
print "{0}\tSharing run {1} with {2}".format(name, dir, dir_info['researcher_email'])
share_response = nextcloud_util.share(run_encrypted, dir_info['researcher_email'])
if "ERROR" in share_response:
print "{0}\tError : Failed to share {1} with message:\n\t{2}".format(name, run_encrypted, share_response["ERROR"])
return
else:
share_id = share_response["SUCCES"]
template_data = {
'project_id' : dir_info['projects'].keys()[0],
'nextcloud_host' : NEXTCLOUD_HOST,
'share_id' : share_id,
'expected_reads' : expected_yield,
'raw_reads' : conversion_stats['total_reads_raw'],
'filtered_reads' : conversion_stats['total_reads'],
'conversion_stats' : conversion_stats
}
mail_content = renderTemplate('share_raw_template.html', template_data)
mail_subject = "USEQ sequencing of sequencing-run ID {0} finished".format(dir_info['projects'].keys()[0])
sendMail(mail_subject,mail_content, MAIL_SENDER ,dir_info['researcher_email'])
os.remove(run_zip)
os.remove(run_encrypted)
return
def check( run_info ):
print "\nAre you sure you want to send the following datasets(s) (yes/no): "
table = Texttable(max_width=0)
table.add_rows([['Dir','Project(s) (ID:Name)','Client Email']])
for datadir in run_info:
projects = ",".join( ["{0}:{1}".format(id,name) for id,name in run_info[datadir]['projects'].iteritems() ] )
table.add_row( [ datadir, projects, run_info[datadir]['researcher_email'] ])
print table.draw()
yes = set(['yes','y', 'ye', ''])
no = set(['no','n'])
choice = raw_input().lower()
if choice in yes:
choice = True
elif choice in no:
choice = False
else:
sys.stdout.write("Please respond with 'yes' or 'no'\n")
choice = False
return choice
def getProcessedData( lims, project_name, project_id ):
"""Get the most recent processed run info based on project name and allowed RUN_PROCESSES"""
runs = []
project_processes = lims.get_processes(
projectname=project_name,
type=RUN_PROCESSES
)
for process in project_processes:
run_id = None
flowcell_id = None
if 'Run ID' in process.udf:
run_id = process.udf['Run ID']
for path in os.listdir(PROCESSED_DIR):
if os.path.isdir( os.path.join( PROCESSED_DIR, path, run_id)):
return os.path.join( PROCESSED_DIR, path, run_id)
if 'Flow Cell ID' in process.udf:
flowcell_id = process.udf['Flow Cell ID']
for root,dirs,files in os.walk(PROCESSED_DIR, topdown=True):
for dir in dirs:
path = os.path.join(root,dir)
if path.endswith("_000000000-"+flowcell_id): #MiSeq
return path
elif path.endswith("_"+flowcell_id): #NextSeq
return path
elif path.endswith("A"+flowcell_id) or path.endswith("B"+flowcell_id): #HiSeq
return path
for root,dirs,files in os.walk(PROCESSED_DIR, topdown=True):
for dir in dirs:
path = os.path.join(root,dir)
if project_id in path:
return path
return
def getRawData( lims, project_name ):
"""Get the most recent raw run info based on project name and allowed RUN_PROCESSES"""
runs = {}
project_processes = lims.get_processes(
projectname=project_name,
type=RUN_PROCESSES
)
for process in project_processes:
run_id = None
flowcell_id = None
if 'Run ID' in process.udf: run_id = process.udf['Run ID']
if 'Flow Cell ID' in process.udf: flowcell_id = process.udf['Flow Cell ID']
runs[ process.date_run ] = [ run_id, flowcell_id ]
if not runs:
return None
run_dates = [datetime.datetime.strptime(ts, "%Y-%m-%d") for ts in runs.keys()]
sorted_run_dates = [datetime.datetime.strftime(ts, "%Y-%m-%d") for ts in sorted(run_dates)]
recent_run = runs[sorted_run_dates[-1]] #the most recent run, this is the run we want to share
#Try to determine run directory
if recent_run[0]: #run name is known
for path in os.listdir(RAW_DIR):
if os.path.isdir( os.path.join( RAW_DIR, path, recent_run[0])):
return os.path.join( RAW_DIR, path, recent_run[0])
elif recent_run[1]: #run flowcell is known
for root,dirs,files in os.walk(RAW_DIR, topdown=True):
for dir in dirs:
path = os.path.join(root,dir)
if path.endswith("_000000000-"+recent_run[1]): #MiSeq
return path
elif path.endswith("_"+recent_run[1]): #NextSeq
return path
elif path.endswith("A"+recent_run[1]) or path.endswith("B"+recent_run[1]): #HiSeq
return path
def shareDataByEmail(lims, email, dir):
if not email.lower() in gpg_key_list:
print "Error : No public key found for email {0}".format(email)
sys.exit()
if not exists(dir):
print "Error : Directory {0} not found".format(dir)
sys.exit()
dir = dir.rstrip('/')
share_processes = []
share_process = multiprocessing.Process(name="Process_{0}".format(os.path.basename(dir)), target=shareManual, args=(email, dir) )
share_processes.append(share_process)
share_process.start()
for process in share_processes:
process.join()
def shareDataById(lims, mode,ids):
"""Get's the run names, encrypts the run data and sends it to the appropriate client"""
project_ids = ids.split(",")
run_info = {}
for project_id in project_ids:
project = None
project_name = ''
try:
project = Project(lims, id=project_id)
project_name = project.name
except:
print "Error : Project ID {0} not found!".format(project_id)
continue
researcher = project.researcher
#Check if client has a gpg-key
if not researcher.email.lower() in gpg_key_list:
print "Error : User ID {0} ({1}) for project ID {2} has not provided a public key yet!".format(researcher.username,researcher.email, project_id)
continue
#Get run info
info = None
if mode == 'raw' :
datadir = getRawData(lims, project_name)
else :
datadir = getProcessedData(lims, project_name, project_id)
if not datadir:
print "Error : No dir could be found for project ID {0}!".format(project_id)
continue
#Got all the info we need
if datadir not in run_info:
run_info[datadir] = {
'researcher_email' : researcher.email,
'projects' : {}
}
run_info[datadir]['projects'][project_id] = project_name
if not run_info:
print "Error : None of the provided project IDs are able to be processed!"
elif check(run_info):
#Start sharing threads
share_processes =[]
for datadir in run_info:
share_process = None
if mode == 'raw':
share_process = multiprocessing.Process(name="Process_{0}".format(os.path.basename(datadir)), target=shareRaw, args=(datadir, run_info[datadir]) )
else:
share_process = multiprocessing.Process(name="Process_{0}".format(os.path.basename(datadir)), target=shareProcessed, args=(datadir, run_info[datadir]) )
share_processes.append(share_process)
share_process.start()
for process in share_processes:
process.join()
def run(lims, mode, ids, email, dir):
"""Runs raw, processed or manual function based on mode"""
import gnupg
global gpg
global gpg_key_list
global nextcloud_util
#Set up gpg keychain
gpg = gnupg.GPG(homedir=GPG_DIR)
gpg_key_list = {}
for key in gpg.list_keys():
gpg_key_list [ key['uids'][0].split("<")[1][:-1].lower() ] = key
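# (GnuPG uids look like "Full Name <email@example.com>"; the slice above keeps
# only the lower-cased address so researcher emails can be matched
# case-insensitively later on)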
#Set up nextcloud
nextcloud_util = NextcloudUtil()
nextcloud_util.setHostname( NEXTCLOUD_HOST )
if mode == 'raw':
nextcloud_util.setup( NEXTCLOUD_USER, NEXTCLOUD_PW, NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_RAW_DIR,MAIL_SENDER )
shareDataById(lims, mode, ids)
elif mode == 'processed':
nextcloud_util.setup( NEXTCLOUD_USER, NEXTCLOUD_PW, NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_PROCESSED_DIR,MAIL_SENDER )
shareDataById(lims, mode, ids)
else:
nextcloud_util.setup( NEXTCLOUD_USER, NEXTCLOUD_PW, NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_MANUAL_DIR,MAIL_SENDER )
shareDataByEmail(lims, email, dir)
| {
"content_hash": "b8603cf402f83081a0fd69d3744b2143",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 199,
"avg_line_length": 39.27,
"alnum_prop": 0.6258594346829641,
"repo_name": "CuppenResearch/clarity_utils",
"id": "5118f32bf33771dc027b7c42682b4c03ce8566f1",
"size": "15708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/useq_share_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29846"
}
],
"symlink_target": ""
} |
"""Core module. Provides the basic operations needed in sympy.
"""
from .sympify import sympify, SympifyError
from .cache import cacheit
from .basic import Basic, Atom, preorder_traversal
from .singleton import S
from .expr import Expr, AtomicExpr, UnevaluatedExpr
from .symbol import Symbol, Wild, Dummy, symbols, var
from .numbers import Number, Float, Rational, Integer, NumberSymbol, \
RealNumber, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo, \
AlgebraicNumber, comp, mod_inverse
from .power import Pow, integer_nthroot, integer_log
from .mul import Mul, prod
from .add import Add
from .mod import Mod
from .relational import ( Rel, Eq, Ne, Lt, Le, Gt, Ge,
Equality, GreaterThan, LessThan, Unequality, StrictGreaterThan,
StrictLessThan )
from .multidimensional import vectorize
from .function import Lambda, WildFunction, Derivative, diff, FunctionClass, \
Function, Subs, expand, PoleError, count_ops, \
expand_mul, expand_log, expand_func, \
expand_trig, expand_complex, expand_multinomial, nfloat, \
expand_power_base, expand_power_exp, arity
from .evalf import PrecisionExhausted, N
from .containers import Tuple, Dict
from .exprtools import gcd_terms, factor_terms, factor_nc
from .evaluate import evaluate
# expose singletons
Catalan = S.Catalan
EulerGamma = S.EulerGamma
GoldenRatio = S.GoldenRatio
TribonacciConstant = S.TribonacciConstant
| {
"content_hash": "f995602529750b65b2b629c6ed4b93ea",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 39.57142857142857,
"alnum_prop": 0.7631768953068592,
"repo_name": "kaushik94/sympy",
"id": "380a72ea80bc3a05e7891698aff5ca984d97cc59",
"size": "1385",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/core/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
import pprint
def read_urls(filename):
"""Returns a list of the puzzle urls from the given log file,
extracting the hostname from the filename itself.
Screens out duplicate urls and returns the urls sorted into
increasing order."""
op=[]
with open(filename) as f:
for line in f:
match2=re.search(r'GET\s(.*)\sHTTP',line)
if match2 and re.search(r'puzzle',match2.group(1),re.IGNORECASE):
op.append('http://'+filename.split('_')[-1]+match2.group(1))
opset=list(set(op))
opset=sorted(opset, key = lambda x: x.split('/')[-1].split('.')[0].split('-')[-1])
return opset
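# (the sort key is the chunk after the last '-' in each filename, e.g. 'aaab'
# for puzzle-bar-aaab.jpg, which puts the puzzle pieces back into order)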
# +++your code here+++
def download_images(img_urls, dest_dir):
"""Given the urls already in the correct order, downloads
each image into the given directory.
Gives the images local filenames img0, img1, and so on.
Creates an index.html in the directory
with an img tag to show each local image file.
Creates the directory if necessary.
"""
str1="<verbatim> <html> <body>"
str2="</body></html>"
str3=""
for index,url in enumerate(img_urls):
#urllib.urlretrieve(url,dest_dir+'/img'+str(index))
print "done for "+url
str3=str3+'<img src="'+dest_dir+'/img'+str(index)+'">'
str1=str1+str3+str2
#print str1
with open('index.html','w') as f:
f.write(str1)
sys.exit()
def main():
args = sys.argv[1:]
if not args:
print 'usage: [--todir dir] logfile '
sys.exit(1)
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
img_urls = read_urls(args[0])
pprint.pprint(img_urls)
if todir:
download_images(img_urls, todir)
else:
print '\n'.join(img_urls)
if __name__ == '__main__':
main()
| {
"content_hash": "085b8b1374aa70b46fb3cb6fb54a9b56",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 192,
"avg_line_length": 28.2027027027027,
"alnum_prop": 0.6425491135601341,
"repo_name": "bourneagain/pythonBytes",
"id": "1e4833c5ec76bbddc6989f5d149fa68f08686ceb",
"size": "2314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-python-exercises/logpuzzle/logpuzzle-bk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12470"
},
{
"name": "DIGITAL Command Language",
"bytes": "383216"
},
{
"name": "HTML",
"bytes": "1300577"
},
{
"name": "Python",
"bytes": "620148"
},
{
"name": "Shell",
"bytes": "27"
}
],
"symlink_target": ""
} |
import abc
from typing import Dict, Optional
from paralleldomain.decoding.common import DecoderSettings, LazyLoadPropertyMixin, create_cache_key
from paralleldomain.decoding.map_query.map_query import MapQuery
from paralleldomain.model.map.area import Area
from paralleldomain.model.map.edge import Edge
from paralleldomain.model.map.map_components import Junction, LaneSegment, RoadSegment
from paralleldomain.model.type_aliases import (
AreaId,
EdgeId,
FrameId,
JunctionId,
LaneSegmentId,
RoadSegmentId,
SceneName,
SensorName,
)
class MapDecoder(LazyLoadPropertyMixin, metaclass=abc.ABCMeta):
def __init__(self, dataset_name: str, scene_name: SceneName, settings: DecoderSettings):
self.scene_name = scene_name
self.settings = settings
self.dataset_name = dataset_name
self.map_query = self._create_map_query()
self.map_query.add_map_data(
road_segments=self.get_road_segments(),
lane_segments=self.get_lane_segments(),
junctions=self.get_junctions(),
areas=self.get_areas(),
edges=self.get_edges(),
)
def get_unique_id(
self,
sensor_name: Optional[SensorName] = None,
frame_id: Optional[FrameId] = None,
extra: Optional[str] = None,
) -> str:
return create_cache_key(
dataset_name=self.dataset_name,
scene_name=self.scene_name,
sensor_name=sensor_name,
frame_id=frame_id,
extra=extra,
)
def get_map_query(self) -> MapQuery:
return self.map_query
@abc.abstractmethod
def decode_road_segments(self) -> Dict[RoadSegmentId, RoadSegment]:
pass
@abc.abstractmethod
def decode_lane_segments(self) -> Dict[LaneSegmentId, LaneSegment]:
pass
@abc.abstractmethod
def decode_junctions(self) -> Dict[JunctionId, Junction]:
pass
@abc.abstractmethod
def decode_areas(self) -> Dict[AreaId, Area]:
pass
@abc.abstractmethod
def decode_edges(self) -> Dict[EdgeId, Edge]:
pass
@abc.abstractmethod
def _create_map_query(self) -> MapQuery:
pass
def get_road_segments(self) -> Dict[RoadSegmentId, RoadSegment]:
_unique_cache_key = self.get_unique_id(extra="road_segments")
road_segments = self.lazy_load_cache.get_item(
key=_unique_cache_key,
loader=lambda: self.decode_road_segments(),
)
return road_segments
def get_lane_segments(self) -> Dict[LaneSegmentId, LaneSegment]:
_unique_cache_key = self.get_unique_id(extra="lane_segments")
lane_segments = self.lazy_load_cache.get_item(
key=_unique_cache_key,
loader=lambda: self.decode_lane_segments(),
)
return lane_segments
def get_junctions(self) -> Dict[JunctionId, Junction]:
_unique_cache_key = self.get_unique_id(extra="junctions")
junctions = self.lazy_load_cache.get_item(
key=_unique_cache_key,
loader=lambda: self.decode_junctions(),
)
return junctions
def get_areas(self) -> Dict[AreaId, Area]:
_unique_cache_key = self.get_unique_id(extra="areas")
areas = self.lazy_load_cache.get_item(
key=_unique_cache_key,
loader=lambda: self.decode_areas(),
)
return areas
def get_edges(self) -> Dict[EdgeId, Edge]:
_unique_cache_key = self.get_unique_id(extra="edges")
edges = self.lazy_load_cache.get_item(
key=_unique_cache_key,
loader=lambda: self.decode_edges(),
)
return edges
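# A minimal concrete-decoder sketch (hypothetical; a real implementation would
# build these dictionaries from an actual map source, and _create_map_query
# would return whatever MapQuery subclass that source needs):
#
#   class EmptyMapDecoder(MapDecoder):
#       def _create_map_query(self) -> MapQuery:
#           return MapQuery()
#       def decode_road_segments(self): return {}
#       def decode_lane_segments(self): return {}
#       def decode_junctions(self): return {}
#       def decode_areas(self): return {}
#       def decode_edges(self): return {}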
| {
"content_hash": "8b7db787bdab6ab3793a141215a3ee11",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 99,
"avg_line_length": 32.421052631578945,
"alnum_prop": 0.6282467532467533,
"repo_name": "parallel-domain/pd-sdk",
"id": "af27020f8f653eca80a89e827f7160d33f2b0935",
"size": "3696",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "paralleldomain/decoding/map_decoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1030434"
},
{
"name": "Shell",
"bytes": "1375"
}
],
"symlink_target": ""
} |
from django import forms
from django.conf import settings
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.contrib.admin.util import flatten_fieldsets
from django.contrib.contenttypes.models import ContentType
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(self.form, name, **options)
def first_field(self):
try:
fieldset_name, fieldset_options = self.fieldsets[0]
field_name = fieldset_options['fields'][0]
if not isinstance(field_name, basestring):
field_name = field_name[0]
return self.form[field_name]
except (KeyError, IndexError):
pass
try:
return iter(self.form).next()
except StopIteration:
return None
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, fields=(), classes=(), description=None):
self.form = form
self.name, self.fields = name, fields
self.classes = u' '.join(classes)
self.description = description
def _media(self):
if 'collapse' in self.classes:
return forms.Media(js=['%sjs/admin/CollapsedFieldsets.js' % settings.ADMIN_MEDIA_PREFIX])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field)
class Fieldline(object):
def __init__(self, form, field):
self.form = form # A django.forms.Form instance
if isinstance(field, basestring):
self.fields = [field]
else:
self.fields = field
def __iter__(self):
for i, field in enumerate(self.fields):
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields]).strip('\n'))
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
def label_tag(self):
classes = []
if self.is_checkbox:
classes.append(u'vCheckboxLabel')
contents = force_unicode(escape(self.field.label))
else:
contents = force_unicode(escape(self.field.label)) + u':'
if self.field.field.required:
classes.append(u'required')
if not self.is_first:
classes.append(u'inline')
attrs = classes and {'class': u' '.join(classes)} or {}
return self.field.label_tag(contents=contents, attrs=attrs)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
yield InlineAdminForm(self.formset, form, self.fieldsets, self.opts.prepopulated_fields, original)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets, self.opts.prepopulated_fields, None)
def fields(self):
fk = getattr(self.formset, "fk", None)
for field_name in flatten_fieldsets(self.fieldsets):
if fk and fk.name == field_name:
continue
yield self.formset.form.base_fields[field_name]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original):
self.formset = formset
self.original = original
if original is not None:
self.original.content_type_id = ContentType.objects.get_for_model(original).pk
self.show_url = original and hasattr(original, 'get_absolute_url')
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name, **options)
def field_count(self):
# tabular.html uses this function for colspan value.
num_of_fields = 1 # always has at least one field
num_of_fields += len(self.fieldsets[0][1]["fields"])
if self.formset.can_order:
num_of_fields += 1
if self.formset.can_delete:
num_of_fields += 1
return num_of_fields
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field)
class AdminErrorList(forms.util.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
| {
"content_hash": "80b46304b1bb770c320bd08dd1c002ad",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 110,
"avg_line_length": 37.12169312169312,
"alnum_prop": 0.6150228050171037,
"repo_name": "chewable/django",
"id": "aaa2e304ced423013af0aea32dfba39ad22af519",
"size": "7017",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/contrib/admin/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |