repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
frappe/erpnext | erpnext/hr/doctype/hr_settings/test_hr_settings.py | Python | gpl-3.0 | 153 | 0.006536 | # | Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
class TestHRSettings(unittest.TestCase):
| pass
|
artish/syncsettings | sync_settings/__init__.py | Python | mit | 18 | 0.055556 | fro | m .cli | import * |
cyhmat/pythonDersleri | XOX.py | Python | mit | 8,375 | 0.010988 | from time import sleep
def menu():
try:
sleep(1.5)
print(menuStr)
sleep(0.5)
choose = int(input("For Play = 1\nFor Exit = 0\nChoose : "))
print(stars)
except:
print("There's something wrong about your input. Try again.")
menu()
if choose == 1:
sleep(0.3)
print("Game starting...")
player1 = str(input("Player1 : "))
player2 = str(input("Player2 : "))
sleep(0.3)
print("Thanks a lot. ^^")
return player1, player2
elif choose == 0:
print("Cya dude.")
else:
print("There's something wrong about your input. Try again.")
menu()
def turns(p1,p2):
global loop
while True:
sleep(1)
print(stars)
sleep(0.3)
print(gamePlany1)
sleep(0.3)
print(gamePlany2)
sleep(0.3)
print(gamePlany3)
sleep(0.3)
print(stars)
if win() == 1:
loop = 1
return loop
else:
pass
if len(p1Cond) + len(p2Cond) == 9:
print("Its a draw ! :D")
menu()
else:
pass
if turn%2 == 0:
sleep(0.3)
print("Its {}'s turn.".format(p1))
sleep(0.3)
print("Can you give me one x and one y cordinate ?")
makeYourMove()
break
else:
print("Its {}'s turn.".format(p2))
print("Can you give me one x and one y cordinate ?")
makeYourMove()
break
def makeYourMove():
try:
x = int(input("X : "))
y = int(input("Y : "))
checkIt(x, y)
except:
print("Hatalı bir girdi verdiniz.")
makeYourMove()
def checkIt(x,y):
global turn
if x == 1:
if y == 1:
if gamePlany1[0] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany1[0] = "X".center(3)
turn +=1
p1Cond.append(1)
turns(p1,p2)
else:
gamePlany1[0]= "O".center(3)
turn += 1
p2Cond.append(1)
turns(p1,p2)
if x == 1:
if y == 2:
if gamePlany1[1] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany1[1] = "X".center(3)
turn +=1
p1Cond.append(2)
turns(p1,p2)
else:
gamePlany1[1]= "O".center(3)
turn += 1
p2Cond.append(2)
turns(p1,p2)
if x == 1:
if y == 3:
if gamePlany1[2] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany1[2] = "X".center(3)
turn +=1
p1Cond.append(3)
turns(p1,p2)
else:
gamePlany1[2]= "O".center(3)
turn += 1
p2Cond.append(3)
turns(p1,p2)
if x == 3:
if y == 1:
if gamePlany3[0] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany3[0] = "X".center(3)
turn +=1
p1Cond.append(7)
turns(p1,p2)
else:
gamePlany3[0]= "O".center(3)
turn += 1
p2Cond.append(7)
turns(p1,p2)
if x == 3:
if y == 2:
if gamePlany3[1] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany3[1] = "X".center(3)
turn +=1
p1Cond.append(8)
turns(p1,p2)
else:
gamePlany3[1]= "O".center(3)
turn += 1
p2Cond.append(8)
turns(p1,p2)
if x == 3:
if y == 3:
if gamePlany3[2] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany3[2] = "X".center(3)
turn +=1
p1Cond.append(9)
turns(p1,p2)
else:
gamePlany3[2]= "O".center(3)
turn += 1
p2Cond.append(9)
turns(p1,p2)
if x == 2:
if y == 1:
if gamePlany2[0] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany2[0] = "X".center(3)
turn +=1
p1Cond.append(4)
turns(p1,p2)
else:
gamePlany2[0]= "O".center(3)
turn += 1
p2Cond.append(4)
turns(p1,p2)
if x == 2:
if y == 2:
if gamePlany2[1] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany2[1] = "X".center(3)
turn +=1
p1Cond.append(5)
turns(p1,p2)
else:
gamePlany2[1]= "O".center(3)
turn += 1 |
p2Cond.appe | nd(5)
turns(p1,p2)
if x == 2:
if y == 3:
if gamePlany2[2] != "___":
print("Its already taken. Try again...")
makeYourMove()
else:
if turn%2 == 0:
gamePlany2[2] = "X".center(3)
turn +=1
p1Cond.append(6)
turns(p1,p2)
else:
gamePlany2[2]= "O".center(3)
turn += 1
p2Cond.append(6)
turns(p1,p2)
if x >3 or x<1 :
sleep(0.3)
print("Hatalı bir girdi verdiniz.")
makeYourMove()
def win():
p1Cond.sort()
p2Cond.sort()
w = 0
if winCond.count(p1Cond) == 1:
print("Congratulations {}. You win.".format(p1))
w = 1
return w
elif winCond.count(p2Cond) == 1:
print("Congratulations {}. You win.".format(p2))
w = 1
return w
menuStr = """
_________________________________________________________
### ## # ### ### ##
/#### #### / / /### /#### #### /
/ ### /####/ / / ### / ### /####/
### / ## / ## ### ### / ##
### / / ### ### ### /
###/ ## ## ## ###/
### ## ## ## ###
/### ## ## ## /###
/ ### ## ## ## / ###
/ ### ## ## ## / ###
/ ### ## ## ## / ###
/ ### ## # / / ###
/ ### / ### / / ### /
/ ####/ ######/ / ####/
/ ### ### / ###
v0.1.0
cyhmat 2017
__________________________________________________________
"""
stars = "_" * 89
gamePlany1 =["___","___","___"]
gamePlany2 =["___","___","___"]
gamePlany3 =["___","___" |
mov-q/dumpy | discariche/model/reallocation.py | Python | gpl-3.0 | 1,137 | 0.009675 | """Person model"""
from sqlalchemy import Column, UniqueConstraint, ForeignKey
from sqlalchemy import schema as saschema
from sqlalchemy.types import Integer, String, Unicode, Float, UnicodeText, DateTime
from discariche.model.meta import Base
class Reallocation(Base):
__tablename__ = "reallocation"
id = Column(Integer, primary_key=True)
id_dump = Column(Integer, saschema.ForeignKey('dump.id', onupdate="CASCADE", ondelete="CASCADE"))
id_dumptype = Column(Integer, saschema.ForeignKey('dumptype.id',onupdate="CASCADE", ondelete="SET NULL"))
start_date = Column(DateTime, nullable=False)
end_date = Column(DateTime, nullable=True)
notes = Column(Unicode(5 | 12), nullable=True)
modder = Column(Integer, saschema.ForeignKey('user.id', onupdate="CASCADE", ondelete | ="SET NULL"))
lastmod = Column(DateTime, nullable=False)
__table_args__ = (
{
"mysql_engine":"InnoDB",
"mysql_charset":"utf8"
}
)
def __init__(self):
pass
def __repr__(self):
pass
|
jorisroovers/opencv-playground | coin_dectector/coins/detector2.py | Python | apache-2.0 | 7,609 | 0.001051 | import cv2
import cv2.cv
import numpy as np
import time
COLOR_TOLERANCE = 25
# GOLD = (207, 176, 79)
GOLD = (51, 255, 255)
GOLD_MIN = (41, 100, 100)
GOLD_MAX = (61, 255, 255)
# GOLD = (255, 215, 0)
SILVER = (0, 0, int(255 * 0.75))
SILVER_MIN = (0, 0, SILVER[2] * 0.75)
SILVER_MAX = (10, 100, SILVER[2] * 1.25)
# SILVER = (192,192, 192)
# COPPER = (190, 190, 190)
COPPER = (56, 90, 150)
# COPPER = (184, 155, 51)
def color_distance(color, reference_color):
square_distance = (color[0] - reference_color[0]) ** 2 + \
(color[1] - reference_color[1]) ** 2
# (color[2] - reference_color[2]) ** 2
# square_distance = ((color[0] - reference_color[0]) * 0.299) ** 2 + \
# ((color[1] - reference_color[1]) * 0.587) ** 2 + \
# ((color[2] - reference_color[2]) * 0.114) ** 2
return square_distance
def classify_coin_color(color):
coin_colors = [GOLD, SILVER]
distances = []
for coin_color in coin_colors:
distances.append(color_distance(color, coin_color))
# print "DISTANCES", distances
smallest_index = distances.index(min(distances))
return coin_colors[smallest_index]
def classify_coin_color_str(color):
classified_color = classify_coin_color(color)
# print "CLASSIFIED", classified_color
if classified_color == GOLD:
return "GOLD"
elif classified_color == SILVER:
return "SILVER"
elif classified_color == COPPER:
return "COPPER"
return "UNKNOWN"
def square_distance(x, x1, y, y1):
# Distance formula: distance = sqrt((x-x1)^2+(y-y1)^2))
return (x - x1) ** 2 + (y - y1) ** 2
def determine_coin(center_color, border_color):
if center_color == "SILVER" and border_color == "GOLD":
return "1 EURO"
elif center_color == "GOLD" and border_color == "SILVER":
return "2 EURO"
elif center_color == "GOLD" and border_color == "GOLD":
return "10, 20 OR 50ct"
elif center_color == "COPPER" and border_color == "COPPER":
return "1, 2 OR 5ct"
return "IMPOSSIBLE!"
def detect(src_image_path, param1, param2, debug=False):
color_img = cv2.imread(src_image_path, 1)
img = cv2.imread(src_image_path, 0)
height, width = img.shape
hsv_image = cv2.cvtColor(color_img, cv2.COLOR_BGR2HSV)
lower_silver = np.array([0, 0, 130])
upper_silver = np.array([60, 50, 255])
lower_gold = np.array([20, 75, 0])
upper_gold = np.array([70, 255, 255])
mask_gold = cv2.inRange(hsv_image, lower_gold, upper_gold)
mask_silver = cv2.inRange(hsv_image, lower_silver, upper_silver)
print "GOLDEN PIXELS", np.count_nonzero(mask_gold)
print "SILVER PIXELS", np.count_nonzero(mask_silver)
output = np.zeros((height, width, 3), np.uint8)
blurred = cv2.medianBlur(img, 5)
allcircles = []
magic_values = [1.3]
for magic_value in magic_values:
detected_circles = cv2.HoughCircles(blurred, cv2.cv.CV_HOUGH_GRADIENT,
magic_value, 50)
allcircles.append(detected_circles)
cnt = 1
for circles in allcircles:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
if i[2] > width / 12:
print "SKIPPING CIRCLE %d with radius %d" % (cnt, i[2])
continue
x, y = np.meshgrid(np.arange(width), np.arange(height))
pointC = (i[0], i[1])
pointR1 = (i[0], i[1] + int(0.90 * i[2]))
pointR2 = (i[0], i[1] - int(0.90 * i[2]))
pointR3 = (i[0] + int(0.90 * i[2]), i[1])
pointR4 = (i[0] - int(0.90 * i[2]), i[1])
radius_border_point = 2
radius_center_point = int(i[2] * 0.5)
# Distance formula: distance = sqrt((x-x1)^2+(y-y1)^2))
d2 = (x - i[0]) ** 2 + (y - i[1]) ** 2
dR1 = square_distance(x, pointR1[0], y, pointR1[1])
dR2 = square_distance(x, pointR2[0], y, pointR2[1])
dR3 = square_distance(x, pointR3[0], y, pointR3[1])
dR4 = square_distance(x, pointR4[0], y, pointR4[1])
# Point is within circle if squared distance to center < radius^2
mask = d2 < i[2] ** 2
maskC = d2 < radius_center_point ** 2
maskR1 = dR1 < radius_border_point ** 2
maskR2 = dR2 < radius_border_point ** 2
maskR3 = dR3 < radius_border_point ** 2
maskR4 = dR4 < radius_border_point ** 2
cntgold = 0
cntsilver = 0
cntCgold = 0
cntCsilver = 0
cntRgold = 0
| cntRsilver = 0
for a in range(0, width):
for b in range(0, height):
if mask[b, a]:
output[b, a] = color_img[b, a]
if mask_gold[b, a]:
cntgold += 1
elif mask_silver[b, a]:
cntsilver += 1
| if maskC[b, a]:
if mask_gold[b, a]:
cntCgold += 1
elif mask_silver[b, a]:
cntCsilver += 1
if maskR1[b, a]:
if mask_gold[b, a]:
cntRgold += 1
elif mask_silver[b, a]:
cntRsilver += 1
if maskR2[b, a]:
if mask_gold[b, a]:
cntRgold += 1
elif mask_silver[b, a]:
cntRsilver += 1
if maskR3[b, a]:
if mask_gold[b, a]:
cntRgold += 1
elif mask_silver[b, a]:
cntRsilver += 1
if maskR4[b, a]:
if mask_gold[b, a]:
cntRgold += 1
elif mask_silver[b, a]:
cntRsilver += 1
cv2.circle(output, pointC, radius_center_point, (0, 255, 0), 3)
center_color = "GOLD"
if cntCgold < cntCsilver:
center_color = "SILVER"
border_color = "GOLD"
if cntRgold < cntRsilver:
border_color = "SILVER"
coin_type = determine_coin(center_color, border_color)
print "CIRLCE", cnt
# print "avg color of circle:", avg_circle
print "color border of circle:", border_color
print "color center of circle:", center_color
print "COIN=", coin_type
print "*" * 50
if debug:
color_border_point = (0, 0, 255)
cv2.circle(output, pointR1, radius_border_point, color_border_point, 3)
cv2.circle(output, pointR2, radius_border_point, color_border_point, 3)
cv2.circle(output, pointR3, radius_border_point, color_border_point, 3)
cv2.circle(output, pointR4, radius_border_point, color_border_point, 3)
cv2.putText(output, str(cnt), pointC, cv2.FONT_HERSHEY_SIMPLEX, 2, 255, 2)
cv2.putText(output, coin_type, (pointC[0] - 20, pointC[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(255, 0, 0), 2)
cnt += 1
cv2.imshow('detected circles', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
ts = int(time.time())
dest = "generated/gen%d.png" % ts
cv2.imwrite(dest, output)
return dest
|
davetcoleman/ompl | demos/RigidBodyPlanningWithODESolverAndControls.py | Python | bsd-3-clause | 4,218 | 0.003793 | #!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING | ,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIA | BILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll
from math import sin, cos, tan
from functools import partial
try:
from ompl import base as ob
from ompl import control as oc
from ompl import geometric as og
except:
# if the ompl module is not in the PYTHONPATH assume it is installed in a
# subdirectory of the parent directory called "py-bindings."
from os.path import abspath, dirname, join
import sys
sys.path.insert(0, join(dirname(dirname(abspath(__file__))),'py-bindings'))
from ompl import base as ob
from ompl import control as oc
from ompl import geometric as og
def kinematicCarODE(q, u, qdot):
theta = q[2];
carLength = 0.2;
qdot[0] = u[0] * cos(theta)
qdot[1] = u[0] * sin(theta)
qdot[2] = u[0] * tan(u[1]) / carLength
def isStateValid(spaceInformation, state):
# perform collision checking or check if other constraints are
# satisfied
return spaceInformation.satisfiesBounds(state)
def plan():
# construct the state space we are planning in
space = ob.SE2StateSpace()
# set the bounds for the R^2 part of SE(2)
bounds = ob.RealVectorBounds(2)
bounds.setLow(-1)
bounds.setHigh(1)
space.setBounds(bounds)
# create a control space
cspace = oc.RealVectorControlSpace(space, 2)
# set the bounds for the control space
cbounds = ob.RealVectorBounds(2)
cbounds.setLow(-.3)
cbounds.setHigh(.3)
cspace.setBounds(cbounds)
# define a simple setup class
ss = oc.SimpleSetup(cspace)
validityChecker = ob.StateValidityCheckerFn(partial(isStateValid, ss.getSpaceInformation()))
ss.setStateValidityChecker(validityChecker)
ode = oc.ODE(kinematicCarODE)
odeSolver = oc.ODEBasicSolver(ss.getSpaceInformation(), ode)
propagator = oc.ODESolver.getStatePropagator(odeSolver)
ss.setStatePropagator(propagator)
# create a start state
start = ob.State(space)
start().setX(-0.5);
start().setY(0.0);
start().setYaw(0.0);
# create a goal state
goal = ob.State(space);
goal().setX(0.0);
goal().setY(0.5);
goal().setYaw(0.0);
# set the start and goal states
ss.setStartAndGoalStates(start, goal, 0.05)
# attempt to solve the problem
solved = ss.solve(120.0)
if solved:
# print the path to screen
print("Found solution:\n%s" % ss.getSolutionPath().asGeometric().printAsMatrix())
if __name__ == "__main__":
plan()
|
mbokulic/bmt_parser | tests/unit_tests.py | Python | mit | 9,581 | 0 | import unittest
import bmt_parser.collaborators as collabs
import bmt_parser.name_corrections as corr
import pandas as pd
class Test_collabs(unittest.TestCase):
def test_repeat_collab(self):
testdata = {
'issue_id': [1, 1, 1, 2, 2],
'authors': ['a', 'b', 'c', 'a', 'b']
}
testdata = pd.DataFrame(testdata)
result = collabs.get_collaborators(testdata)
as_list = []
for i, row in result.iterrows():
as_list.append(row.tolist())
self.assertEqual(len(as_list), 3)
self.assertTrue(['a', 'b', 2] in as_list)
self.assertTrue(['a', 'c', 1] in as_list)
self.assertTrue(['b', 'c', 1] in as_list)
def test_multiple_authors(self):
testdata = {
'issue_id': [1, 2],
'authors': ['a||b||c', 'a||b']
}
testdata = pd.DataFrame(testdata)
result = collabs.get_collaborators(testdata)
as_list = []
for i, row in result.iterrows():
as_list.append(row.tolist())
self.assertEqual(len(as_list), 3)
self.assertTrue(['a', 'b', 2] in as_list)
self.assertTrue(['a', 'c', 1] in as_list)
self.assertTrue(['b', 'c', 1] in as_list)
class Test_corrections(unittest.TestCase):
def test_are_initials(self):
self.assertTrue(corr.are_initials('M.B.'))
self.assertTrue(corr.are_initials('M. B.'))
self.assertTrue(corr.are_initials('M. B.'))
self.assertTrue(corr.are_initials('M. v. B.'))
self.assertTrue(corr.are_initials('A. C. W.'))
self.assertTrue(corr.are_initials('M B.'))
self.assertTrue(corr.are_initials('A. L'))
self.assertTrue(corr.are_initials('M B'))
self.assertTrue(corr.are_initials('Dr. M. B.'))
def test_arent_initials(self):
self.assertFalse(corr.are_initials('M.Bokulic'))
self.assertFalse(corr.are_initials('Dr. W. Pape'))
self.assertFalse(corr.are_initials('Schwitters'))
self.assertFalse(corr.are_initials('W.Decksel'))
self.assertFalse(corr.are_initials('T'))
self.assertFalse(corr.are_initials('T.'))
self.assertFalse(corr.are_initials('P.L. Flouquet'))
self.assertFalse(corr.are_initials('P. L. Flouquet'))
self.assertFalse(corr.are_initials('Joseph Aug. Lux'))
self.assertFalse(corr.are_initials('J. Leonard Roeselare'))
def test_fix_initials(self):
self.assertEqual(corr.fix_initials('A. C. W'), 'A. C. W.')
self.assertEqual(corr.fix_initials('A. D'), 'A. D.')
self.assertEqual(corr.fix_initials('A. v. K'), 'A. v. K.')
self.assertEqual(corr.fix_initials('F.H.'), 'F. H.')
self.assertEqual(corr.fix_initials('R R'), 'R. R.')
self.assertEqual(corr.fix_initials('R.B.'), 'R. B.')
self.assertEqual(corr.fix_initials('W. W.'), 'W. W.')
self.assertEqual(corr.fix_initials('H W'), 'H. W.')
self.assertEqual(corr.fix_initials('H P.'), 'H. P.')
self.assertEqual(corr.fix_initials('H P.'), 'H. P.')
self.assertEqual(corr.fix_initials('H P '), 'H. P.')
self.assertEqual(corr.fix_initials('H. P. '), 'H. P.')
def test_get_title(self):
self.assertEqual(corr.get_title_and_rest('Prof. Kochalka')[0],
'Prof.')
self.assertEqual(corr.get_title_and_rest('prof. Kochalka')[0],
'Prof.')
self.assertEqual(corr.get_title_and_rest('Dr. med. A. Meier-Naef')[0],
'Dr. med.')
# subtitle needs to have a dot
self.assertEqual(corr.get_title_and_rest('Dr med A. Meier-Naef')[0],
'Dr.')
self.assertEqual(corr.get_title_and_rest('Dr. Med. A. Meier-Naef')[0],
'Dr.')
self.assertEqual(corr.get_title_and_rest('Dr. S. Friedlaender')[0],
'Dr.')
self.assertEqual(corr.get_title_and_rest('Dr. phil. G. Räusch')[0],
'Dr. phil.')
self.assertEqual(corr.get_title_and_rest('Prof. Avgust Černigoj')[0],
'Prof.')
# longforms
self.assertEqual(corr.get_title_and_rest('Doktor phil. G. Räusch')[0], |
'Dr. phil.')
self.assertEqual(corr.get_title_and_rest(
'Profesor Avgust Černigoj')[0],
| 'Prof.')
# without titles
self.assertEqual(corr.get_title_and_rest('Avgust Černigoj')[0],
'')
# testing rest
self.assertEqual(corr.get_title_and_rest('Prof. Avgust Černigoj')[1],
'Avgust Černigoj')
self.assertEqual(corr.get_title_and_rest('Dr. med. A. Meier-Naef')[1],
'A. Meier-Naef')
self.assertEqual(corr.get_title_and_rest(' A. Meier-Naef')[1],
' A. Meier-Naef')
def test_strip_year(self):
self.assertEqual(corr.strip_year("A R. Schönlank"),
"A R. Schönlank")
self.assertEqual(corr.strip_year("Adrian-Nilsson, Gösta, 1884-1965"),
"Adrian-Nilsson, Gösta")
self.assertEqual(corr.strip_year("Albrecht, Emil Oskar 1895-1953"),
"Albrecht, Emil Oskar")
self.assertEqual(corr.strip_year("Alcaro, Rodolfo, 1896-1982"),
"Alcaro, Rodolfo")
self.assertEqual(corr.strip_year("Allwohn, Adolf, 1893-"),
"Allwohn, Adolf")
self.assertEqual(corr.strip_year("Apollinaire, Guillaume, 1880-1918"),
"Apollinaire, Guillaume")
self.assertEqual(corr.strip_year("Baum, Peter, 1939-..."),
"Baum, Peter")
self.assertEqual(corr.strip_year("Berlewi, H. (Henryk), 1894-1967"),
"Berlewi, H. (Henryk)")
self.assertEqual(corr.strip_year("Buzzi, Paolo, -1956"),
"Buzzi, Paolo")
self.assertEqual(corr.strip_year("Byk, Edgar 1883-"),
"Byk, Edgar")
self.assertEqual(corr.strip_year("Jaeger-Mewe, Harrn (1876- )."),
"Jaeger-Mewe, Harrn")
self.assertEqual(corr.strip_year("Leonardo, da Vinci, 1452-1519."),
"Leonardo, da Vinci")
self.assertEqual(corr.strip_year("Topp, Arnold, 1887-1945?"),
"Topp, Arnold")
self.assertEqual(corr.strip_year("Zerbst, Max, 18..-19.."),
"Zerbst, Max")
def test_get_initials(self):
self.assertEqual(corr.get_initials("Reinhard Goering"), 'R. G.')
self.assertEqual(corr.get_initials("Herwarth Walden"), 'H. W.')
self.assertEqual(corr.get_initials("Mary Schneider-Braillard"),
'M. S. B.')
self.assertEqual(corr.get_initials("Franz W. Seiwert"), 'F. W. S.')
self.assertEqual(corr.get_initials("Aage von Kohl"), 'A. v. K.')
self.assertEqual(corr.get_initials("A. M. Frey"), 'A. M. F.')
self.assertEqual(corr.get_initials("Campendonk"), 'C.')
self.assertEqual(corr.get_initials("L. Wachlmeier"), 'L. W.')
self.assertEqual(corr.get_initials("Schmidt-Rottluff"), 'S. R.')
self.assertEqual(corr.get_initials("r delaunay"), 'r. d.')
self.assertEqual(corr.get_initials("Chr. Berberoff"), 'C. B.')
self.assertEqual(corr.get_initials("Edm. Kesting"), 'E. K.')
self.assertEqual(corr.get_initials("LOUIS MARCOUSSIS"), 'L. M.')
# dont know what to do with this, it doesnt matter
self.assertEqual(corr.get_initials("365-er Dichter"), '3. e. D.')
self.assertEqual(corr.get_initials("L'Oeil de Zinc"), 'L. d. Z.')
self.assertEqual(corr.get_initials("Karl Höfler"), 'K. H.')
self.assertEqual(corr.get_initials("Ernst Kállai"), 'E. K.')
def test_order_names(self):
self.assertEqual(corr.order_names("A R. Schönlank"),
"A R. Schönlank")
self.assertEqual(corr.order_names("Adrian-Nilsson, Gösta"),
"Gösta Adr |
anhstudios/swganh | data/scripts/templates/object/ship/shared_smuggler_warlord_ship_tier5.py | Python | mit | 424 | 0.049528 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_smuggler_warlord_ship_tier5.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### E | ND MOD | IFICATIONS ####
return result |
mburakergenc/Malware-Detection-using-Machine-Learning | cuckoo/modules/processing/platform/linux.py | Python | mit | 4,175 | 0.005269 | # Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import logging
import datetime
import re
import dateutil.parser
from lib.cuckoo.common.abstracts import BehaviorHandler
log = logging.getLogger(__name__)
class FilteredProcessLog(list):
def __init__(self, eventstream, **kwfilters):
self.eventstream = eventstream
self.kwfilters = kwfilters
| def __iter__(self):
for event in self.eventstream:
for k, v in self.kwfilters | .items():
if event[k] != v:
continue
del event["type"]
yield event
def __nonzero__(self):
return True
class LinuxSystemTap(BehaviorHandler):
"""Parses systemtap generated plaintext logs (see data/strace.stp)."""
key = "processes"
def __init__(self, *args, **kwargs):
super(LinuxSystemTap, self).__init__(*args, **kwargs)
self.processes = []
self.pids_seen = set()
self.forkmap = {}
self.matched = False
self._check_for_probelkm()
def _check_for_probelkm(self):
path_lkm = os.path.join(self.analysis.logs_path, "all.lkm")
if os.path.exists(path_lkm):
lines = open(path_lkm).readlines()
forks = [re.findall("task (\d+)@0x[0-9a-f]+ forked to (\d+)@0x[0-9a-f]+", line) for line in lines]
self.forkmap = dict((j, i) for i, j in reduce(lambda x, y: x+y, forks, []))
# self.results["source"].append("probelkm")
def handles_path(self, path):
if path.endswith(".stap"):
self.matched = True
return True
def parse(self, path):
parser = StapParser(open(path))
for event in parser:
pid = event["pid"]
if pid not in self.pids_seen:
self.pids_seen.add(pid)
ppid = self.forkmap.get(pid, -1)
process = {
"pid": pid,
"ppid": ppid,
"process_name": event["process_name"],
"first_seen": event["time"],
}
# create a process event as we don't have those with linux+systemtap
pevent = dict(process)
pevent["type"] = "process"
yield pevent
process["calls"] = FilteredProcessLog(parser, pid=pid)
self.processes.append(process)
yield event
def run(self):
if not self.matched:
return
self.processes.sort(key=lambda process: process["first_seen"])
return self.processes
class StapParser(object):
"""Handle .stap logs from the Linux analyzer."""
def __init__(self, fd):
self.fd = fd
def __iter__(self):
self.fd.seek(0)
for line in self.fd:
# 'Thu May 7 14:58:43 2015.390178 python@7f798cb95240[2114] close(6) = 0\n'
# datetime is 31 characters
datetimepart, rest = line[:31], line[32:]
# incredibly sophisticated date time handling
dtms = datetime.timedelta(0, 0, int(datetimepart.split(".", 1)[1]))
dt = dateutil.parser.parse(datetimepart.split(".", 1)[0]) + dtms
parts = re.match("^(.+)@([a-f0-9]+)\[(\d+)\] (\w+)\((.*)\) = (\S+){0,1}\s{0,1}(\(\w+\)){0,1}$", rest)
if not parts:
log.warning("Could not parse syscall trace line: %s", line)
continue
pname, ip, pid, fn, arguments, retval, ecode = parts.groups()
argsplit = arguments.split(", ")
arguments = dict(("p%u" % pos, argsplit[pos]) for pos in range(len(argsplit)))
pid = int(pid) if pid.isdigit() else -1
yield {
"time": dt, "process_name": pname, "pid": pid,
"instruction_pointer": ip, "api": fn, "arguments": arguments,
"return_value": retval, "status": ecode,
"type": "apicall", "raw": line,
}
|
byndcivilization/toy-infrastructure | flask-app/config.py | Python | gpl-3.0 | 851 | 0 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = os.getenv('APP_SECRET_KEY', '')
# db config
DB_PORT = os.getenv('DB_PORT', '')
DB_HOST = os.getenv('DB_HOST', '')
DB_ROLE = os.getenv('DB_ROLE', '')
# TODO: abstract auth stuff to kubernetes manifests
DB_PASSWORD = os.getenv('DB_PASSWORD', '')
DB_NAME = os.getenv('DB_NAME', '')
SQLALCHEMY_DATABASE_URI = 'postgresql://{}:{}@{}:{}/{}'.format(
| DB_ROLE, DB_PASSWORD, DB_HOST, str(DB_PORT), DB_NAME)
|
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
|
TheMutley/openpilot | selfdrive/controls/lib/longitudinal_mpc/libmpc_py.py | Python | mit | 989 | 0.002022 | import os
import subprocess
from cffi import FFI
mpc_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
subprocess.check_call(["make", "-j4"], cwd=mpc_dir)
def _get_libmpc(mpc_id):
libmpc_fn = os.path.join(mpc_dir, | "libcommampc%d.so" % mpc_id)
ffi = FFI()
ffi.cdef("""
typedef struct {
double x_ego, v_ego, a_ego, x_l, v_l, a_l;
} state_t;
typedef struct {
double x_ego[21];
double v_ego[21];
double a_ | ego[21];
double j_ego[21];
double x_l[21];
double v_l[21];
double a_l[21];
double cost;
} log_t;
void init(double ttcCost, double distanceCost, double accelerationCost, double jerkCost);
void init_with_simulation(double v_ego, double x_l, double v_l, double a_l, double l);
int run_mpc(state_t * x0, log_t * solution,
double l);
""")
return (ffi, ffi.dlopen(libmpc_fn))
mpcs = [_get_libmpc(1), _get_libmpc(2)]
def get_libmpc(mpc_id):
return mpcs[mpc_id - 1]
|
DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/edid/__init__.py | Python | gpl-3.0 | 1,316 | 0.009119 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Bert Vermeulen <bert@biot.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
Extended Display Identification Data (EDID) 1.3 structure decoder.
The three-character vendor ID as specified in the EDID standard ref | ers to
a Plug and Play ID (PNPID). The list of PNPID assignments is done by Microsoft.
The 'pnpids.txt' file included with this protocol decoder is derived from
the list of | assignments downloadable from that page. It was retrieved in
January 2012.
Details:
https://en.wikipedia.org/wiki/Extended_display_identification_data
http://msdn.microsoft.com/en-us/windows/hardware/gg463195
'''
from .pd import Decoder
|
jschaf/pylint-flask | test/input/func_noerror_flask_ext_long.py | Python | gpl-2.0 | 115 | 0 | '''Ensure | that pylint finds the exported methods from flask.ext.'''
from flask.ext.wtf import Form
MYFORM = Form | |
hagenw/ltfat | mat2doc/mat/release.py | Python | gpl-3.0 | 770 | 0.014286 | print "Creating downloadable package"
# Remove unwanted files
s=os.path.join(conf.t.dir,'testing')
rmrf(s)
os.rmdir(s)
s=os.path.join(conf.t.dir,'timing')
rmrf(s)
os.rmdir(s)
s=os.path.join(conf.t.dir,'reference')
rmrf(s)
os.rmdir(s)
# Recursively remove the .git files
for root, dirs, files in os.walk(conf.t.dir, topdown=False):
for name in files:
if name in ['.gitatt | ributes','.gitignore','desktop.ini']:
os.remove(os.path.join(root, name))
# "bootstrap" the configure files
os.system("cd "+conf.t.dir+"/src; ./bootstrap")
s=os.path.join(conf.t.dir,'src','autom4te.cache')
rmrf(s)
os.rmdir(s)
# Compile the Java classes
os.system("cd "+conf.t.dir+"/blockproc/java; make")
os.system("cd "+conf.t.dir+"/blockproc/j | ava; make classclean")
|
antkillerfarm/antkillerfarm_crazy | python/ml/tensorflow/unit_test/run_list.py | Python | gpl-3.0 | 257 | 0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
hello | = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
a = tf.constant(10)
b = tf.constant(32)
c = a + b
d = a - b
run_list = | [c, d]
print(sess.run(run_list))
|
arseneyr/essentia | test/src/unittest/sfx/test_tctototal_streaming.py | Python | agpl-3.0 | 2,098 | 0.001907 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is fr | ee so | ftware: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
veltri/DLV2 | tests/parser/bug.69_working.test.py | Python | apache-2.0 | 1,691 | 0.002365 | input = """
% Simplified version of a program by Michael Fink, which segfaults
% with the Jun 11 2001 Release.
object(o1,4).
object(o2,5).
object(o3,17).
bid(1,14).
bid(2,34).
bid(3,14).
contains(1,o1,2).
contains(1,o2,4).
contains(1,o3,2).
contains(2,o3,10).
contains(3,o2,5).
%#maxint=62.
select(B) | nselect(B) :- bid(B,V).
:- nselect(B).
assigned(B,I) :- bid(B,V), I!=0, not nassigned(B,I). %#int(I), !=(I,0), not nassigned(B,I).
nassigned(B,I) :- assigned(B,J), I!=J. %#int(I), !=(I,J).
nassigned(B,I) :- assigned(B1,I), select(B), B!=B1. %!=(B,B1).
assigned_bidId(I) :- assigned(B,I).
:- bid(B,V), not as | signed_bidId(1).
:- assigned(B1,I), not assigned_bidId(J), J!=0. %#prec(I,J), !=(J,0), !=(J,#maxint).
%:- nselect(B),bid(B,V). [V:4]
%:- assigned(B,I), contains(B,O,N), #int(S), S=N+I. [S:1]
"""
output = """
% Simplified version of a program by Michael Fink, which segfaults
% with the Jun 11 2001 Relea | se.
object(o1,4).
object(o2,5).
object(o3,17).
bid(1,14).
bid(2,34).
bid(3,14).
contains(1,o1,2).
contains(1,o2,4).
contains(1,o3,2).
contains(2,o3,10).
contains(3,o2,5).
%#maxint=62.
select(B) | nselect(B) :- bid(B,V).
:- nselect(B).
assigned(B,I) :- bid(B,V), I!=0, not nassigned(B,I). %#int(I), !=(I,0), not nassigned(B,I).
nassigned(B,I) :- assigned(B,J), I!=J. %#int(I), !=(I,J).
nassigned(B,I) :- assigned(B1,I), select(B), B!=B1. %!=(B,B1).
assigned_bidId(I) :- assigned(B,I).
:- bid(B,V), not assigned_bidId(1).
:- assigned(B1,I), not assigned_bidId(J), J!=0. %#prec(I,J), !=(J,0), !=(J,#maxint).
%:- nselect(B),bid(B,V). [V:4]
%:- assigned(B,I), contains(B,O,N), #int(S), S=N+I. [S:1]
"""
|
danielgarm/Public-Algorithms | LeetCode/0206 - Reverse Linked List.py | Python | mit | 472 | 0.004237 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListN | ode:
current = head
previous = None
while current is no | t None:
temp = current.next
current.next = previous
previous = current
current = temp
return previous
|
tangming2010/gitRepository | we.py | Python | gpl-2.0 | 201 | 0.014925 | import | json
class Student(object):
def __init__(self, name, age, score):
self.name = name
self.age = age
self.score = score
s = Student('Bob', 20, 88)
| print(json.dumps(s)) |
ozdemircili/pycheat | pycheat/api_nmap.py | Python | gpl-3.0 | 2,549 | 0.009023 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 12:10:57 2013
@author: ozdemircili
"""
"""
Install :
pip install python-nmap
"""
import nmap # import nmap.py module
nm = nmap.PortScanner() # instantiate nmap.PortScanner object
nm.scan('127.0.0.1', '22-443') # scan host 127.0.0.1, ports from 22 to 443
nm.command_line() # get command line used for the scan : nmap -oX - -p 22-443 127.0.0.1
nm.scaninfo() | # get nmap scan informations {'tcp': {'services': '22-443', 'method': 'connect'}}
nm.all_hosts() # get all hosts tha | t were scanned
for host in nm.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, nm[host].hostname()))
print('State : %s' % nm[host].state())
for proto in nm[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = nm[host][proto].keys()
lport.sort()
for port in lport:
print('port : %s\tstate : %s' % (port, nm[host][proto][port]['state']))
print('----------------------------------------------------')
#Explanation in detail:
nm['127.0.0.1'].hostname() # get hostname for host 127.0.0.1
nm['127.0.0.1'].state() # get state of host 127.0.0.1 (up|down|unknown|skipped)
nm['127.0.0.1'].all_protocols() # get all scanned protocols ['tcp', 'udp'] in (ip|tcp|udp|sctp)
nm['127.0.0.1']['tcp'].keys() # get all ports for tcp protocol
nm['127.0.0.1'].all_tcp() # get all ports for tcp protocol (sorted version)
nm['127.0.0.1'].all_udp() # get all ports for udp protocol (sorted version)
nm['127.0.0.1'].all_ip() # get all ports for ip protocol (sorted version)
nm['127.0.0.1'].all_sctp() # get all ports for sctp protocol (sorted version)
nm['127.0.0.1'].has_tcp(22) # is there any information for port 22/tcp on host 127.0.0.1
nm['127.0.0.1']['tcp'][22] # get infos about port 22 in tcp on host 127.0.0.1
nm['127.0.0.1'].tcp(22) # get infos about port 22 in tcp on host 127.0.0.1
nm['127.0.0.1']['tcp'][22]['state'] # get state of port 22/tcp on host 127.0.0.1 (open
#print result as CSV
print(nm.csv())
#Another scan example
# If you want to do a pingsweep on network 10.5.200.0/24
nm.scan(hosts='10.5.200.1/24', arguments='-n -sP -PE -PA21,23,80,3389')
hosts_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]
for host, status in hosts_list:
print('{0}:{1}'.format(host, status))
|
VitalPet/addons-onestein | hr_absenteeism/__manifest__.py | Python | agpl-3.0 | 776 | 0 | # -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': "Absence Management",
'summary': """Create time based absence notifications""",
'author': 'Onestein',
'website': 'http://www.onestein.eu',
'images': ['static/description/main_screenshot.png'],
'category': 'Human Resources',
'version': '10.0.1.0.0',
'license': 'AGPL-3',
'depends': [
'hr_holidays',
],
'data': [
'security/ir.model.access.csv',
'views/hr_holidays_status.xml',
'views/hr_holidays.xml',
'data/hr_absenteeism_cron.xml',
],
'demo': [],
'installable': True, |
'aut | o_install': False,
'application': False,
}
|
datalogics/scons | test/LIBS.py | Python | mit | 7,418 | 0.002831 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
import sys
if sys.platform == 'win32':
_exe = '.exe'
bar_lib = 'bar.lib'
else:
_exe = ''
bar_lib = 'libbar.a'
test = TestSCons.TestSCons()
test.subdir('sub1', 'sub2')
foo1_exe = test.workpath('foo1' + _exe)
foo2_exe = test.workpath('foo2' + _exe)
foo3_exe = test.workpath('foo3' + _exe)
foo4_exe = test.workpath('foo4' + _exe)
foo5_exe = test.workpath('foo5' + _exe)
slprog_exe = test.workpath('slprog' + _exe)
test.write('SConstruct', """
env = Environment(LIBS=['bar'], LIBPATH = '.')
env.Program(target='foo1', source='foo1.c')
env2 = Environment(LIBS=[File(r'%s')], LIBPATH = '.')
env2.Program(target='foo2', source='foo2.c')
env3 = Environment(LIBS='bar', LIBPATH = '.')
env3.Program(target='foo3', source='foo3.c')
env4 = Environment(LIBS=File(r'%s'), LIBPATH = '.')
env4.Program(target='foo4', source='foo4.c')
env5 = Environment(LIBS=['bar', '$UNSPECIFIED'], LIBPATH = '.')
env5.Program(target='foo5', source='foo5.c')
sl = env.StaticLibrary('sl.c')
env.Program(target='slprog.c', LIBS=[sl])
SConscript('sub1/SConscript', 'env')
SConscript('sub2/SConscript', 'env')
""" % (bar_lib, bar_lib))
test.write(['sub1', 'SConscript'], r"""
Import('env')
lib = env.Library(target='bar', source=Split('bar.c baz.c'))
env.Install('..', lib)
""")
test.write(['sub2', 'SConscript'], r"""
Import('env')
lib = env.Library(target='baz', source='baz.c')
env.Install('..', lib)
""")
foo_contents = r"""
void bar();
void baz();
int main(void)
{
bar();
baz();
return 0;
}
"""
test.write('foo1.c', foo_contents)
test.write('foo2.c', foo_contents)
test.write('foo3.c', foo_contents)
test.write('foo4.c', foo_contents)
test.write('foo5.c', foo_contents)
test.write('sl.c', """\
#include <stdio.h>
void
sl(void)
{
printf("sl.c\\n");
}
""")
test.write('slprog.c', """\
#include <stdio.h>
int
main(int argc, char *argv[])
{
sl();
printf("slprog.c\\n");
exit (0);
}
""")
test.write(['sub1', 'bar.c'], r"""
#include <stdio.h>
void bar()
{
printf("sub1/bar.c\n");
}
""")
test.write(['sub1', 'baz.c'], r"""
#include <stdio.h>
void baz()
{
printf("sub1/baz.c\n");
}
""")
test.write(['sub2', 'baz.c'], r"""
#include <stdio.h>
void baz()
{
printf("sub2/baz.c\n");
}
""")
# ar sometimes produces a "warning" on stderr -- ar: creating sub1/libbar.a
test.run(arguments = '.', stderr=None)
test.run(program=foo1_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
test.run(program=foo2_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
test.run(program=foo3_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
test.run(program=foo4_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
test.run(program=foo5_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
test.run(program=slprog_exe, stdout='sl.c\nslprog.c\n')
#
test.write('SConstruct', """
env = Environment(LIBS=['baz'])
env.Program(target='foo1', source='foo1.c', LIBS=['$LIBS', 'bar'], LIBPATH = '.')
SConscript('sub1/SConscript', 'env')
SConscript('sub2/SConscript', 'env')
""")
test.run(arguments = '.')
test.run(program=foo1_exe, stdout='sub1/bar.c\nsub2/baz.c\n')
#
test.write('SConstruct', """
env = Environment(LIBS=['bar', 'baz'], LIBPATH = '.')
env.Program(target='foo1', source='foo1.c')
SConscript('sub1/SConscript', 'env')
SConscript('sub2/SConscript', 'env')
""")
# on IRIX, ld32 prints out a warning saying that libbaz.a isn't used
sw = 'ld32: WARNING 84 : ./libbaz.a is not used for resolving any symbol.\n'
test.run(arguments = '.',
stderr='(%s|%s'%(sw, TestSCons.noisy_ar[1:]),
match=TestSCons.match_re_dotall)
#test.fail_test(not test.stderr() in ['', sw])
test.run(program=foo1_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
#
test.write('SConstruct', """
env = Environment()
env.Program(target='foo1', source='foo1.c', LIBS=['bar', 'baz'], LIBPATH = '.')
SConscript('sub | 1/SConscript', 'env')
SConscript('sub2/SConscript', 'env')
""")
test.run(arguments = '.')
test.run(program=foo1_exe, stdout='sub1/bar.c\nsub1/baz.c\n')
test.write(['sub1', 'baz.c'], r"""
#include <stdio.h>
void baz()
{
printf("sub1/baz.c 2\n");
}
""")
test.run(arguments = | '.',
stderr='(%s|%s'%(sw, TestSCons.noisy_ar[1:]),
match=TestSCons.match_re_dotall)
#test.fail_test(not test.stderr() in ['', sw, TestSCons.noisy_ar])
test.run(program=foo1_exe, stdout='sub1/bar.c\nsub1/baz.c 2\n')
# Make sure we don't add $LIBPREFIX to library names that
# already have the prefix on them.
blender_exe = test.workpath('blender' + _exe)
test.subdir('src', ['src', 'component1'], ['src', 'component2'])
test.write('SConstruct', """\
SConscript(['src/SConscript'])
libpath = (['lib'])
libraries = (['libtest_component2',
'libtest_component1'])
# To remove the dependency problem, you should rename blender to mlender.
Program (source='main.c', target='blender', LIBS=libraries, LIBPREFIX='lib', LIBPATH=libpath, CPPPATH=['src/component2'])
""")
test.write('main.c', """\
#include <stdlib.h>
#include "message2.h"
int main (void)
{
DisplayMessage2();
exit (0);
}
""")
test.write(['src', 'SConscript'], """\
SConscript(['component1/SConscript',
'component2/SConscript'])
""")
test.write(['src', 'component1', 'SConscript'], """\
source_files = ['message1.c']
Library (target='../../lib/libtest_component1', source=source_files, LINKFLAGS='')
""")
test.write(['src', 'component1', 'message1.c'], """\
#include <stdio.h>
void DisplayMessage1 (void)
{
printf ("src/component1/message.c\\n");
}
""")
test.write(['src', 'component1', 'message1.h'], """\
void DisplayMessage1 (void);
""")
test.write(['src', 'component2', 'SConscript'], """\
source_files = ['message2.c']
include_paths = ['../component1']
Library (target='../../lib/libtest_component2', source=source_files, CPPPATH=include_paths)
""")
test.write(['src', 'component2', 'message2.h'], """\
void DisplayMessage2 (void);
""")
test.write(['src', 'component2', 'message2.c'], """\
#include <stdio.h>
#include "message1.h"
int DisplayMessage2 (void)
{
DisplayMessage1();
printf ("src/component2/hello.c\\n");
}
""")
test.run(arguments = '.',
stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
test.run(program=blender_exe,
stdout='src/component1/message.c\nsrc/component2/hello.c\n')
test.pass_test()
|
jjon/Google-Spreadsheet-python-scripts | GSheet2Python.py | Python | gpl-3.0 | 1,622 | 0.006169 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Takes Google's json encoded spreadsheet and prints a python dictionary keyed by
the values in the first column of the SS. ©2017 J. J. Crump, GNU general public
license
"""
import urllib2
from pprint import pprint
import re
import json
# This is the url of a sample google spreadsheet that I've published to the web. The url returns a prettyprinted json string:
ssURL = "https://spreadsheets.google.com/feeds/list/1OPNQC3xBp3iQTpjVfd6cpvvA0BpHWhb3QiNOvGFZ9z8/od6/public/basic?prettyprint=true&alt=json"
response = urllib2.urlopen(ssURL)
jsonIn = response.read()
pyDict = json.loads(jsonIn)
entryLis | t = pyDict['feed']['entry']
fields = ["name", "city", "state", "zip"]
SSdict = {}
def parsestring(rowstring, fields):
"""yields tuples of (fieldname, fieldvalue)"""
i = iter(fields[1:])
field = i.next()
start = end = 0
try:
while True:
lastfield = | field
field = i.next()
if rowstring.find(field) == -1:
field = lastfield
continue
end = rowstring.find(field)
yield lastfield, re.sub('^.*?:', '', rowstring[start:end].strip().strip(',')).strip()
start = end
except StopIteration:
start = rowstring.find(field)
yield lastfield, re.sub('^.*?:', '', rowstring[start:].strip().strip(',')).strip()
for e in entryList:
entrydict = dict([x for x in parsestring(e['content']['$t'], fields)])
entrykey = e['title']['$t']
SSdict[entrykey] = entrydict
#print stringIn
pprint(SSdict)
|
nf-dj/cellpilot | drone/scripts/set_led.py | Python | mit | 1,239 | 0.00565 | #!/usr/bin/python
# Copyright (c) 2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, | to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limita | tion the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import cellpilot_usart
led_no=int(sys.argv[1])
state=int(sys.argv[2])
cellpilot_usart.set_led(led_no,state)
|
chienlieu2017/it_management | odoo/addons/account/models/account_move.py | Python | gpl-3.0 | 83,795 | 0.00537 | # -*- coding: utf-8 -*-
import time
from collections import OrderedDict
from odoo import api, fields, models, _
from odoo.osv import expression
from odoo.exceptions import RedirectWarning, UserError, ValidationError
from odoo.tools.misc import formatLang
from odoo.tools import float_is_zero, float_compare
from odoo.tools.safe_eval import safe_eval
from lxml import etree
#----------------------------------------------------------
# Entries
#----------------------------------------------------------
class AccountMove(models.Model):
_name = "account.move"
_description = "Account Entry"
_order = 'date desc, id desc'
@api.multi
@api.depends('name', 'state')
def name_get(self):
result = []
for move in self:
if move.state == 'draft':
name = '* ' + str(move.id)
else:
name = move.name
result.append((move.id, name))
return result
@api.multi
@api.depends('line_ids.debit', 'line_ids.credit')
def _amount_compute(self):
for move in self:
total = 0.0
for line in move.line_ids:
total += line.debit
move.amount = total
@api.depends('line_ids.debit', 'line_ids.credit', 'line_ids.matched_debit_ids.amount', 'line_ids.matched_credit_ids.amount', 'line_ids.account_id.user_type_id.type')
def _compute_matched_percentage(self):
"""Compute the percentage to apply for cash basis method. This value is relevant only for moves that
involve journal items on receivable or payable accounts.
"""
for move in self:
total_amount = 0.0
total_reconciled = 0.0
for line in move.line_ids:
if line.account_id.user_type_id.type in ('receivable', 'payable'):
amount = abs(line.debit - line.credit)
total_amount += amount
for partial_line in (line.matched_debit_ids + line.matched_credit_ids):
total_reconciled += partial_line.amount
if float_is_zero(total_amount, precision_rounding=move.currency_id.rounding):
move.matched_percentage = 1.0
else:
move.matched_percentage = total_reconciled / total_amount
@api.one
@api.depends('company_id')
def _compute_currency(self):
self.currency_id = self.company_id.currency_id or self.env.user.company_id.currency_id
@api.multi
def _get_default_journal(self):
if self.env.context.get('default_journal_type'):
return self.env['account.journal'].search([('type', '=', self.env.context['default_journal_type'])], limit=1).id
@api.multi
@api.depends('line_ids.partner_id')
def _compute_partner_id(self):
for move in self:
partner = move.line_ids.mapped('partner_id')
move.partner_id = partner.id if len(partner) == 1 else False
name = fields.Char(string='Number', required=True, copy=False, default='/')
ref = fields.Char(string='Reference', copy=False)
date = fields.Date(required=True, states={'posted': [('readonly', True)]}, index=True, default=fields.Date.context_today)
journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'posted': [('readonly', True)]}, default=_get_default_journal)
currency_id = fields. | Many2one('res.currency', compute='_compute_currency', store=True, string="Currency")
state = fields.Selection([('draft', 'Unposted'), ('posted', 'Posted')], stri | ng='Status',
required=True, readonly=True, copy=False, default='draft',
help='All manually created new journal entries are usually in the status \'Unposted\', '
'but you can set the option to skip that status on the related journal. '
'In that case, they will behave as journal entries automatically created by the '
'system on document validation (invoices, bank statements...) and will be created '
'in \'Posted\' status.')
line_ids = fields.One2many('account.move.line', 'move_id', string='Journal Items',
states={'posted': [('readonly', True)]}, copy=True)
partner_id = fields.Many2one('res.partner', compute='_compute_partner_id', string="Partner", store=True, readonly=True)
amount = fields.Monetary(compute='_amount_compute', store=True)
narration = fields.Text(string='Internal Note')
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
default=lambda self: self.env.user.company_id)
matched_percentage = fields.Float('Percentage Matched', compute='_compute_matched_percentage', digits=0, store=True, readonly=True, help="Technical field used in cash basis method")
statement_line_id = fields.Many2one('account.bank.statement.line', index=True, string='Bank statement line reconciled with this entry', copy=False, readonly=True)
# Dummy Account field to search on account.move by account_id
dummy_account_id = fields.Many2one('account.account', related='line_ids.account_id', string='Account', store=False)
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
res = super(AccountMove, self).fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if self._context.get('vat_domain'):
res['fields']['line_ids']['views']['tree']['fields']['tax_line_id']['domain'] = [('tag_ids', 'in', [self.env.ref(self._context.get('vat_domain')).id])]
return res
@api.model
def create(self, vals):
move = super(AccountMove, self.with_context(check_move_validity=False, partner_id=vals.get('partner_id'))).create(vals)
move.assert_balanced()
return move
@api.multi
def write(self, vals):
if 'line_ids' in vals:
res = super(AccountMove, self.with_context(check_move_validity=False)).write(vals)
self.assert_balanced()
else:
res = super(AccountMove, self).write(vals)
return res
@api.multi
def post(self):
invoice = self._context.get('invoice', False)
self._post_validate()
for move in self:
move.line_ids.create_analytic_lines()
if move.name == '/':
new_name = False
journal = move.journal_id
if invoice and invoice.move_name and invoice.move_name != '/':
new_name = invoice.move_name
else:
if journal.sequence_id:
# If invoice is actually refund and journal has a refund_sequence then use that one or use the regular one
sequence = journal.sequence_id
if invoice and invoice.type in ['out_refund', 'in_refund'] and journal.refund_sequence:
if not journal.refund_sequence_id:
raise UserError(_('Please define a sequence for the refunds'))
sequence = journal.refund_sequence_id
new_name = sequence.with_context(ir_sequence_date=move.date).next_by_id()
else:
raise UserError(_('Please define a sequence on the journal.'))
if new_name:
move.name = new_name
return self.write({'state': 'posted'})
@api.multi
def button_cancel(self):
for move in self:
if not move.journal_id.update_posted:
raise UserError(_('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
if self.ids:
self._check_lock_date()
self._cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s', ('draft', tuple(self.ids),))
self.invalidate_cache()
self._check_lock_date()
return True
@api.multi
def unlink(se |
lxml/lxml | src/lxml/tests/test_unicode.py | Python | bsd-3-clause | 7,379 | 0.002035 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import sys
from .common_imports import StringIO, etree, HelperTestCase, _str, _bytes, _chr, needs_libxml
try:
unicode
except NameError:
unicode = str
ascii_uni = _bytes('a').decode('utf8')
klingon = _bytes("\\uF8D2").decode("unicode_escape") # not valid for XML names
invalid_tag = _bytes("test").decode('utf8') + klingon
uni = _bytes('\\xc3\\u0680\\u3120').decode("unicode_escape") # some non-ASCII characters
uxml = _bytes("<test><title>test \\xc3\\xa1\\u3120</title><h1>page \\xc3\\xa1\\u3120 title</h1></test>"
).decode("unicode_escape")
class UnicodeTestCase(HelperTestCase):
def test__str(self):
# test the testing framework, namely _str from common_imports
self.assertEqual(_str('\x10'), _str('\u0010'))
self.assertEqual(_str('\x10'), _str('\U00000010'))
self.assertEqual(_str('\u1234'), _str('\U00001234'))
def test_unicode_xml(self):
tree = etree.XML('<p>%s</p>' % uni)
self.assertEqual(uni, tree.text)
@needs_libxml(2, 9, 5) # not sure, at least 2.9.4 fails
def test_wide_unicode_xml(self):
if sys.maxunicode < 1114111:
return # skip test
tree = etree.XML(_bytes('<p>\\U00026007</p>').decode('unicode_escape'))
self.assertEqual(1, len(tree.text))
self.assertEqual(_bytes('\\U00026007').decode('unicode_escape'),
tree.text)
def test_unicode_xml_broken(self):
uxml = ('<?xml version="1.0" encoding="UTF-8"?>' +
'<p>%s</p>' % uni)
self.assertRaises(ValueError, etree.XML, uxml)
def test_unicode_tag(self):
el = etree.Element(uni)
self.assertEqual(uni, el.tag)
def test_unicode_tag_invalid(self):
# sadly, Klingon is not well-formed
self.assertRaises(ValueError, etree.Element, invalid_tag)
def test_unicode_nstag(self):
tag = "{http://abc/}%s" % uni
el = etree.Element(tag)
self.assertEqual(tag, el.tag)
def test_unicode_ns_invalid(self):
# namespace URIs must conform to RFC 3986
tag = "{http://%s/}abc" % uni
self.assertRaises(ValueError, etree.Element, tag)
def test_unicode_nstag_invalid(self):
# sadly, Klingon is not well-formed
tag = "{http://abc/}%s" % invalid_tag
self.assertRaises(ValueError, etree.Element, tag)
def test_unicode_qname(self):
qname = etree.QName(uni, uni)
tag = "{%s}%s" % (uni, uni)
self.assertEqual(qname.text, tag)
self.assertEqual(unicode(qname), tag)
def test_unicode_qname_invalid(self):
self.assertRaises(ValueError, etree.QName, invalid_tag)
def test_unicode_attr(self):
el = etree.Element('foo', {'bar': uni})
self.assertEqual(un | i, el.attrib['bar'])
def test_unicode_comment(self):
el = etree.Comment(uni)
self.assertEqual(uni, el.text)
def test_unicode_repr1(self):
x = etree.Element(_str('å'))
# must not raise UnicodeEncodeError
repr(x)
def test_unicode_repr2(self):
x = etree.Comment(_str('ö'))
repr(x)
def test_unicode_repr3(self):
x = etree.ProcessingInstruction(_str('Å'), _s | tr('\u0131'))
repr(x)
def test_unicode_repr4(self):
x = etree.Entity(_str('ä'))
repr(x)
def test_unicode_text(self):
e = etree.Element('e')
def settext(text):
e.text = text
self.assertRaises(ValueError, settext, _str('ab\ufffe'))
self.assertRaises(ValueError, settext, _str('ö\ffff'))
self.assertRaises(ValueError, settext, _str('\u0123\ud800'))
self.assertRaises(ValueError, settext, _str('x\ud8ff'))
self.assertRaises(ValueError, settext, _str('\U00010000\udfff'))
self.assertRaises(ValueError, settext, _str('abd\x00def'))
# should not Raise
settext(_str('\ud7ff\ue000\U00010000\U0010FFFFäöas'))
for char_val in range(0xD800, 0xDFFF+1):
self.assertRaises(ValueError, settext, 'abc' + _chr(char_val))
self.assertRaises(ValueError, settext, _chr(char_val))
self.assertRaises(ValueError, settext, _chr(char_val) + 'abc')
self.assertRaises(ValueError, settext, _bytes('\xe4'))
self.assertRaises(ValueError, settext, _bytes('\x80'))
self.assertRaises(ValueError, settext, _bytes('\xff'))
self.assertRaises(ValueError, settext, _bytes('\x08'))
self.assertRaises(ValueError, settext, _bytes('\x19'))
self.assertRaises(ValueError, settext, _bytes('\x20\x00'))
# should not Raise
settext(_bytes('\x09\x0A\x0D\x20\x60\x7f'))
def test_uniname(self):
Element = etree.Element
def el(name):
return Element(name)
self.assertRaises(ValueError, el, ':')
self.assertRaises(ValueError, el, '0a')
self.assertRaises(ValueError, el, _str('\u203f'))
# should not Raise
el(_str('\u0132'))
def test_unicode_parse_stringio(self):
el = etree.parse(StringIO('<p>%s</p>' % uni)).getroot()
self.assertEqual(uni, el.text)
## def test_parse_fileobject_unicode(self):
## # parse unicode from unnamed file object (not supported by ElementTree)
## f = SillyFileLike(uxml)
## root = etree.parse(f).getroot()
## self.assertEqual(unicode(etree.tostring(root, 'UTF-8'), 'UTF-8'),
## uxml)
class EncodingsTestCase(HelperTestCase):
def test_illegal_utf8(self):
data = _bytes('<test>\x80\x80\x80</test>', encoding='iso8859-1')
self.assertRaises(etree.XMLSyntaxError, etree.fromstring, data)
def test_illegal_utf8_recover(self):
data = _bytes('<test>\x80\x80\x80</test>', encoding='iso8859-1')
parser = etree.XMLParser(recover=True)
self.assertRaises(etree.XMLSyntaxError, etree.fromstring, data, parser)
def _test_encoding(self, encoding, xml_encoding_name=None):
foo = """<?xml version='1.0' encoding='%s'?>\n<tag attrib='123'></tag>""" % (
xml_encoding_name or encoding)
root = etree.fromstring(foo.encode(encoding))
self.assertEqual('tag', root.tag)
doc_encoding = root.getroottree().docinfo.encoding
self.assertTrue(
doc_encoding.lower().rstrip('lbe'),
(xml_encoding_name or encoding).lower().rstrip('lbe'))
def test_utf8_fromstring(self):
self._test_encoding('utf-8')
def test_utf8sig_fromstring(self):
self._test_encoding('utf_8_sig', 'utf-8')
def test_utf16_fromstring(self):
self._test_encoding('utf-16')
def test_utf16LE_fromstring(self):
self._test_encoding('utf-16le', 'utf-16')
def test_utf16BE_fromstring(self):
self._test_encoding('utf-16be', 'utf-16')
def test_utf32_fromstring(self):
self._test_encoding('utf-32', 'utf-32')
def test_utf32LE_fromstring(self):
self._test_encoding('utf-32le', 'utf-32')
def test_utf32BE_fromstring(self):
self._test_encoding('utf-32be', 'utf-32')
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(UnicodeTestCase)])
suite.addTests([unittest.makeSuite(EncodingsTestCase)])
return suite
|
edx/edx-enterprise | integrated_channels/cornerstone/migrations/0006_auto_20191001_0742.py | Python | agpl-3.0 | 824 | 0.002427 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-10-01 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cornerstone', '0005_auto_20190925_0730'),
]
operations = [
migrations.AddField(
model_name='cornerstoneenterprisecustomerconfiguration',
name='catalogs_to_transmit',
field=models.TextField(blank=True, help_text='A comma-separated list of catal | og UUIDs to transmit.', null=True),
),
migrations.AddField(
model_name='historicalcornerstoneenterprisecustomerc | onfiguration',
name='catalogs_to_transmit',
field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit.', null=True),
),
]
|
edwardt/pcp | src/python/pcp/pmi.py | Python | lgpl-2.1 | 11,357 | 0.002025 | # pylint: disable=C0103
"""Wrapper module for libpcp_import - Performace Co-Pilot Log Import API
#
# Copyright (C) 2012-2015 Red Hat.
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Found | ation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | License
# for more details.
#
# Example use of this module for creating a PCP archive:
import math
import time
import pmapi
from pcp import pmi
# Create a new archive
log = pmi.pmiLogImport("loadtest")
log.pmiSetHostname("www.abc.com")
log.pmiSetTimezone("EST-10")
# Add a metric with an instance domain
domain = 60 # Linux kernel
pmid = log.pmiID(domain, 2, 0)
indom = log.pmiInDom(domain, 2)
units = log.pmiUnits(0, 0, 0, 0, 0, 0)
log.pmiAddMetric("kernel.all.load", pmid, pmapi.PM_TYPE_FLOAT,
indom, pmapi.PM_SEM_INSTANT, units)
log.pmiAddInstance(indom, "1 minute", 1)
log.pmiAddInstance(indom, "5 minute", 5)
log.pmiAddInstance(indom, "15 minute", 15)
# Create a record with a timestamp
log.pmiPutValue("kernel.all.load", "1 minute", "%f" % 0.01)
log.pmiPutValue("kernel.all.load", "5 minute", "%f" % 0.05)
log.pmiPutValue("kernel.all.load", "15 minute", "%f" % 0.15)
timetuple = math.modf(time.time())
useconds = int(timetuple[0] * 1000000)
seconds = int(timetuple[1])
log.pmiWrite(seconds, useconds)
del log
"""
from pcp.pmapi import pmID, pmInDom, pmUnits, pmResult
from cpmi import pmiErrSymDict, PMI_MAXERRMSGLEN
import ctypes
from ctypes import cast, c_int, c_char_p, POINTER
# Performance Co-Pilot PMI library (C)
LIBPCP_IMPORT = ctypes.CDLL(ctypes.util.find_library("pcp_import"))
##
# PMI Log Import Services
LIBPCP_IMPORT.pmiDump.restype = None
LIBPCP_IMPORT.pmiDump.argtypes = None
LIBPCP_IMPORT.pmiID.restype = pmID
LIBPCP_IMPORT.pmiID.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
LIBPCP_IMPORT.pmiInDom.restype = pmInDom
LIBPCP_IMPORT.pmiInDom.argtypes = [ctypes.c_int, ctypes.c_int]
LIBPCP_IMPORT.pmiUnits.restype = pmUnits
LIBPCP_IMPORT.pmiUnits.argtypes = [
ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int, ctypes.c_int]
LIBPCP_IMPORT.pmiErrStr_r.restype = c_char_p
LIBPCP_IMPORT.pmiErrStr_r.argtypes = [c_int, c_char_p, c_int]
LIBPCP_IMPORT.pmiStart.restype = c_int
LIBPCP_IMPORT.pmiStart.argtypes = [c_char_p, c_int]
LIBPCP_IMPORT.pmiUseContext.restype = c_int
LIBPCP_IMPORT.pmiUseContext.argtypes = [c_int]
LIBPCP_IMPORT.pmiEnd.restype = c_int
LIBPCP_IMPORT.pmiEnd.argtypes = None
LIBPCP_IMPORT.pmiSetHostname.restype = c_int
LIBPCP_IMPORT.pmiSetHostname.argtypes = [c_char_p]
LIBPCP_IMPORT.pmiSetTimezone.restype = c_int
LIBPCP_IMPORT.pmiSetTimezone.argtypes = [c_char_p]
LIBPCP_IMPORT.pmiAddMetric.restype = c_int
LIBPCP_IMPORT.pmiAddMetric.argtypes = [
c_char_p, pmID, c_int, pmInDom, c_int, pmUnits]
LIBPCP_IMPORT.pmiAddInstance.restype = c_int
LIBPCP_IMPORT.pmiAddInstance.argtypes = [pmInDom, c_char_p, c_int]
LIBPCP_IMPORT.pmiPutValue.restype = c_int
LIBPCP_IMPORT.pmiPutValue.argtypes = [c_char_p, c_char_p, c_char_p]
LIBPCP_IMPORT.pmiGetHandle.restype = c_int
LIBPCP_IMPORT.pmiGetHandle.argtypes = [c_char_p, c_char_p]
LIBPCP_IMPORT.pmiPutValueHandle.restype = c_int
LIBPCP_IMPORT.pmiPutValueHandle.argtypes = [c_int, c_char_p]
LIBPCP_IMPORT.pmiWrite.restype = c_int
LIBPCP_IMPORT.pmiWrite.argtypes = [c_int, c_int]
LIBPCP_IMPORT.pmiPutResult.restype = c_int
LIBPCP_IMPORT.pmiPutResult.argtypes = [POINTER(pmResult)]
#
# definition of exception classes
#
class pmiErr(Exception):
'''
Encapsulation for PMI interface error code
'''
def __str__(self):
error_code = self.args[0]
try:
error_symbol = pmiErrSymDict[error_code]
error_string = ctypes.create_string_buffer(PMI_MAXERRMSGLEN)
error_string = LIBPCP_IMPORT.pmiErrStr_r(error_code,
error_string, PMI_MAXERRMSGLEN)
except KeyError:
error_symbol = error_string = ""
return "%s %s" % (error_symbol, error_string)
#
# class LogImport
#
# This class wraps the PMI (Log Import) library functions
#
class pmiLogImport(object):
"""Defines a PCP Log Import archive context
This is used to create a PCP archive from an external source
"""
##
# property read methods
def read_path(self):
""" Property for archive path """
return self._path
def read_ctx(self):
""" Property for log import context """
return self._ctx
##
# property definitions
path = property(read_path, None, None, None)
ctx = property(read_ctx, None, None, None)
##
# overloads
def __init__(self, path, inherit = 0):
if type(path) != type(b''):
path = path.encode('utf-8')
self._path = path # the archive path (file name)
self._ctx = LIBPCP_IMPORT.pmiStart(c_char_p(path), inherit)
if self._ctx < 0:
raise pmiErr(self._ctx)
def __del__(self):
if LIBPCP_IMPORT:
LIBPCP_IMPORT.pmiUseContext(self._ctx)
LIBPCP_IMPORT.pmiEnd()
self._ctx = -1
##
# PMI Log Import Services
def pmiSetHostname(self, hostname):
"""PMI - set the source host name for a Log Import archive """
status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
if status < 0:
raise pmiErr(status)
if type(hostname) != type(b''):
hostname = hostname.encode('utf-8')
status = LIBPCP_IMPORT.pmiSetHostname(c_char_p(hostname))
if status < 0:
raise pmiErr(status)
return status
def pmiSetTimezone(self, timezone):
"""PMI - set the source timezone for a Log Import archive
"""
status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
if status < 0:
raise pmiErr(status)
if type(timezone) != type(b''):
timezone = timezone.encode('utf-8')
status = LIBPCP_IMPORT.pmiSetTimezone(c_char_p(timezone))
if status < 0:
raise pmiErr(status)
return status
@staticmethod
def pmiID(domain, cluster, item):
"""PMI - construct a pmID data structure (helper routine) """
return LIBPCP_IMPORT.pmiID(domain, cluster, item)
@staticmethod
def pmiInDom(domain, serial):
"""PMI - construct a pmInDom data structure (helper routine) """
return LIBPCP_IMPORT.pmiInDom(domain, serial)
@staticmethod
def pmiUnits(dim_space, dim_time, dim_count,
scale_space, scale_time, scale_count):
# pylint: disable=R0913
"""PMI - construct a pmiUnits data structure (helper routine) """
return LIBPCP_IMPORT.pmiUnits(dim_space, dim_time, dim_count,
scale_space, scale_time, scale_count)
def pmiAddMetric(self, name, pmid, typed, indom, sem, units):
# pylint: disable=R0913
"""PMI - add a new metric definition to a Log Import context """
status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
if status < 0:
raise pmiErr(status)
if type(name) != type(b''):
name = name.encode('utf-8')
status = LIBPCP_IMPORT.pmiAddMetric(c_char_p(name),
pmid, typed, indom, sem, units)
if status < 0:
raise pmiErr(status)
return status
def pmiAddInstance(self, indom, instance, instid):
"""PMI - add element to an instance domain in a Log Import context """
status = LIBP |
GanjaNoel/pym2 | wdt/chunk.py | Python | lgpl-3.0 | 2,490 | 0.069478 | #!/usr/bin/python
import struct
import array
class WChunk: #Chunk Basic Class for World Data (like adt,wmo etc.)
def __init__(self):
self.magic = 0
self.size = 0
def unpack(self,f):
self.magic, = struct.unp | ack("i",f.read(4))
self.size, = struct.unpack("i",f.read(4))
self.unpackData(f)
return self
def pack(self):
temp = self.packData()
self.size | = len(temp)
ret = struct.pack("i",self.magic)
ret += struct.pack("i",self.size)
ret += temp
return ret
def unpackData(self,f):
pass
def packData(self):
return 0
class MVER(WChunk):
def __init__(self):
self.magic = 1297499474
self.size = 4
self.version = 18
def unpackData(self,f):
self.version, = struct.unpack("i",f.read(4))
def packData(self):
ret = struct.pack("i",self.version)
return ret
class FilenameChunk(WChunk):
def __init__(self,magic):
self.magic = magic
self.size = 0
self.filenames = []
def unpackData(self,f):
pos = 1
temp = f.read(1)
tstr = str(temp)
#print self.size
while(pos < self.size):
pos += 1
while(temp != "\0"):
temp = f.read(1)
tstr += temp
pos += 1
self.filenames.append(tstr)
tstr = ""
#print self.filenames
def packData(self):
ret = ""
for i in self.filenames:
if len(i) == 0:
i= "\0"
if i[len(i)-1] != "\0":
i += "\0"
ret += i
return ret
class EntryChunk(WChunk):
def __init__(self,magic,entrytype):
self.magic = magic
self.size = 0
self.nEntries = 0
self.entries = []
self.Entry = entrytype
def unpackData(self,f):
self.nEntries = self.size / self.Entry.entrySize
self.entries = []
for i in xrange(self.nEntries):
self.entries.append(self.Entry().unpack(f))
def packData(self):
ret = ""
for i in xrange(self.nEntries):
ret += self.entries[i].pack()
return ret
def addEntry(self):
self.nEntries += 1
self.entries.append(self.Entry())
def delEntry(self, entrie = 0):
if (self.nEntries > entrie):
del(self.entries[entrie])
self.nEntries -= 1
class WoWFile:
def __init__(self):
pass
def readData(self,f):
pass
def writeData(self,f):
return f
def read(self,filename):
f = open(filename,"r+b")
self.readData(f)
f.close()
return self
def write(self,filename):
f = open(filename,"wb")
f = self.writeData(f)
f.close()
|
datacommonsorg/data | scripts/eurostat/regional_statistics_by_nuts/fertility_rate_mother_age/fertility_rate_preprocess_gen_tmcf.py | Python | apache-2.0 | 3,789 | 0.000792 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import io
import csv
_DATA_URL = "https://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?file=data/demo_r_find3.tsv.gz"
_CLEANED_CSV = "./Eurostats_NUTS3_FRate_Age.csv"
_TMCF = "./Eurostats_NUTS3_FRate_Age.tmcf"
_OUTPUT_COLUMNS = [
'Date',
'GeoId',
'MeanMothersAge_BirthEvent',
'MedianMothersAge_BirthEvent',
'FertilityRate_Person_Female',
]
def translate_wide_to_long(data_url):
df = pd.read_csv(data_url, delimiter='\t')
assert df.head
header = list(df.columns.values)
years = header[1:]
# Pandas.melt() unpivots a DataFrame from wide format to long format.
df = pd.melt(df,
id_vars=header[0],
value_vars=years,
var_name='time',
value_name='value')
# Separate geo and unit columns.
new = df[header[0]].str.split(",", n=-1, expand=True)
df = df.join(
pd.DataFrame({
'geo': new[2],
'unit': new[1],
'indic_de': new[0]
}))
df["indicator_unit"] = df["indic_de"] + "_" + df["unit"]
df.drop(column | s=[header[0]], inplace=True)
# Remove empty rows, clean values to have all digits.
df | = df[df.value.str.contains('[0-9]')]
possible_flags = [' ', ':']
for flag in possible_flags:
df['value'] = df['value'].str.replace(flag, '')
df['value'] = pd.to_numeric(df['value'])
df = df.pivot_table(values='value',
index=['geo', 'time'],
columns=['indicator_unit'],
aggfunc='first').reset_index().rename_axis(None, axis=1)
return df
def preprocess(df, cleaned_csv):
with open(cleaned_csv, 'w', newline='') as f_out:
writer = csv.DictWriter(f_out,
fieldnames=_OUTPUT_COLUMNS,
lineterminator='\n')
writer.writeheader()
for _, row in df.iterrows():
writer.writerow({
'Date': '%s' % (row['time'][:4]),
'GeoId': 'dcid:nuts/%s' % row['geo'],
'MeanMothersAge_BirthEvent': (row['AGEMOTH_YR']),
'MedianMothersAge_BirthEvent': (row['MEDAGEMOTH_YR']),
'FertilityRate_Person_Female': (row['TOTFERRT_NR']),
})
def get_template_mcf():
# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:Eurostats_NUTS3_FRate_Age->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
observationAbout: C:Eurostats_NUTS3_FRate_Age->GeoId
observationDate: C:Eurostats_NUTS3_FRate_Age->Date
value: C:Eurostats_NUTS3_FRate_Age->{stat_var}
measurementMethod: dcs:EurostatRegionalStatistics
"""
stat_vars = _OUTPUT_COLUMNS[2:]
with open(_TMCF, 'w', newline='') as f_out:
for i in range(len(stat_vars)):
f_out.write(
TEMPLATE_MCF_TEMPLATE.format_map({
'index': i,
'stat_var': _OUTPUT_COLUMNS[2:][i]
}))
if __name__ == "__main__":
preprocess(translate_wide_to_long(_DATA_URL), _CLEANED_CSV)
get_template_mcf()
|
SafeSlingerProject/SafeSlinger-AppEngine | safeslinger-messenger/python/apns_enhanced.py | Python | mit | 28,130 | 0.00583 | # PyAPNs was developed by Simon Whitaker <simon@goosoftware.co.uk>
# Source available at https://github.com/simonwhitaker/PyAPNs
#
# PyAPNs is distributed under the terms of the MIT license.
#
# Copyright (c) 2011 Goo Software Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from binascii import a2b_hex, b2a_hex
from datetime import datetime
from socket import socket, timeout, AF_INET, SOCK_STREAM
from socket import error as socket_error
from struct import pack, unpack
import sys
import ssl
import select
import time
import collections, itertools
import logging
import threading
import StringIO
try:
from ssl import wrap_socket, SSLError
except ImportError:
from socket import ssl as wrap_socket, sslerror as SSLError
from _ssl import SSL_ERROR_WANT_READ, SSL_ERROR_WANT_WRITE
try:
import json
except ImportError:
import simplejson as json
_logger = logging.getLogger(__name__)
MAX_PAYLOAD_LENGTH = 256
NOTIFICATION_COMMAND = 0
ENHANCED_NOTIFICATION_COMMAND = 1
NOTIFICATION_FORMAT = (
'!' # network big-endian
'B' # command
'H' # token length
'32s' # token
'H' # payload length
'%ds' # payload
)
ENHANCED_NOTIFICATION_FORMAT = (
'!' # network big-endian
'B' # command
'I' # identifier
'I' # expiry
'H' # token length
'32s' # token
'H' # payload length
'%ds' # payload
)
ERROR_RESPONSE_FORMAT = (
'!' # network big-endian
'B' # command
'B' # status
'I' # identifier
)
TOKEN_LENGTH = 32
ERROR_RESPONSE_LENGTH = 6
DELAY_RESEND_SEC = 0.0
SENT_BUFFER_QTY = 100000
WAIT_WRITE_TIMEOUT_SEC = 10
WAIT_READ_TIMEOUT_SEC = 10
WRITE_RETRY = 3
ER_STATUS = 'status'
ER_IDENTIFER = 'identifier'
class APNs(object):
"""
A class representing an Apple Push Notification service connection
"""
def __init__(self, use_sandbox=False, cert_file=None, key_file=None, enhanced=False):
"""
Set use_sandbox to True to use the sandbox (test) APNs servers.
Default is False.
"""
super(APNs, self).__init__()
self.use_sandbox = use_sandbox
self.cert_file = cert_file
self.key_file = key_file
self._feedback_connection = None
self._gateway_connection = None
self.enhanced = enhanced
@staticmethod
def packed_uchar(num):
"""
Returns an unsigned char in packed form
"""
return pack('>B', num)
@staticmethod
def packed_ushort_big_endian(num):
"""
Returns an unsigned short in packed big-endian (network) form
"""
return pack('>H', num)
@staticmethod
def unpacked_ushort_big_endian(bytes):
"""
Returns an unsigned short from a packed big-endian (network) byte
array
"""
return unpack('>H', bytes)[0]
@staticmethod
def packed_uint_big_endian(num):
"""
Returns an unsigned int in packed big-endian (network) form
"""
return pack('>I', num)
@staticmethod
def unpacked_uint_big_endian(bytes):
"""
Returns an unsigned int from a packed big-endian (network) byte array
"""
return unpack('>I', bytes)[0]
@staticmethod
def unpacked_char_big_endian(bytes):
"""
Returns an unsigned char from a packed big-endian (network) byte array
"""
return unpack('c', bytes)[0]
@property
def feedback_server(self):
if not self._feedback_connection:
self._feedback_connection = FeedbackConnection(
use_sandbox = self.use_sandbox,
cert_file = self.cert_file,
key_file = self.key_file
)
return self._feedback_connection
@property
def gateway_server(self):
if not self._gateway_connection:
self._gateway_connection = GatewayConnection(
use_sandbox = self.use_sandbox,
cert_file = self.cert_file,
key_file = self.key_file,
enhanced = self.enhanced
)
return self._gateway_connection
class APNsConnection(object):
"""
A generic connection class for communicating with the APNs
"""
def __init__(self, cert_file=None, key_file=None, timeout=None, enhanced=False):
super(APNsConnection, self).__init__()
self.cert_file = cert_file
self.key_file = key_file
self.timeout = timeout
self._socket = None
self._ssl = None
self.enhanced = enhanced
self.connection_alive = False
def __del__(self):
self._disconnect();
def _connect(self):
# Establish an SSL connection
_logger.debug("%s APNS connection establishing..." % self.__class__.__name__)
# Fallback for socket timeout.
for i in xrange(3):
try:
self._socket = socket(AF_INET, SOCK_STREAM)
self._socket.settimeout(self.timeout)
self._socket.connect((self.server, self.port))
break
except timeout:
pass
except:
raise
if self.enhanced:
self._last_activity_time = time.time()
self._socket.setblocking(False)
self._ssl = wrap_socket(self._socket, server_side=False, keyfile=StringIO.StringIO(self.key_file), certfile=StringIO.StringIO(self.cert_file), do_handshake_on_connect=False)
while True:
try:
self._ssl.do_handshake()
break
| except ssl.SSLError, err:
if ssl.SSL_ERROR_WANT_READ == err.args[0]:
select.select([self._ssl], [], [])
elif ssl.SSL_ERROR_WANT_WRITE == err.args[0]:
select.select([], [self._ssl], [])
| else:
raise
else:
# Fallback for 'SSLError: _ssl.c:489: The handshake operation timed out'
for i in xrange(3):
try:
self._ssl = wrap_socket(self._socket, server_side=False, keyfile=StringIO.StringIO(self.key_file), certfile=StringIO.StringIO(self.cert_file), do_handshake_on_connect=False)
break
except SSLError, ex:
if ex.args[0] == SSL_ERROR_WANT_READ:
sys.exc_clear()
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
sys.exc_clear()
else:
raise
self.connection_alive = True
_logger.debug("%s APNS connection established" % self.__class__.__name__)
def _disconnect(self):
if self.connection_alive:
if self._socket:
self._socket.close()
if self._ssl:
self._ssl.close()
self.connection_alive = False
_logger.debug(" %s APNS connection closed" % self.__class__.__name__)
def _connection(self):
if not self._ssl or not self.connection_alive |
kbrose/project_euler | p120-129/p123.py | Python | unlicense | 347 | 0.023055 | import primes
# same logic as p120
# I was hoping since these were primes that
# maybe | Fermat's Little Theorem would show up
# ... but it didn't.
def check(p,n,target):
return 2*p*n > target
def main():
ps = primes.primes(250000)
| i = 0
while not check(ps[i],i+1,10**10):
i += 2
print i+1 # zero indexing, yo
main()
|
MSusik/invenio | invenio/ext/sqlalchemy/__init__.py | Python | gpl-2.0 | 7,341 | 0.003678 | # -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.ext.sqlalchemy
----------------------
This module provides initialization and configuration for
`flask.ext.sqlalchemy` module.
"""
import sqlalchemy
from flask.ext.registry import RegistryProxy, ModuleAutoDiscoveryRegistry
from flask.ext.sqlalchemy import SQLAlchemy as FlaskSQLAlchemy
from sqlalchemy import event
from sqlalchemy.ext.hybrid import hybrid_property, Comparator
from sqlalchemy.pool import Pool
from sqlalchemy_utils import JSONType
from invenio.utils.hash import md5
from .expressions import AsBINARY
from .types import MarshalBinary, PickleBinary, GUID
from .utils import get_model_type
def _include_sqlalchemy(obj, engine=None):
#for module in sqlalchemy, sqlalchemy.orm:
# for key in module.__all__:
# if not hasattr(obj, key):
# setattr(obj, key,
# getattr(module, key))
if engine == 'mysql':
from sqlalchemy.dialects import mysql as engine_types
else:
from sqlalchemy import types as engine_types
# Length is provided to JSONType to ensure MySQL uses LONGTEXT instead
# of TEXT which only provides for 64kb storage compared to 4gb for
# LONGTEXT.
setattr(obj, 'JSON', JSONType(length=2**32-2))
setattr(obj, 'Char', engine_types.CHAR)
try:
setattr(obj, 'TinyText', engine_types.TINYTEXT)
except:
setattr(obj, 'TinyText', engine_types.TEXT)
setattr(obj, 'hybrid_property', hybrid_property)
try:
setattr(obj, 'Double', engine_types.DOUBLE)
except:
setattr(obj, 'Double', engine_types.FLOAT)
setattr(obj, 'Integer', engine_types.INTEGER)
setattr(obj, 'SmallInteger', engine_types.SMALLINT)
try:
setattr(obj, 'MediumInteger', engine_types.MEDIUMINT)
except:
setattr(obj, 'MediumInteger', engine_types.INT)
setattr(obj, 'BigInteger', engine_types.BIGINT)
try:
setattr(obj, 'TinyInteger', engine_types.TINYINT)
except:
setattr(obj, 'TinyInteger', engine_types.INT)
setattr(obj, 'Binary', sqlalchemy.types.LargeBinary)
setattr(obj, 'iBinary', sqlalchemy.types.LargeBinary)
setattr(obj, 'iLargeBinary', sqlalchemy.types.LargeBinary)
setattr(obj, 'iMediumBinary', sqlalchemy.types.LargeBinary)
setattr(obj, 'UUID', GUID)
if engine == 'mysql':
from .engines import mysql as dummy_mysql # noqa
# module = invenio.sqlalchemyutils_mysql
# for key in module.__dict__:
# setattr(obj, key,
# getattr(module, key))
obj.AsBINARY = AsBINARY
obj.MarshalBinary = MarshalBinary
obj.PickleBinary = PickleBinary
## Overwrite :meth:`MutableDick.update` to detect changes.
from sqlalchemy.ext.mutable import MutableDict
def update_mutable_dict(self, *args, **kwargs):
super(MutableDict, self).update(*args, **kwargs)
self.changed()
MutableDict.update = update_mutable_dict
obj.MutableDict = MutableDict
class PasswordComparator(Comparator):
def __eq__(self, other):
return self.__clause_element__() == self.hash(other)
def hash(self, password):
if db.engine.name != 'mysql':
return md5(password).digest()
email = self.__clause_element__().table.columns.email
return db.func.aes_encrypt(email, password)
def autocommit_on_checkin(dbapi_con, con_record):
"""Calls autocommit on raw mysql connection for fixing bug in MySQL 5.5"""
try:
dbapi_con.autocommit(True)
except:
pass
#FIXME
#from invenio.ext.logging import register_exception
#register_exception()
## Possibly register globally.
#event.listen(Pool, 'checkin', autocommit_on_checkin)
class SQLAlchemy(FlaskSQLAlchemy):
"""Database object."""
PasswordComparator = PasswordComparator
def init_app(self, app):
super(self.__class__, self).init_app(app)
engine = app.config.get('CFG_DATABASE_TYPE', 'mysql')
self.Model = get_model_type(self.Model)
if engine == 'mysql':
self.Model.__table_args__ = {'keep_exi | sting': True,
'extend_exis | ting': False,
'mysql_engine': 'MyISAM',
'mysql_charset': 'utf8'}
_include_sqlalchemy(self, engine=engine)
def __getattr__(self, name):
# This is only called when the normal mechanism fails, so in practice
# should never be called.
# It is only provided to satisfy pylint that it is okay not to
# raise E1101 errors in the client code.
# :see http://stackoverflow.com/a/3515234/780928
raise AttributeError("%r instance has no attribute %r" % (self, name))
def schemadiff(self, excludeTables=None):
from migrate.versioning import schemadiff
return schemadiff.getDiffOfModelAgainstDatabase(self.metadata,
self.engine,
excludeTables=excludeTables)
def apply_driver_hacks(self, app, info, options):
"""
This method is called before engine creation.
"""
# Don't forget to apply hacks defined on parent object.
super(self.__class__, self).apply_driver_hacks(app, info, options)
if info.drivername == 'mysql':
options.setdefault('execution_options', {'autocommit': True,
'use_unicode': False,
'charset': 'utf8mb4',
})
event.listen(Pool, 'checkin', autocommit_on_checkin)
db = SQLAlchemy()
"""
Provides access to :class:`~.SQLAlchemy` instance.
"""
models = RegistryProxy('models', ModuleAutoDiscoveryRegistry, 'models')
def setup_app(app):
"""Setup SQLAlchemy extension."""
if 'SQLALCHEMY_DATABASE_URI' not in app.config:
from sqlalchemy.engine.url import URL
cfg = app.config
app.config['SQLALCHEMY_DATABASE_URI'] = URL(
cfg.get('CFG_DATABASE_TYPE', 'mysql'),
username=cfg.get('CFG_DATABASE_USER'),
password=cfg.get('CFG_DATABASE_PASS'),
host=cfg.get('CFG_DATABASE_HOST'),
database=cfg.get('CFG_DATABASE_NAME'),
port=cfg.get('CFG_DATABASE_PORT'),
)
## Let's initialize database.
db.init_app(app)
return app
|
open-dynaMIX/experms | src/experms/restore.py | Python | gpl-3.0 | 435 | 0 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from collect_filenames import collect
from action.prepare_files import prepare
def restore(config, debug):
"""
Restores all the ownerships and permissions on all files
| """
filenames = collect(config)
for item in filenames[0]:
prepare(item, 'RESTORE', T | rue, config, debug)
for item in filenames[1]:
prepare(item, 'RESTORE', False, config, debug)
|
hbiyik/tribler | src/tribler-core/tribler_core/modules/metadata_store/serialization.py | Python | lgpl-3.0 | 16,048 | 0.002555 | import struct
from datetime import datetime, timedelta
from ipv8.database import database_blob
from ipv8.keyvault.crypto import default_eccrypto
from ipv8.messaging.payload import Payload
from ipv8.messaging.serialization import default_serializer
from tribler_core.exceptions import InvalidSignatureException
from tribler_core.utilities.unicode import hexlify
EPOCH = datetime(1970, 1, 1)
SIGNATURE_SIZE = 64
NULL_SIG = b'\x00' * 64
NULL_KEY = b'\x00' * 64
# Metadata types. Should have been an enum, but in Python its unwieldy.
TYPELESS = 100
CHANNEL_NODE = 200
METADATA_NODE = 210
COLLECTION_NODE = 220
REGULAR_TORRENT = 300
CHANNEL_TORRENT = 400
DELETED = 500
def time2int(date_time, epoch=EPOCH):
"""
Convert a datetime object to an int .
:param date_time: The datetime object to convert.
:param epoch: The epoch time, defaults to Jan 1, 1970.
:return: The int representation of date_time.
WARNING: TZ-aware timestamps are madhouse...
"""
return int((date_time - epoch).total_seconds())
def int2time(timestamp, epoch=EPOCH):
"""
Convert an int into a datetime object.
:param timestamp: The timestamp to be converted.
:param epoch: The epoch time, defaults to Jan 1, 1970.
:return: The datetime representation of timestamp.
"""
return epoch + timedelta(seconds=timestamp)
class KeysMismatchException(Exception):
pass
class UnknownBlobTypeException(Exception):
pass
def read_payload_with_offset(data, offset=0):
# First we have to determine the actual payload type
metadata_type = struct.unpack_from('>H', database_blob(data), offset=offset)[0]
if metadata_type == DELETED:
return DeletedMetadataPayload.from_signed_blob_with_offset(data, offset=offset)
elif metadata_type == REGULAR_TORRENT:
return TorrentMetadataPayload.from_signed_blob_with_offset(data, offset=offset)
elif metadata_type == COLLECTION_NODE:
return CollectionNodePayload.from_signed_blob_with_offset(data, offset=offset)
elif metadata_type == CHANNEL_TORRENT:
return ChannelMetadataPayload.from_signed_blob_with_offset(data, offset=offset)
# Unknown metadata type, raise exception
raise UnknownBlobTypeException
def read_payload(data):
return read_payload_with_offset(data)[0]
class SignedPayload(Payload):
"""
Payload for metadata.
"""
format_list = ['H', 'H', '64s']
def __init__(self, metadata_type, reserved_flags, public_key, **kwargs):
super(SignedPayload, self).__init__()
self.meta | data_type = metadata_type
self.reserved_ | flags = reserved_flags
self.public_key = bytes(public_key)
self.signature = bytes(kwargs["signature"]) if "signature" in kwargs and kwargs["signature"] else None
# Special case: free-for-all entries are allowed to go with zero key and without sig check
if "unsigned" in kwargs and kwargs["unsigned"]:
self.public_key = NULL_KEY
self.signature = NULL_SIG
return
if "skip_key_check" in kwargs and kwargs["skip_key_check"]:
return
# This is integrity check for FFA payloads.
if self.public_key == NULL_KEY:
if self.signature == NULL_SIG:
return
else:
raise InvalidSignatureException("Tried to create FFA payload with non-null signature")
serialized_data = default_serializer.pack_multiple(self.to_pack_list())[0]
if "key" in kwargs and kwargs["key"]:
key = kwargs["key"]
if self.public_key != key.pub().key_to_bin()[10:]:
raise KeysMismatchException(self.public_key, key.pub().key_to_bin()[10:])
self.signature = default_eccrypto.create_signature(key, serialized_data)
elif "signature" in kwargs:
# This check ensures that an entry with a wrong signature will not proliferate further
if not default_eccrypto.is_valid_signature(
default_eccrypto.key_from_public_bin(b"LibNaCLPK:" + self.public_key), serialized_data, self.signature
):
raise InvalidSignatureException("Tried to create payload with wrong signature")
else:
raise InvalidSignatureException("Tried to create payload without signature")
def to_pack_list(self):
data = [('H', self.metadata_type), ('H', self.reserved_flags), ('64s', self.public_key)]
return data
@classmethod
def from_unpack_list(cls, metadata_type, reserved_flags, public_key, **kwargs):
return SignedPayload(metadata_type, reserved_flags, public_key, **kwargs)
@classmethod
def from_signed_blob(cls, data, check_signature=True):
return cls.from_signed_blob_with_offset(data, check_signature)[0]
@classmethod
def from_signed_blob_with_offset(cls, data, check_signature=True, offset=0):
# TODO: stop serializing/deserializing stuff twice
unpack_list, end_offset = default_serializer.unpack_multiple(cls.format_list, data, offset=offset)
if check_signature:
signature = data[end_offset : end_offset + SIGNATURE_SIZE]
payload = cls.from_unpack_list(*unpack_list, signature=signature)
else:
payload = cls.from_unpack_list(*unpack_list, skip_key_check=True)
return payload, end_offset + SIGNATURE_SIZE
def to_dict(self):
return {
"metadata_type": self.metadata_type,
"reserved_flags": self.reserved_flags,
"public_key": self.public_key,
"signature": self.signature,
}
def _serialized(self):
serialized_data = default_serializer.pack_multiple(self.to_pack_list())[0]
return serialized_data, self.signature
def serialized(self):
return b''.join(self._serialized())
@classmethod
def from_file(cls, filepath):
with open(filepath, 'rb') as f:
return cls.from_signed_blob(f.read())
# fmt: off
class ChannelNodePayload(SignedPayload):
format_list = SignedPayload.format_list + ['Q', 'Q', 'Q']
def __init__(self, metadata_type, reserved_flags, public_key,
id_, origin_id, timestamp,
**kwargs):
self.id_ = id_
self.origin_id = origin_id
self.timestamp = timestamp
super(ChannelNodePayload, self).__init__(metadata_type, reserved_flags, public_key,
**kwargs)
def to_pack_list(self):
data = super(ChannelNodePayload, self).to_pack_list()
data.append(('Q', self.id_))
data.append(('Q', self.origin_id))
data.append(('Q', self.timestamp))
return data
@classmethod
def from_unpack_list(cls, metadata_type, reserved_flags, public_key,
id_, origin_id, timestamp,
**kwargs):
return ChannelNodePayload(metadata_type, reserved_flags, public_key,
id_, origin_id, timestamp,
**kwargs)
def to_dict(self):
dct = super(ChannelNodePayload, self).to_dict()
dct.update(
{"id_": self.id_,
"origin_id": self.origin_id,
"timestamp": self.timestamp
})
return dct
class MetadataNodePayload(ChannelNodePayload):
format_list = ChannelNodePayload.format_list + ['varlenI', 'varlenI']
def __init__(self, metadata_type, reserved_flags, public_key,
id_, origin_id, timestamp,
title, tags,
**kwargs):
self.title = title.decode('utf-8') if isinstance(title, bytes) else title
self.tags = tags.decode('utf-8') if isinstance(tags, bytes) else tags
super(MetadataNodePayload, self).__init__(
metadata_type, reserved_flags, public_key,
id_, origin_id, timestamp,
**kwargs
)
def to_pack_list(self):
data = super(MetadataNodePayload, self).to_pack_list()
data.append(('varlenI', self.title.encode('utf-8')))
data.append(('varlenI', self.tags.encode('utf-8 |
myevan/flask_server | examples/websocket_server.py | Python | mit | 278 | 0.007194 | from flask import Flask
from flask_sockets import Sockets
app = Flask(__name__)
sockets = Sockets(app)
@sockets.route('/echo')
def echo_socket(ws):
while True:
message | = ws.receive()
ws.send(message)
@app.route('/' | )
def hello():
return 'Hello World!'
|
carquois/blobon | blobon/posts/migrations/0004_auto__del_blog__add_blogpost.py | Python | mit | 6,130 | 0.00783 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Blog'
db.delete_table('posts_blog')
# Adding model 'BlogPost'
db.create_table('posts_blogpost', (
('post_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['posts.Post'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
('content', self.gf('django.db.models.fields.TextField')(max_length=10000, blank=True)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='P', max_length=2)),
))
db.send_create_signal('posts', ['BlogPost'])
def backwards(self, orm):
# Adding model 'Blog'
db.create_table('posts_blog', (
('status', self.gf('django.db.models.fields.CharField')(default='P', max_length=2)),
('post_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['posts.Post'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
('content', self.gf('django.db.models.fields.TextField')(max_length=10000, blank=True)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
))
db.send_create_signal('posts', ['Blog'])
# Deleting model 'BlogPost'
db.delete_table('posts_blogpost')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
| 'groups': ('django.db | .models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'posts.blogpost': {
'Meta': {'object_name': 'BlogPost', '_ormbases': ['posts.Post']},
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'post_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['posts.Post']", 'unique': 'True', 'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'posts.post': {
'Meta': {'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['posts'] |
jjoaonunes/namebench | libnamebench/tk.py | Python | apache-2.0 | 13,586 | 0.007434 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tk user interface implementation for namebench."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import os
import Queue
import sys
import threading
import tkFont
# Wildcard imports are evil.
from Tkinter import *
import tkMessageBox
import traceback
import addr_util
import base_ui
import conn_quality
import nameserver_list
import sys_nameservers
import util
THREAD_UNSAFE_TK = 0
LOG_FILE_PATH = util.GenerateOutputFilename('log')
def closedWindowHandler():
print 'Au revoir, mes amis!'
sys.exit(1)
global_message_queue = Queue.Queue()
global_last_message = None
def AddMsg(message, master=None, backup_notifier=None, **kwargs):
"""Add a message to the global queue for output."""
global global_message_queue
global global_last_message
global THREAD_UNSAFE_TK
new_message = StatusMessage(message, **kwargs)
if new_message != global_last_message:
global_message_queue.put(new_message)
if master:
try:
master.event_generate('<<msg>>', when='tail')
global_last_message = new_message
# Tk thread-safety workaround #1
except TclError:
# If we aren't thread safe, we already assume this won't work.
if not THREAD_UNSAFE_TK:
print 'First TCL Error:'
traceback.print_exc()
try:
backup_notifier(-1)
THREAD_UNSAFE_TK = 1
except:
print 'Backup notifier failure:'
traceback.print_exc()
class StatusMessage(object):
"""Messages to be passed from to the main thread from children.
Used to avoid thread issues inherent with Tk.
"""
def __init__(self, message, error=False, count=False, total=False, |
enable_button=None, debug=False):
self.message = message
self.error = error
self.count = count
self.debug = debug
| self.total = total
self.enable_button = enable_button
class WorkerThread(threading.Thread, base_ui.BaseUI):
"""Handle benchmarking and preparation in a separate UI thread."""
def __init__(self, supplied_ns, global_ns, regional_ns, options, data_source=None, master=None,
backup_notifier=None):
threading.Thread.__init__(self)
self.SetupDataStructures()
self.status_callback = self.msg
self.data_src = data_source
self.backup_notifier = backup_notifier
self.include_internal = False
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.master = master
self.options = options
self.resource_dir = os.path.dirname(os.path.dirname(__file__))
def msg(self, message, **kwargs):
"""Add messages to the main queue."""
return AddMsg(message, master=self.master, backup_notifier=self.backup_notifier, **kwargs)
def run(self):
self.msg('Started thread', enable_button=False)
try:
self.PrepareTestRecords()
self.PrepareNameServers()
self.PrepareBenchmark()
self.RunAndOpenReports()
except nameserver_list.OutgoingUdpInterception:
(exc_type, exception, tb) = sys.exc_info()
self.msg('Outgoing requests were intercepted!', error=exception)
except nameserver_list.TooFewNameservers:
(exc_type, exception, tb) = sys.exc_info()
self.msg('Too few nameservers to test', error=exception)
except conn_quality.OfflineConnection:
(exc_type, exception, tb) = sys.exc_info()
self.msg('The connection appears to be offline!', error=exception)
except:
(exc_type, exception, tb) = sys.exc_info()
traceback.print_exc(tb)
error_msg = '\n'.join(traceback.format_tb(tb)[-4:])
self.msg(exception, error=error_msg)
self.msg(None, enable_button=True)
class NameBenchGui(object):
"""The main GUI."""
def __init__(self, options, supplied_ns, global_ns, regional_ns, version=None):
self.options = options
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.version = version
def Execute(self):
self.root = Tk()
app = MainWindow(self.root, self.options, self.supplied_ns, self.global_ns,
self.regional_ns, self.version)
app.DrawWindow()
self.root.bind('<<msg>>', app.MessageHandler)
self.root.mainloop()
class MainWindow(Frame, base_ui.BaseUI):
"""The main Tk GUI class."""
def __init__(self, master, options, supplied_ns, global_ns, regional_ns, version=None):
"""TODO(tstromberg): Remove duplication from NameBenchGui class."""
Frame.__init__(self)
self.SetupDataStructures()
self.master = master
self.options = options
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.version = version
try:
self.log_file = open(LOG_FILE_PATH, 'w')
except:
print 'Failed to open %s for write' % LOG_FILE_PATH
self.master.protocol('WM_DELETE_WINDOW', closedWindowHandler)
def UpdateStatus(self, message, count=None, total=None, error=None, debug=False):
"""Update our little status window."""
if not message:
return None
if total:
state = '%s... [%s/%s]' % (message, count, total)
elif count:
state = '%s%s' % (message, '.' * count)
else:
state = message
print '> %s' % str(state)
try:
self.log_file.write('%s: %s\r\n' % (datetime.datetime.now(), state))
self.log_file.flush()
except:
pass
if not debug:
self.status.set(state[0:75])
def DrawWindow(self):
"""Draws the user interface."""
self.nameserver_form = StringVar()
self.status = StringVar()
self.query_count = IntVar()
self.data_source = StringVar()
self.health_performance = StringVar()
self.location = StringVar()
self.use_global = IntVar()
self.use_regional = IntVar()
self.use_censor_checks = IntVar()
self.share_results = IntVar()
self.master.title('namebench')
outer_frame = Frame(self.master)
outer_frame.grid(row=0, padx=16, pady=16)
inner_frame = Frame(outer_frame, relief=GROOVE, bd=2, padx=12, pady=12)
inner_frame.grid(row=0, columnspan=2)
status = Label(outer_frame, text='...', textvariable=self.status)
status.grid(row=15, sticky=W, column=0)
if sys.platform[:3] == 'win':
seperator_width = 490
else:
seperator_width = 585
bold_font = tkFont.Font(font=status['font'])
bold_font['weight'] = 'bold'
ns_label = Label(inner_frame, text='Nameservers')
ns_label.grid(row=0, columnspan=2, sticky=W)
ns_label['font'] = bold_font
nameservers = Entry(inner_frame, bg='white',
textvariable=self.nameserver_form,
width=80)
nameservers.grid(row=1, columnspan=2, sticky=W, padx=4, pady=2)
self.nameserver_form.set(', '.join(nameserver_list.InternalNameServers()))
global_button = Checkbutton(inner_frame,
text='Include global DNS providers (Google Public DNS, OpenDNS, UltraDNS, etc.)',
variable=self.use_global)
global_button.grid(row=2, columnspan=2, sticky=W)
global_button.toggle()
regional_button = Checkbutton(inner_frame,
text='Include best available regional DNS services',
variable=self.use_regional)
regional_button.grid(row=3, columnspan=2, sticky=W)
regional_button.toggle()
separator = Frame(inner_frame, height=2, width=seperator_width, bd=1, relief=SUNKEN)
separator.grid(row=4, padx=5, pady=5, columnspan=2)
ds_label = L |
mattjmorrison/ReportLab | tests/test_rl_accel.py | Python | bsd-3-clause | 6,284 | 0.021801 | __version__=''' $Id'''
__doc__='''basic tests.'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import unittest
def getrc(defns,depth=1):
from sys import getrefcount, _getframe
f = _getframe(depth)
G0 = f.f_globals
L = f.f_locals
if L is not G0:
LL = [L]
while 1:
f = f.f_back
G = f.f_globals
L = f.f_locals
if G is not G0 or G is L: break
LL.append(L)
L = {}
LL.reverse()
for l in LL:
L.update(l)
else:
L = L.copy()
G0 = G0.copy()
return ' '.join([str(getrefcount(eval(x,L,G0))-1) for x in defns.split()])
def checkrc(defns,rcv0):
rcv1 = getrc(defns,2)
return ' '.join(["%s %s-->%s" % (x,v,w) for x,v,w in zip(defns.split(),rcv0.split(),rcv1.split()) if v!=w])
class RlAccelTestCase(unittest.TestCase):
def testFpStr(self):
# should give siz decimal places if less than 1.
# if more, give up to seven sig figs
from _rl_accel import fp_str
assert fp_str(1,2,3)=='1 2 3'
assert fp_str(1) == '1'
assert fp_str(595.275574) == '595.2756'
assert fp_str(59.5275574) == '59.52756'
assert fp_str(5.95275574) == '5.952756'
def test_AsciiBase85Encode(self):
from _rl_accel import _AsciiBase85Encode
assert _AsciiBase85Encode('Dragan Andric')=='6ul^K@;[2RDIdd%@f~>'
def test_AsciiBase85Decode(self):
from _rl_accel import _AsciiBase85Decode
assert _AsciiBase85Decode('6ul^K@;[2RDIdd%@f~>')=='Dragan Andric'
def testEscapePDF(self):
from _rl_accel import escapePDF
assert escapePDF('(test)')=='\\(test\\)'
def test_instanceEscapePDF(self):
from _rl_accel import _instanceEscapePDF
assert _instanceEscapePDF('', '(test)')=='\\(test\\)'
def testCalcChecksum(self):
from _rl_accel import calcChecksum
assert calcChecksum('test')==1952805748
def test_instanceStringWidth(self):
from reportlab.pdfbase.pdfmetrics import registerFont, getFont, _fonts, unicode2T1
from reportlab.pdfbase.ttfonts import TTFont
ttfn = 'Vera'
t1fn = 'Times-Roman'
registerFont(TTFont(ttfn, "Vera.ttf"))
ttf = getFont(ttfn)
t1f = getFont(t1fn)
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
enc='cp1252'
senc = 'utf8'
ts = 'ABCDEF\xce\x91\xce\xb2G'
utext = 'ABCDEF\xce\x91\xce\xb2G'.decode(senc)
fontSize = 12
defns="ttfn t1fn ttf t1f testCp1252 enc senc ts utext fontSize ttf.face ttf.face.charWidths ttf.face.defaultWidth t1f.widths t1f.encName t1f.substitutionFonts _fonts"
rcv = getrc(defns)
def tfunc(f,ts,fontSize,enc):
w1 = f.stringWidth(ts,fontSize,enc)
w2 = f._py_stringWidth(ts,fontSize,enc)
assert abs(w1-w2)<1e-10,"f(%r).stringWidthU(%r,%s,%r)-->%r != f._py_stringWidth(...)-->%r" % (f,ts,fontSize,enc,w1,w2)
tfunc(t1f,testCp1252,fontSize,enc)
tfunc(t1f,ts,fontSize,senc)
tfunc(t1f,utext,fontSize,senc)
tfunc(ttf,ts,fontSize,senc)
tfunc(ttf,testCp1252,fontSize,enc)
tfunc(ttf,utext,fontSize,senc)
rcc = checkrc(defns,rcv)
assert not rcc, "rc diffs (%s)" % rcc
def test_unicode2T1(self):
from reportlab.pdfbase.pdfmetrics import _py_unicode2T1, getFont, _fonts
from _rl_accel import unicode2T1
t1fn = 'Times-Roman'
t1f = getFont(t1fn)
enc = 'cp1252'
senc = 'utf8'
testCp1252 = ('copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))).decode(enc)
utext = 'This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86. This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86'.decode('utf8')
def tfunc(f,ts):
w1 = unicode2T1(ts,[f]+f.substitutionFonts)
w2 = _py_unicode2T1(ts,[f]+f.substitutionFonts)
assert w1==w2,"%r != %r" % (w1,w2)
defns="t1fn t1f testCp1252 enc senc utext t1f.widths t1f.encName t1f.substitutionFonts _fonts"
rcv = getrc(defns)
tfunc(t1f,testCp1252)
tfunc(t1f,utext)
rcc = checkrc(defns,rcv)
assert not rcc, "rc diffs (%s)" % rcc
def test_sameFrag(self):
from _rl_accel import _sameFrag
class ABag:
def __init__(self,**kwd):
self.__dict__.update(kwd)
def __str__(self):
V=['%s=%r' % v for v in self.__dict__.items()]
V.sort()
return 'ABag(%s)' % ','.join(V)
a=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
b=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
for name in ("fontName", "fontSize", "textColor", "rise", "underline", "strike", "link"):
old = getattr(a,name)
assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
setattr(a,name,None)
assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
delattr(a,name)
assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
assert | _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
delattr(b,name)
assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
setattr(a,name,old)
setattr(b,name,old)
def makeSuite():
# only run the tests if _rl_accel is present
try:
import _rl_accel
Klass = RlAccelTestCase
except:
class Kl | ass(unittest.TestCase):
pass
return makeSuiteForClasses(Klass)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
ahcub/hokey_prediction_app | setup.py | Python | bsd-3-clause | 578 | 0.00346 | from distutils.core import setup
import py2exe
import sys
from os.path import join, dirname
setup(
windows=[{"script": "hockey_prediction_app.py", "icon_resources": [(1, join(dirname(sys.argv[0]), 'hokey.ico'))]}],
options={"py2exe": {"includes": ["sip", 'lxml.etree', 'lxml._elementpath', 'gzip', 'pandas', 'numpy']} | },
requires=['requests', 'lxml', 'PyQt4', 'pandas'],
data_files=[('.', [join(dirname(sys.argv[0]), 'up.png')]), |
('.', [join(dirname(sys.argv[0]), 'down.png')]),
('.', [join(dirname(sys.argv[0]), 'hokey.png')])])
|
collective/zettwerk.clickmap | zettwerk/clickmap/__init__.py | Python | gpl-2.0 | 401 | 0.004988 | from zope.i18nmessageid import MessageFactory
clickmapMessageFactor | y = MessageFactory('zettwerk.clickmap')
import ClickmapTool
from Products.CMFCore import utils
def initialize(context):
| """Initializer called when used as a Zope 2 product."""
utils.ToolInit('Zettwerk Clickmap', tools=(ClickmapTool.ClickmapTool,),
icon='z.png'
).initialize(context)
|
lesina/labs2016 | Laba05/exercise04.py | Python | gpl-3.0 | 127 | 0.007874 | k = list(map(int, input().split()))
t = int(inp | ut())
for i in range(t):
k = k[:-k[-1]-1] + k[-1:] | + k[-k[-1]-1:-1]
print(k) |
Brazelton-Lab/lab_scripts | 16S/tax_summary_edit.py | Python | gpl-2.0 | 1,875 | 0.018133 | #! /usr/bin/env python
"""
edits mothur taxonomy summary file
transfers last name that is not "unclassified" or "uncultured" to "unclassified" or "uncultured" assignment
make sure that the file has default sorting (by rankID)
Copyright:
tax_summary_edit edits mothur taxonomy summary file
Copyright (C) 2016 William Brazelton
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHAN | TABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should hav | e received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
infilename = sys.argv[1]
outfilename = infilename + '.renamed.txt'
outfile = open(outfilename,'a')
infile = open(infilename)
for line in infile:
if "unclassified" in line:
columns = line.split('\t')
tax = columns[2]
newtax = tax + ' ' + lasttax
outfile.write(columns[0])
outfile.write('\t')
outfile.write(columns[1])
outfile.write('\t')
outfile.write(newtax)
for tab in columns[3:]:
outfile.write('\t')
outfile.write(tab)
elif "uncultured" in line:
columns = line.split('\t')
tax = columns[2]
newtax = tax + ' ' + lasttax
outfile.write(columns[0])
outfile.write('\t')
outfile.write(columns[1])
outfile.write('\t')
outfile.write(newtax)
for tab in columns[3:]:
outfile.write('\t')
outfile.write(tab)
else:
outfile.write(line)
columns = line.split('\t')
lasttax = columns[2]
infile.close()
outfile.close()
|
ratschlab/ASP | examples/undocumented/python_modular/features_snp_modular.py | Python | gpl-2.0 | 462 | 0.04329 | parameter_list=[['../data/snps.dat']]
def features_snp_m | odular(fname):
from shogun.Features import StringByteFeatures, SNPFeatures, SNP
sf=StringByteFeatures(SNP)
sf.load_ascii_file(fname, False, SNP, SNP)
#print(sf.get_feature | s())
snps=SNPFeatures(sf)
#print(snps.get_feature_matrix())
#print(snps.get_minor_base_string())
#print(snps.get_major_base_string())
if __name__=='__main__':
print('SNP Features')
features_snp_modular(*parameter_list[0])
|
Yangqing/caffe2 | caffe2/python/crf.py | Python | apache-2.0 | 15,115 | 0.000265 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package crf
# Module caffe2.python.crf
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, recurrent, model_helper, brew
import numpy as np
'''
Due to a limitation in ReccurentNetworkOp, this layer only supports batch_size=1
In order to support batch_size > 1, we will have to implement the CRFUnit
and its gradient in C++ and handle the different batches there.
'''
class CRFWithLoss(object):
def __init__(self, model, num_classes, transitions_blob=None):
self.model = model
self.num_classes = num_classes
self.num_classes_padded = num_classes + 2 # After adding BOS and EOS
if not transitions_blob:
transitions_blob = self.model.param_init_net.UniformFill(
[],
[core.ScopedBlobReference('crf_transitions')],
shape=[self.num_classes_padded, self.num_classes_padded],
min=-1.0,
max=1.0
)
self.transitions = transitions_blob
self.model.params.append(self.transitions)
def crf_loss(self, predictions, labels, seq_lengths=None):
# Since the transitions matrix is a shared parameter, need to
# take a snapshot of it at the beginning since it can be updated
# in between the operators that uses it when doing parallel updates
transitions_snapshot = self.model.net.Copy(
self.transitions, core.ScopedBlobReference('transitions_snapshot')
)
# Compute best path unary score from the logits
path_unary_score = self._gather_entries_sum(
predictions, labels, self.num_classes
)
# Append BOS and EOS entries to the predictions and labels
predictions = self._pad_predictions(predictions)
labels = self._pad_labels(labels)
# Compute best path binary scores from the transitions matrix
path_binary_score = self._path_binary_scores(
labels, transitions_snapshot, seq_lengths
)
path_total_score = self.model.net.Add(
[path_binary_score, path_unary_score],
core.ScopedBlobReference('path_total')
)
# Compute all paths score
zero_index = self.model.param_init_net.ConstantFill(
[], shape=[1], value=0
)
initial_state = self.model.net.Gather(
[predictions, zero_index],
core.ScopedBlobReference('rnn_initial'),
dense_gradient=True
)
input_data, _ = self.model.net.RemovePadding(
[predictions],
padding_width=1,
end_padding_width=0,
outputs=2,
)
input_data = self.model.net.ExpandDims(
[input_data],
core.ScopedBlobReference('rnn_input_data'),
dims=[1]
)
# Due to a bug in RecurrentNetworkGradientOp, we need to copy the
# transitions blob before sending it to the recurrent network
transitions_copy = self.model.net.Copy(
transitions_snapshot, core.ScopedBlobReference('transitions_copy')
)
all_paths_scores = self._crf_forward(
input_data, initial_state, transitions_copy
)
loss = self.model.net.Sub(
[all_paths_scores, path_total_score],
core.ScopedBlobReference('crf_loss')
)
return loss
def _pad_predictions(self, predictions):
# This function will introduce two labels for beginning of sequence
# And end of sequence, it will make the necessary udpates to the
# the predictions blob
low_score = -1000.0 # An arbitray very low number
b_scores = np.array(
[[low_score] * self.num_classes + [0, low_score]]
).astype(np.float32)
e_scores = np.array(
[[low_score] * self.num_classes + [low_score, 0]]
).astype(np.float32)
b_scores = self.model.param_init_net.GivenTensorFill(
[], "b_scores", shape=[1, self.num_classes_padded], values=b_scores
)
e_scores = self.model.param_init_net.GivenTensorFill(
[], "e_scores", shape=[1, self.num_classes_padded], values=e_scores
)
zero_index = self.model.net.ConstantFill(
[], shape=[1, ], value=0
)
length = self.model.net.Gather(
[self.model.net.Shape([predictions]), zero_index],
)
length = self.model.net.Cast(length, | to='int32')
t_range = self.model.net.LengthsRangeFill(length)
padding = self.model.net.ConstantFill([t_range], value=low_score)
padding = self.model.net.ExpandDims(padding, dims=[1])
padded_predictions, _ = self.model.net.Concat(
[predictions, padding, padding],
outputs=2,
axis=1
)
padded_predictions_concat, _ = self.model.net.C | oncat(
[b_scores, padded_predictions, e_scores],
outputs=2,
axis=0
)
return padded_predictions_concat
def _pad_labels(self, labels):
bos_i = self.num_classes
eos_i = self.num_classes + 1
bos_i_b = self.model.param_init_net.ConstantFill(
[], shape=[1], value=bos_i
)
eos_i_b = self.model.param_init_net.ConstantFill(
[], shape=[1], value=eos_i
)
labels = self.model.net.Cast([labels], to='int64')
padded_labels, _ = self.model.net.Concat(
[bos_i_b, labels, eos_i_b],
axis=0,
outputs=2
)
return padded_labels
def _path_binary_scores(self, labels, transitions, seq_lengths=None):
column_ids, _ = self.model.net.RemovePadding(
[labels],
outputs=2,
padding_width=1,
end_padding_width=0
)
row_ids, _ = self.model.net.RemovePadding(
[labels],
outputs=2,
padding_width=0,
end_padding_width=1
)
# Since there is no multi-dimensional gather, I flatten the matrix to
# a 1-d vector and transform the ids to (row_ids * num_columns +
# column_ids) and do gather in 1-d
num_columns_blob = self.model.net.ConstantFill(
[row_ids],
value=self.num_classes_padded,
)
flattened_ids = self.model.net.Mul([row_ids, num_columns_blob])
flattened_ids = self.model.net.Add([flattened_ids, column_ids])
flattened_transitions = self.model.net.FlattenToVec([transitions])
entries = self.model.net.Gather(
[flattened_transitions, flattened_ids],
dense_gradient=True
)
return self.model.ReduceFrontSum(entries)
def _gather_entries_sum(self, in_data, indices, index_size):
indices = self.model.net.Cast([indices], to='int64')
index_size_blob = self.model.param_init_net.ConstantFill(
[],
shape=[1],
value=index_size,
)
query_one_hot = self.model.net.OneHot(
[indices, index_size_blob]
)
flattend_query = self.model.net.FlattenToVec(query_one_hot)
flattend_data = self.model.net.FlattenToVec(in_data)
query_scores = self.model.net.DotProduct(
[flattend_query, flattend_data]
)
final_sum = self.model.net.ReduceFrontSum([query_scores])
retur |
mguijarr/mxcube3 | mxcube3/routes/Queue.py | Python | gpl-2.0 | 17,319 | 0.001559 | import json
import logging
import signals
import queue_model_objects_v1 as qmo
import queue_entry as qe
import QueueManager
from flask import Response, jsonify, request, session
from mxcube3 import app as mxcube
from mxcube3 import socketio
from . import qutils
qm = QueueManager.QueueManager('Mxcube3')
@mxcube.route("/mxcube/api/v0.1/queue/start", methods=['PUT'])
def queue_start():
"""
Start execution of the queue.
:returns: Respons obje | ct, status code set to:
200: On success
409: Queue could not be started
"""
logging.getLogger('HWR').info('[QUEUE] Queue going to start')
try:
mxcube.queue.queue_hwobj.set_pause(False)
mxcube.queue.queue_hwobj.execute()
except Exception as ex:
signals.queue_execution_failed(ex)
logging.getLogger('HWR').info('[QUEUE] Queue started')
return Response(s | tatus=200)
@mxcube.route("/mxcube/api/v0.1/queue/stop", methods=['PUT'])
def queue_stop():
"""
Stop execution of the queue.
:returns: Response object status code set to:
200: On success
409: Queue could not be stopped
"""
mxcube.queue.queue_hwobj.stop()
return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/abort", methods=['PUT'])
def queue_abort():
"""
Abort execution of the queue.
:returns: Response object, status code set to:
200 On success
409 queue could not be aborted
"""
mxcube.queue.queue_hwobj.stop()
return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/pause", methods=['PUT'])
def queue_pause():
"""
Pause the execution of the queue
:returns: Response object, status code set to:
200: On success
409: Queue could not be paused
"""
mxcube.queue.queue_hwobj.pause(True)
msg = {'Signal': qutils.queue_exec_state(),
'Message': 'Queue execution paused',
'State': 1}
socketio.emit('queue', msg, namespace='/hwr')
logging.getLogger('HWR').info('[QUEUE] Paused')
return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/unpause", methods=['PUT'])
def queue_unpause():
"""
Unpause execution of the queue
:returns: Response object, status code set to:
200: On success
409: Queue could not be unpause
"""
mxcube.queue.queue_hwobj.pause(False)
msg = {'Signal': qutils.queue_exec_state(),
'Message': 'Queue execution started',
'State': 1}
socketio.emit('queue', msg, namespace='/hwr')
return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/clear", methods=['PUT', 'GET'])
def queue_clear():
"""
Clear the queue.
:returns: Response object, status code set to:
200: On success
409: Queue could not be started
"""
mxcube.diffractometer.savedCentredPos = []
mxcube.queue = qutils.new_queue()
logging.getLogger('HWR').info('[QUEUE] Cleared ' +
str(mxcube.queue.get_model_root()._name))
return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue", methods=['GET'])
def queue_get():
"""
Get the queue
:returns: Response object response Content-Type: application/json, json
object containing the queue on the format returned by
queue_to_json_response. The status code is set to:
200: On success
409: On error, could not retrieve queue
"""
logging.getLogger('HWR').info('[QUEUE] queue_get called')
resp = qutils.queue_to_json_response()
resp.status_code = 200
return resp
@mxcube.route("/mxcube/api/v0.1/queue_state", methods=['GET'])
def queue_get_state():
"""
Get the queue.
:returns: Response object response Content-Type: application/json, json
object containing the queue state. The status code is set to:
200: On success
409: On error, could not retrieve queue
"""
logging.getLogger('HWR').info('[QUEUE] queue_get called')
resp = jsonify(qutils.get_queue_state())
resp.status_code = 200
return resp
@mxcube.route("/mxcube/api/v0.1/queue/<sid>/<tindex>/execute", methods=['PUT'])
def execute_entry_with_id(sid, tindex):
    """
    Execute the queue entry identified by sample <sid> and task index
    <tindex>.

    :param str sid: sample identifier (key into the queue dictionary)
    :param str tindex: task index within the sample, or one of
                       'undefined'/'None'/'null' to execute the
                       sample-level entry itself
    :statuscode: 200, no error
                 409, queue entry could not be executed
    """
    # The client sends a placeholder when no task is selected; in that
    # case the sample node itself is executed.
    if tindex in ['undefined', 'None', 'null']:
        node_id = qutils.queue_to_dict()[sid]["queueID"]
    else:
        node_id = qutils.queue_to_dict()[sid]["tasks"][int(tindex)]["queueID"]
    node, entry = qutils.get_entry(node_id)
    signals.queue_execution_started(None)
    # Reset stop/pause state so the single entry can run immediately.
    mxcube.queue.queue_hwobj._is_stopped = False
    mxcube.queue.queue_hwobj._set_in_queue_flag()
    mxcube.queue.queue_hwobj.set_pause(False)
    mxcube.queue.queue_hwobj.execute_entry(entry)
    signals.queue_execution_finished(None)
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue", methods=['PUT'])
def set_queue():
    """
    Replace the whole queue with the one posted as JSON in the request
    body, then persist it in the session.

    :returns: Response object, status code 200 on success
    """
    # Clear queue
    mxcube.diffractometer.savedCentredPos = []
    mxcube.queue = qutils.new_queue()
    # Set new queue
    qutils.queue_add_item(request.get_json())
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    qutils.save_queue(session)
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue", methods=['POST'])
def queue_add_item():
    """
    Add the item(s) posted as JSON in the request body to the queue.

    :returns: Response object, status code 200 on success
    """
    qutils.queue_add_item(request.get_json())
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/<sid>/<tindex>", methods=['POST'])
def queue_update_item(sid, tindex):
    """
    Update the parameters of the queue entry at sample <sid>, task
    <tindex> from the JSON payload of the request.

    :param str sid: sample identifier
    :param str tindex: task index, or 'undefined' for the sample node
    :returns: Response object, status code 200 on success
    """
    data = request.get_json()
    current_queue = qutils.queue_to_dict()
    if tindex in ['undefined']:
        node_id = current_queue[sid]["queueID"]
    else:
        node_id = current_queue[sid]["tasks"][int(tindex)]["queueID"]
    model, entry = qutils.get_entry(node_id)
    # Dispatch on task type; only these two types carry editable params.
    if data["type"] == "DataCollection":
        qutils.set_dc_params(model, entry, data)
    elif data["type"] == "Characterisation":
        qutils.set_char_params(model, entry, data)
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/delete", methods=['POST'])
def queue_delete_item():
    """
    Delete queue entries; the request body is a JSON list of
    (sample id, task index) pairs, task index being 'undefined'/None
    for a sample-level entry.

    :returns: Response object, status code 200 on success
    """
    item_pos_list = request.get_json()
    current_queue = qutils.queue_to_dict()
    for (sid, tindex) in item_pos_list:
        if tindex in ['undefined', None]:
            node_id = current_queue[sid]["queueID"]
            model, entry = qutils.get_entry(node_id)
        else:
            node_id = current_queue[sid]["tasks"][int(tindex)]["queueID"]
            model, entry = qutils.get_entry(node_id)
            # Get the TaskGroup of the item, there is currently only one
            # task per TaskGroup so we have to remove the entire TaskGroup
            # with its task.
            entry = entry.get_container()
        qutils.delete_entry(entry)
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/<sid>/<ti1>/<ti2>/swap", methods=['POST'])
def queue_swap_task_item(sid, ti1, ti2):
    """
    Swap the tasks at indices <ti1> and <ti2> of sample <sid>.

    :returns: Response object, status code 200 on success
    """
    qutils.swap_task_entry(sid, int(ti1), int(ti2))
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/<sid>/<ti1>/<ti2>/move", methods=['POST'])
def queue_move_task_item(sid, ti1, ti2):
    """
    Move the task of sample <sid> from index <ti1> to index <ti2>.

    :returns: Response object, status code 200 on success
    """
    qutils.move_task_entry(sid, int(ti1), int(ti2))
    logging.getLogger('HWR').info('[QUEUE] is:\n%s ' % qutils.queue_to_json())
    return Response(status=200)
@mxcube.route("/mxcube/api/v0.1/queue/<sample_id>", methods=['PUT'])
def update_sample(sample_id):
'''
Update a sample info
:parameter node_id: entry identifier, integer. It can be a sample
or a task within |
tjguk/networkzero | networkzero/__init__.py | Python | mit | 1,846 | 0.002709 | # -*- coding: utf-8 -*-
"""Easy network discovery & messaging
Aimed at a classrom or club situation, networkzero makes it simpler to
have several machines or several processes on one machine discovering
each other and talking across a network. Typical examples would include:
* Sending commands to a robot
* Sending scores to a scoreboard
* Having a remote sensor ping a central controller
* A peer-to-peer chat / instant messenger
To send a message and wait for a reply::
[Computer 1]
import networkzero as nw0
echo_address = nw0.advertise("echo")
while True:
name = nw0.wait_for_message_from(echo_address)
nw0.send_reply_to(echo_address, "Hello " + name)
::
[Computer 2]
import networkzero as nw0
echo_address = nw0.discover("echo")
reply = nw0.send_message_to(echo_address, "Alice")
print(reply)
reply = nw0.send_message_to(echo_address, "Bob")
print(reply)
To send news::
[Computer 1]
    import networkzero as nw0
address = nw0.advertise("data-logger")
while True:
#
# ... do stuff
#
nw0.send_news_to(address, "data", ...)
::
[Computer 2, 3, 4...]
import networkzero as nw0
logger = nw0.discover("data-logger")
while True:
topic, data = nw0.wait_for_news_from(logger, "data")
#
        # ... write the data to a database etc.
#
"""
from .core import (
NetworkZeroError, SocketAlreadyExistsError,
SocketTimedOutError, InvalidAddressError,
SocketInterruptedError, DifferentThreadError,
address, action_and_params,
string_to_bytes, bytes_to_string
)
from .discovery import advertise, discover, discover_all, discover_group
from .messenger import (
send_message_to, wait_for_message_from, send_reply_to,
send_news_to, wait_for_news_from
)
|
areeda/gwpy | examples/miscellaneous/range-spectrogram.py | Python | gpl-3.0 | 2,377 | 0.000421 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Estimating the spectral contribution to inspiral range
We have seen how the binary neutron star (BNS) inspiral range of a
gravitational-wave detector can be measured directly from the strain
readout. In this example, we will estimate the average spectral
contribution to BNS range from the strain record surrounding GW170817
using :func:`gwpy.astro.range_spectrogram`.
"""
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
# First, we need to load some data. As before we can `fetch` the
# `public data <https://www.gw-openscience.org/catalog/>`__
# around the GW170817 BNS merger:
from gwpy.timeseries import TimeSeries
l1 = TimeSeries.fetch_open_data('L1', 1187006834, 1187010930, tag='C02')
# Then, we can calculate a `Spectrogram` of the inspiral range
# amplitude spectrum:
from gwpy.astro import range_spectrogram
l1spec = range_spectrogram(l1, 30, fftlength=4, fmin=15, fmax=500) ** (1./2)
# We can plot this `Spectrogram` to visualise spectral variation in
# LIGO-Livingston's sensitivity in the hour or so surrounding GW170817:
plot = l1spec.plot(figsize=(12, 5))
ax = plot.gca()
ax.set_yscale('log')
ax.set_ylim(15, 500)
ax.s | et_title('LIGO-Livingston sensitivity to BNS around GW170817')
ax.set_epoch(1187008882) # <- set 0 on plot to GW170817
ax.colorbar(cmap='cividis', clim=(0, 16),
label='BNS range amplitude spectral density '
r | '[Mpc/$\sqrt{\mathrm{Hz}}$]')
plot.show()
# Note, the extreme dip in sensitivity near GW170817 is caused by a
# loud, transient noise event, see `Phys. Rev. Lett. vol. 119, p.
# 161101 <http://doi.org/10.1103/PhysRevLett.119.161101>`_ for more
# information.
|
whiteclover/solo | samples/web.py | Python | gpl-2.0 | 609 | 0.003284 | #!/usr/bin/env python
from solo.web.server import WebServer
from solo.web.app import App
class HelloRoot(object):
    """Controller exposing the demo endpoints served by HelloApp."""

    def index(self):
        """Landing page: a constant greeting."""
        return "Hello World!"

    def page(self, page):
        """Echo the requested page name back to the client."""
        return page
c | lass HelloApp(App):
def initialize(self):
ctl = HelloRoot()
route = self.route()
route.mapper.explicit = False
route.connect('index', '/', controller=ctl, action='index')
route.connect('page', '/page/:page', controller=ctl, action='page')
if __name__ == '__main__':
app = HelloApp()
| WebServer(('127.0.0.1', 8080), app, log=None).start()
|
behrtam/xpython | exercises/simple-cipher/simple_cipher_test.py | Python | mit | 2,258 | 0.001771 | import re
import unittest
from simple_cipher import Cipher
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0
class RandomKeyCipherTest(unittest.TestCase):
    """Tests for Cipher() constructed without a key (random key)."""

    def test_can_encode(self):
        # Encoding all 'a's must reproduce the key itself (shift of 0).
        cipher = Cipher()
        plaintext = "aaaaaaaaaa"
        self.assertEqual(cipher.encode(plaintext), cipher.key[0 : len(plaintext)])

    def test_can_decode(self):
        # Decoding the key prefix must collapse back to all 'a's.
        cipher = Cipher()
        self.assertEqual(cipher.decode(cipher.key[0 : len("aaaaaaaaaa")]), "aaaaaaaaaa")

    def test_is_reversible(self):
        # decode(encode(x)) == x for any plaintext.
        cipher = Cipher()
        plaintext = "abcdefghij"
        self.assertEqual(cipher.decode(cipher.encode(plaintext)), plaintext)

    def test_key_is_made_only_of_lowercase_letters(self):
        self.assertIsNotNone(re.match("^[a-z]+$", Cipher().key))
class SubstitutionCipherTest(unittest.TestCase):
def test_can_encode(self):
cipher = Cipher("abcdefghij")
plaintext = "aaaaaaaaaa"
self.assertEqual(cipher.encode(plaintext), cipher.key)
def test_can_decode(self):
cipher = Cipher("abcdefghij")
self.assertEqual(cipher.decode(cipher.key), "aaaaaaaaaa")
def test_is_reversible(self):
cipher = Cipher("abcdefghij")
plaintext = "abcdefghij"
self.assertEqual(cipher.decode(cipher.encode(plaintext)), plaintext)
def test_can_double_shift_encode(self):
cipher = Cipher("iamapandabear")
plaintext = "iamapandabear"
self.assertEqual(cipher.encode(plaintext), "qayaeaagaciai")
def test_can_wrap_on_encode(self):
| cipher = Cipher("abcdefghij")
plaintext = "zzzzzzzzzz"
self.assertEqual(cipher.encode(plaintext), "zabcdefghi")
def test_can_wrap_on_decode(self):
cipher = Cipher("abcdefghij")
self.assertEqual(cipher.decode("zabcdefghi"), "zzzzzzzzzz")
def te | st_can_encode_messages_longer_than_the_key(self):
cipher = Cipher("abc")
plaintext = "iamapandabear"
self.assertEqual(cipher.encode(plaintext), "iboaqcnecbfcr")
def test_can_decode_messages_longer_than_the_key(self):
cipher = Cipher("abc")
self.assertEqual(cipher.decode("iboaqcnecbfcr"), "iamapandabear")
if __name__ == "__main__":
unittest.main()
|
kdart/pycopia | XML/pycopia/XML/DTD.py | Python | apache-2.0 | 8,510 | 0.00282 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deals with DTD files. Primarily, provides the DTD parser and Python
"compiler".
"""
import sys, os
from pycopia import sourcegen
from pycopia.textutils import identifier, keyword_identifier
import pycopia.XML.POM
from pycopia.XML.POM import (ContentModel, ElementNode, Notation, ValidationError,
normalize_unicode)
from pycopia.XML.POMparse import (XMLAttribute, ANY, PCDATA, EMPTY)
### DTD compiler components ###
def get_dtd_compiler(fo, mixinmodule=None, doctype=None):
    """Return a DTD parser wired to emit Python source into *fo*.

    fo          -- writable file object the generated module is written to.
    mixinmodule -- optional module searched for ``<Name>Mixin`` classes to
                   mix into the generated element classes.
    doctype     -- optional doctype used to identify the root element.
    """
    import xml
    if hasattr(xml, "use_pyxml"): # per Gentoo bug #367729
        xml.use_pyxml()
    from xml.parsers.xmlproc.dtdparser import DTDParser
    generator = sourcegen.get_sourcefile(fo)
    dh = DTDConsumerForSourceGeneration(generator, mixinmodule, doctype)
    parser = DTDParser()
    parser.set_dtd_consumer(dh)
    return parser
def get_identifier(uname):
    """Return *uname* normalized and converted to a Python identifier.

    Used for generated class names, so unicode element names map to
    legal identifiers.
    """
    return identifier(normalize_unicode(uname))
class AttributeMap(dict):
    """Dictionary whose repr lists one ``key: value`` pair per line.

    The value is interpolated with ``%s`` (unquoted) because it is an
    identifier in the generated source, not a string literal.
    """
    def __repr__(self):
        pairs = "".join("\n    %r: %s, " % item for item in self.items())
        return "{" + pairs + "\n    }"
# this DTD parser consumer generates the Python source code from the DTD.
class DTDConsumerForSourceGeneration(object):
def __init__(self, generator, mixins=None, doctype=None):
self.generator = generator
self._code_index = 0
self.elements = {}
self.parameter_entities = {}
self.general_entities = {}
self._forwardattributes = {}
self._allattributes = {}
self.mixins = mixins # should be a module object
self.doctype = doctype
def dtd_start(self):
print "Starting to parse DTD...",
self.generator.add_comment("This file generated by a program. do not edit.")
self.generator.add_import(pycopia.XML.POM)
if self.mixins:
self.generator.add_import(self.mixins)
self.generator.add_blank()
self._code_index = self.generator.get_current_index()
def dtd_end(self):
print "done parsing. Writing file."
gen = self.generator
for name, value in self._allattributes.items():
gen.add_code("%s = %r" % (name, value), index=2)
gen.add_instance("GENERAL_ENTITIES", self.general_entities)
gen.add_comment("Cache for dynamic classes for this dtd.")
gen.add_instance("_CLASSCACHE", {})
gen.write()
def new_element_type(self, elem_name, elem_cont):
"Receives the declaration of an element type."
try:
element = self.elements[elem_name]
except KeyError:
self.make_new_element(elem_name, elem_cont)
def make_new_element(self, elem_name, contentmodel):
parents = [ElementNode]
if self.mixins:
mixinname = "%sMixin" % ( get_identifier(elem_name) )
if hasattr(self.mixins, mixinname):
parents.insert(0, getattr(self.mixins, mixinname))
# class name is capitalized to avoid clashes with Python key words.
ch = self.generator.add_class(get_identifier(elem_name), tuple(parents))
ch.add_attribute("_name", elem_name)
ch.add_attribute("CONTENTMODEL", _ContentModelGenerator(contentmodel))
self.elements[elem_name] = ch
# Add any previously seen attributes
try:
fwdattribs, fwdkwattribs = self._forwardattributes[elem_name]
except KeyError:
pass
else:
ch.add_attribute("ATTRIBUTES", fwdattribs)
ch.add_attribute("KWATTRIBUTES", fwdkwattribs)
del self._forwardattributes[elem_name]
# identify the root element with a generic name (_Root).
if self.doctype and elem_name.lower() == self.doctype.name.lower():
self.generator.add_code("\n_Root = %s\n" % (get_identifier(elem_name),))
def new_attribute(self, elem, a_name, a_type, a_decl, a_def):
"Receives the declaration of a new attribute."
attr = XMLAttribute(a_name, a_type, a_decl, a_def)
ident = attr.get_identifier()
self._allattributes[ident] = attr
try:
element = self.elements[elem]
except KeyError:
# Got a forward attribute definition (defined before element)
try:
fwdattribs, fwdkwattribs = self._forwardattributes[elem]
except KeyError:
fwdattribs = AttributeMap()
fwdkwattribs = AttributeMap()
self._forwardattributes[elem] = (fwdattribs, fwdkwattribs)
fwdattribs[a_name] = ident
| keywordname = keyword_identifier(normalize_unicode(a_name))
fwdkwattribs[keywordname] = ident
else:
self._add_element_attlist(element, attr, ident)
def _add_element_attlist(self, element, xmlattribute, ident):
try:
attrmap = element.get_attribute("ATTRIBUTES")
kwattrmap = element.get_attri | bute("KWATTRIBUTES")
except KeyError:
element.add_attribute("ATTRIBUTES", AttributeMap())
element.add_attribute("KWATTRIBUTES", AttributeMap())
attrmap = element.get_attribute("ATTRIBUTES")
kwattrmap = element.get_attribute("KWATTRIBUTES")
attrmap[xmlattribute.name] = ident
keywordname = keyword_identifier(normalize_unicode(xmlattribute.name))
kwattrmap[keywordname] = ident
def handle_comment(self, contents):
"Receives the contents of a comment."
self.generator.add_comment(contents)
def new_parameter_entity(self,name,val):
"Receives internal parameter entity declarations."
# these are handled internally by the DTD parser. but.. save it anyway.
self.parameter_entities[name] = val
def new_external_pe(self, name, pubid, sysid):
"Receives external parameter entity declarations."
# these are handled internally by the DTD parser.
def new_general_entity(self, name, val):
"Receives internal general entity declarations."
self.general_entities[normalize_unicode(name)] = val
def new_external_entity(self, ent_name, pub_id, sys_id, ndata):
"""Receives external general entity declarations. 'ndata' is the
empty string if the entity is parsed."""
# XXX do we need to handle this?
print "XXX external entity:"
print ent_name, pub_id, sys_id, ndata
def new_notation(self,name, pubid, sysid):
"Receives notation declarations."
n = Notation(name, pubid, sysid)
self.generator.add_instance(get_identifier(name), n)
def handle_pi(self, target, data):
"Receives the target and data of processing instructions."
# XXX do we need to handle this?
print "XXX unhandled PI:",
print "target=%r; data=%r" % (target, data)
class _ContentModelGenerator(object):
"""_ContentModelGenerator(rawmodel)
The DTD parser generated and final content model are so different that a
different content model generator is used for this object.
"""
def __init__(self, rawmodel=None):
tm_type = type(rawmodel)
if tm_type is str:
if rawmodel == "EMPTY":
self.model = EMPTY
elif rawmodel == "#PCDATA":
self.model = PCDATA
elif rawmodel == "ANY":
self.model = ANY
else:
raise ValidationError, "ContentModelGenerator: unknown special type"
elif tm_type is tup |
fadiga/mstock | tools/periods.py | Python | apache-2.0 | 8,621 | 0.001163 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Maintainer: Fad
from datetime import date, timedelta
from calendar import monthrange
def get_week_boundaries(year, week):
    """Return the (monday, sunday) dates of ISO week *week* of *year*.

    Week 1 is the week containing the year's first Thursday, matching
    the ISO-8601 convention used by ``date.isocalendar()``.
    """
    jan_first = date(year, 1, 1)
    offset = jan_first.weekday()
    # Snap to the Monday of ISO week 1: move forward when Jan 1 falls on
    # Fri/Sat/Sun (that week belongs to the previous year), back otherwise.
    if offset > 3:
        week_one_monday = jan_first + timedelta(7 - offset)
    else:
        week_one_monday = jan_first - timedelta(offset)
    start = week_one_monday + timedelta(days=7 * (week - 1))
    return start, start + timedelta(days=6)
class WeekPeriod(object):
    """A single ISO week of a given year, with prev/next navigation.

    ``duration_number`` is the ISO week number; ``duration`` is stored
    only to mirror the other period classes' constructor signature.
    """

    def __init__(self, year, duration, duration_number):
        super(WeekPeriod, self).__init__()
        self.year = year
        self.duration = duration
        self.duration_number = duration_number

    def __repr__(self):
        first_day, last_day = self.current
        return ("<Period('%(start)s', '%(end)s')>") \
            % {'start': first_day, 'end': last_day}

    def __unicode__(self):
        return ("Semaine de:%(start)s") % {'start': self.current[0]}

    @property
    def current(self):
        """(monday, sunday) dates of this week."""
        return get_week_boundaries(self.year, self.duration_number)

    def display_name(self):
        """Human-readable label for this week."""
        return ("Semaine du: %(start)s") % {'start': self.current[0].strftime(u"%d %b %Y")}

    @property
    def next(self):
        """(year, week_number) of the following week."""
        monday_after = self.current[0] + timedelta(7)
        return monday_after.year, monday_after.isocalendar()[1]

    @property
    def previous(self):
        """(year, week_number) of the preceding week."""
        monday_before = self.current[0] - timedelta(7)
        return monday_before.year, monday_before.isocalendar()[1]
class MonthPeriod(object):
    """One calendar month of a given year, with prev/next navigation.

    ``duration_number`` is the month number (1-12); ``duration`` is
    stored only to mirror the other period classes' signature.
    """

    def __init__(self, year, duration, duration_number):
        super(MonthPeriod, self).__init__()
        self.year = year
        self.duration = duration
        self.duration_number = duration_number

    def __repr__(self):
        return ("<Period('%(start)s', '%(end)s')>") \
            % {'start': self.current[0].strftime(u'%b %Y'),
               'end': self.current[1].strftime(u'%b %Y')}

    def __unicode__(self):
        return ("Mois de:%(start)s") % {'start': self.current[0].strftime(u'%b %Y')}

    @property
    def current(self):
        """(first day, last day) of this month as ``date`` objects."""
        year = int(self.year)
        month = int(self.duration_number)
        nbr_days = monthrange(year, month)[1]
        return date(year, month, 1), date(year, month, nbr_days)

    def display_name(self):
        """Human-readable label for this month."""
        return ("Mois de: %(start)s") % {'start': self.current[0].strftime(u'%b %Y')}

    @property
    def next(self):
        """(year, month) of the following month.

        Fixed: the previous implementation added ``monthrange() + 1``
        days to the 1st, landing on the *2nd* of the next month, and
        returned a ``date`` in slot 0 although callers (``Period``) feed
        slot 0 back in as a year — consistent now with
        ``WeekPeriod.next`` which returns (year, number).
        """
        year, month = self.current[0].year, self.current[0].month
        if month == 12:
            return year + 1, 1
        return year, month + 1

    @property
    def previous(self):
        """(year, month) of the preceding month."""
        # The day before the 1st is the last day of the previous month.
        last_of_previous = self.current[0] - timedelta(1)
        return last_of_previous.year, last_of_previous.month
class Period(object):
    """Facade bundling a period with its two neighbours.

    Builds ``current``, ``next`` and ``previous`` period objects for the
    requested duration ("week" or "month").

    NOTE(review): assumes each period class's ``next``/``previous``
    properties yield (year, number) pairs that can be fed back into the
    same constructor -- confirm for each period class.
    """
    def __init__(self, year, duration, duration_number):
        super(Period, self).__init__()
        self.year = year
        self.duration = duration
        self.duration_number = duration_number
        W = "week"
        M = "month"
        # Build the period to display plus its two neighbours.
        if duration == W:
            # Week granularity: duration_number is the ISO week number.
            self.current = WeekPeriod(self.year, W, self.duration_number)
            self.next = WeekPeriod(self.current.next[0], W,
                                   self.current.next[1])
            self.previous = WeekPeriod(self.current.previous[0], W,
                                       self.current.previous[1])
        if duration == M:
            # Month granularity: duration_number is the month (1-12).
            self.current = MonthPeriod(self.year, M, self.duration_number)
            self.next = MonthPeriod(self.current.next[0], M,
                                    self.current.next[1])
            self.previous = MonthPeriod(self.current.previous[0], M,
                                        self.current.previous[1])
    # def __repr__(self):
    #     return ("<Period('%(start)s', '%(end)s')>") \
    #         % {'start': self.current,
    #            'end': self.next}
    # def __unicode__(self):
    #     return ("Mois de:%(start)s") % {'start': self.current}
    # if duration == "year":
    #     self.current = date(self.year, 1, 1)
    #     self.next = self.next_year()
    #     self.previous = self.previous_year()
    # def next_year(self):
    #     current_date = date(self.year, 1, 1)
    #     # la date de l'année après celle qu'on affiche
    #     return date(current_date.year + 1, current_date.month,
    #                 current_date.day)
    # def previous_year(self):
    #     current_date = date(self.year, 1, 1)
    #     # la date de l'année avant celle qu'on affiche
    #     return date(current_date.year - 1, current_date.month,
    #                 current_date.day)
# TODO: faire de ce mamouth un middleware ou un context processor
def get_time_pagination(year, duration, duration_number):
"""
navigation entre les dates année, mois, week
"""
todays_date_is_before = False
todays_date_is_after = False
# la date à afficher
todays_date = date.today()
if duration == "month":
current_date = date(year, duration_number, 1)
# la date du mois avant celui qu'on affiche
delta = timedelta(1)
previous_date = current_date - delta
previous_date = date(previous_date.year, previous_date.month, 1)
# la date du mois après celui qu'on affiche
days_count = monthrange(current_date.year, current_date.month)[1]
delta = timedelta(days_count + 1)
next_date = current_date + delta
# Vérification que la semaine d'aujourd'hui est à afficher ou non
if todays_date < previous_date:
todays_date_is_before = True
if todays_date > next_date:
todays_date_is_after = True
# l'adresse pour afficher le mois d'ajourd'hui
todays_date_url = (todays_date.year, duration, todays_date.month)
# l'adresse pour afficher le mois précédent
previous_date_url = (previous_date.year, duration, previous_date.month)
# l'adresse pour afficher le mois suivant
next_date_url = (next_date.year, duration, next_date.month)
# formatage de l'affichage des mois en tenant compte de la
# language code
current_date_ = current_date.strftime(u'%b %Y')
previous_date_ = previous_date.strftime(u'%b %Y')
next_date_ = next_date.strftime(u'%b %Y')
todays_date = "This month"
else:
current_date = date(year, 1, 1)
# la date de l'année avant celle qu'on affiche
previous_date = date(current_date.year - 1,
current_date.month,
current_date.day)
# la date de l'année après celle qu'on affiche
next_date = date(current_date.year + 1, current_date.month,
current_date.day)
# Vérification que l'année d'aujourd'hui est à afficher ou non
if todays_date.year < (current_date.year - 1):
todays_date_is_before = True
if todays_date.year > (current_date.year + 1):
todays_date_is_after = True
# l'adresse pour afficher l'année d'aujourd'hui
todays_date_url = todays_date.year
# l'adresse pour afficher l'année précédent
previous_date_url = previous_date.year
# l'adresse pour afficher l'année suivant
next_date_url = next_date.year
|
Chuban/moose | gui/utils/YamlData.py | Python | lgpl-2.1 | 1,666 | 0.002401 | #!/usr/bin/python
import sys, os, commands, time, re, copy
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
from GenSyntax import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class YamlData():
def __init__(self, qt_app, app_path, recache, use_cached_syntax):
self.qt_app = qt_app
self.app_path = app_path
self.use_cached_syntax = use_cached_syntax
self.gen_syntax = GenSyntax(qt_app, app_path, use_cached_syntax)
self.yaml_data = self.gen_syntax.GetSyntax(recache)
def recache(self, recache):
self.yaml_data = self.gen_ | syntax.GetSyntax(recache)
def recursiveYamlDataSearch(self, path, current_yaml):
if current_yaml['name'] == path:
return current_yaml
else:
if current_yaml['subblo | cks']:
for child in current_yaml['subblocks']:
yaml_data = self.recursiveYamlDataSearch(path, child)
if yaml_data: # Found it in a child!
return yaml_data
else: # No children.. stop recursion
return None
def findYamlEntry(self, path):
for yaml_it in self.yaml_data:
yaml_data = self.recursiveYamlDataSearch(path, yaml_it)
if yaml_data:
return yaml_data
# This means it wasn't found
return None
|
hxer/exercise | scapy/scanwifi.py | Python | gpl-2.0 | 1,014 | 0.00108 | # -*- coding: utf-8 -*-
"""
扫描周围无线网络,列出SSID and mac address
开启网卡监听模式(mon),侦听无线网络流量
开启/关闭 监听模式
>sud | o airmon-ng start/stop wlan0
找回网络管理图标
>NetworkManager start
"""
import sys
from scapy.all import *
from argparse import ArgumentParser
# BSSIDs already reported; module-level so the state survives across
# calls (the old local list was re-created on every packet, so the
# duplicate check never fired and every beacon was printed).
_seen_aps = set()


def packet_handler(pkt):
    """Print the SSID/BSSID of each beacon frame seen, exactly once.

    Invoked by scapy's ``sniff()`` for every captured packet; only
    802.11 management frames (type 0) of subtype 8 (beacons) are
    considered.
    """
    if not pkt.haslayer(Dot11):
        return
    if pkt.type == 0 and pkt.subtype == 8 and pkt.addr2 not in _seen_aps:
        _seen_aps.add(pkt.addr2)
        # Fixed: this print used '.fomat(' (AttributeError at runtime)
        # and misspelled 'Available'.
        print("Available SSID: {name}, MAC: {addr}"
              "".format(name=pkt.info, addr=pkt.addr2))
usage = " | python {prog} <interface>\n".format(prog=sys.argv[0])
usage += "\te.g. python {prog} wlan0mon".format(prog=sys.argv[0])
parser = ArgumentParser(usage=usage)
parser.add_argument('interface', help="the interface needed to monitor")
args = parser.parse_args()
iface = args.interface
sniff(iface=iface, prn=packet_handler)
|
rowanv/finders_keepers | finders_keepers/settings.py | Python | apache-2.0 | 2,931 | 0 | """
Django settings for finders_keepers project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import yaml
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_b*-71=))bquif%6)k=s#8)g^zwe&_%cd^2qpgykg@-f@4yg*e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'location_finder',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfV | iewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'finders_keepers.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS | ': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'finders_keepers.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
# Config API key files
# Load the service API key from a YAML file kept next to the project so
# the secret stays out of settings.py / version control.
config_keys_path = os.path.join(BASE_DIR, 'config.yml')
with open(config_keys_path, 'r') as ymlfile:
    # safe_load: yaml.load without an explicit Loader can execute
    # arbitrary Python from the file and raises TypeError on PyYAML >= 6.
    cfg = yaml.safe_load(ymlfile)
API_KEY = cfg['api_key']
|
LLNL/spack | lib/spack/spack/test/llnl/util/tty/tty.py | Python | lgpl-2.1 | 3,017 | 0 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import llnl.util.tty as tty
def test_get_timestamp(monkeypatch):
    """Ensure the results of get_timestamp are reasonable.

    Exercises the interaction between the module-level ``_debug`` level
    and the ``force`` argument: no timestamp when debugging is off
    (unless forced), and the PID only appears at debug level 2.
    """
    # Debug disabled should return an empty string
    monkeypatch.setattr(tty, '_debug', 0)
    assert not tty.get_timestamp(False), 'Expected an empty string'
    # Debug disabled but force the timestamp should return a string
    assert tty.get_timestamp(True), 'Expected a timestamp/non-empty string'
    pid_str = ' {0}'.format(os.getpid())
    # Level 1 debugging should return a timestamp WITHOUT the pid
    monkeypatch.setattr(tty, '_debug', 1)
    out_str = tty.get_timestamp(False)
    assert out_str and pid_str not in out_str, 'Expected no PID in results'
    # Level 2 debugging should also return a timestamp WITH the pid
    monkeypatch.setattr(tty, '_debug', 2)
    out_str = tty.get_timestamp(False)
    assert out_str and pid_str in out_str, 'Expected PID in results'
@pytest.mark.parametrize('msg,enabled,trace,newline', [
    ('', False, False, False),  # Nothing is output
    (Exception(''), True, False, True),  # Exception output
    ('trace', True, True, False),  # stacktrace output
    ('newline', True, False, True),  # newline in output
    ('no newline', True, False, False)  # no newline output
])
def test_msg(capfd, monkeypatch, enabled, msg, trace, newline):
    """Ensure the output from msg with options is appropriate.

    ``enabled`` toggles tty's message output, ``trace`` the stacktrace
    prefix and ``newline`` the trailing newline; captured stdout is then
    checked for each expected fragment.
    """
    # temporarily use the parameterized settings
    monkeypatch.setattr(tty, '_msg_enabled', enabled)
    monkeypatch.setattr(tty, '_stacktrace', trace)
    # Exceptions are rendered with an 'Exception: ' prefix instead of
    # their (empty) message.
    expected = [msg if isinstance(msg, str) else 'Exception: ']
    if newline:
        expected[0] = '{0}\n'.format(expected[0])
    if trace:
        # With stacktraces on, the caller's file name ('.py') is printed.
        expected.insert(0, '.py')
    tty.msg(msg, newline=newline)
    out = capfd.readouterr()[0]
    for msg in expected:
        assert msg in out
@pytest.mark.parametrize('msg,trace,wrap', [
(Exception(''), False, False), # Exception output
('trace', True, False), # stacktrace output
('wrap', False, True), # wrap | in output
])
def test_info(capfd, monkeypatch, msg, trace, wrap):
"""Ensure the output from info with options is appropriate."""
# temporarily use the parameterized settings
monkeypatch.setattr(tty, | '_stacktrace', trace)
expected = [msg if isinstance(msg, str) else 'Exception: ']
if trace:
expected.insert(0, '.py')
extra = 'This extra argument *should* make for a sufficiently long line' \
' that needs to be wrapped if the option is enabled.'
args = [msg, extra]
num_newlines = 3 if wrap else 2
tty.info(*args, wrap=wrap, countback=3)
out = capfd.readouterr()[0]
for msg in expected:
assert msg in out
assert out.count('\n') == num_newlines
|
LLNL/spack | var/spack/repos/builtin/packages/py-deeptools/package.py | Python | lgpl-2.1 | 1,792 | 0.002232 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDeeptools(PythonPackage):
    """deepTools addresses the challenge of handling the large amounts of data
    that are now routinely generated from DNA sequencing centers."""

    # The test suite and associated test data is missing in the pypi tarball,
    # so the sources are fetched from GitHub instead.
    homepage = "https://pypi.python.org/pypi/deepTools/"
    url = "https://github.com/deeptools/deepTools/archive/3.3.0.tar.gz"

    # NOTE: the 3.3.0 sha256 was split by '|' artifacts in the corrupted
    # source; rejoined here into a single 64-hex-digit digest.
    version('3.3.0', sha256='a7aaf79fe939ca307fe6ec5e156750389fdfa4324bf0dd6bf5f53d5fda109358')
    version('3.2.1', sha256='dbee7676951a9fdb1b88956fe4a3294c99950ef193ea1e9edfba1ca500bd6a75')
    version('2.5.2', sha256='16d0cfed29af37eb3c4cedd9da89b4952591dc1a7cd8ec71fcba87c89c62bf79')

    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy@1.9.0:', type=('build', 'run'))
    depends_on('py-scipy@0.17.0:', type=('build', 'run'))
    depends_on('py-py2bit@0.2.0:', type=('build', 'run'))
    depends_on('py-pybigwig@0.2.1:', type=('build', 'run'))
    depends_on('py-pysam@0.14.0:', type=('build', 'run'))
    depends_on('py-matplotlib@2.1.2:', type=('build', 'run'))
    depends_on('py-numpydoc@0.5:', type=('build', 'run'))
    depends_on('py-plotly@2.0.0:', type=('build', 'run'))
    depends_on('py-deeptoolsintervals@0.1.8:', type=('build', 'run'))

    def patch(self):
        # Add nosetest hook for "python setup.py test" argument.
        filter_file(r'^setup\(',
                    r'''setup(
    tests_require='nose',
    test_suite='nose.collector',''',
                    'setup.py')
|
DavideCanton/Python3 | ping/pyng.py | Python | gpl-3.0 | 6,649 | 0.00015 | __author__ = "davide"
import struct
import socket
import argparse
import sys
from datetime import datetime
import time
from collections import defaultdict
from signal import signal, SIGINT, SIG_IGN
ICMP_ECHO_REQUEST = 8, 0
ICMP_ECHO_RESPONSE = 0, 0
__all__ = ["ICMPPacket", "Pinger",
"ICMP_ECHO_REQUEST", "ICMP_ECHO_RESPONSE"]
# Python module for pinging hosts
class ICMPPacket:
    """An ICMP packet: a (type, code) pair plus a payload, with lazy
    RFC 1071 checksum computation and (de)serialization helpers."""

    __slots__ = "_data", "_checksum", "_type"

    def __init__(self, packetType=(0, 0), data=b""):
        """Initialize the packet.

        @param packetType: (type, code) tuple; the default (0, 0) equals the
            module constant ICMP_ECHO_RESPONSE (spelled as a literal so the
            class is self-contained)
        @param data: payload bytes
        """
        self.packetType = packetType
        self.data = data
        self._checksum = -1  # negative sentinel: checksum not computed yet

    @property
    def packetType(self):
        """(type, code) tuple, one byte each."""
        return self._type

    @packetType.setter
    def packetType(self, packet_type):
        if len(packet_type) != 2:
            raise ValueError("type must be a 2-element tuple")
        if any(not 0 <= val < (1 << 8) for val in packet_type):
            raise ValueError("Packet type not valid")
        self._type = packet_type

    @property
    def data(self):
        """Packet payload (bytes)."""
        return self._data

    @data.setter
    def data(self, data=b""):
        # falsy values (None, "", b"") normalize to an empty payload
        self._data = data or b""

    def compute_checksum(self):
        """Compute the 16-bit one's-complement Internet checksum (RFC 1071)
        over the header (with a zeroed checksum field) plus the payload."""
        header = bytes([self._type[0], self._type[1], 0, 0])
        struct_packet = header + self._data
        length = len(struct_packet)
        if length % 2:
            # odd length: the last byte is treated as the high byte of a word
            odd = struct_packet[-1] << 8
            struct_packet = struct_packet[:-1]
        else:
            odd = 0
        format_len = len(struct_packet) // 2
        blocks = struct.unpack("!{}H".format(format_len), struct_packet)
        checksum = sum(blocks)
        checksum += odd
        # fold the carries back into 16 bits (twice is always enough)
        checksum = (checksum >> 16) + (checksum & 0xFFFF)
        checksum += checksum >> 16
        self._checksum = ~checksum & 0xFFFF

    @property
    def checksum(self):
        """Packet checksum (-1 until computed)."""
        return self._checksum

    @property
    def computedChecksum(self):
        """Whether the checksum has been computed yet."""
        return self._checksum >= 0

    def __str__(self):
        # NOTE(review): data[4:] appears to skip a 4-byte id prefix that the
        # sender prepends to the payload -- confirm when printing raw packets
        return ("ICMPPacket[type={}, data={}, checksum={}]"
                .format(self._type, self._data[4:], self._checksum))

    def encodePacket(self):
        """Return the packet serialized to bytes, computing the checksum
        first if it has not been computed yet."""
        if not self.computedChecksum:
            self.compute_checksum()
        return struct.pack("!BBH{}s".format(len(self._data)),
                           self._type[0], self._type[1],
                           self._checksum, self._data)

    @staticmethod
    def buildPacket(raw):
        """Build an ICMPPacket from raw bytes received from a pong.

        The IP header length is taken from the IHL field of the first byte.
        Returns (IP header bytes, ICMPPacket).
        """
        ihl = (raw[0] & 0x0F) << 2
        ip_header, raw_packet = raw[:ihl], raw[ihl:]
        format_len = len(raw_packet) - 4
        unpacked = struct.unpack("!BBH{}s".format(format_len), raw_packet)
        packet = ICMPPacket(unpacked[:2], unpacked[3])
        packet._checksum = unpacked[2]
        return ip_header, packet
class Pinger:
    """Sends ICMP echo requests ("pings") over a raw socket and receives
    the replies. Opening SOCK_RAW sockets typically requires privileges."""

    DEFAULT_TIMEOUT = 5

    def __init__(self, timeout=DEFAULT_TIMEOUT):
        """Initialize the Pinger with the given receive timeout (seconds)."""
        self.socket = None
        self.timeout = timeout
        # per-destination counter embedded in the payload of the next ping
        self.id_dict = defaultdict(int)

    def ping(self, dest_address, data=None):
        """Send to *dest_address* a ping packet carrying *data* (bytes)."""
        # BUGFIX: the original tested "if not self.socket: self.close()",
        # which was always a no-op (close() does nothing on None) and leaked
        # one raw socket per repeated ping; an already-open socket is the
        # case that needs closing before a new one is created.
        if self.socket:
            self.close()
        dest_address = str(dest_address)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
                                    socket.getprotobyname("icmp"))
        self.socket.connect((dest_address, 0))
        self.socket.settimeout(self.timeout)
        packet = ICMPPacket(packetType=ICMP_ECHO_REQUEST)
        # prefix the payload with the per-destination sequence id
        idpacket = struct.pack("!I", self.id_dict[dest_address])
        packet.data = idpacket + (data or b"")
        self.id_dict[dest_address] += 1
        packet_struct = packet.encodePacket()
        self.socket.send(packet_struct)

    def pong(self):
        """Return (IP header, ICMPPacket) parsed from the reply."""
        if not self.socket:
            raise socket.error("Socket closed")
        return ICMPPacket.buildPacket(self.socket.recv((1 << 16) - 1))

    def close(self):
        """Close the underlying socket, if open."""
        if self.socket:
            self.socket.close()
            self.socket = None

    def __del__(self):
        """Ensure the socket is released on garbage collection."""
        self.close()
def main():
    """Command-line entry point: repeatedly ping a host and print statistics."""
    def parseArgs():
        # -r destination, -d payload, -t number of pings (default: "forever")
        handler = argparse.ArgumentParser(description="Pinger")
        handler.add_argument('-r', '--remote_host', help="Destination",
                             default="localhost", dest="dest")
        handler.add_argument('-d', '--data', help="Dati", default="",
                             dest="data")
        handler.add_argument('-t', '--tries', help="Numero di ping",
                             default=sys.maxsize, dest="tries", type=int)
        return handler.parse_args()

    args = parseArgs()
    try:
        ip = socket.gethostbyname(args.dest)
    except socket.gaierror:
        sys.exit("{} not found".format(args.dest))
    print("Pinging", args.dest, "(" + ip + ")")
    pinger = Pinger()
    # tmax/tmin/tmean are in milliseconds; total = sent, received = answered
    tmax, tmin, tmean, total, received = -1, sys.maxsize, 0, 0, 0
    for i in range(args.tries):
        total += 1
        try:
            pinger.ping(args.dest, args.data.encode())
            t = datetime.now()
            pinger.pong()
            # round-trip time in milliseconds
            t = (datetime.now() - t).microseconds / 1000.
            print("Got ping from {} in {:1.2f} ms".format(args.dest, t))
            # mask Ctrl-C while the running statistics are updated so the
            # aggregate counters never end up half-updated
            handler = signal(SIGINT, SIG_IGN)
            tmax, tmin = max(tmax, t), min(tmin, t)
            received += 1
            # incremental running mean over the received replies
            tmean = ((received - 1) * tmean + t) / received
            signal(SIGINT, handler)
            if i != args.tries - 1:
                time.sleep(1)
        except socket.timeout:
            print("Host is not reachable")
        except KeyboardInterrupt:
            break
    print("***** RESULTS *****")
    if received != 0:
        stats = "Max time: {:1.2f} ms, Min time: {:1.2f} ms, Avg time: {:1.2f} ms"
        print(stats.format(tmax, tmin, tmean))
    stats = "Sent packets: {}\tReceived: {}\tLost: {}"
    print(stats.format(total, received, total - received))
    print("Packet Lost: {:1.0f}%".format((total - received) / total * 100))
|
Aquaio/aqua-io-python | setup.py | Python | mit | 1,417 | 0 | import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata for the Aqua.io API client.
setup(
    name='aqua-io',
    version='0.1.0',
    description='Official Aqua.io API library client for Python',
    author='Michael Carroll / Aqua.io',
    author_email='michael@aqua.io',
    url='https://aqua.io',
    download_url='https://github.com/Aquaio/aqua-io-python/tarball/0.1',
    keywords=[
        'aqua',
        'aqua.io',
        'api',
        'client',
        'library',
        'ICD',
        'ICD9',
        'ICD10',
        'ICD-9',
        'ICD-10',
        'meaningful use',
        'healthcare',
        'health',
        'EHR',
        'EMR',
        'medicine',
        'medical'
    ],
    license='MIT',
    install_requires=[
        # was corrupted to 'requests > = 2.1.0' in the source; the version
        # operator must not contain a space
        'requests >= 2.1.0'
    ],
    packages=[
        'aqua_io',
        'aqua_io.api',
        'aqua_io.error',
        'aqua_io.http_client'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
apache/steve | pysteve/lib/voter.py | Python | apache-2.0 | 3,304 | 0.006659 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib, json, random, os, sys, time
try:
from __main__ import config
except:
import ConfigParser as configparser
config = configparser.RawConfigParser()
config.read("%s/../../../steve.cfg" % (os.path.dirname(__file__)))
# SMTP Lib
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from smtplib import SMTPException
from lib import constants, election
backend = constants.initBackend(config)
def get(election, basedata, uid):
    """Look up a voter by ballot *uid* in *election*.

    Voters are stored under sha512(election hash + uid), so the plain uid
    never has to be persisted by the backend.
    """
    voter_hash = hashlib.sha512(basedata['hash'] + uid).hexdigest()
    return backend.voter_get_uid(election, voter_hash)
def add(election, basedata, PID):
    """Register *PID* as a voter in *election*.

    Returns (uid, xhash): *uid* is the secret ballot id handed to the voter,
    *xhash* is the salted hash the backend stores.
    """
    seed = "%s%s%s%s" % (PID, basedata['hash'], time.time(),
                         random.randint(1, 99999999))
    uid = hashlib.sha224(seed).hexdigest()
    ballot_hash = hashlib.sha512(basedata['hash'] + uid).hexdigest()
    backend.voter_add(election, PID, ballot_hash)
    return uid, ballot_hash
def remove(election, basedata, UID):
    """Delete the voter identified by *UID* from *election*.

    *basedata* is unused; it is kept for signature parity with get()/add().
    """
    backend.voter_remove(election, UID)
def hasVoted(election, issue, uid):
    """Return whether ballot *uid* has already voted on *issue*.

    Accepts the issue id with or without a trailing ".json" file suffix.
    """
    # BUGFIX: the original used issue.strip(".json"), which removes any of
    # the characters '.', 'j', 's', 'o', 'n' from BOTH ends of the string
    # (e.g. "json5.json" -> "5"); only the literal suffix should go.
    if issue.endswith(".json"):
        issue = issue[:-len(".json")]
    return backend.voter_has_voted(election, issue, uid)
def ballots():
    """Return the current user's ballots, or {} when no gateway/user exists."""
    try:
        from lib import gateway
        current_uid = gateway.uid()
        if not current_uid:
            return {}
        return backend.voter_ballots(current_uid)
    except:
        # no usable auth gateway: behave as "no ballots" rather than raising
        return {}
def regenerate(election, basedata, xhash):
    """Invalidate ballot *xhash* and issue a fresh ballot for the current user.

    Returns {'election': ..., 'ballot': ...} on success, or {'error': ...}
    when the ballot is invalid or no auth gateway is available.
    """
    try:
        from lib import gateway
        uid = gateway.uid()
        # scrub the old ballot first so it can never be used again
        valid = backend.ballot_scrub(election, xhash)
        if valid:
            ballot, xhash = add(election, basedata, uid)
            return {
                'election': election,
                'ballot': ballot
            }
        else:
            return {
                'error': "Not a valid ballot!"
            }
    except:
        # deliberate best-effort: report the failure instead of raising
        # (error string was split by a '|' artifact in the corrupted source)
        return {'error': "No suitable gateway mechanism found"}
def email(rcpt, subject, message):
    """Send *message* to *rcpt* through the MTA configured in steve.cfg.

    Raises a generic Exception when the SMTP transaction fails.
    """
    sender = config.get("email", "sender")
    signature = config.get("email", "signature")
    receivers = [rcpt]
    # py 2 vs 3 conversion
    if type(message) is bytes:
        message = message.decode('utf-8', errors='replace')
    msg = u"""From: %s
To: %s
Subject: %s

%s

With regards,
%s

--
Powered by Apache STeVe - https://steve.apache.org
""" % (sender, rcpt, subject, message, signature)
    msg = msg.encode('utf-8', errors='replace')
    try:
        smtpObj = smtplib.SMTP(config.get("email", "mta"))
        smtpObj.sendmail(sender, receivers, msg)
    except SMTPException:
        raise Exception("Could not send email - SMTP server down?")
|
RamonAranda/ConfusionMatrix | lib/formatter/src/_formatter.py | Python | mit | 3,826 | 0.001307 | from pyrsistent import pvector
from toolz import pipe
from toolz.curried import reduce, map
from toolz.dicttoolz import iterkeys, itervalues
from lib.value_objects.src.string_value_object import StringValueObject
__WHITESPACE = StringValueObject(" ")
__VERTICAL_SEPARATOR = StringValueObject("|")
__LINE_BREAK = StringValueObject("\n")
__HORIZONTAL_SEPARATOR = StringValueObject("-")
def __first(sequence):
    """Return the first element of *sequence*, or None when it is empty."""
    for element in sequence:
        return element
def format_dict_as_grid(data):
    """Render a dict-of-dicts as an ASCII grid.

    Outer keys become row labels, inner keys become column headers; the
    result is returned as a StringValueObject.
    """
    max_length_per_column = __calculate_max_str_length_per_column(data)
    separator = __get_row_separator(max_length_per_column)
    headers = __get_header(
        # column names come from any one inner dict (all rows assumed equal)
        pipe(data, itervalues, __first, iterkeys), max_length_per_column
    )
    rows = __get_rows(data, max_length_per_column)
    return StringValueObject(separator + headers + separator + rows)
def __calculate_max_str_length_per_column(data):
    """Return (as a pvector) the maximum string width of each grid column.

    Element 0 is the width of the row-label column (the outer keys); the
    remaining elements size each value column to max(longest value, header).
    """
    def __calculate_max_column_length(column_key):
        # widest stringified value in this column across all rows
        max_value_length = pipe(
            data,
            iterkeys,
            map(lambda key: data[key][column_key]),
            pvector,
            map(str),
            map(len),
            max
        )
        # a column can never be narrower than its own header
        return max(max_value_length, len(str(column_key)))

    # assumes every row has the same columns; take them from any one row
    max_values_column_length = pipe(
        data,
        itervalues,
        __first,
        iterkeys,
        map(__calculate_max_column_length),
        pvector
    )
    # width of the row-label column, wrapped in a one-element pvector
    max_key_length = pipe(
        data, iterkeys, map(str), map(len), pvector, max, lambda x: [x], pvector
    )
    return max_key_length + max_values_column_length
def __get_row_separator(max_length_per_column):
    """Build the horizontal rule ('----') spanning the full grid width."""
    column_count = len(max_length_per_column)
    # every column contributes two padding whitespaces, and vertical
    # separators appear between and around the columns (hence the +1)
    padding_width = 2 * len(__WHITESPACE) * column_count
    separator_width = len(__VERTICAL_SEPARATOR) * column_count + 1
    content_width = sum(max_length_per_column)
    total_width = padding_width + separator_width + content_width
    return StringValueObject(
        __HORIZONTAL_SEPARATOR * (total_width / len(__HORIZONTAL_SEPARATOR))
    ) + __LINE_BREAK
def __get_header(headers_list, max_length_per_column):
    """Render the header row: a blank label cell followed by column names."""
    def get_empty_header(max_length):
        # blank cell sitting above the row-label column
        return StringValueObject(
            __VERTICAL_SEPARATOR +
            __WHITESPACE * (max_length + 2)
        )

    def get_column_header(header_name, max_col_length):
        # '| name   ' padded to the column's computed width
        return StringValueObject(
            __VERTICAL_SEPARATOR + __WHITESPACE + header_name +
            (__WHITESPACE * (max_col_length - len(str(header_name)) + 1))
        )

    empty_header = get_empty_header(max_length_per_column[0])
    # NOTE: the tuple-parameter lambda below is Python 2 only syntax
    headers = pipe(
        zip(headers_list, max_length_per_column[1:]),
        map(lambda (header_name, length):
            get_column_header(header_name, length)),
        list,
        reduce(lambda x, y: x + y)
    )
    return empty_header + headers + __VERTICAL_SEPARATOR + __LINE_BREAK
def __get_rows(data, max_length_per_column):
    """Render every data row (in the dict's key order) and concatenate them."""
    rendered = [__get_row(data, row_key, max_length_per_column)
                for row_key in iterkeys(data)]
    return reduce(lambda acc, row: acc + row, rendered)
def __get_row(data, class_key, max_length_per_column):
    """Render one data row (label + value cells) followed by its rule line."""
    def format_value(value, max_len):
        # '| value  ' padded to the column's computed width
        return __VERTICAL_SEPARATOR + __WHITESPACE + str(value) + \
            __WHITESPACE * ((max_len - len(str(value))) + 1)

    # left-most cell: the row label, padded to the label-column width
    row_header = __VERTICAL_SEPARATOR + __WHITESPACE + \
        str(class_key) + \
        __WHITESPACE * \
        (max_length_per_column[0] - len(str(class_key)) + 1)
    # NOTE: the tuple-parameter lambda below is Python 2 only syntax
    row_values = pipe(
        zip(data[class_key].itervalues(), max_length_per_column[1:]),
        map(lambda (value, max_len): format_value(value, max_len)),
        list,
        reduce(lambda x, y: x + y),
    )
    return row_header + row_values + __VERTICAL_SEPARATOR + \
        __LINE_BREAK + __get_row_separator(max_length_per_column)
|
KRHS-GameProgramming-2016/Spoonghetti-Man | Player.py | Python | mit | 4,204 | 0.009039 | import pygame, sys, math
from Meatball import *
class Player(Meatball):
    """The player-controlled meatball: an animated sprite that can be
    steered with the keyboard or the mouse and absorbs other meatballs."""

    def __init__(self, maxSpeed=5, pos=None):
        """Create the player.

        @param maxSpeed: movement speed in pixels per update
        @param pos: initial [x, y] position (defaults to [10, 10])
        """
        # avoid the shared mutable default list of the original signature
        if pos is None:
            pos = [10, 10]
        Meatball.__init__(self, pos, None)
        size = [45, 45]
        self.maxSpeed = maxSpeed
        # walk cycle: frames go 1..7 and then back down (ping-pong loop);
        # the list was corrupted by '|' artifacts in the source, rebuilt here
        frame_files = ["SpoonerF.png", "SpoonerF(2).png", "SpoonerF(3.1).png",
                       "SpoonerF(4).png", "SpoonerF(5).png", "SpoonerF(6).png",
                       "SpoonerF(7).png", "SpoonerF(6).png", "SpoonerF(5).png",
                       "SpoonerF(4).png", "SpoonerF(3.1).png", "SpoonerF(2).png"]
        self.images = [pygame.transform.scale(
                           pygame.image.load("rsc/ball/" + name), size)
                       for name in frame_files]
        self.frame = 0
        self.image = self.images[self.frame]
        self.rect = self.image.get_rect(center=self.rect.center)
        self.maxFrame = len(self.images) - 1
        self.animationTimer = 0
        # NOTE(review): 0.001 * 100 == 0.1 while the timer counts whole
        # ticks, so the frame advances every other update -- confirm that
        # this is the intended animation speed
        self.animationTimerMax = .001 * 100  # seconds * 60 fps
        self.points = 0

    def move(self):
        """Advance position (via Meatball) and step the animation."""
        Meatball.move(self)
        self.animate()

    def animate(self):
        """Advance the walk-cycle frame once the animation timer elapses."""
        if self.animationTimer < self.animationTimerMax:
            self.animationTimer += 1
        else:
            self.animationTimer = 0
            if self.frame < self.maxFrame:
                self.frame += 1
            else:
                self.frame = 0
            self.image = self.images[self.frame]

    def go(self, direction):
        """Set the velocity according to a direction command string."""
        if direction == "up":
            self.speedy = -self.maxSpeed
        if direction == "down":
            self.speedy = self.maxSpeed
        if direction == "left":
            self.speedx = -self.maxSpeed
        if direction == "right":
            self.speedx = self.maxSpeed
        # 'stop *' commands zero out the matching axis
        if direction == "stop up":
            self.speedy = 0
        if direction == "stop down":
            self.speedy = 0
        if direction == "stop left":
            self.speedx = 0
        if direction == "stop right":
            self.speedx = 0

    def goMouse(self, pos):
        """Teleport the player to the mouse position."""
        self.rect.center = pos

    def bounceScreen(self, size):
        """Keep the player inside the (width, height) screen bounds."""
        width = size[0]
        height = size[1]
        if self.rect.left < 0 or self.rect.right > width:
            self.speedx = -self.speedx
            self.move()
            self.speedx = 0
            self.didBounceX = True
        if self.rect.top < 0 or self.rect.bottom > height:
            self.speedy = -self.speedy
            self.move()
            self.speedy = 0
            self.didBounceY = True

    def bounceMeatball(self, other):
        """On circle collision with *other*, absorb its points; returns
        True when a collision happened."""
        if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
            if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
                if self.dist(other.rect.center) < self.radius + other.radius:
                    self.points += other.points
                    return True
        return False

    def bounceWall(self, other):
        """Rect-overlap collision with a wall: reverse, step back, stop."""
        if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
            if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
                self.speedx = -self.speedx
                self.speedy = -self.speedy
                self.move()
                self.speedx = 0
                self.didBounceX = True
                self.speedy = 0
                self.didBounceY = True
|
robertwbrandt/zarafa | zarafa-tools/plugins/movetopublic.py | Python | gpl-2.0 | 3,569 | 0.006444 | import MAPI
from MAPI.Util import *
from MAPI.Time import *
from MAPI.Struct import *
from plugintemplates import *
import zconfig
class MoveToPublic(IMapiDAgentPlugin):
    """Dagent plugin that redirects mail for configured recipients into a
    public folder instead of the recipient's inbox."""

    prioPreDelivery = 50
    configfile = '/etc/zarafa/movetopublic.cfg'

    def __init__(self, logger):
        self.rulelist = {}
        IMapiDAgentPlugin.__init__(self, logger)
        self.Init()

    def Init(self):
        """Load recipient -> destination_folder rules from the config file."""
        config = zconfig.ZarafaConfigParser(self.configfile,
                                            defaultoptions={})
        # scan sections rule1..rule99; stop at the first missing/invalid one
        for i in range(1, 100, 1):
            try:
                data = config.getdict('rule' + str(i),
                                      ['recipient', 'destination_folder'])
                self.rulelist[data['recipient'].lower()] = data['destination_folder']
            except:
                break
        self.logger.logDebug("*--- Rule list %s" % self.rulelist)

    def PreDelivery(self, session, addrbook, store, folder, message):
        """Move the message into the configured public folder when a rule
        matches the receiving address; otherwise continue normal delivery."""
        props = message.GetProps([PR_RECEIVED_BY_EMAIL_ADDRESS_W], 0)
        if props[0].ulPropTag != PR_RECEIVED_BY_EMAIL_ADDRESS_W:
            self.logger.logError("!--- No received by emailaddress")
            return MP_CONTINUE,
        recipient = props[0].Value.lower()
        if recipient not in self.rulelist:
            self.logger.logInfo("*--- No rule for recipient '%s'" % recipient.encode('utf-8'))
            return MP_CONTINUE,
        publicstore = GetPublicStore(session)
        if publicstore == None:
            # no global public store: fall back to the company public store
            companyname = None
            storeprops = store.GetProps([PR_MAILBOX_OWNER_ENTRYID], 0)
            if storeprops[0].ulPropTag == PR_MAILBOX_OWNER_ENTRYID:
                user = addrbook.OpenEntry(storeprops[0].Value, None, 0)
                userprops = user.GetProps([PR_EC_COMPANY_NAME_W], 0)
                if userprops[0].ulPropTag == PR_EC_COMPANY_NAME_W:
                    companyname = userprops[0].Value
            if companyname == None:
                self.logger.logError("!--- Can not open a public store")
                return MP_CONTINUE,
            ema = store.QueryInterface(IID_IExchangeManageStore)
            publicstoreid = ema.CreateStoreEntryID(None, companyname, MAPI_UNICODE)
            publicstore = session.OpenMsgStore(0, publicstoreid, None, MDB_WRITE)
        publicfolders = publicstore.OpenEntry(publicstore.GetProps([PR_IPM_PUBLIC_FOLDERS_ENTRYID], 0)[0].Value, None, MAPI_MODIFY)
        folderlist = self.rulelist[recipient].split('/')
        if len(folderlist) == 0:
            self.logger.logWarn("!--- No folders in the rule of recipient '%s'" % recipient.encode('utf-8'))
            return MP_CONTINUE,
        # walk/create the configured folder path under the public root
        folder = publicfolders
        for foldername in folderlist:
            if len(foldername) > 0:
                folder = folder.CreateFolder(0, foldername, "Create by Move to Public plugin", None, OPEN_IF_EXISTS)
        # copy the message into the target folder and signal new mail
        msgnew = folder.CreateMessage(None, 0)
        tags = message.GetPropList(MAPI_UNICODE)
        message.CopyProps(tags, 0, None, IID_IMessage, msgnew, 0)
        msgnew.SaveChanges(0)
        folderid = folder.GetProps([PR_ENTRYID], 0)[0].Value
        msgid = msgnew.GetProps([PR_ENTRYID], 0)[0].Value
        publicstore.NotifyNewMail(NEWMAIL_NOTIFICATION(msgid, folderid, 0, None, 0))
        self.logger.logInfo("*--- Message moved to public folder '%s'" % (self.rulelist[recipient].encode('utf-8')))
        return MP_STOP_SUCCESS,
|
app-git-hub/SendTo | examples/save.py | Python | mit | 481 | 0.035343 | import sublime, sublime_plugin
class SaveAllExistingFilesCommand(sublime_plugin.ApplicationCommand):
    """Save every modified ("dirty") view that is backed by a file on disk,
    across all open Sublime windows; unsaved scratch buffers are skipped."""

    def run(self):
        for window in sublime.windows():
            self._save_files_in_window(window)

    def _save_files_in_window(self, window):
        """Save all eligible views in one window."""
        for view in window.views():
            self._save_existing_file_in_view(view)

    def _save_existing_file_in_view(self, view):
        """Trigger 'save' on *view* iff it has a filename and unsaved edits."""
        if view.file_name() and view.is_dirty():
            view.run_command("save")
r"""
append to file sublime plugin OR api
sublime save dirty file plugin | stackoverflow
""" |
fenderglass/ABruijn | flye/trestle/trestle.py | Python | bsd-3-clause | 138,871 | 0.002463 | #(c) 2016-2018 by Authors
#This file is a part of Flye program.
#Released under the BSD license (see LICENSE file)
"""
Created on Wed Jan 4 03:50:31 2017
@author: jeffrey_yuan
"""
from __future__ import absolute_import
from __future__ import division
import os
import logging
from itertools import combinations, product
import copy
import multiprocessing, signal
import flye.polishing.alignment as flye_aln
from flye.utils.sam_parser import SynchronizedSamReader, Alignment
import flye.utils.fasta_parser as fp
import flye.config.py_cfg as config
import flye.polishing.polish as pol
import flye.trestle.divergence as div
import flye.trestle.trestle_config as trestle_config
from flye.six.moves import range
from flye.six.moves import zip
logger = logging.getLogger()
def resolve_repeats(args, trestle_dir, repeats_info, summ_file,
                    resolved_repeats_seqs):
    """Resolve simple unbridged repeats with Trestle.

    Creates one working directory per repeat under *trestle_dir*, runs
    resolve_each_repeat() for each repeat in its own process (chunked by
    args.threads), then writes the combined resolved sequences and a
    summary file. Stray '|' artifacts that corrupted the worker-spawning
    code in the original have been removed.
    """
    all_file_names = define_file_names()
    all_labels, initial_file_names = all_file_names[0], all_file_names[2]
    all_resolved_reps_dict = {}
    all_summaries = []
    init_summary(summ_file)
    #1. Process repeats from graph - generates a folder for each repeat
    logger.debug("Finding unbridged repeats")
    process_outputs = process_repeats(args.reads, repeats_info,
                                      trestle_dir, all_labels,
                                      initial_file_names)
    repeat_list, repeat_edges, all_edge_headers = process_outputs
    logger.info("Simple unbridged repeats: %d", len(repeat_list))
    #if not repeat_list:
    #    return

    #Resolve every repeat in a separate process
    def _thread_worker(func_args, log_file, results_queue, error_queue):
        try:
            #each worker logs to a separate file
            log_formatter = \
                logging.Formatter("[%(asctime)s] %(name)s: %(levelname)s: "
                                  "%(message)s", "%Y-%m-%d %H:%M:%S")
            file_handler = logging.FileHandler(log_file, mode="a")
            file_handler.setFormatter(log_formatter)
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            logger.addHandler(file_handler)
            result = resolve_each_repeat(*func_args)
            results_queue.put(result)
        except Exception as e:
            error_queue.put(e)

    # split the repeats into chunks of at most args.threads jobs each
    job_chunks = [repeat_list[i:i + args.threads]
                  for i in range(0, len(repeat_list), args.threads)]
    for job_chunk in job_chunks:
        manager = multiprocessing.Manager()
        results_queue = manager.Queue()
        error_queue = manager.Queue()
        # threads available to each repeat within the chunk
        repeat_threads = max(1, args.threads // len(job_chunk))
        # ignore SIGINT while forking children so Ctrl-C is handled only by
        # the parent; the original handler is restored right after
        orig_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
        threads = []
        for rep_id in sorted(job_chunk):
            func_args = (rep_id, repeat_edges, all_edge_headers, args, trestle_dir,
                         repeats_info, all_file_names, repeat_threads)
            log_file = os.path.join(trestle_dir,
                                    "repeat_{0}".format(rep_id), "log.txt")
            threads.append(multiprocessing.Process(target=_thread_worker,
                                                   args=(func_args, log_file,
                                                         results_queue,
                                                         error_queue)))
        signal.signal(signal.SIGINT, orig_sigint)
        for t in threads:
            t.start()
        try:
            for t in threads:
                t.join()
                if t.exitcode == -9:
                    logger.error("Looks like the system ran out of memory")
                if t.exitcode != 0:
                    raise Exception("One of the processes exited with code: {0}"
                                    .format(t.exitcode))
        except KeyboardInterrupt:
            for t in threads:
                t.terminate()
            raise

        while not error_queue.empty():
            logger.warning("Non-critical error in trestle thread: " + str(error_queue.get()))
        #if not error_queue.empty():
        #    raise error_queue.get()

        while not results_queue.empty():
            resolved_dict, summary_list = results_queue.get()
            all_resolved_reps_dict.update(resolved_dict)
            all_summaries.extend(summary_list)
    fp.write_fasta_dict(all_resolved_reps_dict, resolved_repeats_seqs)
    # item 6 of each summary tuple flags whether the repeat was bridged
    num_resolved = 0
    for summ_items in all_summaries:
        if summ_items[6]:
            num_resolved += 1
        update_summary(summ_items, summ_file)
    logger.info("Resolved: %d", num_resolved)
def resolve_each_repeat(rep_id, repeat_edges, all_edge_headers, args,
trestle_dir, repeats_info, all_file_names,
num_threads):
SUB_THRESH = trestle_config.vals["sub_thresh"]
DEL_THRESH = trestle_config.vals["del_thresh"]
INS_THRESH = trestle_config.vals["ins_thresh"]
MAX_ITER = trestle_config.vals["max_iter"]
MIN_ALN_RATE = trestle_config.vals["min_aln_rate"]
NUM_POL_ITERS = trestle_config.vals["num_pol_iters"]
ORIENT_CONFIG = trestle_config.vals["orientations_to_run"]
zero_it = 0
(all_labels, pol_dir_names, initial_file_names,
pre_file_names, div_file_names, aln_names,
middle_file_names, output_file_names) = all_file_names
repeat_label, side_labels = all_labels
pol_temp_name, pol_ext_name, pol_cons_name = pol_dir_names
(template_name, extended_name, repeat_reads_name,
pre_partitioning_name) = initial_file_names
pre_edge_reads_name, pre_read_aln_name, partitioning_name = pre_file_names
div_freq_name, div_pos_name, div_summ_name = div_file_names
(reads_template_aln_name, cons_temp_aln_name,
cut_cons_temp_aln_name, reads_cons_aln_name) = aln_names
(confirmed_pos_name, edge_reads_name,
cut_cons_name, cons_vs_cons_name) = middle_file_names
(side_stats_name, int_stats_name, int_confirmed_pos_name,
resolved_rep_name, res_vs_res_name) = output_file_names
logger.info("Resolving repeat %d: %s",
rep_id, repeats_info[rep_id].repeat_path)
repeat_dir = os.path.join(trestle_dir,
repeat_label.format(rep_id))
run_orientations = []
if ORIENT_CONFIG == "forward":
run_orientations = [("forward", rep_id)]
elif ORIENT_CONFIG == "reverse":
run_orientations = [("reverse", -rep_id)]
elif ORIENT_CONFIG == "both":
run_orientations = [("forward", rep_id), ("reverse", -rep_id)]
repeat_bridged = False
resolved_dict = {}
summary_list = []
for orientation, rep in run_orientations:
logger.debug("Orientation: " + orientation)
orient_dir = os.path.join(repeat_dir, orientation)
template = os.path.join(orient_dir, template_name)
extended = os.path.join(orient_dir, extended_name)
repeat_reads = os.path.join(orient_dir, repeat_reads_name)
term_bool = {s:False for s in side_labels}
#2. Polish template and extended templates
logger.debug("Polishing templates")
pol_temp_dir = os.path.join(orient_dir, pol_temp_name)
if not os.path.isdir(pol_temp_dir):
os.mkdir(pol_temp_dir)
polished_template, _ = \
pol.polish(template, [repeat_reads], pol_temp_dir, NUM_POL_ITERS,
num_threads, args.platform, output_progress=False)
if not os.path.getsize(polished_template):
for side in side_labels:
term_bool[side] = True
polished_extended = {}
pol_ext_dir = os.path.join(orient_dir, pol_ext_name)
for side in side_labels:
for edge_id in repeat_edges[rep][side]:
if not os.path.isdir(pol_ext_dir.format(side, edge_id)):
os.mkdir(pol_ext_dir.format(side, edge_id))
pol_output, _ = \
pol.polish(extended.format(side, edge_id), [repeat_reads],
pol_ext_dir.format(side, edge_id), NUM_POL_ITERS,
num_threads, args.platform,
output_progress=False)
polished_extended[(side, edge_id)] = pol_o |
mhugent/Quantum-GIS | python/plugins/processing/algs/admintools/CreateWorkspace.py | Python | gpl-2.0 | 2,079 | 0 | # -*- coding: utf-8 -*-
"""
***************************************************************************
CreateWorkspace.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import *
from GeoServerToolsAlgorithm import \
| GeoServerToolsAlgorithm
from processing.parameters.ParameterString import ParameterString
from processing.outputs.OutputString import OutputString
class CreateWorkspace(GeoServerToolsAlgorithm):
    """Processing algorithm that creates a workspace on a GeoServer
    instance (name and namespace URI supplied as parameters)."""

    WORKSPACE = 'WORKSPACE'
    WORKSPACEURI = 'WORKSPACEURI'

    def processAlgorithm(self, progress):
        """Connect to the catalog and create the requested workspace."""
        self.createCatalog()
        workspaceName = self.getParameterValue(self.WORKSPACE)
        workspaceUri = self.getParameterValue(self.WORKSPACEURI)
        self.catalog.create_workspace(workspaceName, workspaceUri)

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and outputs."""
        self.addBaseParameters()
        self.name = 'Create workspace'
        self.group = 'GeoServer management tools'
        self.addParameter(ParameterString(self.WORKSPACE, 'Workspace'))
        self.addParameter(ParameterString(self.WORKSPACEURI, 'Workspace URI'))
        self.addOutput(OutputString(self.WORKSPACE, 'Workspace'))
|
jburos/survivalstan | test/test_exp_survival_model.py | Python | apache-2.0 | 1,679 | 0.032162 |
import matplotlib as mpl
mpl.use('Agg')
import survivalstan
from stancache import stancache
import numpy as np
from nose.tools import ok_
from functools import partial
num_iter = 1000
from .test_datasets import load_test_dataset
model_code = survivalstan.models.exp_survival_model
make_inits = None
def test_model():
    """Fit the exponential survival model ('age + sex') on the test dataset
    and check that the fit contains the expected result keys.

    Identifiers split by '|' artifacts in the corrupted source
    (load_test_dataset, event_col) have been rejoined.
    """
    d = load_test_dataset()
    testfit = survivalstan.fit_stan_survival_model(
        model_cohort = 'test model',
        model_code = model_code,
        df = d,
        time_col = 't',
        event_col = 'event',
        formula = 'age + sex',
        iter = num_iter,
        chains = 2,
        seed = 9001,
        make_inits = make_inits,
        FIT_FUN = stancache.cached_stan_fit,
    )
    ok_('fit' in testfit)
    ok_('coefs' in testfit)
    ok_('loo' in testfit)
    # plotting must not raise; trans=np.exp plots hazard ratios
    survivalstan.utils.plot_coefs([testfit])
    survivalstan.utils.plot_coefs([testfit], trans=np.exp)
    return(testfit)
def test_null_model(**kwargs):
    """Fit the intercept-only ('~ 1') exponential survival model on the
    test dataset and check that the fit contains the expected keys."""
    data = load_test_dataset()
    fit_kwargs = dict(
        model_cohort='test model',
        model_code=model_code,
        df=data,
        time_col='t',
        event_col='event',
        formula='~ 1',
        iter=num_iter,
        chains=2,
        seed=9001,
        make_inits=make_inits,
        FIT_FUN=stancache.cached_stan_fit,
    )
    testfit = survivalstan.fit_stan_survival_model(**fit_kwargs)
    for key in ('fit', 'coefs', 'loo'):
        ok_(key in testfit)
    # plotting must not raise; trans=np.exp plots hazard ratios
    survivalstan.utils.plot_coefs([testfit])
    survivalstan.utils.plot_coefs([testfit], trans=np.exp)
    return testfit
georgeyk/loafer | loafer/runners.py | Python | mit | 1,955 | 0.000512 | import asyncio
import logging
import signal
from concurrent.futures import CancelledError, ThreadPoolExecutor
from contextlib import suppress
logger = logging.getLogger(__name__)
class LoaferRunner:
    """Owns the asyncio event loop and a thread-pool executor; runs the loop
    until SIGINT/SIGTERM, then cancels outstanding tasks and shuts down.

    Stray '|' artifacts that corrupted two statements in the original
    (a debug log call and the stop-callback invocation) have been removed.
    """

    def __init__(self, max_workers=None, on_stop_callback=None):
        self._on_stop_callback = on_stop_callback
        # XXX: See https://github.com/python/asyncio/issues/258
        # The minimum value depends on the number of cores in the machine
        # See https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
        self._executor = ThreadPoolExecutor(max_workers)
        self.loop.set_default_executor(self._executor)

    @property
    def loop(self):
        return asyncio.get_event_loop()

    def start(self, debug=False):
        """Run the loop forever; stop cleanly on SIGINT/SIGTERM."""
        if debug:
            self.loop.set_debug(enabled=debug)
        self.loop.add_signal_handler(signal.SIGINT, self.prepare_stop)
        self.loop.add_signal_handler(signal.SIGTERM, self.prepare_stop)
        try:
            self.loop.run_forever()
        finally:
            self.stop()
            self.loop.close()
            logger.debug('loop.is_running={}'.format(self.loop.is_running()))
            logger.debug('loop.is_closed={}'.format(self.loop.is_closed()))

    def prepare_stop(self, *args):
        if self.loop.is_running():
            # signals loop.run_forever to exit in the next iteration
            self.loop.stop()

    def stop(self, *args, **kwargs):
        """Invoke the stop callback, cancel pending tasks, shut the pool."""
        logger.info('stopping Loafer ...')
        if callable(self._on_stop_callback):
            self._on_stop_callback()
        logger.info('cancel schedulled operations ...')
        # NOTE(review): asyncio.Task.all_tasks was removed in Python 3.9;
        # this code targets older runtimes (asyncio.all_tasks is the
        # modern replacement) -- confirm the supported Python versions
        for task in asyncio.Task.all_tasks(self.loop):
            task.cancel()
            if task.cancelled() or task.done():
                continue
            # drain the cancellation so the task actually finishes
            with suppress(CancelledError):
                self.loop.run_until_complete(task)
        self._executor.shutdown(wait=True)
|
Heteroskedastic/chills-pos | chills_pos/pos/management/__init__.py | Python | mit | 1,041 | 0 | from django.db.models.signals import post_migrate
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
def add_view_permissions(sender, **kwargs):
    """
    post_migrate hook that adds a "view" permission to all our
    content types (Django only creates add/change/delete by default).
    """
    # for each of our content types
    for content_type in ContentType.objects.all():
        # skip content types whose model class is no longer importable
        if not content_type.model:
            continue
        # build our permission slug
        codename = "view_%s" % content_type.model
        # if it doesn't exist..
        if not Permission.objects.filter(content_type=content_type,
                                         codename=codename).exists():
            # add it
            Permission.objects.create(content_type=content_type,
                                      codename=codename,
                                      name="Can view %s" % content_type.name)
            print("Added view permission for %s" % content_type.name)

post_migrate.connect(add_view_permissions)
denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_config.py | Python | mit | 43,114 | 0.00501 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import with_statement
import re
import os
import textwrap
import mock
import __builtin__
from zope.interface import implements
from twisted.trial import unittest
from twisted.application import service
from twisted.internet import defer
from buildbot import config, buildslave, interfaces, revlinks, locks
from buildbot.process import properties, factory
from buildbot.test.util import dirs, compat
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.changes import base as changes_base
from buildbot.schedulers import base as schedulers_base
from buildbot.status import base as status_base
# Baseline values that a freshly-constructed MasterConfig is expected to
# carry; MasterConfig.test_defaults below merges these into its expectations.
global_defaults = dict(
    title='Buildbot',
    titleURL='http://buildbot.net',
    buildbotURL='http://localhost:8080/',
    changeHorizon=None,
    eventHorizon=50,
    logHorizon=None,
    buildHorizon=None,
    logCompressionLimit=4096,
    logCompressionMethod='bz2',
    logMaxTailSize=None,
    logMaxSize=None,
    properties=properties.Properties(),
    mergeRequests=None,
    prioritizeBuilders=None,
    slavePortnum=None,
    multiMaster=False,
    debugPassword=None,
    manhole=None,
)
class FakeChangeSource(changes_base.ChangeSource):
    """Minimal ChangeSource subclass used as a test double."""
    pass
class FakeStatusReceiver(status_base.StatusReceiver):
    """Minimal StatusReceiver subclass used as a test double."""
    pass
class FakeScheduler(object):
    """Test double providing IScheduler and carrying only a name."""
    implements(interfaces.IScheduler)

    def __init__(self, name):
        self.name = name
class FakeBuilder(object):
    """Test double whose attributes come straight from constructor keywords."""

    def __init__(self, **kwargs):
        # Mirror every keyword argument as an instance attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class ConfigErrors(unittest.TestCase):
    """Unit tests for config.ConfigErrors and the config.error() helper."""

    def test_constr(self):
        errors = config.ConfigErrors(['a', 'b'])
        self.assertEqual(errors.errors, ['a', 'b'])

    def test_addError(self):
        errors = config.ConfigErrors(['a'])
        errors.addError('c')
        self.assertEqual(errors.errors, ['a', 'c'])

    def test_nonempty(self):
        # no errors -> falsy; at least one error -> truthy
        self.failIf(config.ConfigErrors())
        self.failUnless(config.ConfigErrors(['a']))

    def test_error_raises(self):
        exc = self.assertRaises(config.ConfigErrors, config.error, "message")
        self.assertEqual(exc.errors, ["message"])

    def test_error_no_raise(self):
        # with a collector patched in, error() accumulates instead of raising
        collector = config.ConfigErrors()
        self.patch(config, "_errors", collector)
        config.error("message")
        self.assertEqual(collector.errors, ["message"])

    def test_str(self):
        self.assertEqual(str(config.ConfigErrors()), "")
        self.assertEqual(str(config.ConfigErrors(["a"])), "a")
        self.assertEqual(str(config.ConfigErrors(["a", "b"])), "a\nb")
        errors = config.ConfigErrors(["a"])
        errors.addError('c')
        self.assertEqual(str(errors), "a\nc")
class MasterConfig(ConfigErrorsMixin, dirs.DirsMixin, unittest.TestCase):
def setUp(self):
self.basedir = os.path.abspath('basedir')
self.filename = os.path.join(self.basedir, 'test.cfg')
return self.setUpDirs('basedir')
def tearDown(self):
return self.tearDownDirs()
# utils
def patch_load_helpers(self):
# patch out all of the "helpers" for laodConfig with null functions
for n in dir(config.MasterConfig):
if n.startswith('load_'):
typ = 'loader'
elif n.startswith('check_'):
typ = 'checker'
else:
continue
v = getattr(config.MasterConfig, n)
if callable(v):
if typ == 'loader':
self.patch(config.MasterConfig, n,
mock.Mock(side_effect=
lambda filename, config_dict: None))
else:
self.patch(config.MasterConfig, n,
mock.Mock(side_effect=
lambda: None))
def install_config_file(self, config_file, other_files={}):
config_file = textwrap.dedent(config_file)
with open(os.path.join(self.basedir, self.filename), "w") as f:
f.write(config_file)
for file, contents in other_files.items():
with open(file, "w") as f:
f.write(contents)
# tests
def test_defaults(self):
cfg = config.MasterConfig()
expected = dict(
#validation,
db=dict(
db_url='sqlite:///state.sqlite',
db_poll_interval=None),
metrics = None,
caches = dict(Changes=10, Builds=15),
schedulers = {},
builders = [],
slaves = [],
change_sources = [],
status = [],
user_managers = [],
revlink = revlinks.default_revlink_matcher
)
expected.update(global_defaults)
got = dict([
(attr, getattr(cfg, attr))
for attr, exp in expected.iteritems() ])
self.assertEqual(got, expected)
def test_defaults_validation(self):
# re's aren't comparable, but we can make sure the keys match
cfg = config.MasterConfig()
self.assertEqual(sorted(cfg.validation.keys()),
sorted([
'branch', 'revision', 'property_name', 'property_value',
]))
def test_loadConfig_missing_file(self):
self.assertRaisesConfigError(
re.compile("configuration file .* does not exist"),
lambda : config.MasterConfig.loadConfig(
self.basedir, self.filename))
def test_loadConfig_missing_basedir(self):
self.assertRaisesConfigError(
re.compile("basedir .* does not exist"),
lambda : config.MasterConfig.loadConfig(
os.path.join(self.basedir, 'NO'), 'test.cfg'))
def test_loadConfig_open_error(self):
"""
Check that loadConfig() raises correct ConfigError exception in cases
when configure file is found, but we fail to open it.
"""
def raise_IOError(*args):
raise IOError("error_msg")
self.install_config_file('#dummy')
# override build-in open() function to always rise IOError
self.patch(__builtin__, "open", raise_IOError)
# check that we got the expected ConfigError exception
self.assertRaisesConfigError(
re.compile("unable to o | pen configuration | file .*: error_msg"),
lambda : config.MasterConfig.loadConfig(
self.basedir, self.filename))
@compat.usesFlushLoggedErrors
def test_loadConfig_parse_error(self):
self.install_config_file('def x:\nbar')
self.assertRaisesConfigError(
re.compile("error while parsing.*traceback in logfile"),
lambda : config.MasterConfig.loadConfig(
self.basedir, self.filename))
self.assertEqual(len(self.flushLoggedErrors(SyntaxError)), 1)
def test_loadConfig_eval_ConfigError(self):
self.install_config_file("""\
from buildbot import config
BuildmasterConfig = { 'multiMaster': True }
config.error('oh noes!')""")
self.assertRaisesConfigError("oh noes",
lambda : config.MasterConfig.loadConfig(
self.basedir, self.filename))
def test_loadConfig_eval_ConfigErrors(self):
# We test a config that has embedded errors, as well
# as semantic errors that get added later. If an exception is raised
# prematurely, then the semantic errors wouldn't get reported |
hanamvu/C4E11 | SS3/clothes_shop.py | Python | gpl-3.0 | 1,117 | 0.038496 | clouthes = ["T-Shirt","Sweater"]
print("Hello, welcome to my shop\n")
while (True):
comment = input("Welcome to our shop, what do you want (C, R, U, D)? ")
if comment.upper()=="C":
new_item = input("Enter new item: ")
clouthes.append(new_item.capitalize())
elif comment.upper()=="R":
print(end='')
elif comment.upper()=="U":
pos = int(input("Update position? "))
if pos <= len(clouthes):
new_item = input("Enter new item: ")
clouthes[pos-1] = new_item.capitalize()
else:
print("Sorry, your item is out of sale!")
elif comment.upper()=="D":
pos | = int(input("Delete position? "))
if pos <= len(c | louthes):
clouthes.pop(pos-1)
else:
print("Sorry, your item is out of sale!")
else:
print("Allahu akbar! We're in reconstructing and can't serve you. See you again!")
# items =[", "+clouthe for clouthe in clouthes if clouthes.index(clouthe)>0]
# items.insert(0,clouthes[0])
# print("Our items: {0}".format(items))
# print("\n")
print("Our items: ",end='')
for item in clouthes:
if clouthes.index(item)<len(clouthes)-1:
print(item,end=', ')
else:
print(item+"\n")
|
jbaiter/beets | test/test_mediafile.py | Python | mit | 9,378 | 0.001493 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Specific, edge-case tests for the MediaFile metadata layer.
"""
import os
import shutil
import _common
from _common import unittest
import beets.mediafile
class EdgeTest(unittest.TestCase):
    """Regression tests for odd tag values found in real-world files."""

    def test_emptylist(self):
        # Some files have an ID3 frame that has a list with no elements.
        # This is very hard to produce, so this is just the first 8192
        # bytes of a file found "in the wild".
        emptylist = beets.mediafile.MediaFile(
            os.path.join(_common.RSRC, 'emptylist.mp3'))
        genre = emptylist.genre
        self.assertEqual(genre, '')

    def test_release_time_with_space(self):
        # Ensures that release times delimited by spaces are ignored.
        # Amie Street produces such files.
        space_time = beets.mediafile.MediaFile(
            os.path.join(_common.RSRC, 'space_time.mp3'))
        self.assertEqual(space_time.year, 2009)
        self.assertEqual(space_time.month, 9)
        self.assertEqual(space_time.day, 4)

    def test_release_time_with_t(self):
        # Ensures that release times delimited by Ts are ignored.
        # The iTunes Store produces such files.
        t_time = beets.mediafile.MediaFile(
            os.path.join(_common.RSRC, 't_time.m4a'))
        self.assertEqual(t_time.year, 1987)
        self.assertEqual(t_time.month, 3)
        self.assertEqual(t_time.day, 31)

    def test_tempo_with_bpm(self):
        # Some files have a string like "128 BPM" in the tempo field
        # rather than just a number.
        f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'bpm.mp3'))
        self.assertEqual(f.bpm, 128)

    def test_discc_alternate_field(self):
        # Different taggers use different vorbis comments to reflect
        # the disc and disc count fields: ensure that the alternative
        # style works.
        f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'discc.ogg'))
        self.assertEqual(f.disc, 4)
        self.assertEqual(f.disctotal, 5)

    def test_old_ape_version_bitrate(self):
        # Old Monkey's Audio files report no usable bitrate.
        f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'oldape.ape'))
        self.assertEqual(f.bitrate, 0)
# Short alias for the module-private coercion helper exercised below.
_sc = beets.mediafile._safe_cast

class InvalidValueToleranceTest(unittest.TestCase):
    """Malformed tag values must be coerced to sane defaults, not crash."""

    def test_packed_integer_with_extra_chars(self):
        pack = beets.mediafile.Packed("06a", beets.mediafile.packing.SLASHED)
        self.assertEqual(pack[0], 6)

    def test_packed_integer_invalid(self):
        pack = beets.mediafile.Packed("blah", beets.mediafile.packing.SLASHED)
        self.assertEqual(pack[0], 0)

    def test_packed_index_out_of_range(self):
        pack = beets.mediafile.Packed("06", beets.mediafile.packing.SLASHED)
        self.assertEqual(pack[1], 0)

    def test_safe_cast_string_to_int(self):
        self.assertEqual(_sc(int, 'something'), 0)

    def test_safe_cast_int_string_to_int(self):
        self.assertEqual(_sc(int, '20'), 20)

    def test_safe_cast_string_to_bool(self):
        self.assertEqual(_sc(bool, 'whatever'), False)

    def test_safe_cast_intstring_to_bool(self):
        self.assertEqual(_sc(bool, '5'), True)

    def test_safe_cast_string_to_float(self):
        self.assertAlmostEqual(_sc(float, '1.234'), 1.234)

    def test_safe_cast_int_to_float(self):
        self.assertAlmostEqual(_sc(float, 2), 2.0)

    def test_safe_cast_string_with_cruft_to_float(self):
        self.assertAlmostEqual(_sc(float, '1.234stuff'), 1.234)

    def test_safe_cast_negative_string_to_float(self):
        self.assertAlmostEqual(_sc(float, '-1.234'), -1.234)

    def test_safe_cast_special_chars_to_unicode(self):
        # Python 2: `unicode` is the built-in text type here.
        us = _sc(unicode, 'caf\xc3\xa9')
        self.assertTrue(isinstance(us, unicode))
        self.assertTrue(us.startswith(u'caf'))
class SafetyTest(unittest.TestCase):
    """Unreadable or mistyped files must raise the documented exceptions."""

    def _exccheck(self, fn, exc, data=''):
        # Write `data` to a scratch file named `fn` inside the resource
        # directory and assert that opening it as a MediaFile raises `exc`.
        fn = os.path.join(_common.RSRC, fn)
        with open(fn, 'w') as f:
            f.write(data)
        try:
            self.assertRaises(exc, beets.mediafile.MediaFile, fn)
        finally:
            os.unlink(fn)  # delete the temporary file

    def test_corrupt_mp3_raises_unreadablefileerror(self):
        # Make sure we catch Mutagen reading errors appropriately.
        self._exccheck('corrupt.mp3', beets.mediafile.UnreadableFileError)

    def test_corrupt_mp4_raises_unreadablefileerror(self):
        self._exccheck('corrupt.m4a', beets.mediafile.UnreadableFileError)

    def test_corrupt_flac_raises_unreadablefileerror(self):
        self._exccheck('corrupt.flac', beets.mediafile.UnreadableFileError)

    def test_corrupt_ogg_raises_unreadablefileerror(self):
        self._exccheck('corrupt.ogg', beets.mediafile.UnreadableFileError)

    def test_invalid_ogg_header_raises_unreadablefileerror(self):
        self._exccheck('corrupt.ogg', beets.mediafile.UnreadableFileError,
                       'OggS\x01vorbis')

    def test_corrupt_monkeys_raises_unreadablefileerror(self):
        self._exccheck('corrupt.ape', beets.mediafile.UnreadableFileError)

    def test_invalid_extension_raises_filetypeerror(self):
        self._exccheck('something.unknown', beets.mediafile.FileTypeError)

    def test_magic_xml_raises_unreadablefileerror(self):
        self._exccheck('nothing.xml', beets.mediafile.UnreadableFileError,
                       "ftyp")

    def test_broken_symlink(self):
        fn = os.path.join(_common.RSRC, 'brokenlink')
        os.symlink('does_not_exist', fn)
        try:
            self.assertRaises(beets.mediafile.UnreadableFileError,
                              beets.mediafile.MediaFile, fn)
        finally:
            os.unlink(fn)
class SideEffectsTest(unittest.TestCase):
    """Opening a file for reading must not modify it on disk."""

    def setUp(self):
        self.empty = os.path.join(_common.RSRC, 'empty.mp3')

    def test_opening_tagless_file_leaves_untouched(self):
        # Compare mtimes before and after constructing a MediaFile.
        old_mtime = os.stat(self.empty).st_mtime
        beets.mediafile.MediaFile(self.empty)
        new_mtime = os.stat(self.empty).st_mtime
        self.assertEqual(old_mtime, new_mtime)
class EncodingTest(unittest.TestCase):
    """Non-ASCII tag values must survive a save/reload round trip."""

    def setUp(self):
        # Work on a throwaway copy so the fixture file stays pristine.
        src = os.path.join(_common.RSRC, 'full.m4a')
        self.path = os.path.join(_common.RSRC, 'test.m4a')
        shutil.copy(src, self.path)
        self.mf = beets.mediafile.MediaFile(self.path)

    def tearDown(self):
        os.remove(self.path)

    def test_unicode_label_in_m4a(self):
        self.mf.label = u'foo\xe8bar'
        self.mf.save()
        new_mf = beets.mediafile.MediaFile(self.path)
        self.assertEqual(new_mf.label, u'foo\xe8bar')
class ZeroLengthMediaFile(beets.mediafile.MediaFile):
    """MediaFile variant that always reports a duration of zero,
    used to exercise the degenerate-length code paths below."""

    @property
    def length(self):
        return 0.0
class MissingAudioDataTest(unittest.TestCase):
    """Derived properties must degrade gracefully when audio info is absent."""

    def setUp(self):
        super(MissingAudioDataTest, self).setUp()
        path = os.path.join(_common.RSRC, 'full.mp3')
        self.mf = ZeroLengthMediaFile(path)

    def test_bitrate_with_zero_length(self):
        del self.mf.mgfile.info.bitrate  # Not available directly.
        self.assertEqual(self.mf.bitrate, 0)
class TypeTest(unittest.TestCase):
    """Assigned tag values must be coerced to their canonical types."""

    def setUp(self):
        super(TypeTest, self).setUp()
        path = os.path.join(_common.RSRC, 'full.mp3')
        self.mf = beets.mediafile.MediaFile(path)

    def test_year_integer_in_string(self):
        self.mf.year = '2009'
        self.assertEqual(self.mf.year, 2009)

    def test_set_replaygain_gain_to_none(self):
        # None is normalized to a neutral 0.0 dB gain.
        self.mf.rg_track_gain = None
        self.assertEqual(self.mf.rg_track_gain, 0.0)
|
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/staticfiles/finders.py | Python | bsd-3-clause | 9,183 | 0.000653 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.datastructures import SortedDict
from django.utils.functional import memoize, LazyObject
from django.utils.importlib import import_module
from django.utils._os import safe_join
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.storage import AppStaticStorage
# Module-level cache of finder instances, keyed by dotted path
# (presumably filled by a memoized accessor further down the file —
# not visible in this chunk).
_finders = SortedDict()
class BaseFinder(object):
    """
    Abstract base class for custom staticfiles finder implementations.
    Concrete finders must override both ``find()`` and ``list()``.
    """

    def find(self, path, all=False):
        """
        Locate the absolute path (or paths) for a relative file path.

        With ``all=False`` (the default) only the first matching
        absolute path is returned; with ``all=True`` a list of every
        found path is returned instead.
        """
        raise NotImplementedError()

    def list(self, ignore_patterns=[]):
        """
        Given an optional list of paths to ignore, yield two-item
        entries of the relative path and the storage instance.
        """
        raise NotImplementedError()
class FileSystemFinder(BaseFinder):
    """
    A static files finder that uses the ``STATICFILES_DIRS`` setting
    to locate files.
    """
    def __init__(self, apps=None, *args, **kwargs):
        # List of locations with static files
        self.locations = []
        # Maps dir paths to an appropriate storage instance
        self.storages = SortedDict()
        if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
            raise ImproperlyConfigured(
                "Your STATICFILES_DIRS setting is not a tuple or list; "
                "perhaps you forgot a trailing comma?")
        for root in settings.STATICFILES_DIRS:
            # Each entry is either a plain path or a (prefix, path) pair.
            if isinstance(root, (list, tuple)):
                prefix, root = root
            else:
                prefix = ''
            # Serving out of STATIC_ROOT itself would make collectstatic
            # circular, so reject that configuration outright.
            if os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
                raise ImproperlyConfigured(
                    "The STATICFILES_DIRS setting should "
                    "not contain the STATIC_ROOT setting")
            if (prefix, root) not in self.locations:
                self.locations.append((prefix, root))
        for prefix, root in self.locations:
            filesystem_storage = FileSystemStorage(location=root)
            filesystem_storage.prefix = prefix
            self.storages[root] = filesystem_storage
        super(FileSystemFinder, self).__init__(*args, **kwargs)

    def find(self, path, all=False):
        """
        Looks for files in the extra locations
        as defined in ``STATICFILES_DIRS``.
        """
        matches = []
        for prefix, root in self.locations:
            matched_path = self.find_location(root, path, prefix)
            if matched_path:
                if not all:
                    return matched_path
                matches.append(matched_path)
        return matches

    def find_location(self, root, path, prefix=None):
        """
        Finds a requested static file in a location, returning the found
        absolute path (or ``None`` if no match).
        """
        if prefix:
            prefix = '%s%s' % (prefix, os.sep)
            if not path.startswith(prefix):
                return None
            path = path[len(prefix):]
        # safe_join refuses paths that escape the root directory.
        path = safe_join(root, path)
        if os.path.exists(path):
            return path

    def list(self, ignore_patterns):
        """
        List all files in all locations.
        """
        for prefix, root in self.locations:
            storage = self.storages[root]
            for path in utils.get_files(storage, ignore_patterns):
                yield path, storage
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute of the given storage class.
    """
    storage_class = AppStaticStorage

    def __init__(self, apps=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app module paths to storage instances
        self.storages = SortedDict()
        if apps is None:
            apps = settings.INSTALLED_APPS
        for app in apps:
            app_storage = self.storage_class(app)
            # Only register apps that actually ship a static directory.
            if os.path.isdir(app_storage.location):
                self.storages[app] = app_storage
                if app not in self.apps:
                    self.apps.append(app)
        super(AppDirectoriesFinder, self).__init__(*args, **kwargs)

    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        """
        for storage in self.storages.itervalues():
            if storage.exists(''):  # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage

    def find(self, path, all=False):
        """
        Looks for files in the app directories.
        """
        matches = []
        for app in self.apps:
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches

    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.
        Returns the absolute path, or None when the app doesn't serve it.
        """
        storage = self.storages.get(app, None)
        if storage:
            if storage.prefix:
                prefix = '%s%s' % (storage.prefix, os.sep)
                if not path.startswith(prefix):
                    return None
                path = path[len(prefix):]
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
class BaseStorageFinder(BaseFinder):
    """
    A base static files finder to be used to extended
    with an own storage class.
    """
    # Storage class or instance; must be provided by a subclass or at
    # construction time.
    storage = None

    def __init__(self, storage=None, *args, **kwargs):
        if storage is not None:
            self.storage = storage
        if self.storage is None:
            raise ImproperlyConfigured("The staticfiles storage finder %r "
                                       "doesn't have a storage class "
                                       "assigned." % self.__class__)
        # Make sure we have an storage instance here.
        if not isinstance(self.storage, (Storage, LazyObject)):
            self.storage = self.storage()
        super(BaseStorageFinder, self).__init__(*args, **kwargs)

    def find(self, path, all=False):
        """
        Looks for files in the default file storage, if it's local.
        """
        try:
            # Non-local storages raise NotImplementedError from path();
            # in that case this finder cannot find anything.
            self.storage.path('')
        except NotImplementedError:
            pass
        else:
            if self.storage.exists(path):
                match = self.storage.path(path)
                if all:
                    match = [match]
                return match
        return []

    def list(self, ignore_patterns):
        """
        List all files of the storage.
        """
        for path in utils.get_files(self.storage, ignore_patterns):
            yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
    """
    A static files finder that uses the default storage backend.
    """
    # Reuses BaseStorageFinder's find()/list() against default_storage.
    storage = default_storage
def find(path, all=False):
    """
    Find a static file with the given path using all enabled finders.

    If ``all`` is ``False`` (default), return the first matching
    absolute path (or ``None`` if no match). Otherwise return a list.
    """
    matches = []
    for finder in get_finders():
        result = finder.find(path, all=all)
        if not all and result:
            # Short-circuit on the first hit when a single path was asked for.
            return result
        # Normalize single results so extend() always gets a sequence.
        if not isinstance(result, (list, tuple)):
            result = [result]
        matches.extend(result)
    if matches:
        return matches
    # No match.
maartenbreddels/vaex | .releash.py | Python | mit | 3,029 | 0.006933 | from releash import *
# these objects only tag when they are executed
gitpush = ReleaseTargetGitPush()

# core package: vaex-core carries the canonical version file
core = add_package("packages/vaex-core", "vaex-core")
version_core = VersionSource(core, '{path}/vaex/core/_version.py')
gittag_core = ReleaseTargetGitTagVersion(version_source=version_core, prefix='core-v')
core.version_source = version_core
core.version_targets.append(VersionTarget(core, '{path}/vaex/core/_version.py'))
# also rewrite the version string referenced in vaex-meta's setup.py
core.version_targets.append(VersionTargetReplace(core, [
    'packages/vaex-meta/setup.py',
]))
def add_version_replace(package):
    """Register version-string replacement targets for a package.

    For pre-releases we always bump all requirements that are exact
    matches; stable versions (no prerelease component) are left alone.
    """
    if not package.version_source.semver['prerelease']:
        return
    # Iterate the *words* of the marker list. Iterating the bare string
    # ("dev alpha beta rc") would test single characters and match
    # nearly any prerelease tag.
    markers = "dev alpha beta rc".split()
    if any(k in package.version_source.semver['prerelease'] for k in markers):
        package.version_targets.append(VersionTargetReplace(package, [
            'packages/vaex-meta/setup.py',
            'packages/vaex-arrow/setup.py',
            'packages/vaex-graphql/setup.py',
            'packages/vaex-hdf5/setup.py',
            'packages/vaex-jupyter/setup.py',
            'packages/vaex-ml/setup.py',
            'packages/vaex-server/setup.py',
            'packages/vaex-viz/setup.py',
        ], pattern='{name}(?P<cmp>[^0-9]*)' + str(package.version_source), ))
add_version_replace(core)
core.tag_targets.append(gittag_core)
core.release_targets.append(ReleaseTargetSourceDist(core))
#core.release_targets.append(gitpush)
core.release_targets.append(ReleaseTargetCondaForge(core, '../feedstocks/vaex-core-feedstock'))

# Register every sub-package (vaex-core is handled above).
packages = ['vaex-core', 'vaex-meta', 'vaex-viz', 'vaex-hdf5', 'vaex-server', 'vaex-astro', 'vaex-ui', 'vaex-jupyter', 'vaex-ml', 'vaex-graphql']
names = [k[5:] for k in packages[1:]]
for name in names:
    if name == 'meta':
        # vaex-meta is published under the plain 'vaex' distribution name
        package = add_package("packages/vaex-" + name, "vaex-" + name, 'vaex.' + name, distribution_name='vaex')
    else:
        package = add_package("packages/vaex-" + name, "vaex-" + name, 'vaex.' + name)
    # the version source is the same for both branches, so build it once
    version = VersionSource(package, '{path}/vaex/' + name + '/_version.py')
    gittag = ReleaseTargetGitTagVersion(version_source=version, prefix=name + '-v', msg='Release {version} of vaex-' + name)
    package.version_source = version
    package.version_targets.append(VersionTarget(package, '{path}/vaex/' + name + '/_version.py'))
    add_version_replace(package)
    # it is ok to add this twice, it will only tag once
    package.tag_targets.append(gittag)
    package.release_targets.append(ReleaseTargetSourceDist(package))
    # also ok to add twice, it will only execute for the last package
    package.release_targets.append(gitpush)
    #if name in ['hdf5', 'viz']:
    if name == 'meta':
        package.release_targets.append(ReleaseTargetCondaForge(package, '../feedstocks/vaex' + '-feedstock'))
    else:
        package.release_targets.append(ReleaseTargetCondaForge(package, '../feedstocks/vaex-' + name + '-feedstock'))
kingvuplus/EGAMI-D | lib/python/Plugins/Extensions/PicturePlayer/ui.py | Python | gpl-2.0 | 21,962 | 0.027138 | from boxbranding import getMachineBrand
from enigma import ePicLoad, eTimer, getDesktop, gMainDC, eSize
from Screens.Screen import Screen
from Tools.Directories import resolveFilename, pathExists, SCOPE_MEDIA, SCOPE_ACTIVE_SKIN
from Components.Pixmap import Pixmap, MovingPixmap
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.FileList import FileList
from Components.AVSwitch import AVSwitch
from Components.Sources.List import List
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSelection, ConfigText, ConfigYesNo, getConfigListEntry
import skin
def getScale():
    # Framebuffer scale pair from AVSwitch; consumed as (sc[0], sc[1])
    # when configuring ePicLoad (see picshow.setConf below).
    return AVSwitch().getFramebufferScale()
# Persistent picture-player settings (saved via config.pic.save() on exit).
config.pic = ConfigSubsection()
config.pic.framesize = ConfigInteger(default=30, limits=(5, 99))
config.pic.slidetime = ConfigInteger(default=10, limits=(1, 60))
config.pic.resize = ConfigSelection(default="1", choices = [("0", _("simple")), ("1", _("better"))])
config.pic.cache = ConfigYesNo(default=True)
# last browsed directory, restored when the screen reopens
config.pic.lastDir = ConfigText(default=resolveFilename(SCOPE_MEDIA))
config.pic.infoline = ConfigYesNo(default=True)
config.pic.loop = ConfigYesNo(default=True)
config.pic.bgcolor = ConfigSelection(default="#00000000", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
config.pic.textcolor = ConfigSelection(default="#0038FF48", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
class picshow(Screen):
    """Directory-browser screen of the picture player.

    Shows a file list with a thumbnail and info text for the current
    selection, and dispatches to the thumbnail grid (Pic_Thumb), the
    fullscreen viewer (Pic_Full_View), the EXIF screen and the setup
    screen.
    """
    skin = """
	<screen name="picshow" position="center,center" size="560,440" title="Picture player" >
	<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
	<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
	<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
	<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
	<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
	<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
	<widget source="label" render="Label" position="5,55" size="350,140" font="Regular;19" backgroundColor="#25062748" transparent="1" />
	<widget name="thn" position="360,40" size="180,160" alphatest="on" />
	<widget name="filelist" position="5,205" zPosition="2" size="550,230" scrollbarMode="showOnDemand" />
	</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)

        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MenuActions"],
        {
            "cancel": self.KeyExit,
            "red": self.KeyExit,
            "green": self.KeyGreen,
            "yellow": self.KeyYellow,
            "menu": self.KeyMenu,
            "ok": self.KeyOk
        }, -1)

        self["key_red"] = StaticText(_("Close"))
        self["key_green"] = StaticText(_("Thumbnails"))
        self["key_yellow"] = StaticText("")
        self["label"] = StaticText("")
        self["thn"] = Pixmap()

        # Restore the last browsed directory; fall back to root if gone.
        currDir = config.pic.lastDir.value
        if not pathExists(currDir):
            currDir = "/"

        self.filelist = FileList(currDir, matchingPattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp|gif)")
        self["filelist"] = self.filelist
        self["filelist"].onSelectionChanged.append(self.selectionChanged)

        # Debounce timer: the thumbnail decode is kicked off half a second
        # after the selection settles (see selectionChanged/showThumb).
        self.ThumbTimer = eTimer()
        self.ThumbTimer.callback.append(self.showThumb)
        # Async decoder; showPic is invoked when picture data is ready.
        self.picload = ePicLoad()
        self.picload.PictureData.get().append(self.showPic)

        self.onLayoutFinish.append(self.setConf)

    def showPic(self, picInfo=""):
        """Decoder callback: display the thumbnail and its info text."""
        ptr = self.picload.getData()
        if ptr is not None:
            self["thn"].instance.setPixmap(ptr.__deref__())
            self["thn"].show()

        # First line of picInfo is the path; the rest is shown as info.
        text = picInfo.split('\n',1)
        self["label"].setText(text[1])
        self["key_yellow"].setText(_("Exif"))

    def showThumb(self):
        """Start decoding a thumbnail for the currently selected file."""
        if not self.filelist.canDescent():
            if self.filelist.getCurrentDirectory() and self.filelist.getFilename():
                # Return value 1 means "busy": retry shortly.
                if self.picload.getThumbnail(self.filelist.getCurrentDirectory() + self.filelist.getFilename()) == 1:
                    self.ThumbTimer.start(500, True)

    def selectionChanged(self):
        if not self.filelist.canDescent():
            self.ThumbTimer.start(500, True)
        else:
            # Directories get no thumbnail/info.
            self["label"].setText("")
            self["thn"].hide()
            self["key_yellow"].setText("")

    def KeyGreen(self):
        #if not self.filelist.canDescent():
        self.session.openWithCallback(self.callbackView, Pic_Thumb, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())

    def KeyYellow(self):
        if not self.filelist.canDescent():
            self.session.open(Pic_Exif, self.picload.getInfo(self.filelist.getCurrentDirectory() + self.filelist.getFilename()))

    def KeyMenu(self):
        self.session.openWithCallback(self.setConf ,Pic_Setup)

    def KeyOk(self):
        # Enter a directory, or open the fullscreen viewer on a file.
        if self.filelist.canDescent():
            self.filelist.descent()
        else:
            self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())

    def setConf(self, retval=None):
        self.setTitle(_("Picture player"))
        sc = getScale()
        #0=Width 1=Height 2=Aspect 3=use_cache 4=resize_type 5=Background(#AARRGGBB)
        self.picload.setPara((self["thn"].instance.size().width(), self["thn"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), "#00000000"))

    def callbackView(self, val=0):
        # Viewer screens report the index they stopped on; follow it.
        if val > 0:
            self.filelist.moveToIndex(val)

    def KeyExit(self):
        del self.picload

        # Persist the directory for the next session.
        if self.filelist.getCurrentDirectory() is None:
            config.pic.lastDir.setValue("/")
        else:
            config.pic.lastDir.setValue(self.filelist.getCurrentDirectory())

        config.pic.save()
        self.close()
#------------------------------------------------------------------------------------------
class Pic_Setup(Screen, ConfigListScreen):
def __init__(self, session):
Screen.__init__(self, session)
self.setTitle(_("PicturePlayer"))
# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
self.skinName = ["PicturePlayerSetup", "Setup"]
self.setup_title = _("Settings")
self.onChangedEntry = []
self.session = session
ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", | "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keySave,
"ok": self.keySave,
"menu": self.clo | seRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
	def layoutFinished(self):
		"""Set the window title once the skin layout is ready."""
		self.setTitle(self.setup_title)
	def createSetup(self):
		"""(Re)build the ConfigList entries from the config.pic settings."""
		setup_list = [
			getConfigListEntry(_("Slide show interval (sec.)"), config.pic.slidetime),
			getConfigListEntry(_("Scaling mode"), config.pic.resize),
			getConfigListEntry(_("Cache thumbnails"), config.pic.cache),
			getConfigListEntry(_("Show info line"), config.pic.infoline),
			getConfigListEntry(_("Frame size in full view"), config.pic.framesize),
			getConfigListEntry(_("Slide picture in loop"), config.pic.loop),
			getConfigListEntry(_("Background color"), config.pic.bgcolor),
			getConfigListEntry(_("Text color"), config.pic.textcolor),
			# NOTE(review): "Fulview resulution" is misspelled, but it is a
			# translatable UI string; correcting it would change the msgid.
			getConfigListEntry(_("Fulview resulution"), config.usage.pic_resolution),
		]
		self["config"].list = setup_list
		self["config"].l.setList(setup_list)
	def keyLeft(self):
		"""Delegate to ConfigListScreen (decrement the current entry)."""
		ConfigListScreen.keyLeft(self)
	def keyRight(self):
		"""Delegate to ConfigListScreen (increment the current entry)."""
		ConfigListScreen.keyRight(self)
	def keyCancel(self):
		"""Close the dialog without an explicit save step."""
		self.close()
	# for summary:
	def changedEntry(self):
		"""Notify summary-screen listeners that a config value changed."""
		for x in self.onChangedEntry:
			x()
	def getCurrentEntry(self):
		"""Return the label of the currently selected config entry."""
		return self["config"].getCurrent()[0]
	def getCurrentValue(self):
		"""Return the display text of the currently selected config value."""
		return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens |
timj/scons | test/SConscript/Return.py | Python | mit | 2,480 | 0.000806 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Verify that the Return() function stops processing the SConscript file
at the point it is called, unless the stop= keyword argument is supplied.
"""

import TestSCons

test = TestSCons.TestSCons()

# NOTE: the 'SConstruct' and 'SConscript3' file names below had stray ' | '
# corruption artifacts removed; without the fix the harness would create
# wrongly-named files and the build would not find them.
test.write('SConstruct', """\
SConscript('SConscript1')
x = SConscript('SConscript2')
y, z = SConscript('SConscript3')
a4, b4 = SConscript('SConscript4')
foo, bar = SConscript('SConscript5')
print ("x =", x)
print ("y =", y)
print ("z =", z)
print ("a4 =", a4)
print ("b4 =", b4)
print ("foo =", foo)
print ("bar =", bar)
""")

# Return() with no value: stops processing, returns nothing.
test.write('SConscript1', """\
print ("line 1")
Return()
print ("line 2")
""")

# Return('x'): stops processing, returns one variable.
test.write('SConscript2', """\
print ("line 3")
x = 7
Return('x')
print ("line 4")
""")

# Return('y z'): names may be space-separated in one string.
test.write('SConscript3', """\
print ("line 5")
y = 8
z = 9
Return('y z')
print ("line 6")
""")

# stop=False: values are captured but processing continues; the later
# reassignment of b4 must NOT affect the returned value.
test.write('SConscript4', """\
a4 = 'aaa'
b4 = 'bbb'
print ("line 7")
Return('a4', 'b4', stop=False)
b4 = 'b-after'
print ("line 8")
""")

# Return(["foo", "bar"]): names may be given as a list.
test.write('SConscript5', """\
foo = 'foo'
bar = 'bar'
Return(["foo", "bar"])
print ("line 9")
""")

expect = """\
line 1
line 3
line 5
line 7
line 8
x = 7
y = 8
z = 9
a4 = aaa
b4 = bbb
foo = foo
bar = bar
"""

test.run(arguments = '-q -Q', stdout=expect)

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
FRidh/python-acoustics | acoustics/quantity.py | Python | bsd-3-clause | 2,707 | 0.008866 | """
Quantities and units
====================
The Quantity module provides two classes to work with quantities and units.
.. inheritance-diagram:: acoustics.quantity
"""
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
# Each quantity tuple mirrors the Quantity() constructor:
# (name, unit key into `units`, dynamic?, symbol, symbol_latex, reference).
quantities = {
    'pressure' : ('Pressure', 'pascal', True, 'p', '$p$', REFERENCE_PRESSURE)
    }
"""
Dictionary with quantities. Each quantity is stored as a tuple.
"""

# Each unit tuple mirrors the Unit() constructor: (name, symbol, symbol_latex).
units = {
    'meter' : ('meter', 'm', '$m$'),
    'pascal' : ('pascal', 'Pa', '$Pa$'),
    }
"""
Dictionary with units. Each unit is stored as a tuple.
"""
class Unit(object):
    """
    Unit of quantity.

    .. note:: Perhaps inherit from tuple or :class:`collections.namedTuple`?
    """

    def __init__(self, name, symbol, symbol_latex):
        self.name = name
        """
        Name of the unit.
        """
        self.symbol = symbol
        """
        Symbol of the unit.
        """
        # Bugfix: this line previously read `self.symbol_latex` (a bare
        # attribute access), so the attribute was never assigned and
        # constructing a Unit raised AttributeError.
        self.symbol_latex = symbol_latex
        """
        Symbol of the unit in LaTeX.
        """

    def __repr__(self):
        return "Unit({})".format(self.name)

    def __str__(self):
        return self.name
class Quantity(object):
    """
    A physical quantity: name, symbols, unit, reference value and a flag
    telling whether the quantity is dynamic or energetic.
    """

    def __init__(self, name, unit, dynamic, symbol=None, symbol_latex=None, reference=1.0):
        # Name of the quantity.
        self.name = name
        # Unit of the quantity (see :class:`Unit`).
        self.unit = unit
        # Dynamic quantity (True) or energetic (False).
        self.dynamic = dynamic
        # Plain-text symbol of the quantity.
        self.symbol = symbol
        # LaTeX symbol of the quantity.
        self.symbol_latex = symbol_latex
        # Reference value of the quantity.
        self.reference = reference

    def __repr__(self):
        return "Quantity(%s)" % self.name

    def __str__(self):
        return self.name

    @property
    def energetic(self):
        """Energetic quantity (True) or dynamic (False)."""
        return not self.dynamic
def get_quantity(name):
    """
    Get quantity by name. Returns an instance of :class:`Quantity`.

    :param name: Name of the quantity (key into :data:`quantities`).
    :raises ValueError: if the quantity is unknown.
    :raises RuntimeError: if the quantity's unit is not in :data:`units`.
    """
    try:
        q = list(quantities[name])
    except KeyError:
        raise ValueError("Unknown quantity. Quantity is not yet specified.")
    # The unit is stored *by name* in the second field of the quantity tuple.
    # Bugfix: this used to look up units[name] with the quantity name (e.g.
    # 'pressure'), which raised for every valid quantity since the units
    # dict is keyed by unit names (e.g. 'pascal').
    unit_name = q[1]
    try:
        u = units[unit_name]
    except KeyError:
        raise RuntimeError("Unknown unit. Quantity has been specified but unit has not.")
    q[1] = Unit(*u)
    return Quantity(*q)
eriknw/eqpy | eqpy/tests/test_nums.py | Python | bsd-3-clause | 747 | 0 | import eqpy
import sympy
from eqpy._utils import raises
def test_constants():
    """eqpy.nums must expose sympy's singleton constants unchanged."""
    assert eqpy.nums.Catalan is sympy.Catalan
    assert eqpy.nums.E is sympy.E
    assert eqpy.nums.EulerGamma is sympy.EulerGamma
    assert eqpy.nums.GoldenRatio is sympy.GoldenRatio
    assert eqpy.nums.I is sympy.I
    assert eqpy.nums.nan is sympy.nan
    assert eqpy.nums.oo is sympy.oo
    assert eqpy.nums.pi is sympy.pi
    assert eqpy.nums.zoo is sympy.zoo
def test_sympify():
    """Attribute assignment and calling eqpy.nums both sympify their input.

    Fixes a dataset-corruption artifact that had garbled the '2/3' literal.
    """
    eqpy.nums.x = '1/2'
    assert eqpy.nums.x == sympy.S('1/2')
    assert eqpy.nums('2/3') == sympy.S('2/3')
    assert raises(sympy.SympifyError, lambda: eqpy.nums('1.2.3'))
def test_dunders():
    """Dunder attributes must bypass sympification and round-trip unchanged.

    Fixes a dataset-corruption artifact that had garbled the comparison.
    """
    eqpy.nums.__mydunder__ = '1/2'
    assert eqpy.nums.__mydunder__ == '1/2'
|
CaiZhongda/psutil | psutil/_compat.py | Python | bsd-3-clause | 9,627 | 0.002077 | #!/usr/bin/env python
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module which provides compatibility with older Python versions."""
__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable",
"namedtuple", "property", "defaultdict"]
import sys
# --- python 2/3 compatibility layer

# True when running under Python 3.x; selects the shims below.
PY3 = sys.version_info >= (3,)

try:
    import __builtin__
except ImportError:
    import builtins as __builtin__  # py3

if PY3:
    int = int
    long = int
    xrange = range
    # exec/print are keywords in py2 source, so fetch the py3 builtins
    # via getattr instead of naming them directly.
    exec_ = getattr(__builtin__, "exec")
    print_ = getattr(__builtin__, "print")
else:
    int = int
    long = long
    xrange = xrange

    def exec_(code, globs=None, locs=None):
        """Execute *code* in the given namespaces (py2 shim for exec())."""
        if globs is None:
            # Default to the caller's namespaces, mirroring exec() semantics.
            # Bugfix: this read `_sys._getframe(1)`, but `_sys` is only
            # imported inside the namedtuple fallback further down, so on
            # interpreters with collections.namedtuple this raised NameError.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        # py2 exec-statement syntax, hidden from the py3 parser in a string.
        exec("""exec code in globs, locs""")

    def print_(s):
        sys.stdout.write(s + '\n')
        sys.stdout.flush()

# callable() was removed in 3.0, reintroduced in 3.2
try:
    callable = callable
except Exception:
    def callable(obj):
        for klass in type(obj).__mro__:
            if "__call__" in klass.__dict__:
                return True
        return False
# --- stdlib additions
try:
from collections import namedtuple
except ImportError:
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
"""A collections.namedtuple implementation written in Python
to support Python versions < 2.6.
Taken from: http://code.activestate.com/recipes/500261/
"""
# Parse and validate the field names. Validation serves two
# purposes, generating informative error messages and preventing
# template injection attacks.
if isinstance(field_names, basestring):
# names separated by whitespace and/or commas
field_names = field_names.replace(',', ' ').split()
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain ' \
'alphanumeric characters and underscores: %r'
% name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' \
% name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a ' \
'number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r'
% name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
# tuple repr without parens or quotes
argtxt = repr(field_names).replace("'", "")[1:-1]
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new dict which maps field names to their values'
return dict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
sys.stdout.write(template + '\n')
sys.stdout.flush()
# Execute the template string in a temporary namespace
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec_(template, namespace)
except SyntaxError:
e = sys.exc_info()[1]
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set
# to the frame where the named tuple is created. Bypass this
# step in enviroments where sys._getframe is not defined (Jython
# for example) or sys._getframe is not defined for arguments
# greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
# hack to support property.setter/deleter on python < 2.6
# http://docs.python.org/library/functions.html?highlight=property#property
# (fixed a dataset-corruption artifact that had split '__builtin__.property')
if hasattr(property, 'setter'):
    property = property
else:
    class property(__builtin__.property):
        __metaclass__ = type

        def __init__(self, fget, *args, **kwargs):
            super(property, self).__init__(fget, *args, **kwargs)
            self.__doc__ = fget.__doc__

        def getter(self, method):
            return property(method, self.fset, self.fdel)

        def setter(self, method):
            return property(self.fget, method, self.fdel)

        def deleter(self, method):
            return property(self.fget, self.fset, method)
# py 2.5 collections.defauldict
# Taken from:
# http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/
# credits: Jason Kirtland
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
|
FEniCS/ufl | demo/P5tet.py | Python | lgpl-3.0 | 852 | 0 | # Copyright (C) 2006-2007 Anders Logg
#
# This file is part of UFL.
#
# UFL is free software: you can redistribute it and/or modify
# it under the terms of th | e GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UFL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICUL | AR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with UFL. If not, see <http://www.gnu.org/licenses/>.
#
# A fifth degree Lagrange finite element on a tetrahedron
from ufl import FiniteElement, tetrahedron

# Scalar continuous Lagrange element of polynomial degree 5 on a tetrahedron.
element = FiniteElement("Lagrange", tetrahedron, 5)
|
car3oon/saleor | saleor/urls.py | Python | bsd-3-clause | 2,079 | 0 | from django.conf import settings
# NOTE: dataset-corruption artifacts (' | ') were removed from two import
# lines below; the `settings` import (fused into the corrupted line above)
# is restored so `settings.DEBUG` resolves.
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.views import serve
from django.views.i18n import javascript_catalog
from graphene_django.views import GraphQLView

from .cart.urls import urlpatterns as cart_urls
from .checkout.urls import urlpatterns as checkout_urls
from .core.sitemaps import sitemaps
from .core.urls import urlpatterns as core_urls
from .order.urls import urlpatterns as order_urls
from .product.urls import urlpatterns as product_urls
from .search.urls import urlpatterns as search_urls
from .userprofile.views import login as login_view
from .userprofile.urls import urlpatterns as userprofile_urls
from .data_feeds.urls import urlpatterns as feed_urls
from .dashboard.urls import urlpatterns as dashboard_urls

urlpatterns = [
    url(r'^', include(core_urls)),
    url(r'^account/', include('allauth.urls')),
    url(r'^account/login', login_view, name="account_login"),
    url(r'^cart/', include(cart_urls, namespace='cart')),
    url(r'^checkout/', include(checkout_urls, namespace='checkout')),
    url(r'^dashboard/', include(dashboard_urls, namespace='dashboard')),
    url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),
    url(r'^jsi18n/$', javascript_catalog, name='javascript-catalog'),
    url(r'^order/', include(order_urls, namespace='order')),
    url(r'^products/', include(product_urls, namespace='product')),
    url(r'^profile/', include(userprofile_urls, namespace='profile')),
    url(r'^search/', include(search_urls, namespace='search')),
    url(r'^feeds/', include(feed_urls, namespace='data_feeds')),
    url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'', include('payments.urls'))
]

if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    urlpatterns += [
        url(r'^static/(?P<path>.*)$', serve)
    ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
NaturalEcon/RDb | RDb/models.py | Python | gpl-3.0 | 114 | 0 | from commonmodels import *
from basemodels import | *
from descriptivemodels import *
from operativemodels impo | rt *
|
stsouko/CGRtools | CGRtools/periodictable/groupIX.py | Python | lgpl-3.0 | 6,672 | 0.003447 | # -*- coding: utf-8 -*-
#
# Copyright 2019, 2020 Ramil Nugmanov <nougmanoff@protonmail.com>
# Copyright 2019 Tagir Akhmetshin <tagirshin@gmail.com>
# Copyright 2019 Tansu Nasyrova <tansu.nasurova@gmail.com>
# This file is part of CGRtools.
#
# CGRtools is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
from CachedMethods import FrozenDict
from .element import Element
from .groups import GroupIX
from .periods import PeriodIV, PeriodV, PeriodVI, PeriodVII
class Co(Element, PeriodIV, GroupIX):
    """Cobalt."""
    __slots__ = ()
    @property
    def atomic_number(self):
        return 27
    @property
    def isotopes_distribution(self):
        # Natural abundance by mass number; 0. entries are known isotopes
        # without natural abundance.
        return FrozenDict({59: 1.0, 55: 0., 57: 0., 58: 0., 60: 0.})
    @property
    def isotopes_masses(self):
        return FrozenDict({59: 58.9332, 55: 54.941999, 57: 56.936291, 58: 57.935753, 60: 59.933817})
    @property
    def _common_valences(self):
        return 0, 2, 3
    @property
    def _valences_exceptions(self):
        # Each entry appears to be (charge, radical?, flag, bonds) where bonds
        # is a tuple of (bond order, neighbor element) pairs — confirm the
        # third field's meaning against the Element base class (always 0 here).
        return ((2, False, 0, ()),
                (3, False, 0, ()),
                (0, False, 0, ((1, 'H'),)),
                (-3, False, 0, ((2, 'O'), (1, 'O'), (1, 'O'), (1, 'O'))),  # [CoO4]3-
                (-2, False, 0, ((1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'))),  # [CoF6]2-
                (0, False, 0, ((1, 'C'), (1, 'C'), (1, 'C'), (1, 'C'), (1, 'H'))),  # HCo(CO)4
                (-1, False, 0, ((1, 'F'), (1, 'F'), (1, 'F'))),  # [CoF3]-
                (-1, False, 0, ((1, 'Cl'), (1, 'Cl'), (1, 'Cl'))),
                (-1, False, 0, ((1, 'O'), (1, 'O'), (1, 'O'))),  # [Co(NO3)3]-
                (-2, False, 0, ((1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'))),  # [CoF4]2-
                (-2, False, 0, ((1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'))),
                (-2, False, 0, ((1, 'Br'), (1, 'Br'), (1, 'Br'), (1, 'Br'))),
                (-2, False, 0, ((1, 'I'), (1, 'I'), (1, 'I'), (1, 'I'))),
                (-2, False, 0, ((1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'))),  # [Co(OH)4]2-
                (-1, False, 0, ((1, 'C'), (1, 'C'), (1, 'C'), (1, 'C'))),  # [Co(CN4)]-
                (0, False, 0, ((1, 'N'), (1, 'N'), (1, 'N'), (1, 'N'), (1, 'N'), (1, 'C'))),  # B12
                (-3, False, 0, ((1, 'C'), (1, 'C'), (1, 'C'), (1, 'C'), (1, 'C'), (1, 'C'))),  # [Co(CN)6]3-
                (-3, False, 0, ((1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'))),  # [Co(OH)6]3-
                (-3, False, 0, ((1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'))),  # [CoCl5]3-
                (-3, False, 0, ((1, 'S'), (1, 'S'), (1, 'S'), (1, 'S'), (1, 'S'))),  # [Co(NCS)5]3-
                (-4, False, 0, ((1, 'S'), (1, 'S'), (1, 'S'), (1, 'S'), (1, 'S'), (1, 'S'))),  # [Co(NCS)6]4-
                (-4, False, 0, ((1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'))))  # [Co(OH)6]4-
    @property
    def atomic_radius(self):
        return 1.52
class Rh(Element, PeriodV, GroupIX):
    """Rhodium.

    Two entries in ``_valences_exceptions`` had dataset-corruption
    artifacts (' | ') removed; the data itself is unchanged.
    """
    __slots__ = ()
    @property
    def atomic_number(self):
        return 45
    @property
    def isotopes_distribution(self):
        return FrozenDict({103: 1.0, 105: 0.})
    @property
    def isotopes_masses(self):
        return FrozenDict({103: 102.905504, 105: 104.905694})
    @property
    def _common_valences(self):
        return 0, 3, 4
    @property
    def _valences_exceptions(self):
        return ((0, False, 0, ((2, 'O'),)),  # RhO
                (0, False, 0, ((1, 'O'), (1, 'O'))),  # Rh(OH)2
                (0, False, 0, ((2, 'S'),)),
                (0, False, 0, ((1, 'S'), (1, 'S'))),
                (-1, False, 0, ((1, 'Br'), (1, 'Br'), (1, 'Br'), (1, 'Br'))),  # [RhBr4]-
                (-3, False, 0, ((1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'))),  # [RhCl6]3-
                (-3, False, 0, ((1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'), (1, 'O'))),  # [Rh(NO2)6]3-
                (0, False, 0, ((1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'))),
                (0, False, 0, ((1, 'C'), (1, 'C'), (1, 'C'), (1, 'C'), (1, 'H'))),  # HRh(CO)4
                (0, False, 0, ((1, 'P'), (1, 'P'), (1, 'P'), (1, 'C'), (1, 'H'))))  # HRh(CO)[P(Ph)3]3
    @property
    def atomic_radius(self):
        return 1.73
class Ir(Element, PeriodVI, GroupIX):
    """Iridium."""
    __slots__ = ()
    @property
    def atomic_number(self):
        return 77
    @property
    def isotopes_distribution(self):
        return FrozenDict({191: 0.373, 193: 0.627, 192: 0.})
    @property
    def isotopes_masses(self):
        return FrozenDict({191: 190.960591, 193: 192.962924, 192: 191.962605})
    @property
    def _common_valences(self):
        return 0, 3, 4
    @property
    def _valences_exceptions(self):
        # (charge, radical?, flag, ((bond order, neighbor element), ...)).
        return ((0, False, 0, ((1, 'F'),)),
                (0, False, 0, ((1, 'Cl'),)),
                (0, False, 0, ((1, 'Br'),)),
                (0, False, 0, ((1, 'I'),)),
                (0, False, 0, ((1, 'Cl'), (1, 'Cl'))),
                (0, False, 0, ((1, 'Br'), (1, 'Br'))),
                (0, False, 0, ((1, 'I'), (1, 'I'))),
                (0, False, 0, ((1, 'S'), (1, 'S'))),
                (0, False, 0, ((2, 'S'),)),
                (0, False, 0, ((1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'))),
                (0, False, 0, ((1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'), (1, 'F'))),
                (-3, False, 0, ((1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'), (1, 'Cl'))))
    @property
    def atomic_radius(self):
        return 1.8
class Mt(Element, PeriodVII, GroupIX):
    """Meitnerium (synthetic; no stable chemistry data)."""
    __slots__ = ()
    @property
    def atomic_number(self):
        return 109
    @property
    def isotopes_distribution(self):
        return FrozenDict({278: 1.0})
    @property
    def isotopes_masses(self):
        return FrozenDict({278: 278.15481})
    @property
    def _common_valences(self):
        return 0,
    @property
    def _valences_exceptions(self):
        return ()
    @property
    def atomic_radius(self):
        return 1.8  # unknown, taken radius of previous element in group
__all__ = ['Co', 'Rh', 'Ir', 'Mt']
|
pandas-dev/pandas | pandas/tests/io/sas/test_sas7bdat.py | Python | bsd-3-clause | 12,142 | 0.000659 | import contextlib
from datetime import datetime
import io
import os
from pathlib import Path
import dateutil.parser
import numpy as np
import pytest
from pandas.errors import EmptyDataError
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
@pytest.fixture
def dirpath(datapath):
    """Path to the SAS test-data directory."""
    return datapath("io", "sas", "data")
@pytest.fixture(params=[(1, range(1, 16)), (2, [16])])
def data_test_ix(request, dirpath):
    """Expected DataFrame plus the test-file indices it corresponds to.

    Fixes dataset-corruption artifacts (' | ') that had split
    ``os.path.join`` and the 1960 SAS epoch literal.
    """
    i, test_ix = request.param
    fname = os.path.join(dirpath, f"test_sas7bdat_{i}.csv")
    df = pd.read_csv(fname)
    # SAS stores dates as days since the SAS epoch, 1960-01-01.
    epoch = datetime(1960, 1, 1)
    t1 = pd.to_timedelta(df["Column4"], unit="d")
    df["Column4"] = epoch + t1
    t2 = pd.to_timedelta(df["Column12"], unit="d")
    df["Column12"] = epoch + t2
    # read_sas yields float64 for numeric columns; match that here.
    for k in range(df.shape[1]):
        col = df.iloc[:, k]
        if col.dtype == np.int64:
            df.iloc[:, k] = df.iloc[:, k].astype(np.float64)
    return df, test_ix
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestSAS7BDAT:
    """read_sas(.sas7bdat) round-trips against the reference CSV fixtures."""
    @pytest.mark.slow
    def test_from_file(self, dirpath, data_test_ix):
        """Reading by file path matches the expected frame."""
        df0, test_ix = data_test_ix
        for k in test_ix:
            fname = os.path.join(dirpath, f"test{k}.sas7bdat")
            df = pd.read_sas(fname, encoding="utf-8")
            tm.assert_frame_equal(df, df0)
    @pytest.mark.slow
    def test_from_buffer(self, dirpath, data_test_ix):
        """Reading from an in-memory BytesIO buffer matches the file read."""
        df0, test_ix = data_test_ix
        for k in test_ix:
            fname = os.path.join(dirpath, f"test{k}.sas7bdat")
            with open(fname, "rb") as f:
                byts = f.read()
            buf = io.BytesIO(byts)
            # format= must be given explicitly: a buffer has no extension.
            with pd.read_sas(
                buf, format="sas7bdat", iterator=True, encoding="utf-8"
            ) as rdr:
                df = rdr.read()
            tm.assert_frame_equal(df, df0, check_exact=False)
    @pytest.mark.slow
    def test_from_iterator(self, dirpath, data_test_ix):
        """Chunked reads via the iterator return consecutive row slices."""
        df0, test_ix = data_test_ix
        for k in test_ix:
            fname = os.path.join(dirpath, f"test{k}.sas7bdat")
            with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
                df = rdr.read(2)
                tm.assert_frame_equal(df, df0.iloc[0:2, :])
                df = rdr.read(3)
                tm.assert_frame_equal(df, df0.iloc[2:5, :])
    @pytest.mark.slow
    def test_path_pathlib(self, dirpath, data_test_ix):
        """pathlib.Path inputs are accepted."""
        df0, test_ix = data_test_ix
        for k in test_ix:
            fname = Path(os.path.join(dirpath, f"test{k}.sas7bdat"))
            df = pd.read_sas(fname, encoding="utf-8")
            tm.assert_frame_equal(df, df0)
    @td.skip_if_no("py.path")
    @pytest.mark.slow
    def test_path_localpath(self, dirpath, data_test_ix):
        """py.path.local inputs are accepted."""
        from py.path import local as LocalPath
        df0, test_ix = data_test_ix
        for k in test_ix:
            fname = LocalPath(os.path.join(dirpath, f"test{k}.sas7bdat"))
            df = pd.read_sas(fname, encoding="utf-8")
            tm.assert_frame_equal(df, df0)
    @pytest.mark.slow
    @pytest.mark.parametrize("chunksize", (3, 5, 10, 11))
    @pytest.mark.parametrize("k", range(1, 17))
    def test_iterator_loop(self, dirpath, k, chunksize):
        # github #13654
        fname = os.path.join(dirpath, f"test{k}.sas7bdat")
        with pd.read_sas(fname, chunksize=chunksize, encoding="utf-8") as rdr:
            y = 0
            for x in rdr:
                y += x.shape[0]
        assert y == rdr.row_count
    def test_iterator_read_too_much(self, dirpath):
        # github #14734
        fname = os.path.join(dirpath, "test1.sas7bdat")
        with pd.read_sas(
            fname, format="sas7bdat", iterator=True, encoding="utf-8"
        ) as rdr:
            d1 = rdr.read(rdr.row_count + 20)
        with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
            d2 = rdr.read(rdr.row_count + 20)
        tm.assert_frame_equal(d1, d2)
def test_encoding_options(datapath):
    """Reading with and without an explicit encoding must agree."""
    fname = datapath("io", "sas", "data", "test1.sas7bdat")
    df1 = pd.read_sas(fname)
    df2 = pd.read_sas(fname, encoding="utf-8")
    # Decode byte columns of the raw read so the frames compare equal.
    for col in df1.columns:
        try:
            df1[col] = df1[col].str.decode("utf-8")
        except AttributeError:
            pass
    tm.assert_frame_equal(df1, df2)
    from pandas.io.sas.sas7bdat import SAS7BDATReader
    # With convert_header_text=False column names stay as bytes.
    with contextlib.closing(SAS7BDATReader(fname, convert_header_text=False)) as rdr:
        df3 = rdr.read()
    for x, y in zip(df1.columns, df3.columns):
        assert x == y.decode()
def test_productsales(datapath):
    """productsales.sas7bdat matches its reference CSV."""
    fname = datapath("io", "sas", "data", "productsales.sas7bdat")
    df = pd.read_sas(fname, encoding="utf-8")
    fname = datapath("io", "sas", "data", "productsales.csv")
    df0 = pd.read_csv(fname, parse_dates=["MONTH"])
    vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"]
    df0[vn] = df0[vn].astype(np.float64)
    tm.assert_frame_equal(df, df0)
def test_12659(datapath):
    """Regression fixture for GH#12659 matches its reference CSV."""
    fname = datapath("io", "sas", "data", "test_12659.sas7bdat")
    df = pd.read_sas(fname)
    fname = datapath("io", "sas", "data", "test_12659.csv")
    df0 = pd.read_csv(fname)
    df0 = df0.astype(np.float64)
    tm.assert_frame_equal(df, df0)
def test_airline(datapath):
    """airline.sas7bdat matches its reference CSV (inexact float compare)."""
    fname = datapath("io", "sas", "data", "airline.sas7bdat")
    df = pd.read_sas(fname)
    fname = datapath("io", "sas", "data", "airline.csv")
    df0 = pd.read_csv(fname)
    df0 = df0.astype(np.float64)
    tm.assert_frame_equal(df, df0, check_exact=False)
def test_date_time(datapath):
    # Support of different SAS date/datetime formats (PR #15871)
    fname = datapath("io", "sas", "data", "datetime.sas7bdat")
    df = pd.read_sas(fname)
    fname = datapath("io", "sas", "data", "datetime.csv")
    df0 = pd.read_csv(
        fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"]
    )
    # GH 19732: Timestamps imported from sas will incur floating point errors
    df.iloc[:, 3] = df.iloc[:, 3].dt.round("us")
    tm.assert_frame_equal(df, df0)
@pytest.mark.parametrize("column", ["WGT", "CYL"])
def test_compact_numerical_values(datapath, column):
    # Regression test for #21616
    fname = datapath("io", "sas", "data", "cars.sas7bdat")
    df = pd.read_sas(fname, encoding="latin-1")
    # The two columns CYL and WGT in cars.sas7bdat have column
    # width < 8 and only contain integral values.
    # Test that pandas doesn't corrupt the numbers by adding
    # decimals.
    result = df[column]
    expected = df[column].round()
    tm.assert_series_equal(result, expected, check_exact=True)
def test_many_columns(datapath):
    # Test for looking for column information in more places (PR #22628)
    fname = datapath("io", "sas", "data", "many_columns.sas7bdat")
    df = pd.read_sas(fname, encoding="latin-1")
    fname = datapath("io", "sas", "data", "many_columns.csv")
    df0 = pd.read_csv(fname, encoding="latin-1")
    tm.assert_frame_equal(df, df0)
def test_inconsistent_number_of_rows(datapath):
    # Regression test for issue #16615. (PR #22628)
    fname = datapath("io", "sas", "data", "load_log.sas7bdat")
    df = pd.read_sas(fname, encoding="latin-1")
    assert len(df) == 2097
def test_zero_variables(datapath):
    # Check if the SAS file has zero variables (PR #18184)
    fname = datapath("io", "sas", "data", "zero_variables.sas7bdat")
    with pytest.raises(EmptyDataError, match="No columns to parse from file"):
        pd.read_sas(fname)
def test_corrupt_read(datapath):
    # We don't really care about the exact failure, the important thing is
    # that the resource should be cleaned up afterwards (BUG #35566)
    fname = datapath("io", "sas", "data", "corrupt.sas7bdat")
    msg = "'SAS7BDATReader' object has no attribute 'row_count'"
    with pytest.raises(AttributeError, match=msg):
        pd.read_sas(fname)
def round_datetime_to_ms(ts):
if isinstance(ts, datetime):
return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000)
elif isinstance(ts, str):
_ts = dateutil.parser.parse(timestr=ts)
return _ts.replace(microsecond=int(round(_ts.micro |
divio/django-filer | filer/__init__.py | Python | bsd-3-clause | 582 | 0.001718 | """
See PEP 386 (https://www.python.org/dev/peps/pep-0386/)
Release logic:
1. Increase version number (change | __version__ below).
2. Check that all changes have been documented in CHANGELOG.rst.
3. git add filer/__init__.py CHANGELOG.rst
4. git commit -m 'Bump to {new version}'
5. git push
6. Assure that all tests pass on https://travis-ci.org/github/divio/django-filer.
7. git tag {new version}
8. git push --tags
9. python setup.py sdist
10. twine upload dist/dja | ngo-filer-{new version}.tar.gz
"""
__version__ = '2.1.2'
default_app_config = 'filer.apps.FilerConfig'
|
quantcast/qfs | webui/chart.py | Python | apache-2.0 | 1,146 | 0.009599 | #
# $Id$
#
# Copyright 2011,2016 Quantcast Corporation. All rights reserved.
#
# Author: Kate Labeeva
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
import platform
kDeltaPrefix="D-"
class ChartServerData:
    """One server's name together with its data series for a chart.

    (Removed a dataset-corruption artifact that split the __init__ line.)
    """
    def __init__(self, serverName, serverArray):
        self.serverName = serverName
        self.serverArray = serverArray
class ChartData:
    """Holds chart column headers and the per-server data series."""
    def __init__(self):
        # No per-server series collected yet; headers not parsed yet.
        self.serverArray = []
        self.headers = None
class ChartHTML:
    """Renders ChartData as HTML (rendering itself is still a TBD stub)."""
    def __init__(self, chartData):
        self.chartData = chartData
    def printToHTML(self, buffer):
        # Removed a corruption artifact from the def line and switched the
        # py2-only `print "TBD"` statement to the parenthesized form, which
        # is valid under both Python 2 and 3.
        print("TBD")
|
bblais/plasticity | plasticity/run.py | Python | mit | 17,982 | 0.024747 | #!/usr/bin/env python
__version__= "$Version: $"
__rcsid__="$Id: $"
import matplotlib
#matplotlib.use('WX')
from wx import MilliSleep
from wx import SplashScreen, SPLASH_CENTRE_ON_SCREEN, SPLASH_TIMEOUT
import os
import sys
import warnings
from . import zpickle
from .utils import *
from .dialogs.waxy import *
from .dialogs import *
from .run_sim import *
import threading
import pylab
gray=pylab.cm.gray
from matplotlib.backends.backend_wxagg import FigureCanvasWx as FigureCanvas
from matplotlib.backends.backend_wx import FigureManager
from matplotlib.figure import Figure
from matplotlib.axes import Subplot
class SimThread(threading.Thread):
def __init__(self,params,parent):
self.params=params
self.parent=parent
threading.Thread.__init__(self);
def run(self):
run_sim(self.params,self.parent)
def subplot(*args):
import pylab
if len(args)==1:
return pylab.subplot(args[0])
elif len(args)==3:
return pylab.subplot(args[0],args[1],args[2])
elif len(args)==4:
r=args[2]
c=args[3]
return pylab.subplot(args[0],args[1],c+(r-1)*args[1]);
else:
raise ValueError("invalid number of arguments")
class MainFrame(Frame):
def __init__(self,parent=None,title='',direction='H',
size=(750,750),lfname=None,params=None):
self.fig=None
# turn off security warning on tmpnam. why is it here?
warnings.filterwarnings('ignore')
fname=os.tempnam()
warnings.resetwarnings()
self.base_dir=os.path.dirname(__file__)
if not self.base_dir:
self.base_dir='.'
self.tmpfile=fname+"_plasticity.dat"
self.modified=False
self.running=False
self.stopping=False
self.quitting=False
self.plot_first=False
if not params:
self.params=default_params()
else:
self.params=params
for p in self.params['pattern_input']:
if not os.path.exists(p['filename']):
p['filename']=self.base_dir+"/"+p['filename']
if lfname:
if not self.__load_sim__(lfname):
self.plot_first=True
Frame.__init__(self,parent,title,direction,size)
def Body(self):
self.CreateMenu()
self.CenterOnScreen()
self.ResetTit | le()
fname=self.base_dir+"/images/plasticity_small_icon.ico"
self.SetIcon(fname)
self.fig = Figure(figsize=(7,5),dpi=100) |
self.canvas = FigureCanvas(self, -1, self.fig)
self.figmgr = FigureManager(self.canvas, 1, self)
self.axes = [self.fig.add_subplot(221),
self.fig.add_subplot(222),
self.fig.add_subplot(223),
self.fig.add_subplot(224)]
if self.plot_first:
sim=zpickle.load(self.tmpfile)
sim['params']['display']=True
self.Plot(sim)
def Stopping(self):
return self.stopping
def Yield(self):
wx.Yield()
def ResetTitle(self):
(root,sfname)=os.path.split(self.params['save_sim_file'])
if self.modified:
s=' (*)'
else:
s=''
title='Plasticity: %s%s' % (sfname,s)
self.SetTitle(title)
def Plot(self,sim):
if not sim['params']['display']:
return
if sim['params']['display_module']:
try:
module=__import__(sim['params']['display_module'],fromlist=['UserPlot'])
except ImportError:
sim['params']['display']=False
dlg = MessageDialog(self,
"Error","Error in Import: %s. Turning display off" % sim['params']['display_module'],
icon='error')
dlg.ShowModal()
dlg.Destroy()
return
try:
module.UserPlot(self,sim)
return
except ValueError:
sim['params']['display']=False
dlg = MessageDialog(self,
"Error","Error in display. Turning display off",
icon='error')
dlg.ShowModal()
dlg.Destroy()
return
try:
im=weights2image(sim['params'],sim['weights'])
self.axes[0].hold(False)
self.axes[0].set_axis_bgcolor('k')
self.axes[0].pcolor(im,cmap=gray,edgecolors='k')
self.axes[0].set_aspect('equal')
num_moments=sim['moments_mat'].shape[0]
self.axes[1].hold(False)
num_neurons=sim['moments_mat'].shape[1]
for k in range(num_neurons):
for i in range(num_moments):
self.axes[1].plot(sim['moments_mat'][i,k,:],'-o')
self.axes[1].hold(True)
self.axes[2].hold(False)
response_mat=sim['response_mat']
response_var_list=sim['response_var_list']
styles=['b-o','g-o']
for i,r in enumerate(response_var_list[-1]):
x=r[1]
y=r[2]
self.axes[2].plot(x,y,styles[i])
self.axes[2].hold(True)
self.axes[3].hold(False)
styles=['b-o','g-o']
for i,r in enumerate(response_mat):
self.axes[3].plot(r,styles[i])
self.axes[3].hold(True)
self.canvas.draw()
self.canvas.gui_repaint()
except ValueError:
sim['params']['display']=False
dlg = MessageDialog(self,
"Error","Error in display. Turning display off",
icon='error')
dlg.ShowModal()
dlg.Destroy()
def Run_Pause(self,event):
if not self.running:
# pylab.close()
self.params['tmpfile']=self.tmpfile
if os.path.exists(self.tmpfile):
self.params['continue']=1
self.modified=True
self.ResetTitle()
self.running=True
## d={}
## d['params']=self.params
## zpickle.save(d,'plasticity_tmpparams.dat')
## cmd='./run_sim.py --paramfile plasticity_tmpparams.dat --from_gui 1'
## os.system(cmd)
self.stopping=False
run_sim(self.params,self)
self.params['load_sim_file']=self.tmpfile
self.running=False
if self.quitting:
self.Quit()
else:
self.stopping=True
def __load_sim__(self,lfname):
sim=zpickle.load(lfname)
params=sim['params']
params['save_sim_file']=self.params['save_sim_file']
params['load_sim_file']=''
params['continue']=False
try:
params['initial_weights']=sim['weights']
params['initial_moments']=sim['moments']
except KeyError:
self.params=params
return 1
params['load_sim_file']=self.tmpfile
params['continue']=True
sim['params']=params
self.params=params
zpickle.save(sim,self.tmpfile)
return 0
def Reset_Simulation(self,event=None):
if not os.path.exists(self.tmpfile):
return
self.canvas.Show(False)
if self.modified:
(root,sfname)=os.path.split(self.params['save_sim_file'])
dlg=MessageDialog(self,
text="Do you want to save the changes you made to %s?" % sfname |
aioworkers/aioworkers | tests/test_plugins.py | Python | apache-2.0 | 498 | 0 | import argparse
import sys
i | mport pytest
from aioworkers.core.plugin import ProxyPlugin, search_plugins
class plugin:
configs = ('a',)
@pytest.mark | .parametrize('name', [__name__, 'tests'])
def test_proxy_plugin(name, mocker):
del sys.modules[name]
assert name not in sys.modules
(p,) = search_plugins(name)
assert isinstance(p, ProxyPlugin)
assert p.get_config() == {}
p.add_arguments(mocker.Mock())
p.parse_known_args(args=[], namespace=argparse.Namespace())
|
xchen101/analysis-preservation.cern.ch | cap/modules/records/ext.py | Python | gpl-2.0 | 1,715 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
| # waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Jinja utilities for Invenio."""
from __future__ import absolute_import, print_function
from invenio_indexer.signals import before_record_index
# from .indexer import indexer_receiver
from .views import blueprint
class Records(object):
"""Records extension."""
def __init | __(self, app=None):
"""Extension initialization."""
if app:
self.init_app(app)
def init_app(self, app):
"""Flask application initialization."""
app.register_blueprint(blueprint)
# before_record_index.connect(indexer_receiver, sender=app)
app.extensions['cap_records'] = self
|
bejar/kemlglearn | kemlglearn/datasets/__init__.py | Python | mit | 249 | 0.016064 | """
.. module:: __init__.py
__init__.py |
*************
:Description: __init__.py
:Authors: bejar
:Version:
:Created on: 21/01/2015 9:00
"""
__author__ = 'bejar'
from .samples_generator import make_blobs |
__all__ = ['make_blobs']
|
marwano/django-glaze | setup.py | Python | bsd-3-clause | 1,285 | 0 |
from s | etuptools import setup
import re
readme = open('README.rst').read()
changes = open('CHANGES.txt').read()
version_file = 'glaze/__init__.py'
version = re.findall("__version__ = '(.*)'", open(version_file).read())[0]
try:
version = __import__('utile').git_version(version)
except ImportError:
pass
setup(
name='djan | go-glaze',
version=version,
description="Adding extra functionality to Django",
long_description=readme + '\n\n' + changes,
author='Marwan Alsabbagh',
author_email='marwan.alsabbagh@gmail.com',
url='https://github.com/marwano/django-glaze',
license='BSD',
packages=[
'glaze', 'glaze.utils', 'glaze.templatetags',
'glaze.admin',
],
include_package_data=True,
install_requires=[
'utile>=0.3'
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
google/llvm-propeller | llvm/utils/extract_vplan.py | Python | apache-2.0 | 1,612 | 0.003722 | #!/usr/bin/env python
# This script extracts the VPlan digraphs from the vectoriser debug messages
# and saves them in individual dot files (one for each plan). Optionally, and
# providing 'dot' is installed, it can also render the dot into a PNG file.
from __future__ import print_function
import sys
import re
import argparse
import shutil
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('--png', action='store_true')
args = parser.parse_args()
dot = shutil.which('dot')
if args.png and not dot:
raise RuntimeError("Can't export to PNG without 'dot' in the system")
pattern = re.compile(r"(digraph VPlan {.*?\n})",re.DOTALL)
mat | ches = re.findall(pattern, sys.stdin.rea | d())
for vplan in matches:
m = re.search("graph \[.+(VF=.+,UF.+)", vplan)
if not m:
raise ValueError("Can't get the right VPlan name")
name = re.sub('[^a-zA-Z0-9]', '', m.group(1))
if args.png:
filename = 'VPlan' + name + '.png'
print("Exporting " + name + " to PNG via dot: " + filename)
p = subprocess.Popen([dot, '-Tpng', '-o', filename],
encoding='utf-8',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(input=vplan)
if err:
raise RuntimeError("Error running dot: " + err)
else:
filename = 'VPlan' + name + '.dot'
print("Exporting " + name + " to DOT: " + filename)
with open(filename, 'w') as out:
out.write(vplan)
|
WatanabeYasumasa/edx-platform | lms/envs/common.py | Python | agpl-3.0 | 51,888 | 0.00258 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to | handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614, C0103
import sys
import os
import imp
import json
from path import pat | h
from .discussionsettings import *
from lms.lib.xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "edX"
CC_MERCHANT_NAME = PLATFORM_NAME
COURSEWARE_ENABLED = True
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
# Features
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': True, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
# extrernal access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Can be turned off if course lists need to be hidden. Effects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# analytics experiments
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
# Enable URL that shows information about the status of variuous services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': True,
# Enable legacy instructor dashboard
'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True,
# Is this an edX-owned domain? (used on instructor dashboard)
'IS_EDX_DOMAIN': False,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
# Allow use of the hint managment instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT': False,
# Allow users to enroll with methods other than just honor code certificates
'MULTIPLE_ENROLLMENT_ROLES': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': False,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': False,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the new instructor dashboard will write
# grades CSV files to S3 and give links for do |
ypu/virt-test | virttest/lvsb_base.py | Python | gpl-2.0 | 17,087 | 0.000351 | """
Base classes supporting Libvirt Sandbox (lxc) container testing
:copyright: 2013 Red Hat Inc.
"""
import logging
import signal
import aexpect
class SandboxException(Exception):
"""
Basic exception class for problems occurring in SandboxBase or subclasses
"""
def __init__(self, message):
super(SandboxException, self).__init__()
self.message = message
def __str__(self):
return self.message
# This is to allow us to alter back-end session management w/o affecting
# sandbox subclasses
class SandboxSession(object):
"""
Connection instance to asynchronous I/O redirector process
"""
# Assist with warning on re-use
used = False
def __init__(self):
self.session = None # createdby new_session
@property
def connected(self):
"""
Represents True/False value if background process was created/opened
"""
if self.session is None:
return False
else:
return True
@property
def session_id(self):
"""
Returns unique & persistent identifier for the background process
"""
if self.connected:
return self.session.get_id()
else:
raise SandboxException("Can't get id of non-running sandbox "
"session")
def new_session(self, command):
"""
Create and set new opaque session object
"""
# Allow this to be called more than once w/o consequence
self.close_session(warn_if_nonexist=self.used)
self.session = aexpect.Expect(command, auto_close=False)
self.used = True
def open_session(self, a_id):
"""
Restore connection to existing session identified by a_id
"""
# Allow this to be called more than once w/o consequence
self.close_session(warn_if_nonexist=self.used)
aexpect.Expect(a_id=a_id)
self.used = True
def close_session(self, warn_if_nonexist=True):
"""
| Finalize assigned opaque session object
"""
# Allow this to be called more than once w/o consequence
if self.connected:
self.session.close()
else:
if warn_if_nonexist:
logging.warning("Closing nonexisting sandbox session")
def kill_session(self, sig=signal.SIGTERM):
"""
Send a signal to the opaque session object
"""
if self.connected:
| self.session.kill(sig=sig)
else:
raise SandboxException("Can't send signal to inactive sandbox "
"session")
def send(self, a_string):
"""Send a_string to session"""
if self.connected:
self.session.send(a_string)
else:
raise SandboxException("Can't send to an inactive sandbox session")
def recv(self):
"""Return combined stdout/stderr output received so far"""
if self.connected:
return self.session.get_output()
else:
raise SandboxException("Can't get output from finalized sandbox "
"session")
def recvout(self):
"""Return just stdout output"""
# FIXME: aexpect combines stdout and stderr in a single pipe :(
raise NotImplementedError
def recverr(self):
"""Return just stderr output"""
# FIXME: aexpect combines stdout and stderr in a single pipe :(
raise NotImplementedError
def exit_code(self):
"""Block, and return exit code from session"""
if self.connected:
return self.session.get_status()
else:
raise SandboxException("Can't get exit code from finalized sandbox "
"session")
def is_running(self):
"""Return True if exit_code() would block"""
if self.connected:
return self.session.is_alive()
else:
return None
def auto_clean(self, boolean):
"""Make session cleanup on GC if True"""
if self.connected:
self.session.auto_close = boolean
else:
raise SandboxException("Can't set auto_clean on disconnected "
"sandbox session")
class SandboxBase(object):
"""
Base operations for sandboxed command
"""
# Provide unique instance number for each sandbox
instances = None
def __init__(self, params):
"""
Create a new sandbox interface instance based on this type from params
"""
# Un-pickling instances doesn't call init again
if self.__class__.instances is None:
self.__class__.instances = 1
else:
self.__class__.instances += 1
# store a copy for use to avoid referencing class attribute
self.identifier = self.__class__.instances
# Allow global 'lvsb_*' keys to be overridden for specific subclass
self.params = params.object_params(self.__class__.__name__)
self.options = None # opaque value consumed by make_command()
# Aexpect has some well hidden bugs, private attribute hides
# interface in case it changes from fixes or gets swapped out
# entirely.
self._session = SandboxSession()
# Allow running sandboxes to persist across multiple tests if needed
def __getstate__(self):
"""Serialize instance for pickling"""
# Regular dictionary format for now, but could change later
state = {'params': self.params,
'identifier': self.identifier,
'options': self.options}
# Critical info. to re-connect to session when un-pickle
if self._session.connected:
state['session_id'] = self._session.session_id
return state
def __setstate__(self, state):
"""Actualize instance from state"""
for key in ('identifier', 'params', 'options'):
setattr(self, key, state[key])
if state.haskey('session_id'):
self._session = SandboxSession()
self._session.open_session(state['session_id'])
def run(self, extra=None):
"""
Launch new sandbox as asynchronous background sandbox process
:param extra: String of extra command-line to use but not store
"""
sandbox_cmdline = self.make_sandbox_command_line(extra)
logging.debug("Launching %s", sandbox_cmdline)
self._session.new_session(sandbox_cmdline)
def stop(self):
"""Destroy but don't finalize asynchronous background sandbox process"""
self._session.kill_session()
def fini(self):
"""
Finalize asynchronous background sandbox process (destroys state!)
"""
self._session.close_session()
def send(self, data):
"""Send data to asynchronous background sandbox process"""
self._session.send(data)
def recv(self):
"""
Return stdout and stderr from asynchronous background sandbox process
"""
return self._session.recv()
def recvout(self):
"""
Return only stdout from asynchronous background sandbox process
"""
return self._session.recvout()
def recverr(self):
"""
return only stderr from asynchronous background sandbox process
"""
return self._session.recverr()
def running(self):
"""
Return True/False if asynchronous background sandbox process executing
"""
return self._session.is_running()
def exit_code(self):
"""
Block until asynchronous background sandbox process ends, returning code
"""
return self._session.exit_code()
def auto_clean(self, boolean):
"""
Change behavior of asynchronous background sandbox process on __del__
"""
self._session.auto_clean(boolean)
def make_sandbox_command_line(self, extra=None):
"""
Return the fully formed command-line for the sandbox using self.options
"""
# These a |
dimonaks/siman | siman/picture_functions.py | Python | gpl-2.0 | 47,676 | 0.022506 | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, absolute_import
import sys, os
import copy
import numpy as np
try:
import scipy
from scipy import interpolate
# print (scipy.__version__)
# print (dir(interpolate))
except:
print('picture_functions.py: scipy is not avail')
# from scipy.interpolate import spline
try:
''
from scipy.interpolate import CubicSpline
except:
print('scipy.interpolate.CubicSpline is not avail')
try:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
except:
print('mpl_toolkits or matplotlib are not avail')
try:
from adjustText import adjust_text
adjustText_installed = True
except:
adjustText_installed = False
from siman import header
from siman.header import calc, printlog, printlog
from siman.inout import write_xyz
from siman.small_functions import makedir, is_list_like
from siman.geo import replic
# from siman.chg.chg_func import chg_at_point, cal_chg_diff
# from dos.functions import plot_dos
# from ase.utils.eos import EquationOfState
def plot_mep(atom_pos, mep_energies, image_name = None, filename = None, show = None, plot = 1, fitplot_args = None, style_dic = None):
"""
Used for NEB method
atom_pos (list) - xcart positions of diffusing atom along the path or just coordinates along one line (for polarons)
mep_energies (list) - full energies of the system corresponding to atom_pos
image_name - deprecated, use filename
style_dic - dictionary with styles
'p' - style of points
'l' - style of labels
'label' - label of points
plot - if plot or not
"""
from siman.analysis import determine_barrier
if filename is None:
filename = image_name
#Create
if not style_dic:
style_dic = {'p':'ro', 'l':'b-', 'label':None}
if 'p' not in style_dic:
style_dic['p']='ro'
if not fitplot_args:
fitplot_args = {}
# print
if is_list_like(atom_pos[0]):
atom_pos = np.array(atom_pos)
data = atom_pos.T #
tck, u= interpolate.splprep(data) #now we get all the knots and info about the interpolated spline
path = interpolate.splev(np.linspace(0,1,500), tck) #increase the resolution by increasing the spacing, 500 in this example
path = np.array(path)
diffs = np.diff(path.T, axis = 0)
path_length = np.linalg.norm( diffs, axis = 1).sum()
mep_pos = np.array([p*path_length for p in u])
else:
mep_pos = atom_pos
path_length = atom_pos[-1]
if 0: #plot the path in 3d
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(data[0], data[1], data[2], label='originalpoints', lw =2, c='Dodgerblue')
ax.plot(path[0], path[1], path[2], label='fit', lw =2, c='red')
ax.legend()
plt.show()
# if '_mep' not in calc:
calc['_mep'] = [atom_pos, mep_energies] # just save in temp list to use the results in neb_wrapper
if hasattr(header, 'plot_mep_invert') and header.plot_mep_invert: # for vacancy
mep_energies = list(reversed(mep_energies) )
mine = min(mep_energies)
eners = np.array(mep_energies)-mine
xnew = np.linspace(0, path_length, 1000)
# ynew = spline(mep_pos, eners, xnew )
# spl = CubicSpline(mep_pos, eners, bc_type = 'natural' ) # second-derivative zero
# spl = CubicSpline(mep_pos, eners,) #
# spl = CubicSpline(mep_pos, eners, bc_type = 'periodic')
# spl = CubicSpline(mep_pos, eners, bc_type = 'clamped' ) #first derivative zero
spl = scipy.interpolate.PchipInterpolator(mep_pos, eners)
ynew = spl(xnew)
diff_barrier = determine_barrier(mep_pos, eners)
printlog('plot_mep(): Diffusion barrier =',round(diff_barrier, 2),' eV', imp = 'y')
# sys.exit()
# print()
if 'fig_format' not in fitplot_args:
fitplot_args['fig_format'] = 'eps'
if 'xlim' not in fitplot_args:
fitplot_args['xlim'] = (-0.05, None )
if 'xlabel' not in fitplot_args:
fitplot_args['xlabel'] = 'Reaction coordinate ($\AA$)'
if 'ylabel' not in fitplot_args:
fitplot_args['ylabel'] = 'Energy (eV)'
path2saved = None
if plot:
# print(image_name)
path2saved = fit_and_plot(orig = {'x':mep_pos, 'y':eners, 'fmt':style_dic['p'], 'label':style_dic['label'], 'color':style_dic.get('color')},
spline = {'x':xnew, 'y':ynew, 'fmt':style_dic['l'], 'label':None, 'color':style_dic.get('color')},
image_name = image_name, filename = filename, show = show,
**fitplot_args)
# print(image_name, filename)
if 0:
with open(filename+'.txt', 'w') as f:
f.write('DFT points:\n')
for m, e in zip(mep_pos, eners):
f.write('{:10.5f}, {:10.5f} \n'.format(m, e))
f.write('Spline:\n')
for m, e in zip(xnew, ynew):
f.write('{:10.5f}, {:10.5f} \n'.format(m, e))
return path2saved, diff_barrier
def process_fig_filename(image_name, fig_format):
makedir(image_name)
if fig_format in image_name:
path2saved = str(image_name)
elif str(image_name).split('.')[-1] in ['eps', 'png', 'pdf']:
path2saved = str(image_name)
fig_format = str(image_name).split('.')[-1]
else:
path2saved = str(image_name)+'.'+fig_format
dirname = os.path.dirname(image_name)
if not dirname:
dirnam | e+='.'
path2saved_png = dirname+'/png/'+os.path.basename(image_name)+'.png'
makedir(path2saved_png)
return path2saved, path2saved_png
def fit_and_plot(ax = None, power = None, xlabel = None, ylabel = None,
image_name = None, filename = None,
show = None, pad = None,
xlim = None, ylim = None, title = None, figsize = None,
xlog = False,ylog = False, scatter = False,
legend = False, ncol = 1,
fontsize = None, legend_fontsize=N | one, markersize = None,
linewidth = None, hor = False, ver = True, fig_format = 'eps', dpi = 300,
ver_lines = None, hor_lines = None, xy_line = None, x_nbins = None,
alpha = 0.8, fill = False,
first = True, last = True,
convex = None, dashes = None,
corner_letter = None, corner_letter_pos = None, hide_ylabels = None, hide_xlabels= None, annotate = None,
params = None,
**data):
"""
Plot multiple plots on one axes using *data*
return filename of saved plot
ax (axes) - matplotlib axes object - to create multiple axes plots
data - each entry should be
(X, Y, fmt)
or
(X, Y, fmt, label)
or
{'x':,'y':, 'fmt':, 'label', 'xticks' } not implemented for powers and scatter yet
or
(X, Y, R, fmt) - for scatter = 1, R - size of spots
first, last - allows to call this function multiple times to put several plots on one axes. Use first = 1, last = 0 for the first plot, 0, 0 for intermidiate, and 0, 1 for last
power (int) - the power of polynom, turn on fitting
scatter (bool) - plot scatter points - the data format is slightly different - see *data*
convex (bool) - plot convex hull around points like in ATAT
fill (bool) - fill under the curves
filename (str) - name of file with figure, image_name - deprecated
fig_format (str) - format of saved file.
dpi - resolution of saved file
ver_lines - list of dic args for vertical lines {'x':, 'c', 'lw':, 'ls':}
hor_lines
ver - vertical line at 0
hor - horizontal line at 0
hide_ylabels - just hide numbers
ncol - number of legend columns
corner_letter - letter in the corner of the plot
corner_letter_pos (list*2 float) - list with [x,y] corner position, default left upper corner is set
pad - additional padding, if dict than the same keys as in plt.subplots_adjust() are used
annotate - annotate each point, 'annotates' list should be in data dic!
linewidth - was 3 !
markersize - was 10
x_nbins - number of ticks
params - dictionary with parameters
- 'xlim_power' - xlim for power
|
pombreda/django-hotclub | libs/external_libs/docutils-0.4/docutils/transforms/__init__.py | Python | mit | 6,690 | 0.000149 | # Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 3892 $
# Date: $Date: 2005-09-20 22:04:53 +0200 (Tue, 20 Sep 2005) $
# Copyright: This module has been placed in the public domain.
"""
This package contains modules for standard tree transforms available
to Docutils components. Tree transforms serve a variety of purposes:
- To tie up certain syntax-specific "loose ends" that remain after the
initial parsing of the input plaintext. These transforms are used to
supplement a limited syntax.
- To automate the internal linking of the document tree (hyperlink
references, footnote references, etc.).
- To extract useful information from the document tree. These
transforms may be used to construct (for example) indexes and tables
of contents.
Each transform is an optional step that a Docutils Reader may choose to
perform on the parsed document, depending on the input context. A Docutils
Reader may also perform Reader-spec | ific transforms before or after performing
these standard transforms.
"""
__d | ocformat__ = 'reStructuredText'
from docutils import languages, ApplicationError, TransformSpec
class TransformError(ApplicationError): pass
class Transform:
"""
Docutils transform component abstract base class.
"""
default_priority = None
"""Numerical priority of this transform, 0 through 999 (override)."""
def __init__(self, document, startnode=None):
"""
Initial setup for in-place document transforms.
"""
self.document = document
"""The document tree to transform."""
self.startnode = startnode
"""Node from which to begin the transform. For many transforms which
apply to the document as a whole, `startnode` is not set (i.e. its
value is `None`)."""
self.language = languages.get_language(
document.settings.language_code)
"""Language module local to this document."""
def apply(self, **kwargs):
"""Override to apply the transform to the document tree."""
raise NotImplementedError('subclass must override this method')
class Transformer(TransformSpec):
"""
Stores transforms (`Transform` classes) and applies them to document
trees. Also keeps track of components by component type name.
"""
def __init__(self, document):
self.transforms = []
"""List of transforms to apply. Each item is a 3-tuple:
``(priority string, transform class, pending node or None)``."""
self.unknown_reference_resolvers = []
"""List of hook functions which assist in resolving references"""
self.document = document
"""The `nodes.document` object this Transformer is attached to."""
self.applied = []
"""Transforms already applied, in order."""
self.sorted = 0
"""Boolean: is `self.tranforms` sorted?"""
self.components = {}
"""Mapping of component type name to component object. Set by
`self.populate_from_components()`."""
self.serialno = 0
"""Internal serial number to keep track of the add order of
transforms."""
def add_transform(self, transform_class, priority=None, **kwargs):
"""
Store a single transform. Use `priority` to override the default.
`kwargs` is a dictionary whose contents are passed as keyword
arguments to the `apply` method of the transform. This can be used to
pass application-specific data to the transform instance.
"""
if priority is None:
priority = transform_class.default_priority
priority_string = self.get_priority_string(priority)
self.transforms.append(
(priority_string, transform_class, None, kwargs))
self.sorted = 0
def add_transforms(self, transform_list):
"""Store multiple transforms, with default priorities."""
for transform_class in transform_list:
priority_string = self.get_priority_string(
transform_class.default_priority)
self.transforms.append(
(priority_string, transform_class, None, {}))
self.sorted = 0
def add_pending(self, pending, priority=None):
"""Store a transform with an associated `pending` node."""
transform_class = pending.transform
if priority is None:
priority = transform_class.default_priority
priority_string = self.get_priority_string(priority)
self.transforms.append(
(priority_string, transform_class, pending, {}))
self.sorted = 0
def get_priority_string(self, priority):
"""
Return a string, `priority` combined with `self.serialno`.
This ensures FIFO order on transforms with identical priority.
"""
self.serialno += 1
return '%03d-%03d' % (priority, self.serialno)
def populate_from_components(self, components):
"""
Store each component's default transforms, with default priorities.
Also, store components by type name in a mapping for later lookup.
"""
for component in components:
if component is None:
continue
self.add_transforms(component.get_transforms())
self.components[component.component_type] = component
self.sorted = 0
# Set up all of the reference resolvers for this transformer. Each
# component of this transformer is able to register its own helper
# functions to help resolve references.
unknown_reference_resolvers = []
for i in components:
unknown_reference_resolvers.extend(i.unknown_reference_resolvers)
decorated_list = [(f.priority, f) for f in unknown_reference_resolvers]
decorated_list.sort()
self.unknown_reference_resolvers.extend([f[1] for f in decorated_list])
def apply_transforms(self):
"""Apply all of the stored transforms, in priority order."""
self.document.reporter.attach_observer(
self.document.note_transform_message)
while self.transforms:
if not self.sorted:
# Unsorted initially, and whenever a transform is added.
self.transforms.sort()
self.transforms.reverse()
self.sorted = 1
priority, transform_class, pending, kwargs = self.transforms.pop()
transform = transform_class(self.document, startnode=pending)
transform.apply(**kwargs)
self.applied.append((priority, transform_class, pending, kwargs))
|
ricomoss/learn-tech | python/track_1/lesson4/exercise.py | Python | gpl-3.0 | 1,793 | 0.000558 | #!/usr/bin/p | ython
from __future__ import unicode_literals
import os
def wait():
raw_input('\nPress Enter to continue. | ..\n\n')
os.system(['clear', 'cls'][os.name == 'nt'])
# Create a class to handle items in a wallet
class BaseWalletHandler(object):
def __init__(self):
self.items = {
'Driver\'s License': False,
'Credit Card': False,
'Cash': False,
'Change': False,
'Insurance Card': False,
'ICE Info': False,
'Pictures': False,
}
def add_item(self, item):
if item in self.items.keys():
self.items[item] = True
def remove_item(self, item):
if item in self.items.keys():
self.items[item] = False
def show_items(self):
for key, value in self.items.items():
if value is True:
print key
# Can more refactoring happen to clean this up more?
class WalletHandler(BaseWalletHandler):
def __init__(self):
super(WalletHandler, self).__init__()
def add_item(self, item):
super(WalletHandler, self).add_item(item)
if item not in self.items.keys():
self.items[item] = True
def exercise():
wallet_handler = BaseWalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
wallet_handler = WalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
if __name__=='__main__':
exercise()
|
INI-ratlab/ratlab | util/ratbot.py | Python | gpl-3.0 | 8,302 | 0.038063 | #==============================================================================
#
# Copyright (C) 2016 Fabian Schoenfeld
#
# This file is part of the ratlab software. It is free software; you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# a special exception for linking and compiling against the pe library, the
# so-called "runtime exception"; see the file COPYING. If not, see:
# http://www.gnu.org/licenses/
#
#==============================================================================
#======================================================================[ Setup ]
# system
import sys
# math
import math
import numpy as np
import random as rnd
# OpenGL
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
# utilities / own
import freezeable
Freezeable = freezeable.Freezeable
#------------------------------------------------------------------[ Constants ]
def_RAD2DEG = 180.0/math.pi
def_DEG2RAD = math.pi/180.0
#------------------------------------------------------------------[ Numpy Mod ]
np.seterr(divide='ignore') # ignore 'division by zero' errors (occur on path reset)
#==============================================================[ RatBot Class ]
class RatBot( Freezeable ):
"""
Class to set up and control a virtual rodend.
"""
#----------------------------------------------------------[ Construction ]
def __init__( self, pos, control ):
"""
Constructor. Initializes the rat bot.
pos : Valid 2D position within the simulation world.
control: Simulation control panel. Defined in ratlab.py.
"""
rnd.seed()
# simulation control panel
self.__ctrl__ = control
# path
self.__path__ = []
self.__path__.append( pos )
# follow path if specified via file
if control.setup.rat.path != None:
f = open( './current_experiment/' + control.setup.rat.path )
control.setup.rat.path = np.zeros( [sum(1 for l in f),2] )
f.seek(0)
for i,l in enumerate(f):
c = l.split()
control.setup.rat.path[i] = np.array([c[0],c[1]])
self.__path_index__ = 1
# reset starting position
self.__path__[0] = control.setup.rat.path[0]
# lockdown
self.freeze()
#-----------------------------------------------------------[ Path Control ]
def getPath( self ):
"""
Retrieve the rat's path data so far. The function returns an array of 2D
positions.
"""
return self.__path__
def __gaussianWhiteNoise2D__( self, dir=None ):
# random unrestricted direction
if dir == None or self.__ctrl__.setup.rat.arc == 360.0:
angle = (rnd.random()*360.0) * def_DEG2RAD
return np.array( [math.cos(angle),math.sin(angle)] )
# random direction focused around given velocity vector
else:
try:
dir_n = dir / math.sqrt( dir[0]**2+dir[1]**2 )
dir_a = math.asin( abs(dir_n[1]) ) * def_RAD2DEG
if dir_n[0]<=0 and dir_n[1]>=0: dir_a =180.0-dir_a
elif dir_n[0]<=0 and dir_n[1]<=0: dir_a =180.0+dir_a
elif dir_n[0]>=0 and dir_n[1]<=0: dir_a =360.0-dir_a
rat_fov = self.__ctrl__.setup.rat.arc
angle = (dir_a-rat_fov/2.0 + rnd.random()*rat_fov) * def_DEG2RAD
return np.array( [math.cos(angle),math.sin(angle)] )
except ValueError:
# random rebound in case the path gets stuck in a corner
return self.__gaussianWhiteNoise2D__()
def followPathNodes( self ):
# switch to next nav point when necessary
path = self.__ctrl__.setup.rat.path
pos = self.__path__[ len(self.__path__)-1 ]
dist = np.sqrt(np.vdot(pos-path[self.__path_index__],pos-path[self.__path_index__]))
if dist < self.__ctrl__.setup.rat.speed:
self.__path_index__ += 1
self.__path_index__ %= len(path)
# end of non-loop path: teleport back to starting position
if self.__path_index__ == 0 and self.__ctrl__.setup.rat.path_loop == False:
pos_next = path[0]
trajectory = np.array( path[1]-path[0], dtype=np.float32 )
trajectory /= np.sqrt( np.vdot(trajectory,trajectory) )
self.__path__.append( pos_next )
return (pos_next, trajectory)
# new step
step = np.array( path[self.__path_index__]-pos, dtype=np.float32 )
step /= np.sqrt(np.vdot(step,step))
noise = self.__ctrl__.setup.rat.path_dev
while True:
if np.random.random() > 0.5:
step += np.array( [-step[1],step[0]] )*noise
else:
step += np.array( [step[1],-step[0]] )*noise
step *= self.__ctrl__.setup.rat.speed
# check for valid step
pos_next = pos + step
#if self.__ctrl__.modules.world.validStep( pos, pos_next ) == True:
self.__path__.append( pos_next )
return (pos_next, step)
#else:
# noise *= 0.5
def nextPathStep( self ):
"""
Generate the next step of the rat's movement.
"""
# following a path?
if self.__ctrl__.set | up.rat.path != None:
return self.followPathNodes()
# current position & velocity/direction
pos = self.__path__[len(self.__path__)-1]
pos_next = np.array([np.nan,np.nan])
if len(self.__path__) > 1: vel = pos-self.__path__[len(self.__path__)-2]
else: vel = self.__gaussianWhiteNoise2D__()
# generate next step
while True:
noise = self.__gaussianWhiteNoise2D__(vel)
mom = self.__ctrl__.setup.rat.path_mom
step = vel*mom + noise*(1.0-mom)
step /= np.sqrt(np.vdot(step,st | ep))
step *= self.__ctrl__.setup.rat.speed
# optional movement bias
bias = self.__ctrl__.setup.rat.bias
step += bias*(np.dot(bias,step)**2)*np.sign(np.dot(bias,step))*self.__ctrl__.setup.rat.bias_s
# check for valid step
pos_next = pos + step
if self.__ctrl__.modules.world.validStep( pos, pos_next ) == False: vel *= 0.5
else: break
# set and confirm
self.__path__.append(pos_next)
return (pos_next, pos_next-pos)
if False: ##########################################################################################OLD RULES
# current position and velocity ##########
vel = None #
pos = self.__path__[ len(self.__path__)-1 ] #
pos_next = np.array( [0.0,0.0] ) #
if len( self.__path__ ) == 1: #
vel = self.__gaussianWhiteNoise2D__() #
else: #
vel = pos - self.__path__[ len(self.__path__)-2 ] #
vel *= self.__ctrl__.setup.rat.speed #
# next step #
check = False #
pos_next = np.array( [0.0,0.0] ) #
while check == False: #
# random path variation (rummaging behavior) #
noise = self.__gaussianWhiteNoise2D__( vel ) #
# step width according to momentum term #
momentum = self.__ctrl__.setup.rat.path_mom #
step = vel*momentum + noise*(1.0-momentum) #
# step modified by optional movement bias #
step_n = step/(math.sqrt(step[0]**2+step[1]**2)) #
bias = self.__ctrl__.setup.rat.bias #
step += bias*(np.dot(bias,step_n)**2)*np.sign(np.dot(bias,step_n))*self.__ctrl__.setup.rat.bias_s #
# add up position & check for validity #
pos_next = pos + step #
check = self.__ctrl__.modules.world.validStep( pos, pos_next ) #
if check == False: #
vel *= 0.5 #
vel = pos_next - pos #
pos = pos_next #
self.__path__.append( pos ) #
# return generated state in the format ( [pos_x,pos_y], [vel_x,vel_y] ) #
return (pos, vel) |
chrys87/fenrir | src/fenrirscreenreader/core/settingsData.py | Python | lgpl-3.0 | 3,330 | 0.014786 | #!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributers.
from fenrirscreenreader.core import debug
settingsData = {
'sound': {
'enabled': True,
'driver': 'genericDriver',
'theme': 'default',
'volume': 1.0,
'genericPlayFileCommand': 'play -q -v fenrirVolume fenrirSoundFile',
'genericFrequencyCommand': 'play -q -v fenrirVolume -n -c1 synth fenrirDuration sine fenrirFrequence'
},
'speech':{
'enabled': True,
'driver': 'genericDriver',
'serverPath': '',
'rate': 0.75,
'pitch': 0.5,
'capitalPitch':0.8,
'volume': 1.0,
'module': '',
'voice': 'en-us',
'language': '',
'autoReadIncoming': True,
'genericSpeechCommand':'espeak -a fenrirVolume -s fenrirRate -p fenrirPitch -v fenrirVoice "fenrirText"',
'fenrirMinVolume':0,
'fenrirMaxVolume':200,
'fenrirMinPitch':0,
'fenrirMaxPitch':99,
'fenrirMinRate':80,
'fenrirMaxRate':450,
},
'braille':{
'enabled': False,
'driver':'brlapiDriver',
'layout': 'en',
'flushMode': 'word', #NONE,FIX,CHAR,WORD
'flushTimeout': 3,
'cursorFocusMode':'page', # page,fixCell
'fixCursorOnCell': -1,
'cursorFollowMode': 'review', # none, review, last, text
'panSizeHorizontal': 0 # 0 = display size
},
'screen':{
'driver': 'vcsaDriver',
'encoding': 'auto',
'screenUpdateDelay': 0.1,
'suspendingScreen': '',
'autodetectSuspendingScreen': False,
},
'general':{
'debugLevel': debug.debugLevel.DEACTIVE,
'debugMode': 'FILE',
'debugFile': '',
'punctuationProfile':'default',
'punctuationLevel': 'some',
'respectPunctuationPause':True,
'newLinePause':True,
'numberOfClipboards': 10,
'emoticons': True,
'fenrirKeys': 'KEY_KP0,KEY_META',
'scriptKeys': 'KEY_COMPOSE',
'timeFormat': '%I | :%M%P',
'dateFormat': '%A, %B %d, %Y',
'autoSpellCheck': False,
'spellCheckLanguage': 'en_US',
'scriptPath': '/us | r/share/fenrirscreenreader/scripts',
'commandPath': '/usr/share/fenrirscreenreader/commands',
'attributeFormatString': 'Background fenrirBGColor,Foreground fenrirFGColor,fenrirUnderline,fenrirBold,fenrirBlink, Font fenrirFont,Fontsize fenrirFontSize',
'autoPresentIndent': False,
'autoPresentIndentMode': 1,
'hasAttributes': True,
'shell': '',
},
'focus':{
'cursor': True,
'highlight': False,
},
'remote':{
'enabled': True,
'driver': 'unixDriver',
'port': 22447,
'socketFile':'',
'enableSettingsRemote': True,
'enableCommandRemote': True,
},
'barrier':{
'enabled': True,
'leftBarriers': '│└┌─',
'rightBarriers': '│┘┐─',
},
'review':{
'lineBreak': True,
'endOfScreen': True,
'leaveReviewOnCursorChange': True,
'leaveReviewOnScreenChange': True,
},
'menu':{
'vmenuPath': '',
'quickMenu': 'speech#rate;speech#pitch;speech#volume',
},
'promote':{
'enabled': True,
'inactiveTimeoutSec': 120,
'list': '',
},
'time':{
'enabled': False,
'presentTime': True,
'presentDate': True,
'delaySec': 0,
'onMinutes': '00,30',
'announce': True,
'interrupt': False,
},
'keyboard':{
'driver': 'evdev',
'device': 'all',
'grabDevices': True,
'ignoreShortcuts': False,
'keyboardLayout': "desktop",
'charEchoMode': 2, # while capslock
'charDeleteEcho': True,
'wordEcho': True,
'interruptOnKeyPress': True,
'interruptOnKeyPressFilter': '',
'doubleTapTimeout': 0.2,
}
}
|
jsha/letsencrypt | acme/acme/messages.py | Python | apache-2.0 | 14,264 | 0.00028 | """ACME protocol messages."""
import collections
import six
from acme import challenges
from acme import errors
from acme import fields
from acme import jose
from acme import util
OLD_ERROR_PREFIX = "urn:acme:error:"
ERROR_PREFIX = "urn:ietf:params:acme:error:"
ERROR_CODES = {
'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',
'badNonce': 'The client sent an unacceptable anti-replay nonce',
'connection': ('The server could not connect to the client to verify the'
' domain'),
'dnssec': 'The server could not validate a DNSSEC signed domain',
# deprecate invalidEmail
'invalidEmail': 'The provided email for a registration was invalid',
'invalidContact': 'The provided contact URI was invalid',
'malformed': 'The request message was malformed',
'rateLimited': 'There were too many requests of a given type',
'serverInternal': 'The server experienced an internal error',
'tls': 'The server experienced a TLS error during domain verification',
'unauthorized': 'The client lacks sufficient authorization',
'unknownHost': 'The server could not resolve a domain name',
}
ERROR_TYPE_DESCRIPTIONS = dict(
(ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items())
ERROR_TYPE_DESCRIPTIONS.update(dict( # add errors with old prefix, deprecate me
(OLD_ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items()))
def is_acme_error(err):
"""Check if argument is an ACME error."""
if isinstance(err, Error) and (err.typ is not None):
return (ERROR_PREFIX in err.typ) or (OLD_ERROR_PREFIX in err.typ)
else:
return False
@six.python_2_unicode_compatible
class Error(jose.JSONObjectWithFields, errors.Error):
"""ACME error.
https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
:ivar unicode typ:
| :ivar unicode title:
:ivar unicode detail:
"""
typ = jose.Field('type', omitempty=True, defau | lt='about:blank')
title = jose.Field('title', omitempty=True)
detail = jose.Field('detail', omitempty=True)
@classmethod
def with_code(cls, code, **kwargs):
"""Create an Error instance with an ACME Error code.
:unicode code: An ACME error code, like 'dnssec'.
:kwargs: kwargs to pass to Error.
"""
if code not in ERROR_CODES:
raise ValueError("The supplied code: %s is not a known ACME error"
" code" % code)
typ = ERROR_PREFIX + code
return cls(typ=typ, **kwargs)
@property
def description(self):
"""Hardcoded error description based on its type.
:returns: Description if standard ACME error or ``None``.
:rtype: unicode
"""
return ERROR_TYPE_DESCRIPTIONS.get(self.typ)
@property
def code(self):
"""ACME error code.
Basically self.typ without the ERROR_PREFIX.
:returns: error code if standard ACME code or ``None``.
:rtype: unicode
"""
code = str(self.typ).split(':')[-1]
if code in ERROR_CODES:
return code
def __str__(self):
return b' :: '.join(
part.encode('ascii', 'backslashreplace') for part in
(self.typ, self.description, self.detail, self.title)
if part is not None).decode()
class _Constant(jose.JSONDeSerializable, collections.Hashable): # type: ignore
"""ACME constant."""
__slots__ = ('name',)
POSSIBLE_NAMES = NotImplemented
def __init__(self, name):
self.POSSIBLE_NAMES[name] = self
self.name = name
def to_partial_json(self):
return self.name
@classmethod
def from_json(cls, value):
if value not in cls.POSSIBLE_NAMES:
raise jose.DeserializationError(
'{0} not recognized'.format(cls.__name__))
return cls.POSSIBLE_NAMES[value]
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and other.name == self.name
def __hash__(self):
return hash((self.__class__, self.name))
def __ne__(self, other):
return not self == other
class Status(_Constant):
"""ACME "status" field."""
POSSIBLE_NAMES = {} # type: dict
STATUS_UNKNOWN = Status('unknown')
STATUS_PENDING = Status('pending')
STATUS_PROCESSING = Status('processing')
STATUS_VALID = Status('valid')
STATUS_INVALID = Status('invalid')
STATUS_REVOKED = Status('revoked')
class IdentifierType(_Constant):
"""ACME identifier type."""
POSSIBLE_NAMES = {} # type: dict
IDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder
class Identifier(jose.JSONObjectWithFields):
"""ACME identifier.
:ivar IdentifierType typ:
:ivar unicode value:
"""
typ = jose.Field('type', decoder=IdentifierType.from_json)
value = jose.Field('value')
class Directory(jose.JSONDeSerializable):
"""Directory."""
_REGISTERED_TYPES = {} # type: dict
class Meta(jose.JSONObjectWithFields):
"""Directory Meta."""
terms_of_service = jose.Field('terms-of-service', omitempty=True)
website = jose.Field('website', omitempty=True)
caa_identities = jose.Field('caa-identities', omitempty=True)
@classmethod
def _canon_key(cls, key):
return getattr(key, 'resource_type', key)
@classmethod
def register(cls, resource_body_cls):
"""Register resource."""
resource_type = resource_body_cls.resource_type
assert resource_type not in cls._REGISTERED_TYPES
cls._REGISTERED_TYPES[resource_type] = resource_body_cls
return resource_body_cls
def __init__(self, jobj):
canon_jobj = util.map_keys(jobj, self._canon_key)
# TODO: check that everything is an absolute URL; acme-spec is
# not clear on that
self._jobj = canon_jobj
def __getattr__(self, name):
try:
return self[name.replace('_', '-')]
except KeyError as error:
raise AttributeError(str(error) + ': ' + name)
def __getitem__(self, name):
try:
return self._jobj[self._canon_key(name)]
except KeyError:
raise KeyError('Directory field not found')
def to_partial_json(self):
return self._jobj
@classmethod
def from_json(cls, jobj):
jobj['meta'] = cls.Meta.from_json(jobj.pop('meta', {}))
return cls(jobj)
class Resource(jose.JSONObjectWithFields):
"""ACME Resource.
:ivar acme.messages.ResourceBody body: Resource body.
"""
body = jose.Field('body')
class ResourceWithURI(Resource):
"""ACME Resource with URI.
:ivar unicode uri: Location of the resource.
"""
uri = jose.Field('uri') # no ChallengeResource.uri
class ResourceBody(jose.JSONObjectWithFields):
"""ACME Resource Body."""
class Registration(ResourceBody):
"""Registration Resource Body.
:ivar acme.jose.jwk.JWK key: Public key.
:ivar tuple contact: Contact information following ACME spec,
`tuple` of `unicode`.
:ivar unicode agreement:
"""
# on new-reg key server ignores 'key' and populates it based on
# JWS.signature.combined.jwk
key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)
contact = jose.Field('contact', omitempty=True, default=())
agreement = jose.Field('agreement', omitempty=True)
status = jose.Field('status', omitempty=True)
phone_prefix = 'tel:'
email_prefix = 'mailto:'
@classmethod
def from_data(cls, phone=None, email=None, **kwargs):
"""Create registration resource from contact details."""
details = list(kwargs.pop('contact', ()))
if phone is not None:
details.append(cls.phone_prefix + phone)
if email is not None:
details.append(cls.email_prefix + email)
kwargs['contact'] = tuple(details)
return cls(**kwargs)
def _filter_contact(self, prefix):
return tuple(
detail[len(prefix):] for detail in self.contact
if detail.startswith(p |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.