| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
TheTacoScott/JudgeFoo
|
refs/heads/master
|
app/models.py
|
1
|
from django.db import models
from django.contrib.auth.models import User
class Performer(models.Model):
user = models.ForeignKey(User, unique=True)
name = models.CharField(max_length=256)
pic_url = models.CharField(max_length=4096)
copy = models.CharField(max_length=4096)
class Judge(models.Model):
name = models.CharField(max_length=256)
user = models.ForeignKey(User, unique=True)
class Review(models.Model):
    # 'Performance' is referenced by name because that class is defined below.
    performance = models.ForeignKey('Performance')
    judge = models.ForeignKey(Judge)
    notes = models.CharField(max_length=4096)
class Performance(models.Model):
active = models.BooleanField(default=False)
planned_time = models.DateTimeField()
performers = models.ManyToManyField(Performer)
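
# A minimal usage sketch (not part of the original file; assumes a configured
# Django project and existing auth Users -- the names below are illustrative):
#
#   judge = Judge.objects.create(name="Alice", user=judge_user)
#   show = Performance.objects.create(planned_time=timezone.now(), active=True)
#   bob = Performer.objects.create(user=performer_user, name="Bob",
#                                  pic_url="", copy="")
#   show.performers.add(bob)
#   Review.objects.create(performance=show, judge=judge, notes="Great set!")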
|
moble/sympy
|
refs/heads/master
|
sympy/polys/tests/test_euclidtools.py
|
98
|
"""Tests for Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from sympy.polys.rings import ring
from sympy.polys.domains import ZZ, QQ, RR
from sympy.core.compatibility import range
from sympy.polys.specialpolys import (
f_polys,
dmp_fateman_poly_F_1,
dmp_fateman_poly_F_2,
dmp_fateman_poly_F_3)
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
def test_dup_gcdex():
R, x = ring("x", QQ)
f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
g = x**3 + x**2 - 4*x - 4
s = -QQ(1,5)*x + QQ(3,5)
t = QQ(1,5)*x**2 - QQ(6,5)*x + 2
h = x + 1
assert R.dup_half_gcdex(f, g) == (s, h)
assert R.dup_gcdex(f, g) == (s, t, h)
f = x**4 + 4*x**3 - x + 1
g = x**3 - x + 1
s, t, h = R.dup_gcdex(f, g)
S, T, H = R.dup_gcdex(g, f)
assert R.dup_add(R.dup_mul(s, f),
R.dup_mul(t, g)) == h
assert R.dup_add(R.dup_mul(S, g),
R.dup_mul(T, f)) == H
f = 2*x
g = x**2 - 16
s = QQ(1,32)*x
t = -QQ(1,16)
h = 1
assert R.dup_half_gcdex(f, g) == (s, h)
assert R.dup_gcdex(f, g) == (s, t, h)
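    # Note: dup_gcdex(f, g) returns (s, t, h) satisfying the extended
    # Euclidean identity s*f + t*g == h with h = gcd(f, g), while
    # dup_half_gcdex(f, g) returns just (s, h); the assertions above
    # exercise both forms.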
def test_dup_invert():
R, x = ring("x", QQ)
assert R.dup_invert(2*x, x**2 - 16) == QQ(1,32)*x
def test_dup_euclidean_prs():
R, x = ring("x", QQ)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
assert R.dup_euclidean_prs(f, g) == [
f,
g,
-QQ(5,9)*x**4 + QQ(1,9)*x**2 - QQ(1,3),
-QQ(117,25)*x**2 - 9*x + QQ(441,25),
QQ(233150,19773)*x - QQ(102500,6591),
-QQ(1288744821,543589225)]
def test_dup_primitive_prs():
R, x = ring("x", ZZ)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
assert R.dup_primitive_prs(f, g) == [
f,
g,
-5*x**4 + x**2 - 3,
13*x**2 + 25*x - 49,
4663*x - 6150,
1]
def test_dup_subresultants():
R, x = ring("x", ZZ)
assert R.dup_resultant(0, 0) == 0
assert R.dup_resultant(1, 0) == 0
assert R.dup_resultant(0, 1) == 0
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
a = 15*x**4 - 3*x**2 + 9
b = 65*x**2 + 125*x - 245
c = 9326*x - 12300
d = 260708
assert R.dup_subresultants(f, g) == [f, g, a, b, c, d]
assert R.dup_resultant(f, g) == R.dup_LC(d)
f = x**2 - 2*x + 1
g = x**2 - 1
a = 2*x - 2
assert R.dup_subresultants(f, g) == [f, g, a]
assert R.dup_resultant(f, g) == 0
f = x**2 + 1
g = x**2 - 1
a = -2
assert R.dup_subresultants(f, g) == [f, g, a]
assert R.dup_resultant(f, g) == 4
f = x**2 - 1
g = x**3 - x**2 + 2
assert R.dup_resultant(f, g) == 0
f = 3*x**3 - x
g = 5*x**2 + 1
assert R.dup_resultant(f, g) == 64
f = x**2 - 2*x + 7
g = x**3 - x + 5
assert R.dup_resultant(f, g) == 265
f = x**3 - 6*x**2 + 11*x - 6
g = x**3 - 15*x**2 + 74*x - 120
assert R.dup_resultant(f, g) == -8640
f = x**3 - 6*x**2 + 11*x - 6
g = x**3 - 10*x**2 + 29*x - 20
assert R.dup_resultant(f, g) == 0
f = x**3 - 1
g = x**3 + 2*x**2 + 2*x - 1
assert R.dup_resultant(f, g) == 16
f = x**8 - 2
g = x - 1
assert R.dup_resultant(f, g) == -1
def test_dmp_subresultants():
R, x, y = ring("x,y", ZZ)
assert R.dmp_resultant(0, 0) == 0
assert R.dmp_prs_resultant(0, 0)[0] == 0
assert R.dmp_zz_collins_resultant(0, 0) == 0
assert R.dmp_qq_collins_resultant(0, 0) == 0
    assert R.dmp_resultant(1, 0) == 0
    assert R.dmp_prs_resultant(1, 0)[0] == 0
    assert R.dmp_zz_collins_resultant(1, 0) == 0
    assert R.dmp_qq_collins_resultant(1, 0) == 0
assert R.dmp_resultant(0, 1) == 0
assert R.dmp_prs_resultant(0, 1)[0] == 0
assert R.dmp_zz_collins_resultant(0, 1) == 0
assert R.dmp_qq_collins_resultant(0, 1) == 0
f = 3*x**2*y - y**3 - 4
g = x**2 + x*y**3 - 9
a = 3*x*y**4 + y**3 - 27*y + 4
b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
r = R.dmp_LC(b)
assert R.dmp_subresultants(f, g) == [f, g, a, b]
assert R.dmp_resultant(f, g) == r
assert R.dmp_prs_resultant(f, g)[0] == r
assert R.dmp_zz_collins_resultant(f, g) == r
assert R.dmp_qq_collins_resultant(f, g) == r
f = -x**3 + 5
g = 3*x**2*y + x**2
a = 45*y**2 + 30*y + 5
b = 675*y**3 + 675*y**2 + 225*y + 25
r = R.dmp_LC(b)
assert R.dmp_subresultants(f, g) == [f, g, a]
assert R.dmp_resultant(f, g) == r
assert R.dmp_prs_resultant(f, g)[0] == r
assert R.dmp_zz_collins_resultant(f, g) == r
assert R.dmp_qq_collins_resultant(f, g) == r
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f = 6*x**2 - 3*x*y - 2*x*z + y*z
g = x**2 - x*u - x*v + u*v
r = y**2*z**2 - 3*y**2*z*u - 3*y**2*z*v + 9*y**2*u*v - 2*y*z**2*u \
- 2*y*z**2*v + 6*y*z*u**2 + 12*y*z*u*v + 6*y*z*v**2 - 18*y*u**2*v \
- 18*y*u*v**2 + 4*z**2*u*v - 12*z*u**2*v - 12*z*u*v**2 + 36*u**2*v**2
assert R.dmp_zz_collins_resultant(f, g) == r.drop(x)
R, x, y, z, u, v = ring("x,y,z,u,v", QQ)
f = x**2 - QQ(1,2)*x*y - QQ(1,3)*x*z + QQ(1,6)*y*z
g = x**2 - x*u - x*v + u*v
r = QQ(1,36)*y**2*z**2 - QQ(1,12)*y**2*z*u - QQ(1,12)*y**2*z*v + QQ(1,4)*y**2*u*v \
- QQ(1,18)*y*z**2*u - QQ(1,18)*y*z**2*v + QQ(1,6)*y*z*u**2 + QQ(1,3)*y*z*u*v \
+ QQ(1,6)*y*z*v**2 - QQ(1,2)*y*u**2*v - QQ(1,2)*y*u*v**2 + QQ(1,9)*z**2*u*v \
- QQ(1,3)*z*u**2*v - QQ(1,3)*z*u*v**2 + u**2*v**2
assert R.dmp_qq_collins_resultant(f, g) == r.drop(x)
Rt, t = ring("t", ZZ)
Rx, x = ring("x", Rt)
f = x**6 - 5*x**4 + 5*x**2 + 4
g = -6*t*x**5 + x**4 + 20*t*x**3 - 3*x**2 - 10*t*x + 6
assert Rx.dup_resultant(f, g) == 2930944*t**6 + 2198208*t**4 + 549552*t**2 + 45796
def test_dup_discriminant():
R, x = ring("x", ZZ)
assert R.dup_discriminant(0) == 0
assert R.dup_discriminant(x) == 1
assert R.dup_discriminant(x**3 + 3*x**2 + 9*x - 13) == -11664
assert R.dup_discriminant(5*x**5 + x**3 + 2) == 31252160
assert R.dup_discriminant(x**4 + 2*x**3 + 6*x**2 - 22*x + 13) == 0
assert R.dup_discriminant(12*x**7 + 15*x**4 + 30*x**3 + x**2 + 1) == -220289699947514112
def test_dmp_discriminant():
R, x = ring("x", ZZ)
assert R.dmp_discriminant(0) == 0
R, x, y = ring("x,y", ZZ)
assert R.dmp_discriminant(0) == 0
assert R.dmp_discriminant(y) == 0
assert R.dmp_discriminant(x**3 + 3*x**2 + 9*x - 13) == -11664
assert R.dmp_discriminant(5*x**5 + x**3 + 2) == 31252160
assert R.dmp_discriminant(x**4 + 2*x**3 + 6*x**2 - 22*x + 13) == 0
assert R.dmp_discriminant(12*x**7 + 15*x**4 + 30*x**3 + x**2 + 1) == -220289699947514112
assert R.dmp_discriminant(x**2*y + 2*y) == (-8*y**2).drop(x)
assert R.dmp_discriminant(x*y**2 + 2*x) == 1
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_discriminant(x*y + z) == 1
R, x, y, z, u = ring("x,y,z,u", ZZ)
assert R.dmp_discriminant(x**2*y + x*z + u) == (-4*y*u + z**2).drop(x)
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
assert R.dmp_discriminant(x**3*y + x**2*z + x*u + v) == \
(-27*y**2*v**2 + 18*y*z*u*v - 4*y*u**3 - 4*z**3*v + z**2*u**2).drop(x)
def test_dup_gcd():
R, x = ring("x", ZZ)
f, g = 0, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (0, 0, 0)
f, g = 2, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, 0)
f, g = -2, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, 0)
f, g = 0, -2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 0, -1)
f, g = 0, 2*x + 4
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2*x + 4, 0, 1)
f, g = 2*x + 4, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2*x + 4, 1, 0)
f, g = 2, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, 1)
f, g = -2, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, 1)
f, g = 2, -2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, -1)
f, g = -2, -2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, -1)
f, g = x**2 + 2*x + 1, 1
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 1)
f, g = x**2 + 2*x + 1, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 2)
f, g = 2*x**2 + 4*x + 2, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, x**2 + 2*x + 1, 1)
f, g = 2, 2*x**2 + 4*x + 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, x**2 + 2*x + 1)
f, g = 2*x**2 + 4*x + 2, x + 1
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (x + 1, 1, 2*x + 2)
f, g = x - 31, x
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, f, g)
f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
g = x**3 + 6*x**2 + 11*x + 6
h = x**2 + 3*x + 2
cff = x**2 + 5*x + 4
cfg = x + 3
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg)
f = x**4 - 4
g = x**4 + 4*x**2 + 4
h = x**2 + 2
cff = x**2 - 2
cfg = x**2 + 2
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
h = 1
cff = f
cfg = g
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg)
R, x = ring("x", QQ)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
h = 1
cff = f
cfg = g
assert R.dup_qq_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_ff_prs_gcd(f, g) == (h, cff, cfg)
R, x = ring("x", ZZ)
f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \
+ 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \
+ 378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \
+ 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \
- 12278371209708240950316872681744825481125965781519138077173235712*x**21 \
+ 289127344604779611146960547954288113529690984687482920704*x**14 \
+ 19007977035740498977629742919480623972236450681*x**7 \
+ 311973482284542371301330321821976049
g = 365431878023781158602430064717380211405897160759702125019136*x**21 \
+ 197599133478719444145775798221171663643171734081650688*x**14 \
- 9504116979659010018253915765478924103928886144*x**7 \
- 311973482284542371301330321821976049
assert R.dup_zz_heu_gcd(f, R.dup_diff(f, 1))[0] == g
assert R.dup_rr_prs_gcd(f, R.dup_diff(f, 1))[0] == g
R, x = ring("x", QQ)
f = QQ(1,2)*x**2 + x + QQ(1,2)
g = QQ(1,2)*x + QQ(1,2)
h = x + 1
assert R.dup_qq_heu_gcd(f, g) == (h, g, QQ(1,2))
assert R.dup_ff_prs_gcd(f, g) == (h, g, QQ(1,2))
R, x = ring("x", ZZ)
f = 1317378933230047068160*x + 2945748836994210856960
g = 120352542776360960*x + 269116466014453760
h = 120352542776360960*x + 269116466014453760
cff = 10946
cfg = 1
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
def test_dmp_gcd():
R, x, y = ring("x,y", ZZ)
f, g = 0, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (0, 0, 0)
f, g = 2, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, 0)
f, g = -2, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, 0)
f, g = 0, -2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 0, -1)
f, g = 0, 2*x + 4
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2*x + 4, 0, 1)
f, g = 2*x + 4, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2*x + 4, 1, 0)
f, g = 2, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, 1)
f, g = -2, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, 1)
f, g = 2, -2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, -1)
f, g = -2, -2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, -1)
f, g = x**2 + 2*x + 1, 1
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 1)
f, g = x**2 + 2*x + 1, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 2)
f, g = 2*x**2 + 4*x + 2, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, x**2 + 2*x + 1, 1)
f, g = 2, 2*x**2 + 4*x + 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, x**2 + 2*x + 1)
f, g = 2*x**2 + 4*x + 2, x + 1
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (x + 1, 1, 2*x + 2)
R, x, y, z, u = ring("x,y,z,u", ZZ)
f, g = u**2 + 2*u + 1, 2*u + 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (u + 1, u + 1, 2)
f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1
h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1
assert R.dmp_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dmp_rr_prs_gcd(f, g) == (h, cff, cfg)
assert R.dmp_zz_heu_gcd(g, f) == (h, cfg, cff)
assert R.dmp_rr_prs_gcd(g, f) == (h, cfg, cff)
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(2, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
H, cff, cfg = R.dmp_rr_prs_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(4, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(6, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(8, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_2(2, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
H, cff, cfg = R.dmp_rr_prs_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
f, g, h = map(R.from_dense, dmp_fateman_poly_F_3(2, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
H, cff, cfg = R.dmp_rr_prs_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_3(4, ZZ))
H, cff, cfg = R.dmp_inner_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y = ring("x,y", QQ)
f = QQ(1,2)*x**2 + x + QQ(1,2)
g = QQ(1,2)*x + QQ(1,2)
h = x + 1
assert R.dmp_qq_heu_gcd(f, g) == (h, g, QQ(1,2))
assert R.dmp_ff_prs_gcd(f, g) == (h, g, QQ(1,2))
R, x, y = ring("x,y", RR)
f = 2.1*x*y**2 - 2.2*x*y + 2.1*x
g = 1.0*x**3
assert R.dmp_ff_prs_gcd(f, g) == \
(1.0*x, 2.1*y**2 - 2.2*y + 2.1, 1.0*x**2)
def test_dup_lcm():
R, x = ring("x", ZZ)
assert R.dup_lcm(2, 6) == 6
assert R.dup_lcm(2*x**3, 6*x) == 6*x**3
assert R.dup_lcm(2*x**3, 3*x) == 6*x**3
assert R.dup_lcm(x**2 + x, x) == x**2 + x
assert R.dup_lcm(x**2 + x, 2*x) == 2*x**2 + 2*x
assert R.dup_lcm(x**2 + 2*x, x) == x**2 + 2*x
assert R.dup_lcm(2*x**2 + x, x) == 2*x**2 + x
assert R.dup_lcm(2*x**2 + x, 2*x) == 4*x**2 + 2*x
def test_dmp_lcm():
R, x, y = ring("x,y", ZZ)
assert R.dmp_lcm(2, 6) == 6
assert R.dmp_lcm(x, y) == x*y
assert R.dmp_lcm(2*x**3, 6*x*y**2) == 6*x**3*y**2
assert R.dmp_lcm(2*x**3, 3*x*y**2) == 6*x**3*y**2
assert R.dmp_lcm(x**2*y, x*y**2) == x**2*y**2
f = 2*x*y**5 - 3*x*y**4 - 2*x*y**3 + 3*x*y**2
g = y**5 - 2*y**3 + y
h = 2*x*y**7 - 3*x*y**6 - 4*x*y**5 + 6*x*y**4 + 2*x*y**3 - 3*x*y**2
assert R.dmp_lcm(f, g) == h
f = x**3 - 3*x**2*y - 9*x*y**2 - 5*y**3
g = x**4 + 6*x**3*y + 12*x**2*y**2 + 10*x*y**3 + 3*y**4
h = x**5 + x**4*y - 18*x**3*y**2 - 50*x**2*y**3 - 47*x*y**4 - 15*y**5
assert R.dmp_lcm(f, g) == h
def test_dmp_content():
R, x,y = ring("x,y", ZZ)
assert R.dmp_content(-2) == 2
f, g, F = 3*y**2 + 2*y + 1, 1, 0
for i in range(0, 5):
g *= f
F += x**i*g
assert R.dmp_content(F) == f.drop(x)
R, x,y,z = ring("x,y,z", ZZ)
assert R.dmp_content(f_4) == 1
assert R.dmp_content(f_5) == 1
R, x,y,z,t = ring("x,y,z,t", ZZ)
assert R.dmp_content(f_6) == 1
def test_dmp_primitive():
R, x,y = ring("x,y", ZZ)
assert R.dmp_primitive(0) == (0, 0)
assert R.dmp_primitive(1) == (1, 1)
f, g, F = 3*y**2 + 2*y + 1, 1, 0
for i in range(0, 5):
g *= f
F += x**i*g
assert R.dmp_primitive(F) == (f.drop(x), F / f)
R, x,y,z = ring("x,y,z", ZZ)
cont, f = R.dmp_primitive(f_4)
assert cont == 1 and f == f_4
cont, f = R.dmp_primitive(f_5)
assert cont == 1 and f == f_5
R, x,y,z,t = ring("x,y,z,t", ZZ)
cont, f = R.dmp_primitive(f_6)
assert cont == 1 and f == f_6
def test_dup_cancel():
R, x = ring("x", ZZ)
f = 2*x**2 - 2
g = x**2 - 2*x + 1
p = 2*x + 2
q = x - 1
assert R.dup_cancel(f, g) == (p, q)
assert R.dup_cancel(f, g, include=False) == (1, 1, p, q)
f = -x - 2
g = 3*x - 4
F = x + 2
G = -3*x + 4
assert R.dup_cancel(f, g) == (f, g)
assert R.dup_cancel(F, G) == (f, g)
assert R.dup_cancel(0, 0) == (0, 0)
assert R.dup_cancel(0, 0, include=False) == (1, 1, 0, 0)
assert R.dup_cancel(x, 0) == (1, 0)
assert R.dup_cancel(x, 0, include=False) == (1, 1, 1, 0)
assert R.dup_cancel(0, x) == (0, 1)
assert R.dup_cancel(0, x, include=False) == (1, 1, 0, 1)
f = 0
g = x
one = 1
assert R.dup_cancel(f, g, include=True) == (f, one)
def test_dmp_cancel():
R, x, y = ring("x,y", ZZ)
f = 2*x**2 - 2
g = x**2 - 2*x + 1
p = 2*x + 2
q = x - 1
assert R.dmp_cancel(f, g) == (p, q)
assert R.dmp_cancel(f, g, include=False) == (1, 1, p, q)
assert R.dmp_cancel(0, 0) == (0, 0)
assert R.dmp_cancel(0, 0, include=False) == (1, 1, 0, 0)
assert R.dmp_cancel(y, 0) == (1, 0)
assert R.dmp_cancel(y, 0, include=False) == (1, 1, 1, 0)
assert R.dmp_cancel(0, y) == (0, 1)
assert R.dmp_cancel(0, y, include=False) == (1, 1, 0, 1)
|
SatelliteQE/robottelo
|
refs/heads/master
|
robottelo/cli/user.py
|
3
|
"""
Usage::
hammer user [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
add-role Assign a user role
create Create a user.
delete Delete a user.
info Show a user.
list List all users.
remove-role Remove a user role
ssh-keys Managing User SSH Keys.
update Update a user.
"""
from robottelo.cli.base import Base
class User(Base):
"""
Manipulates Foreman's users.
"""
command_base = 'user'
@classmethod
def add_role(cls, options=None):
"""Add a role to a user."""
cls.command_sub = 'add-role'
return cls.execute(cls._construct_command(options), output_format='csv')
@classmethod
def remove_role(cls, options=None):
"""Remove a role from user."""
cls.command_sub = 'remove-role'
return cls.execute(cls._construct_command(options), output_format='csv')
@classmethod
def ssh_keys_add(cls, options=None):
"""
Usage:
hammer user ssh-keys add [OPTIONS]
Options:
--key KEY Public SSH key
--key-file KEY_FILE Path to a SSH public key
--location LOCATION_NAME Location name
--location-id LOCATION_ID
--location-title LOCATION_TITLE Location title
--name NAME
--organization ORGANIZATION_NAME Organization name
--organization-id ORGANIZATION_ID Organization ID
--organization-title ORGANIZATION_TITLE Organization title
--user USER_LOGIN User's login to search by
--user-id USER_ID
"""
cls.command_sub = 'ssh-keys add'
return cls.execute(cls._construct_command(options), output_format='csv')
@classmethod
def ssh_keys_delete(cls, options=None):
"""
Usage:
hammer user ssh-keys delete [OPTIONS]
"""
cls.command_sub = 'ssh-keys delete'
return cls.execute(cls._construct_command(options), output_format='csv')
@classmethod
def ssh_keys_list(cls, options=None):
"""
Usage:
hammer user ssh-keys list [OPTIONS]
"""
cls.command_sub = 'ssh-keys list'
return cls.execute(cls._construct_command(options), output_format='csv')
@classmethod
def ssh_keys_info(cls, options=None):
"""
Usage:
hammer user ssh-keys info [OPTIONS]
"""
cls.command_sub = 'ssh-keys info'
return cls.execute(cls._construct_command(options), output_format='csv')
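
# A minimal usage sketch (not from the original module; assumes a reachable
# Foreman instance and that the option keys below match the hammer help text
# quoted in the docstrings):
#
#   from robottelo.cli.user import User
#   User.add_role({'login': 'jdoe', 'role': 'Viewer'})
#   User.ssh_keys_list({'user': 'jdoe'})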
|
umanoidTyphoon/AMICO-CM_Sketches
|
refs/heads/master
|
demo/csv_files_generator.py
|
1
|
__author__ = 'umanoidTyphoon'
from collections import defaultdict
from datetime import datetime
from time import time
# python-geoip is a library that provides access to GeoIP databases. Currently it only supports accessing MaxMind
# databases. It's similar to other GeoIP libraries but comes under the very liberal BSD license and also provides
# an extra library that optionally ships a recent version of the Geolite2 database as provided by MaxMind.
#
# The python-geoip-geolite2 package includes GeoLite2 data created by MaxMind, available from maxmind.com under
# the Creative Commons Attribution-ShareAlike 3.0 Unported License.
from geoip import geolite2
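# Example lookup (a sketch; actual values depend on the bundled GeoLite2
# database snapshot):
#   match = geolite2.lookup('8.8.8.8')
#   if match is not None:
#       print match.country, match.location  # e.g. 'US', (lat, lon)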
import copy
import csv
import json
import operator
import util
CLASSIFICATION_GRAPH_ID = 4
DOWNLOAD_GRAPH_ID = 1
MAP_GRAPH_ID = 2
TRAFFIC_GRAPH_ID = 0
TRAINING_GRAPH_ID = 3
APK_FILE_TYPE = 0
DMG_FILE_TYPE = 1
ELF_FILE_TYPE = 2
EXE_FILE_TYPE = 3
PDF_FILE_TYPE = 4
SWF_FILE_TYPE = 5
JAR_FILE_TYPE = 6
RAR_FILE_TYPE = 7
ZIP_FILE_TYPE = 8
TCPSTAT_TIMESTAMP_FIELD = 0
RECEIVED_PACKETS_FIELD = 1
PACKET_AVG_SIZE_FIELD = 2
PACKET_SIZE_DEV_FIELD = 3
BANDWIDTH_BPS_FIELD = 4
AMICO_THRESHOLD = 0.4
IN_DIR = "./in"
IN_0 = "10_MIN_filtered_trace_2015-05-12_11_00_00.tcpstat"
IN_1 = "1_HOUR_filtered_trace_2015-05-12_11_00_00.tcpstat"
IN_2 = "filtered_trace_2015-05-12_00_46_12.tcpstat"
OUT_DIR = "./out"
def generate_graph(graph_id):
if graph_id == TRAFFIC_GRAPH_ID:
generate_CSV_traffic_file()
return
if graph_id == DOWNLOAD_GRAPH_ID:
generate_CSV_download_file()
return
if graph_id == MAP_GRAPH_ID:
generate_JSON_map_file()
return
if graph_id == TRAINING_GRAPH_ID:
generate_CSV_training_file()
return
if graph_id == CLASSIFICATION_GRAPH_ID:
generate_CSV_classification_file()
return
def format_row(value_list, max_values):
list_size = len(value_list)
max_iterations = max_values - list_size
for iteration in range(max_iterations):
value_list.append(0)
return value_list
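# Example (illustrative): pad a partially filled row with zeros up to the
# expected number of value columns,
#   format_row([3, 7], 6)  ->  [3, 7, 0, 0, 0, 0]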
def generate_CSV_download_file():
connection = util.connect_to_db()
connection_cursor = connection.cursor()
csv_writer = None
header = "Second_ID,Mal_APK,Tot_APK,Mal_DMG,Tot_DMG,Mal_ELF,Tot_ELF,Mal_EXE,Tot_EXE,Mal_PDF,Tot_PDF,Mal_SWF," + \
"Tot_SWF,Mal_JAR,Tot_JAR,Mal_RAR,Tot_RAR,Mal_ZIP,Tot_ZIP,Timestamp,Next_Download_Event_[s]"
header_list = ["Second_ID", "Mal_APK", "Tot_APK", "Mal_DMG", "Tot_DMG", "Mal_ELF", "Tot_ELF", "Mal_EXE", "Tot_EXE",
"Mal_PDF", "Tot_PDF", "Mal_SWF", "Tot_SWF", "Mal_JAR", "Tot_JAR", "Mal_RAR", "Tot_RAR", "Mal_ZIP",
"Tot_ZIP", "Timestamp", "Next_Download_Event_[s]"]
created_csv_file = OUT_DIR + "/" + str(DOWNLOAD_GRAPH_ID) + "-downloads_" + \
datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H-%M-%S') + ".csv"
with open(created_csv_file, "wb") as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(header_list)
csv_map = defaultdict(list)
malware_timestamp_set = set()
##################################################### EXECUTABLES #####################################################
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'APK' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_apk_count_per_second = row[1]
csv_map[timestamp].append(malware_apk_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'APK' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_apk_count_per_second = row[1]
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_apk_count_per_second)
else:
csv_map[timestamp].extend([0, total_apk_count_per_second])
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'DMG' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_dmg_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0])
csv_map[timestamp].append(malware_dmg_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'DMG' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_dmg_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_dmg_count_per_second)
else:
csv_map[timestamp].extend([0, total_dmg_count_per_second])
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'ELF' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_elf_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0])
csv_map[timestamp].append(malware_elf_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'ELF' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_elf_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_elf_count_per_second)
else:
csv_map[timestamp].extend([0, total_elf_count_per_second])
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'EXE' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_exe_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0])
csv_map[timestamp].append(malware_exe_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'EXE' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_exe_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_exe_count_per_second)
else:
csv_map[timestamp].extend([0, total_exe_count_per_second])
########################################################################################################################
######################################################### PDF #########################################################
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'PDF' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_pdf_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0])
csv_map[timestamp].append(malware_pdf_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'PDF' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_pdf_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_pdf_count_per_second)
else:
csv_map[timestamp].extend([0, total_pdf_count_per_second])
########################################################################################################################
######################################################## FLASH ########################################################
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'SWF' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_swf_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
csv_map[timestamp].append(malware_swf_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'SWF' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_swf_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_swf_count_per_second)
else:
csv_map[timestamp].extend([0, total_swf_count_per_second])
########################################################################################################################
###################################################### COMPRESSED ######################################################
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'JAR' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_jar_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
csv_map[timestamp].append(malware_jar_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'JAR' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_jar_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_jar_count_per_second)
else:
csv_map[timestamp].extend([0, total_jar_count_per_second])
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'RAR' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_rar_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
csv_map[timestamp].append(malware_rar_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'RAR' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_rar_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_rar_count_per_second)
else:
csv_map[timestamp].extend([0, total_rar_count_per_second])
malware_timestamp_set = set()
query = """SELECT timestamp, COUNT(pe.file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = 'ZIP' AND ams.score > """ + str(AMICO_THRESHOLD) + \
"""GROUP BY timestamp ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
malware_zip_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
csv_map[timestamp].append(malware_zip_count_per_second)
malware_timestamp_set.add(timestamp)
query = """SELECT timestamp, COUNT(file_type) FROM pe_dumps WHERE file_type = 'ZIP' GROUP BY timestamp """ + \
"""ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for row in connection_cursor:
if row is not None:
timestamp = str(row[0])
total_zip_count_per_second = row[1]
if timestamp not in csv_map:
csv_map[timestamp].extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            if timestamp in malware_timestamp_set:
csv_map[timestamp].append(total_zip_count_per_second)
else:
csv_map[timestamp].extend([0, total_zip_count_per_second])
########################################################################################################################
sorted_csv_map = sorted(csv_map.items(), key=operator.itemgetter(0))
csv_map_aux = defaultdict(list)
first_useful_date = "2014-11-26 22:55:40"
last_useful_date = "2015-10-01 00:00:00"
    # Loop for handling corrupted timestamps that precede the first useful date
for timestamp, file_list in sorted_csv_map:
if cmp(timestamp, first_useful_date) < 0:
timestamp_split = timestamp.split()
first_useful_date_split = first_useful_date.split()
timestamp_hms = timestamp_split[1]
first_useful_date_ymd = first_useful_date_split[0]
corrected_timestamp = first_useful_date_ymd + " " + timestamp_hms
csv_map_aux[corrected_timestamp] = csv_map.get(timestamp)
else:
break
max_values = len(header.split(',')) - 2
csv_rows = list()
sorted_csv_map_aux = sorted(csv_map_aux.items(), key=operator.itemgetter(0))
UID = 0
for timestamp, file_list in sorted_csv_map_aux:
formatted_row = format_row(file_list, max_values)
formatted_row.insert(0, UID)
formatted_row.append(timestamp)
csv_rows.append(formatted_row)
UID += 1
writable_csv_rows = list()
while csv_rows:
current_row = csv_rows.pop(0)
if not csv_rows:
writable_csv_rows.append(current_row)
continue
next_row = csv_rows[0]
timestamp_index = len(current_row) - 1
current_timestamp_string = current_row[timestamp_index]
next_timestamp_string = next_row[timestamp_index]
current_timestamp = datetime.strptime(current_timestamp_string, '%Y-%m-%d %H:%M:%S')
next_timestamp = datetime.strptime(next_timestamp_string, '%Y-%m-%d %H:%M:%S')
time_delta_in_secs = int((next_timestamp - current_timestamp).total_seconds()) - 1
current_row.append(time_delta_in_secs)
writable_csv_rows.append(current_row)
writable_sorted_csv_map = list()
for timestamp, file_list in sorted_csv_map:
if cmp(timestamp, first_useful_date) < 0 or cmp(timestamp, last_useful_date) > 0:
continue
else:
writable_sorted_csv_map.append([timestamp, file_list])
writable_csv_rows_aux = list()
while writable_sorted_csv_map:
timestamp_file_list_first_pair = writable_sorted_csv_map.pop(0)
timestamp_str_first_pair = timestamp_file_list_first_pair[0]
file_list_first_pair = timestamp_file_list_first_pair[1]
if not writable_sorted_csv_map:
formatted_row = format_row(file_list_first_pair, max_values)
formatted_row.insert(0, UID)
formatted_row.append(timestamp_str_first_pair)
writable_csv_rows_aux.append(formatted_row)
UID += 1
continue
timestamp_file_list_second_pair = writable_sorted_csv_map[0]
timestamp_str_second_pair = timestamp_file_list_second_pair[0]
formatted_row = format_row(file_list_first_pair, max_values)
formatted_row.insert(0, UID)
formatted_row.append(timestamp_str_first_pair)
timestamp_first_pair = datetime.strptime(timestamp_str_first_pair, '%Y-%m-%d %H:%M:%S')
timestamp_second_pair = datetime.strptime(timestamp_str_second_pair, '%Y-%m-%d %H:%M:%S')
time_delta_in_secs = int((timestamp_second_pair - timestamp_first_pair).total_seconds()) - 1
formatted_row.append(time_delta_in_secs)
writable_csv_rows_aux.append(formatted_row)
UID += 1
    last_formatted_row_in_writable_csv_rows = writable_csv_rows.pop()
first_formatted_row_in_writable_csv_rows_aux = writable_csv_rows_aux[0]
timestamp_index = len(last_formatted_row_in_writable_csv_rows) - 1
current_timestamp_string = last_formatted_row_in_writable_csv_rows[timestamp_index]
next_timestamp_string = first_formatted_row_in_writable_csv_rows_aux[timestamp_index]
current_timestamp = datetime.strptime(current_timestamp_string, '%Y-%m-%d %H:%M:%S')
next_timestamp = datetime.strptime(next_timestamp_string, '%Y-%m-%d %H:%M:%S')
time_delta_in_secs = int((next_timestamp - current_timestamp).total_seconds()) - 1
last_formatted_row_in_writable_csv_rows.append(time_delta_in_secs)
writable_csv_rows_aux.insert(0, last_formatted_row_in_writable_csv_rows)
with open(created_csv_file, "a") as csv_file:
        csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)
for row in writable_csv_rows:
csv_writer.writerow(row)
for row in writable_csv_rows_aux:
csv_writer.writerow(row)
def perform_queries_on(connection, server_host_mapping, total_json_map, malware_json_map, file_type, dict_index):
connection_cursor = connection.cursor()
query = """SELECT timestamp, server, host, COUNT(file_type) FROM pe_dumps WHERE file_type = '""" + file_type + \
"""' AND server IS NOT NULL GROUP BY timestamp, server, host ORDER BY timestamp ASC"""
connection_cursor.execute(query)
for db_tuple in connection_cursor:
timestamp = str(db_tuple[0])
timestamp_ymd = timestamp.split()[0]
server = db_tuple[1]
host = db_tuple[2]
total_count_per_second = db_tuple[3]
server_host_mapping[server].add(host)
total_json_map[timestamp_ymd][server][dict_index] += total_count_per_second
query = """SELECT timestamp, server, COUNT(file_type) FROM pe_dumps AS pe, amico_scores AS ams WHERE """ + \
"""pe.dump_id = ams.dump_id AND pe.file_type = '""" + file_type + """' AND ams.score > """ + \
str(AMICO_THRESHOLD) + """ AND server IS NOT NULL GROUP BY timestamp, server ORDER """ +\
"""BY timestamp ASC"""
connection_cursor.execute(query)
for db_tuple in connection_cursor:
timestamp = str(db_tuple[0])
timestamp_ymd = timestamp.split()[0]
server = db_tuple[1]
total_count_per_second = db_tuple[2]
malware_json_map[timestamp_ymd][server][dict_index] += total_count_per_second
# monitoring_server_ip = "127.0.0.1"
# external_server_ip = server
# # external_server_lat, external_server_lon = geolocalize_server(external_server_ip)
# connection_cursor.execute(first_query)
# for row in connection_cursor:
# if row is not None:
# timestamp = str(row[0])
# total_count_per_second = row[1]
#
# dayID = timestamp.split()[0]
return server_host_mapping, total_json_map, malware_json_map
def geolocalize_from_ip(str_ip_address):
match = geolite2.lookup(str_ip_address)
if match is not None:
location = match.location
if location is None:
country = match.country
if country is None:
continent = match.continent
if continent is None:
return None
else:
return [continent, None]
else:
return [country, None]
else:
return location
else:
return match
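# Example (illustrative): the fallback chain above yields, in order of
# preference, a (lat, lon) tuple, [country, None], [continent, None], or None.
#   geolocalize_from_ip('8.8.8.8')  ->  e.g. a tuple such as (37.75, -97.82)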
def encode_data_as_JSON(UID, monitoring_server_ip, total_json_map, malware_json_map):
control_server_list = set()
final_json_object_list = list()
lat_lon_tuple_none_values = 0
lat_lon_tuple_all_values = 0
sorted_total_json_map = sorted(total_json_map.items(), key=operator.itemgetter(0))
for timestamp, servers_dictionary in sorted_total_json_map:
servers_dictionary_items = servers_dictionary.items()
external_server_dict_list = list()
final_json_object = dict()
final_json_object['Day'] = UID
final_json_object['Monitoring Server IP'] = monitoring_server_ip
for server_ip, file_type_total_count_per_day_pair in servers_dictionary_items:
control_server_list.add(server_ip)
server_json_object = dict()
server_json_object['Server IP'] = server_ip
server_json_object['Downloads'] = dict()
apk_total_count_per_day = file_type_total_count_per_day_pair[APK_FILE_TYPE]
dmg_total_count_per_day = file_type_total_count_per_day_pair[DMG_FILE_TYPE]
elf_total_count_per_day = file_type_total_count_per_day_pair[ELF_FILE_TYPE]
exe_total_count_per_day = file_type_total_count_per_day_pair[EXE_FILE_TYPE]
pdf_total_count_per_day = file_type_total_count_per_day_pair[PDF_FILE_TYPE]
swf_total_count_per_day = file_type_total_count_per_day_pair[SWF_FILE_TYPE]
jar_total_count_per_day = file_type_total_count_per_day_pair[JAR_FILE_TYPE]
rar_total_count_per_day = file_type_total_count_per_day_pair[RAR_FILE_TYPE]
zip_total_count_per_day = file_type_total_count_per_day_pair[ZIP_FILE_TYPE]
total_count_per_day = apk_total_count_per_day + dmg_total_count_per_day + elf_total_count_per_day + \
exe_total_count_per_day + pdf_total_count_per_day + swf_total_count_per_day + \
jar_total_count_per_day + rar_total_count_per_day + zip_total_count_per_day
apk_malware_count_per_day = malware_json_map[timestamp][server_ip][APK_FILE_TYPE]
dmg_malware_count_per_day = malware_json_map[timestamp][server_ip][DMG_FILE_TYPE]
elf_malware_count_per_day = malware_json_map[timestamp][server_ip][ELF_FILE_TYPE]
exe_malware_count_per_day = malware_json_map[timestamp][server_ip][EXE_FILE_TYPE]
pdf_malware_count_per_day = malware_json_map[timestamp][server_ip][PDF_FILE_TYPE]
swf_malware_count_per_day = malware_json_map[timestamp][server_ip][SWF_FILE_TYPE]
jar_malware_count_per_day = malware_json_map[timestamp][server_ip][JAR_FILE_TYPE]
rar_malware_count_per_day = malware_json_map[timestamp][server_ip][RAR_FILE_TYPE]
zip_malware_count_per_day = malware_json_map[timestamp][server_ip][ZIP_FILE_TYPE]
malware_count_per_day = apk_malware_count_per_day + dmg_malware_count_per_day + elf_malware_count_per_day + \
exe_malware_count_per_day + pdf_malware_count_per_day + swf_malware_count_per_day + \
jar_malware_count_per_day + rar_malware_count_per_day + zip_malware_count_per_day
server_json_object['Downloads']['Total_APK'] = apk_total_count_per_day
server_json_object['Downloads']['Total_DMG'] = dmg_total_count_per_day
server_json_object['Downloads']['Total_ELF'] = elf_total_count_per_day
server_json_object['Downloads']['Total_EXE'] = exe_total_count_per_day
server_json_object['Downloads']['Total_PDF'] = pdf_total_count_per_day
server_json_object['Downloads']['Total_SWF'] = swf_total_count_per_day
server_json_object['Downloads']['Total_JAR'] = jar_total_count_per_day
server_json_object['Downloads']['Total_RAR'] = rar_total_count_per_day
server_json_object['Downloads']['Total_ZIP'] = zip_total_count_per_day
server_json_object['Downloads']['Total_Count'] = total_count_per_day
server_json_object['Downloads']['Malware_APK'] = apk_malware_count_per_day
server_json_object['Downloads']['Malware_DMG'] = dmg_malware_count_per_day
server_json_object['Downloads']['Malware_ELF'] = elf_malware_count_per_day
server_json_object['Downloads']['Malware_EXE'] = exe_malware_count_per_day
server_json_object['Downloads']['Malware_PDF'] = pdf_malware_count_per_day
server_json_object['Downloads']['Malware_SWF'] = swf_malware_count_per_day
server_json_object['Downloads']['Malware_JAR'] = jar_malware_count_per_day
server_json_object['Downloads']['Malware_RAR'] = rar_malware_count_per_day
server_json_object['Downloads']['Malware_ZIP'] = zip_malware_count_per_day
server_json_object['Downloads']['Malware_Count'] = malware_count_per_day
lat_lon_tuple = geolocalize_from_ip(server_ip)
lat_lon_tuple_all_values += 1
if lat_lon_tuple is not None:
if lat_lon_tuple[0] is None or lat_lon_tuple[1] is None:
server_json_object['Country'] = lat_lon_tuple[0]
if lat_lon_tuple[0] is not None and lat_lon_tuple[1] is not None:
server_json_object['Latitude'] = lat_lon_tuple[0]
server_json_object['Longitude'] = lat_lon_tuple[1]
else:
lat_lon_tuple_none_values += 1
external_server_dict_list.append(server_json_object)
final_json_object['External Server IP list'] = external_server_dict_list
final_json_object['Timestamp'] = timestamp
computed_final_json_object = copy.deepcopy(final_json_object)
final_json_object_list.append(computed_final_json_object)
UID += 1
print "JSON file generator :: Number of servers processed:", len(control_server_list)
print "JSON file generator :: Number of coordinates NOT correctly computed: %d over %d"% (lat_lon_tuple_none_values,
lat_lon_tuple_all_values)
return final_json_object_list
def generate_JSON_map_file():
connection = util.connect_to_db()
dictionary_index = 0
monitoring_server_ip = "127.0.0.1"
server_host_mapping = defaultdict(set)
total_json_map = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
malware_json_map = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
created_json_file = OUT_DIR + "/" + str(MAP_GRAPH_ID) + "-downloads_" + \
datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H-%M-%S') + ".json"
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "APK",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "DMG",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "ELF",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "EXE",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "PDF",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "SWF",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "JAR",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "RAR",
dictionary_index)
dictionary_index += 1
server_host_mapping, total_json_map, malware_json_map = perform_queries_on(connection, server_host_mapping,
total_json_map, malware_json_map, "ZIP",
dictionary_index)
dictionary_index += 1
UID = 0
JSON_object = encode_data_as_JSON(UID, monitoring_server_ip, total_json_map, malware_json_map)
with open(created_json_file, "wb") as json_file:
json.dump(JSON_object, json_file)
def generate_CSV_traffic_file():
tcpstat_file = open(IN_DIR + "/" + IN_2, "r")
header_list = ["Second_ID", "Bandwidth_[bps]", "Timestamp", "Tcpstat_Timestamp", "Received_Packets",
"Packet_Average_Size", "Packet_Size_Standard_Deviation"]
created_csv_file = OUT_DIR + "/" + str(TRAFFIC_GRAPH_ID) + "-downloads_" + \
datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H-%M-%S') + ".csv"
UID = 0
with open(created_csv_file, "wb") as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(header_list)
for tcpstat_line in tcpstat_file.readlines():
formatted_row = list()
tcpstat_line_split = tcpstat_line.split("\t")
            bps = ((tcpstat_line_split[BANDWIDTH_BPS_FIELD].split("="))[1]).rstrip()
tcpstat_timestamp = (tcpstat_line_split[TCPSTAT_TIMESTAMP_FIELD].split(":"))[1]
timestamp = datetime.fromtimestamp(int(tcpstat_timestamp)).strftime('%Y-%m-%d %H:%M:%S')
rcv_packets = (tcpstat_line_split[RECEIVED_PACKETS_FIELD].split("="))[1]
packet_avg_size = (tcpstat_line_split[PACKET_AVG_SIZE_FIELD].split("="))[1]
packet_std_dev = (tcpstat_line_split[PACKET_SIZE_DEV_FIELD].split("="))[1]
            formatted_row.extend([UID, bps, timestamp, tcpstat_timestamp,
                                  rcv_packets, packet_avg_size, packet_std_dev])
csv_writer.writerow(formatted_row)
UID += 1
# Entry point: generate the JSON map file when this script is executed.
graph_id = MAP_GRAPH_ID
generate_graph(graph_id)
|
openai/cleverhans
|
refs/heads/master
|
cleverhans/compat.py
|
1
|
"""
Wrapper functions for writing code that is compatible with many versions
of TensorFlow.
"""
import warnings
import tensorflow as tf
# The following 2 imports are not used in this module. They are imported so that users of cleverhans.compat can
# get access to device_lib, app, and flags. A pylint bug makes these imports cause errors when using python3+tf1.8.
# Doing the sanitized import here once makes it possible to do "from cleverhans.compat import flags" throughout the
# library without needing to repeat the pylint boilerplate.
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module,unused-import
from tensorflow.python.platform import app, flags # pylint: disable=no-name-in-module,unused-import
def _wrap(f):
"""
Wraps a callable `f` in a function that warns that the function is deprecated.
"""
def wrapper(*args, **kwargs):
"""
Issues a deprecation warning and passes through the arguments.
"""
        warnings.warn(str(f) + " is deprecated. Switch to calling the equivalent function in tensorflow. "
                      "This function was originally needed as a compatibility layer for old versions "
                      "of tensorflow, but support for those versions has now been dropped.")
return f(*args, **kwargs)
return wrapper
reduce_sum = _wrap(tf.reduce_sum)
reduce_max = _wrap(tf.reduce_max)
reduce_min = _wrap(tf.reduce_min)
reduce_mean = _wrap(tf.reduce_mean)
reduce_prod = _wrap(tf.reduce_prod)
reduce_any = _wrap(tf.reduce_any)
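# Example (a sketch): each wrapped alias forwards to the corresponding TF
# function after emitting a deprecation warning.
#   x = tf.ones((2, 3))
#   s = reduce_sum(x, axis=0)  # warns, then returns tf.reduce_sum(x, axis=0)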
def reduce_function(op_func, input_tensor, axis=None, keepdims=None,
name=None, reduction_indices=None):
"""
    This function used to be needed to support tf 1.4 and earlier, but support for tf 1.4 and earlier is now dropped.
    :param op_func: the reduction function to apply, e.g. tf.reduce_sum.
:param input_tensor: The tensor to reduce. Should have numeric type.
:param axis: The dimensions to reduce. If None (the default),
reduces all dimensions. Must be in the range
[-rank(input_tensor), rank(input_tensor)).
:param keepdims: If true, retains reduced dimensions with length 1.
:param name: A name for the operation (optional).
:param reduction_indices: The old (deprecated) name for axis.
:return: outputs same value as op_func.
"""
warnings.warn("`reduce_function` is deprecated and may be removed on or after 2019-09-08.")
out = op_func(input_tensor, axis=axis, keepdims=keepdims, name=name, reduction_indices=reduction_indices)
return out
def softmax_cross_entropy_with_logits(sentinel=None,
labels=None,
logits=None,
dim=-1):
"""
Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle
deprecated warning
"""
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
name = "softmax_cross_entropy_with_logits"
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)"
% name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
try:
f = tf.nn.softmax_cross_entropy_with_logits_v2
except AttributeError:
raise RuntimeError("This version of TensorFlow is no longer supported. See cleverhans/README.md")
labels = tf.stop_gradient(labels)
loss = f(labels=labels, logits=logits, dim=dim)
return loss
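# A minimal usage sketch (tensors invented for illustration): arguments must
# be passed by name, and gradients do not flow into `labels` because of the
# tf.stop_gradient call above.
#   logits = tf.random_normal((4, 10))
#   labels = tf.one_hot([1, 2, 3, 4], depth=10)
#   loss = softmax_cross_entropy_with_logits(labels=labels, logits=logits)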
|
rs2/pandas
|
refs/heads/master
|
pandas/core/array_algos/masked_reductions.py
|
1
|
"""
masked_reductions.py is for reduction algorithms using a mask-based approach
for missing values.
"""
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas.compat.numpy import np_version_under1p17
from pandas.core.nanops import check_below_min_count
def _sumprod(
func: Callable,
values: np.ndarray,
mask: np.ndarray,
skipna: bool = True,
min_count: int = 0,
):
"""
Sum or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
        operation).
mask : np.ndarray
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
"""
if not skipna:
if mask.any() or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values)
else:
if check_below_min_count(values.shape, mask, min_count):
return libmissing.NA
if np_version_under1p17:
return func(values[~mask])
else:
return func(values, where=~mask)
def sum(values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0):
return _sumprod(
np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count
)
def prod(values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0):
return _sumprod(
np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count
)
def _minmax(func: Callable, values: np.ndarray, mask: np.ndarray, skipna: bool = True):
"""
Reduction for 1D masked array.
Parameters
----------
func : np.min or np.max
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
        operation).
mask : np.ndarray
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
"""
if not skipna:
if mask.any() or not values.size:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
else:
return func(values)
else:
subset = values[~mask]
if subset.size:
return func(subset)
else:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
def min(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
return _minmax(np.min, values=values, mask=mask, skipna=skipna)
def max(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
return _minmax(np.max, values=values, mask=mask, skipna=skipna)
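# A minimal usage sketch (arrays invented for illustration):
#   import numpy as np
#   values = np.array([1.0, 2.0, 4.0])
#   mask = np.array([False, True, False])  # True marks a missing value
#   sum(values, mask)                      # 5.0 -- the masked 2.0 is skipped
#   sum(values, mask, skipna=False)        # <NA>, since mask.any() is True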
|
jusdng/odoo
|
refs/heads/8.0
|
addons/purchase/report/purchase_report.py
|
137
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from openerp.osv import fields, osv
from openerp import tools
class purchase_report(osv.osv):
_name = "purchase.report"
    _description = "Purchase Orders"
_auto = False
_columns = {
'date': fields.datetime('Order Date', readonly=True, help="Date on which this document has been created"), # TDE FIXME master: rename into date_order
'state': fields.selection([('draft', 'Request for Quotation'),
('confirmed', 'Waiting Supplier Ack'),
('approved', 'Approved'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')],'Order Status', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'picking_type_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'date_approve':fields.date('Date Approved', readonly=True),
'expected_date':fields.date('Expected Date', readonly=True),
'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
'quantity': fields.integer('Unit Quantity', readonly=True), # TDE FIXME master: rename into unit_quantity
'price_total': fields.float('Total Price', readonly=True),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'category_id': fields.many2one('product.category', 'Category', readonly=True)
}
_order = 'date desc, price_total desc'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit*cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit*cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*cr.rate*l.price_unit)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
joodicator/PageBot
|
refs/heads/master
|
page/modal.py
|
1
|
#===============================================================================
# modal.py - allows certain types of input to be multiplexed between different
# modules, depending on which "mode" is active in each channel.
#
# A module wishing to take control of the channel may call set_mode(chan,M) to
# enter the mode M, which must be unique to that module; then call
# set_mode(chan,M) again after each soft or hard reload if it wishes to retain
# the mode; then call clear_mode(chan) when control is to be released.
import inspect
from untwisted.magic import sign
import util
chan_mode = dict()
#-------------------------------------------------------------------------------
# Request for the given mode to be established in the given channel. If there is
# no current mode, the request is successful, and None is returned; otherwise,
# the request fails, and the existing mode is returned.
#
# When 'CONTEND_MODE' is raised with the arguments (bot, id, target), the module
# holding the current mode should indicate to the user what activity is holding
# the mode, and possibly how it can be released.
def set_mode(chan, mode):
existing_mode = get_mode(chan)
if not existing_mode: chan_mode[chan.lower()] = mode
return existing_mode
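# Usage sketch (the 'trivia' mode name and '#games' channel are hypothetical):
#
#   holder = set_mode('#games', 'trivia')
#   if holder is None:
#       ...  # the mode is ours; call clear_mode('#games') when done
#   else:
#       ...  # 'holder' names the mode currently blocking us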
#-------------------------------------------------------------------------------
# Clear any mode set for the given channel.
def clear_mode(chan):
chan = chan.lower()
if chan in chan_mode: del chan_mode[chan]
#-------------------------------------------------------------------------------
# Return the mode active in the given channel, or None.
def get_mode(chan):
return chan and chan_mode.get(chan.lower())
#-------------------------------------------------------------------------------
# An event handler decorated with @when_mode(M) (beneath any @link decorators)
# will only be called when the channel given by the handler's "chan" or "target"
# argument has the mode M currently established.
def when_mode(mode):
def when_mode_dec(fun):
def when_mode_fun(*args, **kwds):
cargs = inspect.getcallargs(fun, *args, **kwds)
chan = cargs.get('chan') or cargs.get('target')
if get_mode(chan) != mode: return
yield util.sub(fun(*args, **kwds))
return when_mode_fun
return when_mode_dec
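# Example sketch of a gated handler; the @link decorator from the bot framework
# and the 'trivia' mode are assumed here for illustration:
#
#   @link('MESSAGE')
#   @when_mode('trivia')
#   def trivia_message(bot, id, target, msg):
#       yield ...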
|
erickt/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-1.7.2/tests/urlparser_data/python/__init__.py
|
9480
|
#
|
fossilet/ansible
|
refs/heads/devel
|
contrib/inventory/abiquo.py
|
110
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Abiquo
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to the Abiquo API.
It requires some Python libraries; ensure they are installed before using this script.
This script has been tested with Abiquo 3.0, but it may also work with Abiquo 2.6.
Before using this script you may want to modify the abiquo.ini config file.
This script generates an Ansible hosts file with these host groups:
ABQ_xxx: Defines a host itself by its Abiquo VM name label
all: Contains all hosts defined in the Abiquo user's enterprise
virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined in it
virtualappliance: Creates a host group for each virtualappliance containing all hosts defined in it
imagetemplate: Creates a host group for each image template containing all hosts using it
'''
# (c) 2014, Daniel Beneyto <daniel.beneyto@abiquo.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils.urls import open_url
def api_get(link, config):
try:
        if link is None:
url = config.get('api','uri') + config.get('api','login_path')
headers = {"Accept": config.get('api','login_type')}
else:
url = link['href'] + '?limit=0'
headers = {"Accept": link['type']}
result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''),
url_password=config.get('auth','apipass').replace('\n', ''))
return json.loads(result.read())
    except Exception:
        # network or parse failure; callers treat a None result as missing data
        return None
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache','cache_dir')
try:
cache = open('/'.join([dpath,'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache','cache_dir')
inv = {}
try:
cache = open('/'.join([dpath,'inventory']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache','cache_dir'):
dpath = config.get('cache','cache_dir')
try:
existing = os.stat( '/'.join([dpath,'inventory']))
        except OSError:
# cache doesn't exist or isn't accessible
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)):
return True
return False
def generate_inv_from_api(enterprise_entity,config):
try:
inventory['all'] = {}
inventory['all']['children'] = []
inventory['all']['hosts'] = []
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = {}
enterprise = api_get(enterprise_entity,config)
vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines'))
vms = api_get(vms_entity,config)
for vmcollection in vms['collection']:
vm_vapp = next(link for link in (vmcollection['links']) if (link['rel']=='virtualappliance'))['title'].replace('[','').replace(']','').replace(' ','_')
vm_vdc = next(link for link in (vmcollection['links']) if (link['rel']=='virtualdatacenter'))['title'].replace('[','').replace(']','').replace(' ','_')
vm_template = next(link for link in (vmcollection['links']) if (link['rel']=='virtualmachinetemplate'))['title'].replace('[','').replace(']','').replace(' ','_')
# From abiquo.ini: Only adding to inventory VMs with public IP
            if config.getboolean('defaults', 'public_ip_only'):
for link in vmcollection['links']:
if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'):
vm_nic = link['title']
break
else:
vm_nic = None
# Otherwise, assigning defined network interface IP address
else:
for link in vmcollection['links']:
if (link['rel']==config.get('defaults', 'default_net_interface')):
vm_nic = link['title']
break
else:
vm_nic = None
vm_state = True
# From abiquo.ini: Only adding to inventory VMs deployed
            if config.getboolean('defaults', 'deployed_only') and vmcollection['state'] == 'NOT_ALLOCATED':
                vm_state = False
            if vm_nic is not None and vm_state:
                if vm_vapp not in inventory:
                    inventory[vm_vapp] = {}
                    inventory[vm_vapp]['children'] = []
                    inventory[vm_vapp]['hosts'] = []
                if vm_vdc not in inventory:
                    inventory[vm_vdc] = {}
                    inventory[vm_vdc]['hosts'] = []
                    inventory[vm_vdc]['children'] = []
                if vm_template not in inventory:
                    inventory[vm_template] = {}
                    inventory[vm_template]['children'] = []
                    inventory[vm_template]['hosts'] = []
            if config.getboolean('defaults', 'get_metadata'):
meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata'))
try:
metadata = api_get(meta_entity,config)
if (config.getfloat("api","version") >= 3.0):
vm_metadata = metadata['metadata']
else:
vm_metadata = metadata['metadata']['metadata']
inventory['_meta']['hostvars'][vm_nic] = vm_metadata
except Exception as e:
pass
inventory[vm_vapp]['children'].append(vmcollection['name'])
inventory[vm_vdc]['children'].append(vmcollection['name'])
inventory[vm_template]['children'].append(vmcollection['name'])
inventory['all']['children'].append(vmcollection['name'])
inventory[vmcollection['name']] = []
inventory[vmcollection['name']].append(vm_nic)
return inventory
except Exception as e:
# Return empty hosts output
return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
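# The dict built above follows Ansible's dynamic-inventory JSON contract,
# roughly (illustrative values):
#   {"all": {"children": ["vm1"], "hosts": []},
#    "my_vapp": {"children": ["vm1"], "hosts": []},
#    "vm1": ["10.0.0.5"],
#    "_meta": {"hostvars": {"10.0.0.5": {...}}}}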
def get_inventory(enterprise, config):
''' Reads the inventory from cache or Abiquo api '''
if cache_available(config):
inv = get_cache('inventory', config)
else:
        # splitext is used rather than rstrip('.py'), which would strip any
        # trailing '.', 'p' or 'y' characters from the script name
        default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# MAKE ABIQUO API CALLS #
inv = generate_inv_from_api(enterprise,config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
enterprise = {}
# Read config
config = ConfigParser.SafeConfigParser()
    for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
login = api_get(None,config)
enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise'))
except Exception as e:
enterprise = None
if cache_available(config):
inventory = get_cache('inventory', config)
else:
inventory = get_inventory(enterprise, config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
|
molobrakos/home-assistant
|
refs/heads/master
|
homeassistant/components/tuya/scene.py
|
7
|
"""Support for the Tuya scenes."""
from homeassistant.components.scene import DOMAIN, Scene
from . import DATA_TUYA, TuyaDevice
ENTITY_ID_FORMAT = DOMAIN + '.{}'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Tuya scenes."""
if discovery_info is None:
return
tuya = hass.data[DATA_TUYA]
dev_ids = discovery_info.get('dev_ids')
devices = []
for dev_id in dev_ids:
device = tuya.get_device_by_id(dev_id)
if device is None:
continue
devices.append(TuyaScene(device))
add_entities(devices)
class TuyaScene(TuyaDevice, Scene):
"""Tuya Scene."""
def __init__(self, tuya):
"""Init Tuya scene."""
super().__init__(tuya)
self.entity_id = ENTITY_ID_FORMAT.format(tuya.object_id())
def activate(self):
"""Activate the scene."""
self.tuya.activate()
|
longsion/OpenBird
|
refs/heads/master
|
cocos2d/plugin/tools/toolsForGame/modifyManifest.py
|
263
|
import sys, string, os
from xml.etree import ElementTree as ET
manifestFile = sys.argv[1]
pluginStr = sys.argv[2]
pluginsDir = sys.argv[3]
androidNS = 'http://schemas.android.com/apk/res/android'
sourceCfgFile = '/android/ForManifest.xml'
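# Expected invocation, inferred from the sys.argv reads above (paths are
# illustrative):
#   python modifyManifest.py <game>/AndroidManifest.xml plugin1:plugin2 <pluginsDir>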
def doModify(sourceFile, root):
bRet = False
sourceTree = ET.parse(sourceFile)
sourceRoot = sourceTree.getroot()
# get target content
f = open(manifestFile)
targetContent = f.read()
f.close()
# check config for application
appCfgNode = sourceRoot.find('applicationCfg')
if appCfgNode is not None and len(appCfgNode) > 0:
appKeyWord = appCfgNode.get('keyword')
        if appKeyWord is not None and len(appKeyWord) > 0:
keyIndex = targetContent.find(appKeyWord)
if -1 == keyIndex:
bRet = True
for node in list(appCfgNode):
root.find('application').append(node)
# check permission config
perCfgNode = sourceRoot.find('permissionCfg')
if perCfgNode is not None and len(perCfgNode) > 0:
for oneNode in list(perCfgNode):
key = '{' + androidNS + '}name'
perAttr = oneNode.get(key)
            if perAttr is not None and len(perAttr) > 0:
attrIndex = targetContent.find(perAttr)
if -1 == attrIndex:
bRet = True
root.append(oneNode)
return bRet
# parse file AndroidManifest.xml of game project
ET.register_namespace("android", androidNS)
targetTree = ET.parse(manifestFile)
targetRoot = targetTree.getroot()
# traverse all plugins
plugins = pluginStr.split(':')
for pluginName in plugins:
# find the file 'ForManifest.xml'
sourceXml = pluginsDir + '/' + pluginName + sourceCfgFile
if not os.path.exists(sourceXml):
continue
# check & modify target xml
haveChanged = doModify(sourceXml, targetRoot)
if haveChanged:
print 'Modify AndroidManifest.xml for plugin ' + pluginName
targetTree.write(manifestFile, 'UTF-8')
|
ehouarn-perret/EhouarnPerret.Python.Kattis
|
refs/heads/master
|
Trivial/Reversed Binary Numbers.py
|
1
|
n = int(input())
n = bin(n)[:1:-1]
n = int(n, base=2)
print(n)
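# Worked example: for n = 13, bin(13) == '0b1101'; the slice [:1:-1] walks the
# string backwards and stops before index 1, which both reverses the digits and
# drops the '0b' prefix, giving '1011'; int('1011', base=2) == 11.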
|
EvanK/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_command.py
|
22
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_command
version_added: "2.3"
author: "JackyGao2016 (@JackyGao2016)"
short_description: Run arbitrary command on HUAWEI CloudEngine devices.
description:
  - Sends an arbitrary command to a HUAWEI CloudEngine node and returns
the results read from the device. The ce_command module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
options:
commands:
description:
- The commands to send to the remote HUAWEI CloudEngine device
over the configured provider. The resulting output from the
command is returned. If the I(wait_for) argument is provided,
        the module does not return until the condition is satisfied
or the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine command test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Run display version on remote devices"
ce_command:
commands: display version
provider: "{{ cli }}"
- name: "Run display version and check to see if output contains HUAWEI"
ce_command:
commands: display version
wait_for: result[0] contains HUAWEI
provider: "{{ cli }}"
- name: "Run multiple commands on remote nodes"
ce_command:
commands:
- display version
- display device
provider: "{{ cli }}"
- name: "Run multiple commands and evaluate the output"
ce_command:
commands:
- display version
- display device
wait_for:
- result[0] contains HUAWEI
- result[1] contains Device
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, check_args
from ansible.module_utils.network.cloudengine.ce import run_commands
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
def to_lines(stdout):
lines = list()
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
lines.append(item)
return lines
def parse_commands(module, warnings):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(),
prompt=dict(),
response=dict()
), module)
commands = transform(module.params['commands'])
    for item in commands:
if module.check_mode and not item['command'].startswith('dis'):
warnings.append(
'Only display commands are supported when using check_mode, not '
'executing %s' % item['command']
)
return commands
def to_cli(obj):
cmd = obj['command']
return cmd
def main():
"""entry point for module execution
"""
argument_spec = dict(
# { command: <str>, output: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['any', 'all']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError as exc:
module.fail_json(msg=to_native(exc), exception=traceback.format_exc())
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'stdout': responses,
'stdout_lines': to_lines(responses)
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
molotof/infernal-twin
|
refs/heads/master
|
build/pip/pip/_vendor/cachecontrol/serialize.py
|
317
|
import base64
import io
import json
import zlib
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle
def _b64_encode_bytes(b):
return base64.b64encode(b).decode("ascii")
def _b64_encode_str(s):
return _b64_encode_bytes(s.encode("utf8"))
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
class Serializer(object):
def dumps(self, request, response, body=None):
response_headers = CaseInsensitiveDict(response.headers)
if body is None:
body = response.read(decode_content=False)
# NOTE: 99% sure this is dead code. I'm only leaving it
# here b/c I don't have a test yet to prove
# it. Basically, before using
# `cachecontrol.filewrapper.CallbackFileWrapper`,
# this made an effort to reset the file handle. The
# `CallbackFileWrapper` short circuits this code by
# setting the body as the content is consumed, the
# result being a `body` argument is *always* passed
# into cache_response, and in turn,
# `Serializer.dump`.
response._fp = io.BytesIO(body)
data = {
"response": {
"body": _b64_encode_bytes(body),
"headers": dict(
(_b64_encode_str(k), _b64_encode_str(v))
for k, v in response.headers.items()
),
"status": response.status,
"version": response.version,
"reason": _b64_encode_str(response.reason),
"strict": response.strict,
"decode_content": response.decode_content,
},
}
# Construct our vary headers
data["vary"] = {}
if "vary" in response_headers:
varied_headers = response_headers['vary'].split(',')
for header in varied_headers:
header = header.strip()
data["vary"][header] = request.headers.get(header, None)
# Encode our Vary headers to ensure they can be serialized as JSON
data["vary"] = dict(
(_b64_encode_str(k), _b64_encode_str(v) if v is not None else v)
for k, v in data["vary"].items()
)
return b",".join([
b"cc=2",
zlib.compress(
json.dumps(
data, separators=(",", ":"), sort_keys=True,
).encode("utf8"),
),
])
def loads(self, request, data):
# Short circuit if we've been given an empty set of data
if not data:
return
# Determine what version of the serializer the data was serialized
# with
try:
ver, data = data.split(b",", 1)
except ValueError:
ver = b"cc=0"
# Make sure that our "ver" is actually a version and isn't a false
# positive from a , being in the data stream.
if ver[:3] != b"cc=":
data = ver + data
ver = b"cc=0"
# Get the version number out of the cc=N
ver = ver.split(b"=", 1)[-1].decode("ascii")
# Dispatch to the actual load method for the given version
try:
return getattr(self, "_loads_v{0}".format(ver))(request, data)
except AttributeError:
# This is a version we don't have a loads function for, so we'll
# just treat it as a miss and return None
return
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
if request.headers.get(header, None) != value:
return
body_raw = cached["response"].pop("body")
try:
body = io.BytesIO(body_raw)
except TypeError:
# This can happen if cachecontrol serialized to v1 format (pickle)
# using Python 2. A Python 2 str(byte string) will be unpickled as
# a Python 3 str (unicode string), which will cause the above to
# fail with:
#
# TypeError: 'str' does not support the buffer interface
body = io.BytesIO(body_raw.encode('utf8'))
return HTTPResponse(
body=body,
preload_content=False,
**cached["response"]
)
def _loads_v0(self, request, data):
# The original legacy cache data. This doesn't contain enough
# information to construct everything we need, so we'll treat this as
# a miss.
return
def _loads_v1(self, request, data):
try:
cached = pickle.loads(data)
except ValueError:
return
return self.prepare_response(request, cached)
def _loads_v2(self, request, data):
try:
cached = json.loads(zlib.decompress(data).decode("utf8"))
except ValueError:
return
# We need to decode the items that we've base64 encoded
cached["response"]["body"] = _b64_decode_bytes(
cached["response"]["body"]
)
cached["response"]["headers"] = dict(
(_b64_decode_str(k), _b64_decode_str(v))
for k, v in cached["response"]["headers"].items()
)
cached["response"]["reason"] = _b64_decode_str(
cached["response"]["reason"],
)
cached["vary"] = dict(
(_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
for k, v in cached["vary"].items()
)
return self.prepare_response(request, cached)
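# Round-trip sketch (request is a prepared requests request and response a
# urllib3 HTTPResponse, as elsewhere in cachecontrol; values illustrative):
#
#   raw = Serializer().dumps(request, response)   # b"cc=2," + zlib'd JSON
#   resp = Serializer().loads(request, raw)       # rebuilt HTTPResponse,
#                                                 # or None on a Vary mismatch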
|
johncsnyder/SwiftKitten
|
refs/heads/master
|
cffi/doc/source/conf.py
|
8
|
# -*- coding: utf-8 -*-
#
# CFFI documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 14 16:37:47 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CFFI'
copyright = u'2012-2015, Armin Rigo, Maciej Fijalkowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'CFFIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CFFI.tex', u'CFFI Documentation',
u'Armin Rigo, Maciej Fijalkowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
crosswalk-project/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/w3c/csp-self.py
|
25
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "default-src 'self' about: 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<html>
<head>
<title>XMLHttpRequest: abort() after send()</title>
<script src="../../resources/testharness.js"></script>
<script src="../../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<script>
var test = async_test()
test.step(function() {
var client = new XMLHttpRequest(),
control_flag = false,
result = [],
expected = [1, 4] // open() -> 1, send() -> 1, abort() -> 4
client.onreadystatechange = function() {
test.step(function() {
result.push(client.readyState)
if(client.readyState == 4) {
control_flag = true
assert_equals(client.responseXML, null)
assert_equals(client.responseText, "")
assert_equals(client.status, 0)
assert_equals(client.statusText, "")
assert_equals(client.getAllResponseHeaders(), "")
}
})
}
client.open("GET", "../resources/delay.py?ms=2000", true)
client.send(null)
client.abort()
assert_true(control_flag)
assert_equals(client.readyState, 0)
assert_array_equals(result, expected)
test.done()
})
</script>
</body>
</html> """
|
kelvin13/Knockout
|
refs/heads/master
|
edit/__init__.py
|
12133432
| |
wwj718/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/tests/views/__init__.py
|
12133432
| |
evidation-health/bokeh
|
refs/heads/master
|
examples/glyphs/__init__.py
|
12133432
| |
mbareta/edx-platform-ft
|
refs/heads/open-release/eucalyptus.master
|
common/djangoapps/student/management/__init__.py
|
12133432
| |
GunnerJnr/_CodeInstitute
|
refs/heads/master
|
Stream-3/Full-Stack-Development/15.Paypal-Subscriptions/2.Setup-Products-That-Require-Subscriptions/we_are_social/magazines/views.py
|
1
|
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .models import Magazine
# Create your views here.
@login_required(login_url='/login/')
def all_magazines(request):
magazines = Magazine.objects.all()
return render(request, "magazines/magazines.html", {"magazines": magazines})
|
insiderr/insiderr-app
|
refs/heads/master
|
ios-patches/basemodules/twisted/trial/test/mockcustomsuite2.py
|
59
|
# Copyright (c) 2006 Twisted Matrix Laboratories. See LICENSE for details
"""
Mock test module that contains a C{testSuite} method. L{runner.TestLoader}
should load the tests from the C{testSuite}, not from the C{Foo} C{TestCase}.
See L{twisted.trial.test.test_loader.LoaderTest.test_loadModuleWith_testSuite}.
"""
from twisted.trial import unittest, runner
class Foo(unittest.SynchronousTestCase):
def test_foo(self):
pass
def testSuite():
ts = runner.TestSuite()
ts.name = "MyCustomSuite"
return ts
|
Ultimaker/Cura
|
refs/heads/master
|
plugins/MachineSettingsAction/__init__.py
|
3
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from . import MachineSettingsAction
def getMetaData():
return {}
def register(app):
return { "machine_action": MachineSettingsAction.MachineSettingsAction() }
|
toopy/django-toopy-editor
|
refs/heads/master
|
bootstrap.py
|
81
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, urllib, urllib2, subprocess
from optparse import OptionParser
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-Sc',
'try:\n'
' import ConfigParser\n'
'except ImportError:\n'
' print 1\n'
'else:\n'
' print 0\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
# We will restart with python -S.
args = sys.argv[:]
args[0:0] = [sys.executable, '-S']
args = map(quote, args)
os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site # imported because of its side effects
sys.path[:] = clean_path
for k, v in sys.modules.items():
if k in ('setuptools', 'pkg_resources') or (
hasattr(v, '__path__') and
len(v.__path__) == 1 and
not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))):
# This is a namespace package. Remove it.
sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
if value:
if '://' not in value: # It doesn't smell like a URL.
value = 'file://%s' % (
urllib.pathname2url(
os.path.abspath(os.path.expanduser(value))),)
if opt_str == '--download-base' and not value.endswith('/'):
# Download base needs a trailing slash to make the world happy.
value += '/'
else:
value = None
name = opt_str[2:].replace('-', '_')
setattr(parser.values, name, value)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="use_distribute", default=False,
help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or file location for the setup file. "
"If you use Setuptools, this will default to " +
setuptools_source + "; if you use Distribute, this "
"will default to " + distribute_source + "."))
parser.add_option("--download-base", action="callback", dest="download_base",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or directory for downloading "
"zc.buildout and either Setuptools or Distribute. "
"Defaults to PyPI."))
parser.add_option("--eggs",
help=("Specify a directory for storing eggs. Defaults to "
"a temporary directory that is deleted when the "
"bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.eggs:
eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
if options.use_distribute:
options.setup_source = distribute_source
else:
options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
args.append('buildout:accept-buildout-test-releases=true')
args.append('bootstrap')
try:
import pkg_resources
import setuptools # A flag. Sometimes pkg_resources is installed alone.
if not hasattr(pkg_resources, '_distribute'):
raise ImportError
except ImportError:
ez_code = urllib2.urlopen(
options.setup_source).read().replace('\r\n', '\n')
ez = {}
exec ez_code in ez
setup_args = dict(to_dir=eggs_dir, download_delay=0)
if options.download_base:
setup_args['download_base'] = options.download_base
if options.use_distribute:
setup_args['no_fake'] = True
ez['use_setuptools'](**setup_args)
if 'pkg_resources' in sys.modules:
reload(sys.modules['pkg_resources'])
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(eggs_dir)]
if not has_broken_dash_S:
cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
setup_requirement = 'distribute'
else:
setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
os.environ,
PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setup_requirement_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
sys.stdout.flush()
sys.stderr.flush()
print ("An error occurred when trying to install zc.buildout. "
"Look above this message for any errors that "
"were output by easy_install.")
sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
shutil.rmtree(eggs_dir)
|
alexanderturner/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/authorized_key.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to add authorized_keys for ssh logins.
(c) 2012, Brad Olson <brado@movedbylight.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: authorized_key
short_description: Adds or removes an SSH authorized key
description:
- "Adds or removes SSH authorized keys for particular user accounts"
version_added: "0.5"
options:
user:
description:
- The username on the remote host whose authorized_keys file will be modified
required: true
key:
description:
- The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys)
required: true
path:
description:
- Alternate path to the authorized_keys file
required: false
default: "(homedir)+/.ssh/authorized_keys"
version_added: "1.2"
manage_dir:
description:
- Whether this module should manage the directory of the authorized key file. If
set, the module will create the directory, as well as set the owner and permissions
of an existing directory. Be sure to
set C(manage_dir=no) if you are using an alternate directory for
authorized_keys, as set with C(path), since you could lock yourself out of
SSH access. See the example below.
required: false
choices: [ "yes", "no" ]
default: "yes"
version_added: "1.2"
state:
description:
- Whether the given key (with the given key_options) should or should not be in the file
required: false
choices: [ "present", "absent" ]
default: "present"
key_options:
description:
- A string of ssh key options to be prepended to the key in the authorized_keys file
required: false
default: null
version_added: "1.4"
exclusive:
description:
- Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys
can be specified in a single C(key) string value by separating them by newlines.
      - This option is not loop aware, so if you use C(with_), it will be exclusive per iteration
of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a
single batch as mentioned above.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "2.1"
author: "Ansible Core Team"
'''
EXAMPLES = '''
- name: Set authorized key taken from file
authorized_key:
user: charlie
state: present
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
- name: Set authorized key taken from url
authorized_key:
user: charlie
state: present
key: https://github.com/charlie.keys
- name: Set authorized key in alternate location
authorized_key:
user: charlie
state: present
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
path: /etc/ssh/authorized_keys/charlie
manage_dir: False
- name: Set up multiple authorized keys
authorized_key:
user: deploy
state: present
key: '{{ item }}'
with_file:
- public_keys/doe-jane
- public_keys/doe-john
- name: Set authorized key defining key options
authorized_key:
user: charlie
state: present
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options: 'no-port-forwarding,from="10.0.1.1"'
- name: Set authorized key without validating the TLS/SSL certificates
authorized_key:
user: charlie
state: present
key: https://github.com/user.keys
validate_certs: False
- name: Set authorized key, removing all the authorized keys already set
authorized_key:
user: root
key: '{{ item }}'
state: present
exclusive: True
with_file:
- public_keys/doe-jane
- name: Set authorized key for user ubuntu copying it from current user
authorized_key:
user: ubuntu
state: present
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
'''
RETURN = '''
exclusive:
description: If the key has been forced to be exclusive or not.
returned: success
type: boolean
sample: False
key:
description: The key that the module was running against.
returned: success
type: string
sample: https://github.com/user.keys
key_option:
    description: Key options related to the key.
    returned: success
    type: string
    sample: null
keyfile:
    description: Path for the authorized key file.
    returned: success
    type: string
    sample: /home/user/.ssh/authorized_keys
manage_dir:
    description: Whether this module managed the directory of the authorized key file.
    returned: success
    type: boolean
    sample: True
path:
    description: Alternate path to the authorized_keys file
    returned: success
    type: string
    sample: null
state:
    description: Whether the given key (with the given key_options) should or should not be in the file
    returned: success
    type: string
    sample: present
unique:
    description: Whether the key is unique
    returned: success
    type: boolean
    sample: false
user:
    description: The username on the remote host whose authorized_keys file will be modified
    returned: success
    type: string
    sample: user
validate_certs:
    description: This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated.
    returned: success
    type: boolean
    sample: true
'''
# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys.
#
# Arguments
# =========
# user = username
# key = line to add to authorized_keys for user
# path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys)
# manage_dir = whether to create, and control ownership of the directory (default: true)
# state = absent|present (default: present)
#
# see example in examples/playbooks
import os
import pwd
import os.path
import tempfile
import re
import shlex
from operator import itemgetter
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url
class keydict(dict):
""" a dictionary that maintains the order of keys as they are added
This has become an abuse of the dict interface. Probably should be
rewritten to be an entirely custom object with methods instead of
bracket-notation.
Our requirements are for a data structure that:
* Preserves insertion order
* Can store multiple values for a single key.
The present implementation has the following functions used by the rest of
the code:
    * __setitem__(): to add a key=value. The value can never be disassociated
      from the key; only new values can be added in addition.
* items(): to retrieve the key, value pairs.
Other dict methods should work but may be surprising. For instance, there
will be multiple keys that are the same in keys() and __getitem__() will
return a list of the values that have been set via __setitem__.
"""
# http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class
def __init__(self, *args, **kw):
super(keydict,self).__init__(*args, **kw)
self.itemlist = list(super(keydict,self).keys())
def __setitem__(self, key, value):
self.itemlist.append(key)
if key in self:
self[key].append(value)
else:
super(keydict, self).__setitem__(key, [value])
def __iter__(self):
return iter(self.itemlist)
def keys(self):
return self.itemlist
def _item_generator(self):
indexes = {}
for key in self.itemlist:
if key in indexes:
indexes[key] += 1
else:
indexes[key] = 0
yield key, self[key][indexes[key]]
def iteritems(self):
raise NotImplementedError("Do not use this as it's not available on py3")
def items(self):
return list(self._item_generator())
def itervalues(self):
raise NotImplementedError("Do not use this as it's not available on py3")
def values(self):
return [item[1] for item in self.items()]
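# Behaviour sketch (illustrative):
#   d = keydict()
#   d['from'] = 'a'
#   d['from'] = 'b'
#   d.keys()    # -> ['from', 'from']   (insertion order, duplicates kept)
#   d.items()   # -> [('from', 'a'), ('from', 'b')]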
def keyfile(module, user, write=False, path=None, manage_dir=True):
"""
Calculate name of authorized keys file, optionally creating the
directories and file, properly setting permissions.
:param str user: name of user in passwd file
:param bool write: if True, write changes to authorized_keys file (creating directories if needed)
:param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys'
:param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file
:return: full path string to authorized_keys for user
"""
if module.check_mode and path is not None:
keysfile = path
return keysfile
try:
user_entry = pwd.getpwnam(user)
except KeyError:
e = get_exception()
if module.check_mode and path is None:
module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode")
module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e)))
if path is None:
homedir = user_entry.pw_dir
sshdir = os.path.join(homedir, ".ssh")
keysfile = os.path.join(sshdir, "authorized_keys")
else:
sshdir = os.path.dirname(path)
keysfile = path
if not write:
return keysfile
uid = user_entry.pw_uid
gid = user_entry.pw_gid
if manage_dir:
if not os.path.exists(sshdir):
os.mkdir(sshdir, int('0700', 8))
if module.selinux_enabled():
module.set_default_selinux_context(sshdir, False)
os.chown(sshdir, uid, gid)
os.chmod(sshdir, int('0700', 8))
if not os.path.exists(keysfile):
basedir = os.path.dirname(keysfile)
if not os.path.exists(basedir):
os.makedirs(basedir)
        # touch the file so we can set ownership and perms
        f = open(keysfile, "w")
        f.close()
if module.selinux_enabled():
module.set_default_selinux_context(keysfile, False)
try:
os.chown(keysfile, uid, gid)
os.chmod(keysfile, int('0600', 8))
except OSError:
pass
return keysfile
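# A minimal illustrative sketch (hypothetical user 'alice' with pw_dir
# /home/alice; comments only, not executed):
#
#   keyfile(module, 'alice', write=False)
#   # -> '/home/alice/.ssh/authorized_keys'
#
# With write=True and manage_dir=True the call would also create ~/.ssh
# (mode 0700) and the file itself (mode 0600), both owned by the user.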
def parseoptions(module, options):
'''
reads a string containing ssh-key options
and returns a dictionary of those options
'''
options_dict = keydict() #ordered dict
if options:
# the following regex will split on commas while
# ignoring those commas that fall within quotes
regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
parts = regex.split(options)[1:-1]
for part in parts:
if "=" in part:
(key, value) = part.split("=", 1)
options_dict[key] = value
elif part != ",":
options_dict[part] = None
return options_dict
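# A minimal illustrative sketch (comments only, not executed): the regex in
# parseoptions() splits on commas only outside quotes, so a quoted command
# containing a comma survives intact.
#
#   parseoptions(module, 'no-pty,command="echo a,b"').items()
#   # -> [('no-pty', None), ('command', '"echo a,b"')]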
def parsekey(module, raw_key, rank=None):
'''
parses a key, which may or may not contain a list
of ssh-key options at the beginning
    rank indicates the key's original ordering, so that keys can be
    written out in the same order.
'''
VALID_SSH2_KEY_TYPES = [
'ssh-ed25519',
'ecdsa-sha2-nistp256',
'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp521',
'ssh-dss',
'ssh-rsa',
]
options = None # connection options
key = None # encrypted key string
key_type = None # type of ssh key
type_index = None # index of keytype in key string|list
# remove comment yaml escapes
raw_key = raw_key.replace('\#', '#')
# split key safely
lex = shlex.shlex(raw_key)
lex.quotes = []
lex.commenters = '' #keep comment hashes
lex.whitespace_split = True
key_parts = list(lex)
if key_parts and key_parts[0] == '#':
# comment line, invalid line, etc.
return (raw_key, 'skipped', None, None, rank)
for i in range(0, len(key_parts)):
if key_parts[i] in VALID_SSH2_KEY_TYPES:
type_index = i
key_type = key_parts[i]
break
# check for options
if type_index is None:
return None
elif type_index > 0:
options = " ".join(key_parts[:type_index])
# parse the options (if any)
options = parseoptions(module, options)
    # get key after the type index
    if len(key_parts) <= (type_index + 1):
        # a recognized key type with no key material is not a valid key
        return None
    key = key_parts[(type_index + 1)]
    # set comment to everything after the key
    comment = " ".join(key_parts[(type_index + 2):])
return (key, key_type, options, comment, rank)
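# A minimal illustrative sketch (hypothetical key material; comments only,
# not executed): a plain rsa line parses into the
# (key, key_type, options, comment, rank) tuple used throughout this module.
#
#   parsekey(module, 'ssh-rsa AAAAB3Nza... alice@host', rank=0)
#   # -> ('AAAAB3Nza...', 'ssh-rsa', <empty keydict>, 'alice@host', 0)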
def readfile(filename):
if not os.path.isfile(filename):
return ''
f = open(filename)
try:
return f.read()
finally:
f.close()
def parsekeys(module, lines):
keys = {}
for rank_index, line in enumerate(lines.splitlines(True)):
key_data = parsekey(module, line, rank=rank_index)
if key_data:
# use key as identifier
keys[key_data[0]] = key_data
else:
# for an invalid line, just set the line
# dict key to the line so it will be re-output later
keys[line] = (line, 'skipped', None, None, rank_index)
return keys
def writefile(module, filename, content):
    fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename))
    f = os.fdopen(fd, "w")  # reuse the descriptor from mkstemp instead of leaking it
try:
f.write(content)
except IOError:
e = get_exception()
module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e)))
f.close()
module.atomic_move(tmp_path, filename)
def serialize(keys):
lines = []
new_keys = keys.values()
# order the new_keys by their original ordering, via the rank item in the tuple
ordered_new_keys = sorted(new_keys, key=itemgetter(4))
for key in ordered_new_keys:
try:
(keyhash, key_type, options, comment, rank) = key
option_str = ""
if options:
option_strings = []
for option_key, value in options.items():
if value is None:
option_strings.append("%s" % option_key)
else:
option_strings.append("%s=%s" % (option_key, value))
option_str = ",".join(option_strings)
option_str += " "
            # comment line or invalid line, just leave it as-is
            if not key_type:
                key_line = keyhash
            elif key_type == 'skipped':
                key_line = key[0]
            else:
                key_line = "%s%s %s %s\n" % (option_str, key_type, keyhash, comment)
        except Exception:
            key_line = key
lines.append(key_line)
return ''.join(lines)
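# A minimal illustrative sketch (comments only, not executed): serialize()
# re-emits keys in their original rank order, prefixing any options, so the
# parsed tuple
#
#   ('AAAAB3Nza...', 'ssh-rsa', {'no-pty': None}, 'alice@host', 0)
#
# would come back out as the line 'no-pty ssh-rsa AAAAB3Nza... alice@host'.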
def enforce_state(module, params):
"""
Add or remove key.
"""
user = params["user"]
key = params["key"]
path = params.get("path", None)
manage_dir = params.get("manage_dir", True)
state = params.get("state", "present")
key_options = params.get("key_options", None)
exclusive = params.get("exclusive", False)
error_msg = "Error getting key from: %s"
# if the key is a url, request it and use it as key source
if key.startswith("http"):
try:
resp, info = fetch_url(module, key)
if info['status'] != 200:
module.fail_json(msg=error_msg % key)
else:
key = resp.read()
except Exception:
module.fail_json(msg=error_msg % key)
# resp.read gives bytes on python3, convert to native string type
key = to_native(key, errors='surrogate_or_strict')
# extract individual keys into an array, skipping blank lines and comments
new_keys = [s for s in key.splitlines() if s and not s.startswith('#')]
# check current state -- just get the filename, don't create file
do_write = False
params["keyfile"] = keyfile(module, user, do_write, path, manage_dir)
existing_content = readfile(params["keyfile"])
existing_keys = parsekeys(module, existing_content)
    # Add a placeholder for keys that should exist in the state=present and
# exclusive=true case
keys_to_exist = []
# we will order any non exclusive new keys higher than all the existing keys,
# resulting in the new keys being written to the key file after existing keys, but
# in the order of new_keys
max_rank_of_existing_keys = len(existing_keys)
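    # e.g. with 3 existing keys (ranks 0..2) and two brand-new keys, the new
    # keys get total ranks 3 and 4 below, so they serialize after all existing
    # lines while preserving their own relative order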
# Check our new keys, if any of them exist we'll continue.
for rank_index, new_key in enumerate(new_keys):
parsed_new_key = parsekey(module, new_key, rank=rank_index)
if not parsed_new_key:
module.fail_json(msg="invalid key specified: %s" % new_key)
if key_options is not None:
parsed_options = parseoptions(module, key_options)
# rank here is the rank in the provided new keys, which may be unrelated to rank in existing_keys
parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3], parsed_new_key[4])
matched = False
non_matching_keys = []
if parsed_new_key[0] in existing_keys:
            # Then we check whether everything (except the rank at index 4)
            # matches, including the key type and options. If not, we append
            # this existing key to the non-matching list.
            # We only want it to match everything when the state is "present".
if parsed_new_key[:4] != existing_keys[parsed_new_key[0]][:4] and state == "present":
non_matching_keys.append(existing_keys[parsed_new_key[0]])
else:
matched = True
# handle idempotent state=present
if state=="present":
keys_to_exist.append(parsed_new_key[0])
if len(non_matching_keys) > 0:
for non_matching_key in non_matching_keys:
if non_matching_key[0] in existing_keys:
del existing_keys[non_matching_key[0]]
do_write = True
# new key that didn't exist before. Where should it go in the ordering?
if not matched:
# We want the new key to be after existing keys if not exclusive (rank > max_rank_of_existing_keys)
total_rank = max_rank_of_existing_keys + parsed_new_key[4]
# replace existing key tuple with new parsed key with its total rank
existing_keys[parsed_new_key[0]] = (parsed_new_key[0], parsed_new_key[1], parsed_new_key[2], parsed_new_key[3], total_rank)
do_write = True
elif state=="absent":
if not matched:
continue
del existing_keys[parsed_new_key[0]]
do_write = True
# remove all other keys to honor exclusive
    # for 'exclusive', make sure keys are written in the order the new keys were specified
if state == "present" and exclusive:
to_remove = frozenset(existing_keys).difference(keys_to_exist)
for key in to_remove:
del existing_keys[key]
do_write = True
if do_write:
filename = keyfile(module, user, do_write, path, manage_dir)
new_content = serialize(existing_keys)
diff = {
'before_header': params['keyfile'],
'after_header': filename,
'before': existing_content,
'after': new_content,
}
if module.check_mode:
module.exit_json(changed=True, diff=diff)
writefile(module, filename, new_content)
params['changed'] = True
params['diff'] = diff
else:
if module.check_mode:
module.exit_json(changed=False)
return params
def main():
module = AnsibleModule(
argument_spec = dict(
user = dict(required=True, type='str'),
key = dict(required=True, type='str'),
path = dict(required=False, type='str'),
manage_dir = dict(required=False, type='bool', default=True),
state = dict(default='present', choices=['absent','present']),
key_options = dict(required=False, type='str'),
unique = dict(default=False, type='bool'),
exclusive = dict(default=False, type='bool'),
validate_certs = dict(default=True, type='bool'),
),
supports_check_mode=True
)
results = enforce_state(module, module.params)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
DamnWidget/mamba
|
refs/heads/master
|
mamba/test/dummy_app/application/controller/contained.py
|
3
|
# -*- encoding: utf-8 -*-
# -*- mamba-file-type: mamba-controller -*-
# Copyright (c) 2012 Oscar Campos <oscar.campos@member.fsf.org>
"""
Test Dummy Container
"""
from zope.interface import implements
from mamba.web.response import Ok
from mamba.core import interfaces
from mamba.application import controller, route
class DummyContained(controller.Controller, controller.ControllerProvider):
"""
I am a dummy controller to test Mamba
"""
implements(interfaces.IController)
name = 'DummyContained'
desc = 'I am a dummy contained created for tests purposes'
loaded = False
__parent__ = 'container'
__route__ = 'contained'
def __init__(self):
"""
Put here your initialization code
"""
super(DummyContained, self).__init__()
@route('/')
def root(self, request):
return Ok(
'<!DOCTYPE html>'
' <html>'
' <head><title>Dummy Root</title></head>'
' <body>'
' <h1>This is the Dummy Contained Root. Fuck yeah!</h1>'
' </body>'
' </html>'
)
|
nickgravgaard/sqlalchemy-fsm
|
refs/heads/master
|
sqlalchemy_fsm/__init__.py
|
1
|
from .fsm import FSMMeta, can_proceed, FSMField
from .transition import transition
|
kcsry/lippukala
|
refs/heads/master
|
lippukala/views.py
|
1
|
import json
from urllib.parse import parse_qs
from django.http import HttpResponse
from django.views.generic import TemplateView
from lippukala.excs import CantUseException
from lippukala.models import Code
def serialize_code(code):
return {
"id": code.id,
"used": bool(code.used_at),
"code": code.code,
"prefix": code.prefix,
"lit": code.literate_code,
"name": code.order.address_text,
"comment": code.order.comment,
"prod": code.product_text,
}
class POSView(TemplateView):
template_name = "lippukala/pos.html"
def get_valid_codes(self, request):
event_filter = request.GET.get("event")
qs = Code.objects.all().select_related("order")
if event_filter:
qs = qs.filter(order__event=event_filter)
return qs
def get_json(self, request):
qs = self.get_valid_codes(request)
data = [serialize_code(code) for code in qs.iterator()]
json_data = json.dumps({"codes": data})
return HttpResponse(json_data, content_type="application/json")
def get(self, request, *args, **kwargs):
if request.GET.get("json"):
return self.get_json(request)
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
json_data = '{"what": true}'
use = (request.POST.get("use") or request.GET.get("use"))
if not use:
try:
use = parse_qs(request.body)["use"][0]
            except Exception:  # no usable "use" field in the raw body
                pass
if use:
station = "n/a"
try:
station = request.user.username
            except Exception:  # unauthenticated request or missing user
                pass
station = (request.POST.get("station") or request.GET.get("station") or station)
ids = [int(s, 10) for s in use.split(",")]
codes = []
qs = self.get_valid_codes(request)
for id in ids:
code = qs.get(pk=id)
try:
code.set_used(used_at=station)
except CantUseException:
pass
codes.append(code)
data = [serialize_code(code) for code in codes]
json_data = json.dumps({"codes": data})
return HttpResponse(json_data, content_type="application/json")
|
carquois/blobon
|
refs/heads/master
|
blobon/posts/migrations/0016_auto__add_blogcomment.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BlogComment'
db.create_table('posts_blogcomment', (
('post_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['posts.Post'], unique=True, primary_key=True)),
('comment', self.gf('django.db.models.fields.TextField')(max_length=10000)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('name', self.gf('django.db.models.fields.CharField')(max_length=140)),
('website', self.gf('django.db.models.fields.URLField')(max_length=300, blank=True)),
('notify_me', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('posts', ['BlogComment'])
def backwards(self, orm):
# Deleting model 'BlogComment'
db.delete_table('posts_blogcomment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'posts.album': {
'Meta': {'object_name': 'Album', '_ormbases': ['posts.Post']},
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'post_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['posts.Post']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'posts.blogcomment': {
'Meta': {'object_name': 'BlogComment', '_ormbases': ['posts.Post']},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['posts.Post']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'posts.blogpost': {
'Meta': {'object_name': 'BlogPost', '_ormbases': ['posts.Post']},
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'post_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['posts.Post']", 'unique': 'True', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'posts.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['posts.Post']},
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'post_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['posts.Post']", 'unique': 'True', 'primary_key': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'posts.link': {
'Meta': {'object_name': 'Link'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['posts.Album']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['posts.Image']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'posts.post': {
'Meta': {'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'posts.video': {
'Meta': {'object_name': 'Video', '_ormbases': ['posts.Post']},
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'post_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['posts.Post']", 'unique': 'True', 'primary_key': 'True'}),
'video_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['posts']
|
luogangyi/bcec-nova
|
refs/heads/stable/icehouse
|
nova/tests/console/__init__.py
|
187
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
FrankNagel/qlc
|
refs/heads/master
|
src/webapp/quanthistling/scripts/annotations/annotations_for_parker2010a.py
|
1
|
# -*- coding: utf8 -*-
import sys, os
sys.path.append(os.path.abspath('.'))
import re
from operator import attrgetter
import difflib
# Pylons model init sequence
import pylons.test
import logging
from quanthistling.config.environment import load_environment
from quanthistling.model.meta import Session, metadata
from quanthistling import model
import quanthistling.dictdata.books
from paste.deploy import appconfig
import functions
def annotate_head(entry):
# delete head annotations
head_annotations = [ a for a in entry.annotations if a.value=='head' or a.value=="iso-639-3" or a.value=="doculect"]
for a in head_annotations:
Session.delete(a)
heads = []
tabs = [ a for a in entry.annotations if a.value=="tab" ]
tabs = sorted(tabs, key=attrgetter('start'))
head_start = tabs[2].start + 1
head_end = tabs[3].start
head = entry.fullentry[head_start:head_end]
match_bracket = re.search("\([^)]*\) ?$", head)
if match_bracket:
head_end = head_end - len(match_bracket.group(0))
head = functions.insert_head(entry, head_start, head_end)
heads.append(head)
return heads
def annotate_translations(entry):
head_annotations = [ a for a in entry.annotations if a.value=="translation"]
for a in head_annotations:
Session.delete(a)
tabs = [ a for a in entry.annotations if a.value=="tab" ]
tabs = sorted(tabs, key=attrgetter('start'))
# English
trans_start = tabs[3].start + 1
trans_end = tabs[4].start
start = trans_start
for match in re.finditer(u"(?:[;,] |/|$)", entry.fullentry[trans_start:trans_end]):
# Are we in bracket?
in_bracket = False
for match_bracket in re.finditer("\([^)]*\)", entry.fullentry[trans_start:trans_end]):
if match_bracket.start(0) < match.start(0) and match_bracket.end(0) > match.end(0):
in_bracket = True
if not in_bracket:
end = trans_start + match.start(0)
functions.insert_translation(entry, start, end, lang_iso = 'eng', lang_doculect = 'English')
start = trans_start + match.end(0)
# Spanish
trans_start = tabs[4].start + 1
trans_end = len(entry.fullentry)
start = trans_start
for match in re.finditer(u"(?:[;,] |/|$)", entry.fullentry[trans_start:trans_end]):
# Are we in bracket?
in_bracket = False
for match_bracket in re.finditer("\([^)]*\)", entry.fullentry[trans_start:trans_end]):
if match_bracket.start(0) < match.start(0) and match_bracket.end(0) > match.end(0):
in_bracket = True
if not in_bracket:
end = trans_start + match.start(0)
functions.insert_translation(entry, start, end, lang_iso = 'spa', lang_doculect = 'Spanish')
start = trans_start + match.end(0)
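# A minimal illustrative sketch (hypothetical entry text; comments only, not
# executed): the splitting above breaks a translation field on '; ', ', ' or
# '/' but leaves separators inside parentheses alone, e.g.
#
#   "to run; to walk (lit. go, move)/to leave"
#   # -> "to run" | "to walk (lit. go, move)" | "to leave"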
def main(argv):
bibtex_key = u"parker2010a"
if len(argv) < 2:
print "call: annotations_for%s.py ini_file" % bibtex_key
exit(1)
ini_file = argv[1]
conf = appconfig('config:' + ini_file, relative_to='.')
if not pylons.test.pylonsapp:
load_environment(conf.global_conf, conf.local_conf)
# Create the tables if they don't already exist
metadata.create_all(bind=Session.bind)
dictdatas = Session.query(model.Dictdata).join(
(model.Book, model.Dictdata.book_id==model.Book.id)
).filter(model.Book.bibtex_key==bibtex_key).all()
for dictdata in dictdatas:
entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id).all()
#entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id,startpage=15,pos_on_page=5).all()
startletters = set()
for e in entries:
#one time data conversion
if e.fullentry.find('s@') != -1 or e.fullentry.find('c@') != -1:
e.fullentry = e.fullentry.replace(u's@', u's\u0308').replace(u'c@', u'c\u0308')
functions.print_error_in_entry(e, 'Replacing s@ and c@.')
heads = annotate_head(e)
if not e.is_subentry:
for h in heads:
if len(h) > 0:
startletters.add(h[0].lower())
annotate_translations(e)
dictdata.startletters = unicode(repr(sorted(list(startletters))))
Session.commit()
if __name__ == "__main__":
main(sys.argv)
|
graemian/ami-mooc-pilot
|
refs/heads/master
|
modules/review/domain.py
|
33
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and constants for use by internal and external clients."""
__author__ = [
'johncox@google.com (John Cox)',
]
# Identifier for reviews that have been computer-assigned.
ASSIGNER_KIND_AUTO = 'AUTO'
# Identifier for reviews that have been assigned by a human.
ASSIGNER_KIND_HUMAN = 'HUMAN'
ASSIGNER_KINDS = (
ASSIGNER_KIND_AUTO,
ASSIGNER_KIND_HUMAN,
)
# Maximum number of ReviewSteps with removed = False, in any REVIEW_STATE, that
# can exist in the backend at a given time.
MAX_UNREMOVED_REVIEW_STEPS = 100
# State of a review that is currently assigned, either by a human or by machine.
REVIEW_STATE_ASSIGNED = 'ASSIGNED'
# State of a review that is complete and may be shown to the reviewee, provided
# the reviewee is themself in a state to see their reviews.
REVIEW_STATE_COMPLETED = 'COMPLETED'
# State of a review that used to be assigned but the assignment has been
# expired. Only machine-assigned reviews can be expired.
REVIEW_STATE_EXPIRED = 'EXPIRED'
REVIEW_STATES = (
REVIEW_STATE_ASSIGNED,
REVIEW_STATE_COMPLETED,
REVIEW_STATE_EXPIRED,
)
class Error(Exception):
"""Base error class."""
class ConstraintError(Error):
"""Raised when data is found indicating a constraint is violated."""
class NotAssignableError(Error):
"""Raised when review assignment is requested but cannot be satisfied."""
class RemovedError(Error):
"""Raised when an op cannot be performed on a step because it is removed."""
def __init__(self, message, value):
"""Constructs a new RemovedError."""
super(RemovedError, self).__init__(message)
self.value = value
def __str__(self):
return '%s: removed is %s' % (self.message, self.value)
class ReviewProcessAlreadyStartedError(Error):
"""Raised when someone attempts to start a review process in progress."""
class TransitionError(Error):
"""Raised when an invalid state transition is attempted."""
def __init__(self, message, before, after):
"""Constructs a new TransitionError.
Args:
message: string. Exception message.
before: string in peer.ReviewStates (though this is unenforced).
State we attempted to transition from.
after: string in peer.ReviewStates (though this is unenforced).
State we attempted to transition to.
"""
super(TransitionError, self).__init__(message)
self.after = after
self.before = before
def __str__(self):
return '%s: attempted to transition from %s to %s' % (
self.message, self.before, self.after)
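# A minimal illustrative sketch (hypothetical message; comments only, not
# executed):
#
#   raise TransitionError(
#       'cannot expire a completed step',
#       REVIEW_STATE_COMPLETED, REVIEW_STATE_EXPIRED)
#   # str(): 'cannot expire a completed step: attempted to transition '
#   #        'from COMPLETED to EXPIRED'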
class Review(object):
"""Domain object for a student work submission."""
def __init__(self, contents=None, key=None):
self._contents = contents
self._key = key
@property
def contents(self):
return self._contents
@property
def key(self):
return self._key
class ReviewStep(object):
"""Domain object for the status of a single review at a point in time."""
def __init__(
self, assigner_kind=None, change_date=None, create_date=None, key=None,
removed=None, review_key=None, review_summary_key=None,
reviewee_key=None, reviewer_key=None, state=None, submission_key=None,
unit_id=None):
self._assigner_kind = assigner_kind
self._change_date = change_date
self._create_date = create_date
self._key = key
self._removed = removed
self._review_key = review_key
self._review_summary_key = review_summary_key
self._reviewee_key = reviewee_key
self._reviewer_key = reviewer_key
self._state = state
self._submission_key = submission_key
self._unit_id = unit_id
@property
def assigner_kind(self):
return self._assigner_kind
@property
def change_date(self):
return self._change_date
@property
def create_date(self):
return self._create_date
@property
def is_assigned(self):
"""Predicate for whether the step is in REVIEW_STATE_ASSIGNED."""
return self.state == REVIEW_STATE_ASSIGNED
@property
def is_completed(self):
"""Predicate for whether the step is in REVIEW_STATE_COMPLETED."""
return self.state == REVIEW_STATE_COMPLETED
@property
def is_expired(self):
"""Predicate for whether the step is in REVIEW_STATE_EXPIRED."""
return self.state == REVIEW_STATE_EXPIRED
@property
def key(self):
return self._key
@property
def removed(self):
return self._removed
@property
def review_key(self):
return self._review_key
@property
def review_summary_key(self):
return self._review_summary_key
@property
def reviewee_key(self):
return self._reviewee_key
@property
def reviewer_key(self):
return self._reviewer_key
@property
def state(self):
return self._state
@property
def submission_key(self):
return self._submission_key
@property
def unit_id(self):
return self._unit_id
class ReviewSummary(object):
"""Domain object for review state aggregate entities."""
def __init__(
self, assigned_count=None, completed_count=None, change_date=None,
create_date=None, key=None, reviewee_key=None, submission_key=None,
unit_id=None):
self._assigned_count = assigned_count
self._completed_count = completed_count
self._change_date = change_date
self._create_date = create_date
self._key = key
self._reviewee_key = reviewee_key
self._submission_key = submission_key
self._unit_id = unit_id
@property
def assigned_count(self):
return self._assigned_count
@property
def completed_count(self):
return self._completed_count
@property
def change_date(self):
return self._change_date
@property
def create_date(self):
return self._create_date
@property
def key(self):
return self._key
@property
def reviewee_key(self):
return self._reviewee_key
@property
def submission_key(self):
return self._submission_key
@property
def unit_id(self):
return self._unit_id
class Submission(object):
"""Domain object for a student work submission."""
def __init__(self, contents=None, key=None):
self._contents = contents
self._key = key
@property
def contents(self):
return self._contents
@property
def key(self):
return self._key
|
timothyb89/stackviz
|
refs/heads/master
|
stackviz/views/devstack/urls.py
|
1
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from stackviz.views.devstack.results import ResultsView
urlpatterns = patterns('',
url(r'^results$', ResultsView.as_view()),
)
|
beni55/sentry
|
refs/heads/master
|
src/sentry/migrations/0043_auto__chg_field_option_value__chg_field_projectoption_value.py
|
6
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('picklefield.fields.PickledObjectField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('picklefield.fields.PickledObjectField')())
def backwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('django.db.models.fields.TextField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
|
vollov/egallery
|
refs/heads/master
|
gallery/message/migrations/0002_auto_20150518_2158.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('message', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='message',
name='id',
field=models.CharField(default=uuid.uuid4, max_length=64, serialize=False, verbose_name='Activation key', primary_key=True),
),
]
|
Lujeni/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ingate/__init__.py
|
12133432
| |
rapilabs/django
|
refs/heads/master
|
tests/messages_tests/__init__.py
|
12133432
| |
sajeeshcs/nested_quota_latest
|
refs/heads/master
|
nova/tests/unit/conductor/__init__.py
|
12133432
| |
amboycharlie/Child-Friendly-LCMS
|
refs/heads/Child-Friendly-LCMS-0.5
|
leonardo/module/nav/widget/treenavigation/__init__.py
|
12133432
| |
bdeniker/sg
|
refs/heads/master
|
sg/__init__.py
|
12133432
| |
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/tools/swarming_client/third_party/requests/packages/urllib3/contrib/__init__.py
|
12133432
| |
realestate-com-au/python-dashing
|
refs/heads/master
|
dashmat/core_modules/amazon_base/__init__.py
|
12133432
| |
broferek/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/__init__.py
|
12133432
| |
mrucci/moto
|
refs/heads/master
|
moto/rds2/exceptions.py
|
19
|
from __future__ import unicode_literals
import json
from werkzeug.exceptions import BadRequest
class RDSClientError(BadRequest):
def __init__(self, code, message):
super(RDSClientError, self).__init__()
self.description = json.dumps({
"Error": {
"Code": code,
"Message": message,
'Type': 'Sender',
},
'RequestId': '6876f774-7273-11e4-85dc-39e55ca848d1',
})
class DBInstanceNotFoundError(RDSClientError):
def __init__(self, database_identifier):
super(DBInstanceNotFoundError, self).__init__(
'DBInstanceNotFound',
"Database {0} not found.".format(database_identifier))
class DBSecurityGroupNotFoundError(RDSClientError):
def __init__(self, security_group_name):
super(DBSecurityGroupNotFoundError, self).__init__(
'DBSecurityGroupNotFound',
"Security Group {0} not found.".format(security_group_name))
class DBSubnetGroupNotFoundError(RDSClientError):
def __init__(self, subnet_group_name):
super(DBSubnetGroupNotFoundError, self).__init__(
'DBSubnetGroupNotFound',
"Subnet Group {0} not found.".format(subnet_group_name))
|
msarahan/bokeh
|
refs/heads/master
|
bokeh/__init__.py
|
8
|
""" Bokeh is a Python interactive visualization library that targets modern
web browsers for presentation.
Its goal is to provide elegant, concise construction of novel graphics in the
style of d3.js, but also deliver this capability with high-performance
interactivity over very large or streaming datasets. Bokeh can help anyone
who would like to quickly and easily create interactive plots, dashboards,
and data applications.
For full documentation, please visit: http://bokeh.pydata.org
"""
from __future__ import absolute_import, print_function
# configure Bokeh version
from .util.version import __version__; __version__
from .util.version import __base_version__; __base_version__
# configure Bokeh logger
from .util import logconfig
del logconfig
# configure deprecation warnings
import warnings
from .util.deprecate import BokehDeprecationWarning
warnings.simplefilter('always', BokehDeprecationWarning)
# imports below are names we want to make available in the bokeh
# module as transitive imports
from . import sampledata; sampledata
def test(args=None):
''' Run the Bokeh unit tests under the bokeh python directory using ``py.test``.
.. note::
Does not run any BokehJS, examples, or integration tests.
Args:
args(list, optional): command line arguments accepted by ``py.test``
For example, ``args=['-s', '-k charts']`` prevents capture of standard out
and only runs tests that match ``"charts"``. For more ``py.test`` options
see http://pytest.org/latest/usage.html.
Returns:
int: ``py.test`` exitcode
'''
from .util.testing import runtests
return runtests(args)
def license():
''' Print the Bokeh license to the console.
Returns:
None
'''
from os.path import join
with open(join(__path__[0], 'LICENSE.txt')) as lic:
print(lic.read())
|
etashjian/ECE757-final
|
refs/heads/master
|
src/arch/x86/isa/insts/general_purpose/data_transfer/stack_operations.py
|
75
|
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop POP_R {
# Make the default data size of pops 64 bits in 64 bit mode
.adjust_env oszIn64Override
ld t1, ss, [1, t0, rsp], dataSize=ssz
addi rsp, rsp, ssz, dataSize=asz
mov reg, reg, t1
};
def macroop POP_M {
# Make the default data size of pops 64 bits in 64 bit mode
.adjust_env oszIn64Override
ld t1, ss, [1, t0, rsp], dataSize=ssz
cda seg, sib, disp, dataSize=ssz
addi rsp, rsp, ssz, dataSize=asz
st t1, seg, sib, disp, dataSize=ssz
};
def macroop POP_P {
# Make the default data size of pops 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t7
ld t1, ss, [1, t0, rsp], dataSize=ssz
cda seg, sib, disp, dataSize=ssz
addi rsp, rsp, ssz, dataSize=asz
st t1, seg, riprel, disp, dataSize=ssz
};
def macroop PUSH_R {
    # Make the default data size of pushes 64 bits in 64 bit mode
.adjust_env oszIn64Override
st reg, ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
subi rsp, rsp, ssz
};
def macroop PUSH_I {
    # Make the default data size of pushes 64 bits in 64 bit mode
.adjust_env oszIn64Override
limm t1, imm
st t1, ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
subi rsp, rsp, ssz
};
def macroop PUSH_M {
    # Make the default data size of pushes 64 bits in 64 bit mode
.adjust_env oszIn64Override
ld t1, seg, sib, disp, dataSize=ssz
st t1, ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
subi rsp, rsp, ssz
};
def macroop PUSH_P {
    # Make the default data size of pushes 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t7
ld t1, seg, riprel, disp, dataSize=ssz
st t1, ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
subi rsp, rsp, ssz
};
def macroop PUSHA {
# Check all the stack addresses. We'll assume that if the beginning and
# end are ok, then the stuff in the middle should be as well.
cda ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
cda ss, [1, t0, rsp], "-8 * env.stackSize", dataSize=ssz
st rax, ss, [1, t0, rsp], "1 * -env.stackSize", dataSize=ssz
st rcx, ss, [1, t0, rsp], "2 * -env.stackSize", dataSize=ssz
st rdx, ss, [1, t0, rsp], "3 * -env.stackSize", dataSize=ssz
st rbx, ss, [1, t0, rsp], "4 * -env.stackSize", dataSize=ssz
st rsp, ss, [1, t0, rsp], "5 * -env.stackSize", dataSize=ssz
st rbp, ss, [1, t0, rsp], "6 * -env.stackSize", dataSize=ssz
st rsi, ss, [1, t0, rsp], "7 * -env.stackSize", dataSize=ssz
st rdi, ss, [1, t0, rsp], "8 * -env.stackSize", dataSize=ssz
subi rsp, rsp, "8 * env.stackSize"
};
def macroop POPA {
# Check all the stack addresses. We'll assume that if the beginning and
# end are ok, then the stuff in the middle should be as well.
ld t1, ss, [1, t0, rsp], "0 * env.stackSize", dataSize=ssz
ld t2, ss, [1, t0, rsp], "7 * env.stackSize", dataSize=ssz
mov rdi, rdi, t1, dataSize=ssz
ld rsi, ss, [1, t0, rsp], "1 * env.stackSize", dataSize=ssz
ld rbp, ss, [1, t0, rsp], "2 * env.stackSize", dataSize=ssz
ld rbx, ss, [1, t0, rsp], "4 * env.stackSize", dataSize=ssz
ld rdx, ss, [1, t0, rsp], "5 * env.stackSize", dataSize=ssz
ld rcx, ss, [1, t0, rsp], "6 * env.stackSize", dataSize=ssz
mov rax, rax, t2, dataSize=ssz
addi rsp, rsp, "8 * env.stackSize", dataSize=asz
};
def macroop LEAVE {
# Make the default data size of pops 64 bits in 64 bit mode
.adjust_env oszIn64Override
mov t1, t1, rbp, dataSize=ssz
ld rbp, ss, [1, t0, t1], dataSize=ssz
mov rsp, rsp, t1, dataSize=ssz
addi rsp, rsp, ssz, dataSize=ssz
};
def macroop ENTER_I_I {
.adjust_env oszIn64Override
# This needs to check all the addresses it writes to before it actually
# writes any values.
# Pull the different components out of the immediate
limm t1, imm, dataSize=8
zexti t2, t1, 15, dataSize=8
srli t1, t1, 16, dataSize=8
zexti t1, t1, 5, dataSize=8
# t1 is now the masked nesting level, and t2 is the amount of storage.
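    # (The x86 ISA masks the nesting level to its low 5 bits, i.e. level % 32.)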
# Push rbp.
st rbp, ss, [1, t0, rsp], "-env.dataSize"
subi rsp, rsp, ssz
# Save the stack pointer for later
mov t6, t6, rsp
# If the nesting level is zero, skip all this stuff.
sub t0, t1, t0, flags=(EZF,), dataSize=2
br label("skipLoop"), flags=(CEZF,)
# If the level was 1, only push the saved rbp
subi t0, t1, 1, flags=(EZF,)
br label("bottomOfLoop"), flags=(CEZF,)
limm t4, "ULL(-1)", dataSize=8
topOfLoop:
ld t5, ss, [dsz, t4, rbp]
st t5, ss, [1, t0, rsp], "-env.dataSize"
subi rsp, rsp, ssz
# If we're not done yet, loop
subi t4, t4, 1, dataSize=8
add t0, t4, t1, flags=(EZF,)
br label("topOfLoop"), flags=(nCEZF,)
bottomOfLoop:
# Push the old rbp onto the stack
st t6, ss, [1, t0, rsp], "-env.dataSize"
subi rsp, rsp, ssz
skipLoop:
sub rsp, rsp, t2, dataSize=ssz
mov rbp, rbp, t6
};
'''
|
mrkm4ntr/incubator-airflow
|
refs/heads/master
|
tests/providers/microsoft/azure/transfers/test_azure_blob_to_gcs.py
|
7
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.microsoft.azure.transfers.azure_blob_to_gcs import AzureBlobStorageToGCSOperator
WASB_CONN_ID = "wasb_default"
GCP_CONN_ID = "google_cloud_default"
BLOB_NAME = "azure_blob"
FILE_PATH = "/file/to/path"
CONTAINER_NAME = "azure_container"
BUCKET_NAME = "airflow"
OBJECT_NAME = "file.txt"
FILENAME = "file.txt"
GZIP = False
DELEGATE_TO = None
IMPERSONATION_CHAIN = None
TASK_ID = "transfer_file"
class TestAzureBlobStorageToGCSTransferOperator(unittest.TestCase):
def test_init(self):
operator = AzureBlobStorageToGCSOperator(
wasb_conn_id=WASB_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
blob_name=BLOB_NAME,
file_path=FILE_PATH,
container_name=CONTAINER_NAME,
bucket_name=BUCKET_NAME,
object_name=OBJECT_NAME,
filename=FILENAME,
gzip=GZIP,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
task_id=TASK_ID,
)
self.assertEqual(operator.wasb_conn_id, WASB_CONN_ID)
self.assertEqual(operator.blob_name, BLOB_NAME)
self.assertEqual(operator.file_path, FILE_PATH)
self.assertEqual(operator.container_name, CONTAINER_NAME)
self.assertEqual(operator.gcp_conn_id, GCP_CONN_ID)
self.assertEqual(operator.bucket_name, BUCKET_NAME)
self.assertEqual(operator.object_name, OBJECT_NAME)
self.assertEqual(operator.filename, FILENAME)
self.assertEqual(operator.gzip, GZIP)
self.assertEqual(operator.delegate_to, DELEGATE_TO)
self.assertEqual(operator.impersonation_chain, IMPERSONATION_CHAIN)
self.assertEqual(operator.task_id, TASK_ID)
@mock.patch("airflow.providers.microsoft.azure.transfers.azure_blob_to_gcs.WasbHook")
@mock.patch("airflow.providers.microsoft.azure.transfers.azure_blob_to_gcs.GCSHook")
@mock.patch("airflow.providers.microsoft.azure.transfers.azure_blob_to_gcs.tempfile")
def test_execute(self, mock_temp, mock_hook_gcs, mock_hook_wasb):
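        # The operator should download the blob into a NamedTemporaryFile and
        # upload that same temporary file to GCS; the mocks check both calls.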
op = AzureBlobStorageToGCSOperator(
wasb_conn_id=WASB_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
blob_name=BLOB_NAME,
file_path=FILE_PATH,
container_name=CONTAINER_NAME,
bucket_name=BUCKET_NAME,
object_name=OBJECT_NAME,
filename=FILENAME,
gzip=GZIP,
delegate_to=DELEGATE_TO,
impersonation_chain=IMPERSONATION_CHAIN,
task_id=TASK_ID,
)
op.execute(context=None)
mock_hook_wasb.assert_called_once_with(wasb_conn_id=WASB_CONN_ID)
mock_hook_wasb.return_value.get_file.assert_called_once_with(
file_path=mock_temp.NamedTemporaryFile.return_value.__enter__.return_value.name,
container_name=CONTAINER_NAME,
blob_name=BLOB_NAME,
)
mock_hook_gcs.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN
)
mock_hook_gcs.return_value.upload.assert_called_once_with(
bucket_name=BUCKET_NAME,
object_name=OBJECT_NAME,
gzip=GZIP,
filename=mock_temp.NamedTemporaryFile.return_value.__enter__.return_value.name,
)
|
oskar456/youtube-dl
|
refs/heads/master
|
setup.py
|
8
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import os.path
import warnings
import sys
try:
from setuptools import setup, Command
setuptools_available = True
except ImportError:
from distutils.core import setup, Command
setuptools_available = False
from distutils.spawn import spawn
try:
# This will create an exe that needs Microsoft Visual C++ 2008
# Redistributable Package
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print('Cannot import py2exe', file=sys.stderr)
exit(1)
py2exe_options = {
'bundle_files': 1,
'compressed': 1,
'optimize': 2,
'dist_dir': '.',
'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
}
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
'youtube_dl/version.py', 'exec'))
DESCRIPTION = 'YouTube video downloader'
LONG_DESCRIPTION = 'Command-line program to download videos from YouTube.com and other video sites'
py2exe_console = [{
'script': './youtube_dl/__main__.py',
'dest_base': 'youtube-dl',
'version': __version__,
'description': DESCRIPTION,
'comments': LONG_DESCRIPTION,
'product_name': 'youtube-dl',
'product_version': __version__,
}]
py2exe_params = {
'console': py2exe_console,
'options': {'py2exe': py2exe_options},
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
files_spec = [
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
('etc/fish/completions', ['youtube-dl.fish']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]
root = os.path.dirname(os.path.abspath(__file__))
data_files = []
for dirname, files in files_spec:
resfiles = []
for fn in files:
if not os.path.exists(fn):
warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
else:
resfiles.append(fn)
data_files.append((dirname, resfiles))
params = {
'data_files': data_files,
}
if setuptools_available:
params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
else:
params['scripts'] = ['bin/youtube-dl']
class build_lazy_extractors(Command):
description = 'Build the extractor lazy loading module'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
spawn(
[sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'],
dry_run=self.dry_run,
)
setup(
name='youtube_dl',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='https://github.com/rg3/youtube-dl',
author='Ricardo Garcia',
author_email='ytdl@yt-dl.org',
maintainer='Sergey M.',
maintainer_email='dstftw@gmail.com',
packages=[
'youtube_dl',
'youtube_dl.extractor', 'youtube_dl.downloader',
'youtube_dl.postprocessor'],
# Provokes warning on most systems (why?!)
# test_suite = 'nose.collector',
# test_requires = ['nosetest'],
classifiers=[
'Topic :: Multimedia :: Video',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: Public Domain',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
cmdclass={'build_lazy_extractors': build_lazy_extractors},
**params
)
|
anitagraser/processing_pysal
|
refs/heads/master
|
ext-libs/pysal/core/IOHandlers/tests/test_dat.py
|
20
|
import unittest
import pysal
from pysal.core.IOHandlers.dat import DatIO
import tempfile
import os
class test_DatIO(unittest.TestCase):
def setUp(self):
self.test_file = test_file = pysal.examples.get_path('wmat.dat')
self.obj = DatIO(test_file, 'r')
def test_close(self):
f = self.obj
f.close()
        self.assertRaises(ValueError, f.read)
def test_read(self):
w = self.obj.read()
self.assertEqual(49, w.n)
self.assertEqual(4.7346938775510203, w.mean_neighbors)
self.assertEqual([0.5, 0.5], w[5.0].values())
def test_seek(self):
self.test_read()
        self.assertRaises(StopIteration, self.obj.read)
self.obj.seek(0)
self.test_read()
def test_write(self):
w = self.obj.read()
f = tempfile.NamedTemporaryFile(
suffix='.dat', dir=pysal.examples.get_path(''))
fname = f.name
f.close()
o = pysal.open(fname, 'w')
o.write(w)
o.close()
wnew = pysal.open(fname, 'r').read()
self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
os.remove(fname)
if __name__ == '__main__':
unittest.main()
|
hellhovnd/django
|
refs/heads/master
|
django/core/management/commands/makemessages.py
|
3
|
import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
import django
from django.core.management.base import CommandError, NoArgsCommand
from django.core.management.utils import (handle_extensions, find_command,
popen_wrapper)
from django.utils.functional import total_ordering
from django.utils.text import get_text_list
from django.utils.jslex import prepare_js_for_gettext
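# Matches the "Plural-Forms: ..." header entry of a PO/POT file.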
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError("Can't find %s. Make sure you have GNU "
"gettext tools 0.15 or newer installed." % program)
@total_ordering
class TranslatableFile(object):
def __init__(self, dirpath, file_name):
self.file = file_name
self.dirpath = dirpath
def __repr__(self):
return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file])
def __eq__(self, other):
return self.dirpath == other.dirpath and self.file == other.file
def __lt__(self, other):
if self.dirpath == other.dirpath:
return self.file < other.file
return self.dirpath < other.dirpath
def process(self, command, potfile, domain, keep_pot=False):
"""
Extract translatable literals from self.file for :param domain:
creating or updating the :param potfile: POT file.
Uses the xgettext GNU gettext utility.
"""
from django.utils.translation import templatize
if command.verbosity > 1:
command.stdout.write('processing file %s in %s\n' % (self.file, self.dirpath))
_, file_ext = os.path.splitext(self.file)
if domain == 'djangojs' and file_ext in command.extensions:
is_templatized = True
orig_file = os.path.join(self.dirpath, self.file)
with open(orig_file) as fp:
src_data = fp.read()
src_data = prepare_js_for_gettext(src_data)
thefile = '%s.c' % self.file
work_file = os.path.join(self.dirpath, thefile)
with open(work_file, "w") as fp:
fp.write(src_data)
args = [
'xgettext',
'-d', domain,
'--language=C',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--from-code=UTF-8',
'--add-comments=Translators',
'--output=-'
]
if command.wrap:
args.append(command.wrap)
if command.location:
args.append(command.location)
args.append(work_file)
elif domain == 'django' and (file_ext == '.py' or file_ext in command.extensions):
thefile = self.file
orig_file = os.path.join(self.dirpath, self.file)
is_templatized = file_ext in command.extensions
if is_templatized:
with open(orig_file, "rU") as fp:
src_data = fp.read()
thefile = '%s.py' % self.file
content = templatize(src_data, orig_file[2:])
with open(os.path.join(self.dirpath, thefile), "w") as fp:
fp.write(content)
work_file = os.path.join(self.dirpath, thefile)
args = [
'xgettext',
'-d', domain,
'--language=Python',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=ugettext_noop',
'--keyword=ugettext_lazy',
'--keyword=ungettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--keyword=pgettext_lazy:1c,2',
'--keyword=npgettext_lazy:1c,2,3',
'--from-code=UTF-8',
'--add-comments=Translators',
'--output=-'
]
if command.wrap:
args.append(command.wrap)
if command.location:
args.append(command.location)
args.append(work_file)
else:
return
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
if is_templatized:
os.unlink(work_file)
if not keep_pot and os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(self.file, errors))
elif command.verbosity > 0:
# Print warnings
command.stdout.write(errors)
if msgs:
if is_templatized:
old = '#: ' + work_file[2:]
new = '#: ' + orig_file[2:]
msgs = msgs.replace(old, new)
write_pot_file(potfile, msgs)
if is_templatized:
os.unlink(work_file)
def write_pot_file(potfile, msgs):
"""
Write the :param potfile: POT file with the :param msgs: contents,
previously making sure its format is valid.
"""
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
with open(potfile, 'a') as fp:
fp.write(msgs)
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale', action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times, accepts a comma-separated list of locale names.'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: "html,txt", or "js" if the domain is "djangojs"). Separate multiple extensions with commas, or use -e multiple times.',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines."),
make_option('--no-location', action='store_true', dest='no_location',
default=False, help="Don't write '#: filename:line' lines."),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings."),
make_option('--keep-pot', action='store_true', dest='keep_pot',
default=False, help="Keep .pot file after making messages. Useful when debugging."),
)
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
requires_model_validation = False
leave_locale_alone = True
def handle_noargs(self, *args, **options):
locale = options.get('locale')
self.domain = options.get('domain')
self.verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
self.symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
self.ignore_patterns = list(set(ignore_patterns))
self.wrap = '--no-wrap' if options.get('no_wrap') else ''
self.location = '--no-location' if options.get('no_location') else ''
self.no_obsolete = options.get('no_obsolete')
self.keep_pot = options.get('keep_pot')
if self.domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains "
"'django' and 'djangojs'")
if self.domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt']
self.extensions = handle_extensions(exts)
if (locale is None and not process_all) or self.domain is None:
raise CommandError("Type '%s help %s' for usage information." % (
os.path.basename(sys.argv[0]), sys.argv[1]))
if self.verbosity > 1:
self.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(self.extensions), 'and'))
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
            settings.configure(USE_I18N=True)
self.invoked_for_django = False
if os.path.isdir(os.path.join('conf', 'locale')):
localedir = os.path.abspath(os.path.join('conf', 'locale'))
self.invoked_for_django = True
# Ignoring all contrib apps
self.ignore_patterns += ['contrib/*']
elif os.path.isdir('locale'):
localedir = os.path.abspath('locale')
else:
raise CommandError("This script should be run from the Django Git "
"tree or your project or app tree. If you did indeed run it "
"from the Git checkout or your project or application, "
"maybe you are just missing the conf/locale (in the django "
"tree) or locale (for project and application) directory? It "
"is not created automatically, you have to create it by hand "
"if you want to enable i18n for your project or application.")
check_programs('xgettext')
# We require gettext version 0.15 or newer.
output, errors, status = popen_wrapper(['xgettext', '--version'])
if status != STATUS_OK:
raise CommandError("Error running xgettext. Note that Django "
"internationalization requires GNU gettext 0.15 or newer.")
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU "
"gettext 0.15 or newer. You are using version %s, please "
"upgrade your gettext toolset." % match.group())
potfile = self.build_pot_file(localedir)
# Build po files for each selected locale
locales = []
if locale is not None:
locales += locale.split(',') if not isinstance(locale, list) else locale
elif process_all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
locales = [os.path.basename(l) for l in locale_dirs]
if locales:
check_programs('msguniq', 'msgmerge', 'msgattrib')
try:
for locale in locales:
if self.verbosity > 0:
self.stdout.write("processing locale %s\n" % locale)
self.write_po_file(potfile, locale)
finally:
if not self.keep_pot and os.path.exists(potfile):
os.unlink(potfile)
def build_pot_file(self, localedir):
file_list = self.find_files(".")
potfile = os.path.join(localedir, '%s.pot' % str(self.domain))
if os.path.exists(potfile):
# Remove a previous undeleted potfile, if any
os.unlink(potfile)
for f in file_list:
f.process(self, potfile, self.domain, self.keep_pot)
return potfile
def find_files(self, root):
"""
Helper method to get all files in the given root.
"""
def is_ignored(path, ignore_patterns):
"""
Check if the given path should be ignored or not.
"""
for pattern in ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
dir_suffix = '%s*' % os.sep
norm_patterns = [p[:-len(dir_suffix)] if p.endswith(dir_suffix) else p for p in self.ignore_patterns]
all_files = []
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
for dirname in dirnames[:]:
if is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns):
dirnames.remove(dirname)
if self.verbosity > 1:
self.stdout.write('ignoring directory %s\n' % dirname)
for filename in filenames:
if is_ignored(os.path.normpath(os.path.join(dirpath, filename)), self.ignore_patterns):
if self.verbosity > 1:
self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
all_files.append(TranslatableFile(dirpath, filename))
return sorted(all_files)
def write_po_file(self, potfile, locale):
"""
Creates or updates the PO file for self.domain and :param locale:.
Uses contents of the existing :param potfile:.
        Uses msguniq, msgmerge, and msgattrib GNU gettext utilities.
"""
args = ['msguniq', '--to-code=utf-8']
if self.wrap:
args.append(self.wrap)
if self.location:
args.append(self.location)
args.append(potfile)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % str(self.domain))
if os.path.exists(pofile):
with open(potfile, 'w') as fp:
fp.write(msgs)
args = ['msgmerge', '-q']
if self.wrap:
args.append(self.wrap)
if self.location:
args.append(self.location)
args.extend([pofile, potfile])
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
elif not self.invoked_for_django:
msgs = self.copy_plural_forms(msgs, locale)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
with open(pofile, 'w') as fp:
fp.write(msgs)
if self.no_obsolete:
args = ['msgattrib', '-o', pofile, '--no-obsolete']
if self.wrap:
args.append(self.wrap)
if self.location:
args.append(self.location)
args.append(pofile)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
def copy_plural_forms(self, msgs, locale):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if self.domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with open(django_po, 'rU') as fp:
m = plural_forms_re.search(fp.read())
if m:
if self.verbosity > 1:
self.stdout.write("copying plural forms: %s\n" % m.group('value'))
lines = []
seen = False
for line in msgs.split('\n'):
if not line and not seen:
line = '%s\n' % m.group('value')
seen = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
|
PeterHenell/performance-dashboard
|
refs/heads/master
|
performance-collector2/wmi_source.py
|
1
|
class WMISource:
def get_sources(self):
return []
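# NOTE: WMI collection is currently disabled; the original implementation is
# kept below, commented out, for reference.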
#
#
# '''
# Original Author/Source:
# http://monkut.webfactional.com/blog/archive/2009/1/21/windows-process-memory-logging-python
# http://stackoverflow.com/questions/276052/how-to-get-current-cpu-and-ram-usage-in-python
# Monitor window processes
# derived from:
# >for sys available mem
# http://msdn2.microsoft.com/en-us/library/aa455130.aspx
# > individual process information and python script examples
# http://www.microsoft.com/technet/scriptcenter/scripts/default.mspx?mfr=true
# NOTE: the WMI interface/process is also available for performing similar tasks
# I'm not using it here because the current method covers my needs, but if someday it's needed
# to extend or improve this module, then may want to investigate the WMI tools available.
# WMI for python:
# http://tgolden.sc.sabren.com/python/wmi.html
# '''
#
# __revision__ = 3
#
# import win32com.client
# from ctypes import *
# from ctypes.wintypes import *
# import pythoncom
# import pywintypes
# import datetime
#
#
# class MEMORYSTATUS(Structure):
# _fields_ = [
# ('dwLength', DWORD),
# ('dwMemoryLoad', DWORD),
# ('dwTotalPhys', DWORD),
# ('dwAvailPhys', DWORD),
# ('dwTotalPageFile', DWORD),
# ('dwAvailPageFile', DWORD),
# ('dwTotalVirtual', DWORD),
# ('dwAvailVirtual', DWORD),
# ]
#
#
# def winmem():
# x = MEMORYSTATUS() # create the structure
# windll.kernel32.GlobalMemoryStatus(byref(x)) # from cytypes.wintypes
# return x
#
#
# class process_stats:
# '''process_stats is able to provide counters of (all?) the items available in perfmon.
# Refer to the self.supported_types keys for the currently supported 'Performance Objects'
# To add logging support for other data you can derive the necessary data from perfmon:
# ---------
# perfmon can be run from windows 'run' menu by entering 'perfmon' and enter.
# Clicking on the '+' will open the 'add counters' menu,
# From the 'Add Counters' dialog, the 'Performance object' is the self.support_types key.
# --> Where spaces are removed and symbols are entered as text (Ex. # == Number, % == Percent)
# For the items you wish to log add the proper attribute name in the list in the self.supported_types dictionary,
# keyed by the 'Performance Object' name as mentioned above.
# ---------
# NOTE: The 'NETFramework_NETCLRMemory' key does not seem to log dotnet 2.0 properly.
# Initially the python implementation was derived from:
# http://www.microsoft.com/technet/scriptcenter/scripts/default.mspx?mfr=true
# '''
# def __init__(self,process_name_list=[],perf_object_list=[],filter_list=[]):
# '''process_names_list == the list of all processes to log (if empty log all)
# perf_object_list == list of process counters to log
# filter_list == list of text to filter
# print_results == boolean, output to stdout
# '''
# pythoncom.CoInitialize() # Needed when run by the same process in a thread
#
# self.process_name_list = process_name_list
# self.perf_object_list = perf_object_list
# self.filter_list = filter_list
#
# self.win32_perf_base = 'Win32_PerfFormattedData_'
#
# # Define new datatypes here!
# self.supported_types = {
# 'NETFramework_NETCLRMemory': [
# 'Name',
# 'NumberTotalCommittedBytes',
# 'NumberTotalReservedBytes',
# 'NumberInducedGC',
# 'NumberGen0Collections',
# 'NumberGen1Collections',
# 'NumberGen2Collections',
# 'PromotedMemoryFromGen0',
# 'PromotedMemoryFromGen1',
# 'PercentTimeInGC',
# 'LargeObjectHeapSize'
# ],
#
# 'PerfProc_Process': [
# 'Name',
# 'PrivateBytes',
# 'ElapsedTime',
# 'IDProcess',# pid
# 'Caption',
# 'CreatingProcessID',
# 'Description',
# 'IODataBytesPersec',
# 'IODataOperationsPersec',
# 'IOOtherBytesPersec',
# 'IOOtherOperationsPersec',
# 'IOReadBytesPersec',
# 'IOReadOperationsPersec',
# 'IOWriteBytesPersec',
# 'IOWriteOperationsPersec'
# ]
# }
#
# def get_pid_stats(self, pid):
# this_proc_dict = {}
#
# pythoncom.CoInitialize() # Needed when run by the same process in a thread
# if not self.perf_object_list:
# perf_object_list = self.supported_types.keys()
#
# for counter_type in perf_object_list:
# strComputer = "."
# objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
# objSWbemServices = objWMIService.ConnectServer(strComputer,"root\cimv2")
#
# query_str = '''Select * from %s%s''' % (self.win32_perf_base,counter_type)
# colItems = objSWbemServices.ExecQuery(query_str) # "Select * from Win32_PerfFormattedData_PerfProc_Process")# changed from Win32_Thread
#
# if len(colItems) > 0:
# for objItem in colItems:
# if hasattr(objItem, 'IDProcess') and pid == objItem.IDProcess:
#
# for attribute in self.supported_types[counter_type]:
# eval_str = 'objItem.%s' % (attribute)
# this_proc_dict[attribute] = eval(eval_str)
#
# this_proc_dict['TimeStamp'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.') + str(datetime.datetime.now().microsecond)[:3]
# break
#
# return this_proc_dict
#
#
# def get_stats(self):
# '''
# Show process stats for all processes in given list, if none given return all processes
# If filter list is defined return only the items that match or contained in the list
# Returns a list of result dictionaries
# '''
# pythoncom.CoInitialize() # Needed when run by the same process in a thread
# proc_results_list = []
# if not self.perf_object_list:
# perf_object_list = self.supported_types.keys()
#
# for counter_type in perf_object_list:
# strComputer = "."
# objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
# objSWbemServices = objWMIService.ConnectServer(strComputer,"root\cimv2")
#
# query_str = '''Select * from %s%s''' % (self.win32_perf_base,counter_type)
# colItems = objSWbemServices.ExecQuery(query_str) # "Select * from Win32_PerfFormattedData_PerfProc_Process")# changed from Win32_Thread
#
# try:
# if len(colItems) > 0:
# for objItem in colItems:
# found_flag = False
# this_proc_dict = {}
#
# if not self.process_name_list:
# found_flag = True
# else:
# # Check if process name is in the process name list, allow print if it is
# for proc_name in self.process_name_list:
# obj_name = objItem.Name
# if proc_name.lower() in obj_name.lower(): # will log if contains name
# found_flag = True
# break
#
# if found_flag:
# for attribute in self.supported_types[counter_type]:
# eval_str = 'objItem.%s' % (attribute)
# this_proc_dict[attribute] = eval(eval_str)
#
# this_proc_dict['TimeStamp'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.') + str(datetime.datetime.now().microsecond)[:3]
# proc_results_list.append(this_proc_dict)
#
# except pywintypes.com_error, err_msg:
# # Ignore and continue (proc_mem_logger calls this function once per second)
# continue
# return proc_results_list
#
#
# def get_sys_stats():
# ''' Returns a dictionary of the system stats'''
# pythoncom.CoInitialize() # Needed when run by the same process in a thread
# x = winmem()
#
# sys_dict = {
# 'dwAvailPhys': x.dwAvailPhys,
# 'dwAvailVirtual':x.dwAvailVirtual
# }
# return sys_dict
#
#
# if __name__ == '__main__':
# # This area used for testing only
# sys_dict = get_sys_stats()
#
# stats_processor = process_stats(process_name_list=['process2watch'],perf_object_list=[],filter_list=[])
# proc_results = stats_processor.get_stats()
#
# for result_dict in proc_results:
# print result_dict
#
# import os
# this_pid = os.getpid()
# this_proc_results = stats_processor.get_pid_stats(this_pid)
#
# print 'this proc results:'
# print this_proc_results
|
sexroute/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Tools/pybench/Lookups.py
|
45
|
from pybench import Test
class SpecialClassAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
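    # Each round runs five blocks of 12 attribute writes and 12 reads.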
rounds = 100000
def test(self):
class c:
pass
for i in xrange(self.rounds):
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
def calibrate(self):
class c:
pass
for i in xrange(self.rounds):
pass
class NormalClassAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
for i in xrange(self.rounds):
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
def calibrate(self):
class c:
pass
for i in xrange(self.rounds):
pass
class SpecialInstanceAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
o = c()
for i in xrange(self.rounds):
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
def calibrate(self):
class c:
pass
o = c()
for i in xrange(self.rounds):
pass
class NormalInstanceAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
o = c()
for i in xrange(self.rounds):
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
def calibrate(self):
class c:
pass
o = c()
for i in xrange(self.rounds):
pass
class BuiltinMethodLookup(Test):
version = 2.0
operations = 5*(3*5 + 3*5)
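    # Each round runs five blocks of 15 list-method and 15 dict-method lookups.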
rounds = 70000
def test(self):
l = []
d = {}
for i in xrange(self.rounds):
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
d.has_key
d.has_key
d.has_key
d.has_key
d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
d.has_key
d.has_key
d.has_key
d.has_key
d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
d.has_key
d.has_key
d.has_key
d.has_key
d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
d.has_key
d.has_key
d.has_key
d.has_key
d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
d.has_key
d.has_key
d.has_key
d.has_key
d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
def calibrate(self):
l = []
d = {}
for i in xrange(self.rounds):
pass
|
danielhjames/Booktype
|
refs/heads/master
|
lib/booktype/apps/portal/forms.py
|
6
|
from django import forms
from django.utils.html import escape
from django.forms.utils import ErrorList
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from booktype.utils.misc import booktype_slugify
from booki.editor.models import BookiGroup
from booktype.utils import misc
from booktype.apps.core.forms import BaseBooktypeForm
from widgets import RemovableImageWidget
class SpanErrorList(ErrorList):
def __unicode__(self):
return unicode(self.as_spans())
def as_spans(self):
return "<span style='color: red'>%s</span>" % (
",".join([e for e in self]))
class BaseGroupForm(BaseBooktypeForm, forms.ModelForm):
name = forms.CharField()
description = forms.CharField(
label=_('Description (250 characters)'),
required=False,
max_length=250,
widget=forms.Textarea(attrs={'rows': '10', 'cols': '40'})
)
group_image = forms.FileField(
label=_('Group image'),
required=False,
widget=RemovableImageWidget(attrs={
'label_class': 'checkbox-inline',
'input_class': 'group-image-removable'
}
)
)
class Meta:
model = BookiGroup
fields = [
'name', 'description'
]
def __init__(self, *args, **kwargs):
kwargs.update({'error_class': SpanErrorList})
super(BaseGroupForm, self).__init__(*args, **kwargs)
def clean_name(self):
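        # The slugified name must be unique across groups; excluding this
        # instance's pk lets an update keep its existing name.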
new_url_name = booktype_slugify(self.cleaned_data['name'])
group_data_url_name = BookiGroup.objects.filter(url_name=new_url_name).exclude(pk=self.instance.pk)
if len(group_data_url_name) > 0:
raise ValidationError(_('Group name is already in use'))
return self.cleaned_data.get('name', '')
def clean_description(self):
return escape(self.cleaned_data.get('description', ''))
def set_group_image(self, group_id, group_image):
try:
filename = misc.set_group_image(group_id, group_image, 240, 240)
if len(filename) == 0:
raise ValidationError(_('Only JPEG file is allowed for group image.'))
else:
misc.set_group_image("{}_small".format(group_id), group_image, 18, 18)
except Exception as err:
# TODO: we should do something here
print err
class GroupCreateForm(BaseGroupForm):
pass
class GroupUpdateForm(BaseGroupForm):
def clean_group_image(self):
group_image = self.files.get('group_image', None)
group_id = str(self.instance.pk)
if group_image:
self.set_group_image(group_id, group_image)
return group_image
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Tools/scripts/objgraph.py
|
96
|
#! /usr/bin/env python
# objgraph
#
# Read "nm -o" input (on IRIX: "nm -Bo") of a set of libraries or modules
# and print various interesting listings, such as:
#
# - which names are used but not defined in the set (and used where),
# - which names are defined in the set (and where),
# - which modules use which other modules,
# - which modules are used by which other modules.
#
# Usage: objgraph [-cdu] [file] ...
# -c: print callers per objectfile
# -d: print callees per objectfile
# -u: print usage of undefined symbols
# If none of -cdu is specified, all are assumed.
# Use "nm -o" to generate the input (on IRIX: "nm -Bo"),
# e.g.: nm -o /lib/libc.a | objgraph
import sys
import os
import getopt
import re
# Types of symbols.
#
definitions = 'TRGDSBAEC'
externals = 'UV'
ignore = 'Nntrgdsbavuc'
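# ("nm" symbol type letters: e.g. T = text/code, D = initialized data,
# B = BSS, U = undefined reference; lowercase letters are local symbols.)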
# Regular expression to parse "nm -o" output.
#
matcher = re.compile('(.*):\t?........ (.) (.*)$')
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
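# (Equivalent to the modern idiom: dict.setdefault(key, []).append(item).)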
def store(dict, key, item):
if dict.has_key(key):
dict[key].append(item)
else:
dict[key] = [item]
# Return a flattened version of a list of strings: the concatenation
# of its elements with intervening spaces.
#
def flat(list):
s = ''
for item in list:
s = s + ' ' + item
return s[1:]
# Global variables mapping defined/undefined names to files and back.
#
file2undef = {}
def2file = {}
file2def = {}
undef2file = {}
# Read one input file and merge the data into the tables.
# Argument is an open file.
#
def readinput(fp):
while 1:
s = fp.readline()
if not s:
break
        # Skip any line that does not look like "nm -o" output:
        if matcher.search(s) < 0: continue # Shouldn't happen
(ra, rb), (r1a, r1b), (r2a, r2b), (r3a, r3b) = matcher.regs[:4]
fn, name, type = s[r1a:r1b], s[r3a:r3b], s[r2a:r2b]
if type in definitions:
store(def2file, name, fn)
store(file2def, fn, name)
elif type in externals:
store(file2undef, fn, name)
store(undef2file, name, fn)
elif not type in ignore:
print fn + ':' + name + ': unknown type ' + type
# Print all names that were undefined in some module and where they are
# defined.
#
def printcallee():
flist = file2undef.keys()
flist.sort()
for filename in flist:
print filename + ':'
elist = file2undef[filename]
elist.sort()
for ext in elist:
if len(ext) >= 8:
tabs = '\t'
else:
tabs = '\t\t'
if not def2file.has_key(ext):
print '\t' + ext + tabs + ' *undefined'
else:
print '\t' + ext + tabs + flat(def2file[ext])
# Print for each module the names of the other modules that use it.
#
def printcaller():
files = file2def.keys()
files.sort()
for filename in files:
callers = []
for label in file2def[filename]:
if undef2file.has_key(label):
callers = callers + undef2file[label]
if callers:
callers.sort()
print filename + ':'
lastfn = ''
for fn in callers:
                if fn != lastfn:
print '\t' + fn
lastfn = fn
else:
print filename + ': unused'
# Print undefined names and where they are used.
#
def printundef():
undefs = {}
for filename in file2undef.keys():
for ext in file2undef[filename]:
if not def2file.has_key(ext):
store(undefs, ext, filename)
elist = undefs.keys()
elist.sort()
for ext in elist:
print ext + ':'
flist = undefs[ext]
flist.sort()
for filename in flist:
print '\t' + filename
# Print warning messages about names defined in more than one file.
#
def warndups():
savestdout = sys.stdout
sys.stdout = sys.stderr
names = def2file.keys()
names.sort()
for name in names:
if len(def2file[name]) > 1:
print 'warning:', name, 'multiply defined:',
print flat(def2file[name])
sys.stdout = savestdout
# Main program
#
def main():
try:
optlist, args = getopt.getopt(sys.argv[1:], 'cdu')
except getopt.error:
sys.stdout = sys.stderr
print 'Usage:', os.path.basename(sys.argv[0]),
print '[-cdu] [file] ...'
print '-c: print callers per objectfile'
print '-d: print callees per objectfile'
print '-u: print usage of undefined symbols'
print 'If none of -cdu is specified, all are assumed.'
print 'Use "nm -o" to generate the input (on IRIX: "nm -Bo"),'
print 'e.g.: nm -o /lib/libc.a | objgraph'
return 1
optu = optc = optd = 0
for opt, void in optlist:
if opt == '-u':
optu = 1
elif opt == '-c':
optc = 1
elif opt == '-d':
optd = 1
if optu == optc == optd == 0:
optu = optc = optd = 1
if not args:
args = ['-']
for filename in args:
if filename == '-':
readinput(sys.stdin)
else:
readinput(open(filename, 'r'))
#
warndups()
#
more = (optu + optc + optd > 1)
if optd:
if more:
print '---------------All callees------------------'
printcallee()
if optu:
if more:
print '---------------Undefined callees------------'
printundef()
if optc:
if more:
print '---------------All Callers------------------'
printcaller()
return 0
# Call the main program.
# Use its return value as exit status.
# Catch interrupts to avoid stack trace.
#
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|
auduny/home-assistant
|
refs/heads/dev
|
homeassistant/util/json.py
|
12
|
"""JSON utility functions."""
import logging
from typing import Union, List, Dict, Optional
import json
import os
import tempfile
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
class SerializationError(HomeAssistantError):
"""Error serializing the data to JSON."""
class WriteError(HomeAssistantError):
"""Error writing the data."""
def load_json(filename: str, default: Union[List, Dict, None] = None) \
-> Union[List, Dict]:
"""Load JSON data from a file and return as dict or list.
Defaults to returning empty dict if file is not found.
"""
try:
with open(filename, encoding='utf-8') as fdesc:
return json.loads(fdesc.read()) # type: ignore
except FileNotFoundError:
# This is not a fatal error
_LOGGER.debug('JSON file not found: %s', filename)
except ValueError as error:
_LOGGER.exception('Could not parse JSON content: %s', filename)
raise HomeAssistantError(error)
except OSError as error:
_LOGGER.exception('JSON file reading failed: %s', filename)
raise HomeAssistantError(error)
return {} if default is None else default
def save_json(filename: str, data: Union[List, Dict],
private: bool = False, *,
encoder: Optional[json.JSONEncoder] = None) -> None:
"""Save JSON data to a file.
    Raises SerializationError or WriteError when saving fails.
"""
tmp_filename = ""
tmp_path = os.path.split(filename)[0]
try:
json_data = json.dumps(data, sort_keys=True, indent=4, cls=encoder)
# Modern versions of Python tempfile create this file with mode 0o600
with tempfile.NamedTemporaryFile(mode="w", encoding='utf-8',
dir=tmp_path, delete=False) as fdesc:
fdesc.write(json_data)
tmp_filename = fdesc.name
if not private:
os.chmod(tmp_filename, 0o644)
os.replace(tmp_filename, filename)
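        # The atomic os.replace above guarantees readers never observe a
        # partially written file.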
except TypeError as error:
_LOGGER.exception('Failed to serialize to JSON: %s',
filename)
raise SerializationError(error)
except OSError as error:
_LOGGER.exception('Saving JSON file failed: %s',
filename)
raise WriteError(error)
finally:
if os.path.exists(tmp_filename):
try:
os.remove(tmp_filename)
except OSError as err:
# If we are cleaning up then something else went wrong, so
# we should suppress likely follow-on errors in the cleanup
_LOGGER.error("JSON replacement cleanup failed: %s", err)
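# Minimal usage sketch (hypothetical path):
#   save_json('/tmp/demo.json', {'answer': 42})
#   assert load_json('/tmp/demo.json') == {'answer': 42}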
|
netfirms/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/appraisal_template_goal/appraisal_template_goal.py
|
121
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AppraisalTemplateGoal(Document):
pass
|
molobrakos/home-assistant
|
refs/heads/master
|
homeassistant/components/moon/__init__.py
|
36
|
"""The moon component."""
|
aeischeid/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/old-tests/webdriver/command_contexts/__init__.py
|
12133432
| |
RobertWWong/WebDev
|
refs/heads/master
|
djangoApp/dasDjangoApp/firstDjango/firstDjango/__init__.py
|
12133432
| |
gidj/pyLinkedList
|
refs/heads/master
|
__init__.py
|
12133432
| |
double12gzh/nova
|
refs/heads/master
|
nova/tests/unit/scheduler/weights/__init__.py
|
12133432
| |
cecep-edu/edx-platform
|
refs/heads/eucalyptus.2
|
cms/djangoapps/models/settings/__init__.py
|
12133432
| |
justin-ho/passwd-mng
|
refs/heads/master
|
pycrypto-2.6.1/lib/Crypto/Util/asn1.py
|
122
|
# -*- coding: ascii -*-
#
# Util/asn1.py : Minimal support for ASN.1 DER binary encoding.
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from Crypto.Util.number import long_to_bytes, bytes_to_long
import sys
from Crypto.Util.py3compat import *
__all__ = [ 'DerObject', 'DerInteger', 'DerOctetString', 'DerNull', 'DerSequence', 'DerObjectId' ]
class DerObject:
"""Base class for defining a single DER object.
Instantiate this class ONLY when you have to decode a DER element.
"""
# Known TAG types
typeTags = { 'SEQUENCE': 0x30, 'BIT STRING': 0x03, 'INTEGER': 0x02,
'OCTET STRING': 0x04, 'NULL': 0x05, 'OBJECT IDENTIFIER': 0x06 }
def __init__(self, ASN1Type=None, payload=b('')):
"""Initialize the DER object according to a specific type.
The ASN.1 type is either specified as the ASN.1 string (e.g.
'SEQUENCE'), directly with its numerical tag or with no tag
at all (None)."""
if isInt(ASN1Type) or ASN1Type is None:
self.typeTag = ASN1Type
else:
if len(ASN1Type)==1:
self.typeTag = ord(ASN1Type)
else:
self.typeTag = self.typeTags.get(ASN1Type)
self.payload = payload
def isType(self, ASN1Type):
return self.typeTags[ASN1Type]==self.typeTag
def _lengthOctets(self, payloadLen):
"""Return a byte string that encodes the given payload length (in
bytes) in a format suitable for a DER length tag (L).
"""
if payloadLen>127:
encoding = long_to_bytes(payloadLen)
return bchr(len(encoding)+128) + encoding
return bchr(payloadLen)
def encode(self):
"""Return a complete DER element, fully encoded as a TLV."""
return bchr(self.typeTag) + self._lengthOctets(len(self.payload)) + self.payload
def _decodeLen(self, idx, der):
"""Given a (part of a) DER element, and an index to the first byte of
a DER length tag (L), return a tuple with the payload size,
and the index of the first byte of the such payload (V).
Raises a ValueError exception if the DER length is invalid.
Raises an IndexError exception if the DER element is too short.
"""
length = bord(der[idx])
if length<=127:
return (length,idx+1)
payloadLength = bytes_to_long(der[idx+1:idx+1+(length & 0x7F)])
if payloadLength<=127:
raise ValueError("Not a DER length tag.")
return (payloadLength, idx+1+(length & 0x7F))
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete DER element, and re-initializes this
object with it.
@param derEle A complete DER element. It must start with a DER T
tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
Raises a ValueError exception in case of parsing errors.
Raises an IndexError exception if the DER element is too short.
"""
try:
self.typeTag = bord(derEle[0])
if (self.typeTag & 0x1F)==0x1F:
raise ValueError("Unsupported DER tag")
(length,idx) = self._decodeLen(1, derEle)
if noLeftOvers and len(derEle) != (idx+length):
raise ValueError("Not a DER structure")
self.payload = derEle[idx:idx+length]
except IndexError:
raise ValueError("Not a valid DER SEQUENCE.")
return idx+length
class DerInteger(DerObject):
def __init__(self, value = 0):
"""Class to model an INTEGER DER element.
Limitation: only non-negative values are supported.
"""
DerObject.__init__(self, 'INTEGER')
self.value = value
def encode(self):
"""Return a complete INTEGER DER element, fully encoded as a TLV."""
self.payload = long_to_bytes(self.value)
if bord(self.payload[0])>127:
self.payload = bchr(0x00) + self.payload
return DerObject.encode(self)
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete INTEGER DER element, and re-initializes this
object with it.
@param derEle A complete INTEGER DER element. It must start with a DER
INTEGER tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
Raises a ValueError exception if the DER element is not a
valid non-negative INTEGER.
Raises an IndexError exception if the DER element is too short.
"""
tlvLength = DerObject.decode(self, derEle, noLeftOvers)
if self.typeTag!=self.typeTags['INTEGER']:
raise ValueError ("Not a DER INTEGER.")
if bord(self.payload[0])>127:
raise ValueError ("Negative INTEGER.")
self.value = bytes_to_long(self.payload)
return tlvLength
class DerSequence(DerObject):
"""Class to model a SEQUENCE DER element.
    This object behaves like a dynamic Python sequence.
Sub-elements that are INTEGERs, look like Python integers.
Any other sub-element is a binary string encoded as the complete DER
sub-element (TLV).
"""
def __init__(self, startSeq=None):
"""Initialize the SEQUENCE DER object. Always empty
initially."""
DerObject.__init__(self, 'SEQUENCE')
        if startSeq is None:
self._seq = []
else:
self._seq = startSeq
## A few methods to make it behave like a python sequence
def __delitem__(self, n):
del self._seq[n]
def __getitem__(self, n):
return self._seq[n]
def __setitem__(self, key, value):
self._seq[key] = value
def __setslice__(self,i,j,sequence):
self._seq[i:j] = sequence
def __delslice__(self,i,j):
del self._seq[i:j]
def __getslice__(self, i, j):
return self._seq[max(0, i):max(0, j)]
def __len__(self):
return len(self._seq)
def append(self, item):
return self._seq.append(item)
def hasInts(self):
"""Return the number of items in this sequence that are numbers."""
return len(filter(isInt, self._seq))
def hasOnlyInts(self):
"""Return True if all items in this sequence are numbers."""
return self._seq and self.hasInts()==len(self._seq)
def encode(self):
"""Return the DER encoding for the ASN.1 SEQUENCE, containing
the non-negative integers and longs added to this object.
Limitation: Raises a ValueError exception if it some elements
in the sequence are neither Python integers nor complete DER INTEGERs.
"""
self.payload = b('')
for item in self._seq:
try:
self.payload += item
except:
try:
self.payload += DerInteger(item).encode()
except:
raise ValueError("Trying to DER encode an unknown object")
return DerObject.encode(self)
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete SEQUENCE DER element, and re-initializes this
object with it.
@param derEle A complete SEQUENCE DER element. It must start with a DER
SEQUENCE tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
DER INTEGERs are decoded into Python integers. Any other DER
element is not decoded. Its validity is not checked.
Raises a ValueError exception if the DER element is not a
valid DER SEQUENCE.
Raises an IndexError exception if the DER element is too short.
"""
self._seq = []
try:
tlvLength = DerObject.decode(self, derEle, noLeftOvers)
if self.typeTag!=self.typeTags['SEQUENCE']:
raise ValueError("Not a DER SEQUENCE.")
# Scan one TLV at once
idx = 0
while idx<len(self.payload):
typeTag = bord(self.payload[idx])
if typeTag==self.typeTags['INTEGER']:
newInteger = DerInteger()
idx += newInteger.decode(self.payload[idx:])
self._seq.append(newInteger.value)
else:
itemLen,itemIdx = self._decodeLen(idx+1,self.payload)
self._seq.append(self.payload[idx:itemIdx+itemLen])
idx = itemIdx + itemLen
except IndexError:
raise ValueError("Not a valid DER SEQUENCE.")
return tlvLength
class DerOctetString(DerObject):
def __init__(self, value = b('')):
DerObject.__init__(self, 'OCTET STRING')
self.payload = value
def decode(self, derEle, noLeftOvers=0):
        p = DerObject.decode(self, derEle, noLeftOvers)
if not self.isType("OCTET STRING"):
raise ValueError("Not a valid OCTET STRING.")
return p
class DerNull(DerObject):
def __init__(self):
DerObject.__init__(self, 'NULL')
class DerObjectId(DerObject):
def __init__(self):
DerObject.__init__(self, 'OBJECT IDENTIFIER')
def decode(self, derEle, noLeftOvers=0):
        p = DerObject.decode(self, derEle, noLeftOvers)
if not self.isType("OBJECT IDENTIFIER"):
raise ValueError("Not a valid OBJECT IDENTIFIER.")
return p
def isInt(x):
    """Duck-typing check: return 1 if x behaves like a number
    (supports addition with an int), 0 otherwise."""
    test = 0
    try:
        test += x
    except TypeError:
        return 0
    return 1
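# Editor's note: a minimal, hedged round-trip demo of the classes above; it
# assumes the module-level byte helpers (b, bchr, bord) and the
# long_to_bytes/bytes_to_long functions used earlier in this file.
def _der_sequence_roundtrip_demo():
    seq = DerSequence()
    seq.append(0x10)                # plain integers become DER INTEGERs
    seq.append(DerNull().encode())  # pre-encoded TLVs are embedded verbatim
    blob = seq.encode()             # complete SEQUENCE TLV as a byte string
    parsed = DerSequence()
    parsed.decode(blob)
    return parsed[0] == 0x10        # the INTEGER comes back as a Python int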
|
kaplun/invenio
|
refs/heads/master
|
modules/websubmit/lib/functions/Send_APP_Mail.py
|
24
|
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Send_APP_Mail
## This function sends an email informing the original
## submitter of a document that the referee has approved/
## rejected the document. The email is also sent to the
## referee for checking.
## Author: T.Baron
## PARAMETERS:
## newrnin: name of the file containing the 2nd reference
## addressesAPP: email addresses to which the email will
## be sent (additionally to the author)
## categformatAPP: variable needed to derive the addresses
## mentioned above
import os
import re
from invenio.config import CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_CERN_SITE, \
CFG_SITE_RECORD
from invenio.access_control_admin import acc_get_role_users, acc_get_role_id
from invenio.dbquery import run_sql
from invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.errorlib import register_exception
from invenio.search_engine import print_record
from invenio.mailutils import scheduled_send_email
from invenio.bibtask import bibtask_allocate_sequenceid
## The field in which to search for the record submitter/owner's email address:
if CFG_CERN_SITE:
## This is a CERN site - we use 859__f for submitter/record owner's email:
CFG_WEBSUBMIT_RECORD_OWNER_EMAIL = "859__f"
else:
## Non-CERN site. Use 8560_f for submitter/record owner's email:
CFG_WEBSUBMIT_RECORD_OWNER_EMAIL = "8560_f"
def Send_APP_Mail (parameters, curdir, form, user_info=None):
"""
    This function sends an email informing the original submitter of a
    document that the referee has approved or rejected the document. The
    email is also sent to the referee for checking.
Parameters:
       * addressesAPP: email addresses of the people who will receive
         this email (comma separated list). This parameter may contain
         the <CATEG> string, in which case the variable computed from
         the [categformatAPP] parameter replaces this string.
         e.g.: "<CATEG>-email@cern.ch"
       * categformatAPP: contains a regular expression used to compute
         the category of the document, given the reference of the
         document.
         e.g.: if [categformatAPP]="TEST-<CATEG>-.*" and the reference
         of the document is "TEST-CATEGORY1-2001-001", then the computed
         category equals "CATEGORY1" (a small demo of this mechanism
         appears at the end of this file)
* newrnin: Name of the file containing the 2nd reference of the
approved document (if any).
* edsrn: Name of the file containing the reference of the
approved document.
"""
    global titlevalue, authorvalue, emailvalue, sysno, rn
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
sequence_id = bibtask_allocate_sequenceid(curdir)
doctype = form['doctype']
titlevalue = titlevalue.replace("\n"," ")
authorvalue = authorvalue.replace("\n","; ")
# variables declaration
categformat = parameters['categformatAPP']
otheraddresses = parameters['addressesAPP']
newrnpath = parameters['newrnin']
## Get the name of the decision file:
try:
decision_filename = parameters['decision_file']
except KeyError:
decision_filename = ""
## Get the name of the comments file:
try:
comments_filename = parameters['comments_file']
except KeyError:
comments_filename = ""
## Now try to read the comments from the comments_filename:
if comments_filename in (None, "", "NULL"):
## We don't have a name for the comments file.
## For backward compatibility reasons, try to read the comments from
## a file called 'COM' in curdir:
if os.path.exists("%s/COM" % curdir):
try:
fh_comments = open("%s/COM" % curdir, "r")
comment = fh_comments.read()
fh_comments.close()
except IOError:
## Unable to open the comments file
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open " \
"comments file [%s/COM] but was " \
"unable to." % curdir
register_exception(prefix=exception_prefix)
comment = ""
else:
comment = comment.strip()
else:
comment = ""
else:
## Try to read the comments from the comments file:
if os.path.exists("%s/%s" % (curdir, comments_filename)):
try:
fh_comments = open("%s/%s" % (curdir, comments_filename), "r")
comment = fh_comments.read()
fh_comments.close()
except IOError:
## Oops, unable to open the comments file.
comment = ""
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open comments " \
"file [%s/%s] but was unable to." \
% (curdir, comments_filename)
register_exception(prefix=exception_prefix)
else:
comment = comment.strip()
else:
comment = ""
## Now try to read the decision from the decision_filename:
if decision_filename in (None, "", "NULL"):
## We don't have a name for the decision file.
## For backward compatibility reasons, try to read the decision from
## a file called 'decision' in curdir:
if os.path.exists("%s/decision" % curdir):
try:
fh_decision = open("%s/decision" % curdir, "r")
decision = fh_decision.read()
fh_decision.close()
except IOError:
## Unable to open the decision file
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open " \
"decision file [%s/decision] but was " \
"unable to." % curdir
register_exception(prefix=exception_prefix)
decision = ""
else:
decision = decision.strip()
else:
decision = ""
else:
## Try to read the decision from the decision file:
try:
fh_decision = open("%s/%s" % (curdir, decision_filename), "r")
decision = fh_decision.read()
fh_decision.close()
except IOError:
## Oops, unable to open the decision file.
decision = ""
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open decision " \
"file [%s/%s] but was unable to." \
% (curdir, decision_filename)
register_exception(prefix=exception_prefix)
else:
decision = decision.strip()
if os.path.exists("%s/%s" % (curdir,newrnpath)):
fp = open("%s/%s" % (curdir,newrnpath) , "r")
newrn = fp.read()
fp.close()
else:
newrn = ""
# Document name
res = run_sql("SELECT ldocname FROM sbmDOCTYPE WHERE sdocname=%s", (doctype,))
docname = res[0][0]
# retrieve category
categformat = categformat.replace("<CATEG>", "([^-]*)")
m_categ_search = re.match(categformat, rn)
if m_categ_search is not None:
if len(m_categ_search.groups()) > 0:
## Found a match for the category of this document. Get it:
category = m_categ_search.group(1)
else:
## This document has no category.
category = "unknown"
else:
category = "unknown"
## Get the referee email address:
if CFG_CERN_SITE:
        ## The referee system at CERN now works with listbox membership.
## List names should take the format
## "service-cds-referee-doctype-category@cern.ch"
## Make sure that your list exists!
## FIXME - to be replaced by a mailing alias in webaccess in the
## future.
referee_listname = "service-cds-referee-%s" % doctype.lower()
if category != "":
referee_listname += "-%s" % category.lower()
referee_listname += "@cern.ch"
addresses = referee_listname
else:
# Build referee's email address
refereeaddress = ""
# Try to retrieve the referee's email from the referee's database
for user in acc_get_role_users(acc_get_role_id("referee_%s_%s" % (doctype,category))):
refereeaddress += user[1] + ","
# And if there is a general referee
for user in acc_get_role_users(acc_get_role_id("referee_%s_*" % doctype)):
refereeaddress += user[1] + ","
refereeaddress = re.sub(",$","",refereeaddress)
# Creation of the mail for the referee
otheraddresses = otheraddresses.replace("<CATEG>",category)
addresses = ""
if refereeaddress != "":
addresses = refereeaddress + ","
if otheraddresses != "":
addresses += otheraddresses
else:
addresses = re.sub(",$","",addresses)
## Add the record's submitter(s) into the list of recipients:
## Get the email address(es) of the record submitter(s)/owner(s) from
## the record itself:
record_owners = print_record(sysno, 'tm', \
[CFG_WEBSUBMIT_RECORD_OWNER_EMAIL]).strip()
if record_owners != "":
record_owners_list = record_owners.split("\n")
record_owners_list = [email.lower().strip() \
for email in record_owners_list]
else:
        # If the record owner cannot be retrieved from the metadata
        # (in case the record has not been inserted yet),
        # try to use the global variable emailvalue:
try:
record_owners_list = [emailvalue]
except NameError:
record_owners_list = []
record_owners = ",".join([owner for owner in record_owners_list])
if record_owners != "":
addresses += ",%s" % record_owners
if decision == "approve":
mailtitle = "%s has been approved" % rn
mailbody = "The %s %s has been approved." % (docname,rn)
mailbody += "\nIt will soon be accessible here:\n\n<%s/%s/%s>" % (CFG_SITE_URL,CFG_SITE_RECORD,sysno)
else:
mailtitle = "%s has been rejected" % rn
mailbody = "The %s %s has been rejected." % (docname,rn)
if rn != newrn and decision == "approve" and newrn != "":
mailbody += "\n\nIts new reference number is: %s" % newrn
mailbody += "\n\nTitle: %s\n\nAuthor(s): %s\n\n" % (titlevalue,authorvalue)
if comment != "":
mailbody += "Comments from the referee:\n%s\n" % comment
# Send mail to referee if any recipients or copy to admin
if addresses or CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN:
scheduled_send_email(FROMADDR, addresses, mailtitle, mailbody,
copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN,
other_bibtasklet_arguments=['-I', str(sequence_id)])
return ""
|
tashaxe/Red-DiscordBot
|
refs/heads/develop
|
lib/youtube_dl/extractor/lnkgo.py
|
33
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class LnkGoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lnkgo\.alfa\.lt/visi-video/(?P<show>[^/]+)/ziurek-(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://lnkgo.alfa.lt/visi-video/yra-kaip-yra/ziurek-yra-kaip-yra-162',
'info_dict': {
'id': '46712',
'ext': 'mp4',
'title': 'Yra kaip yra',
'upload_date': '20150107',
'description': 'md5:d82a5e36b775b7048617f263a0e3475e',
'age_limit': 7,
'duration': 3019,
'thumbnail': r're:^https?://.*\.jpg$'
},
'params': {
'skip_download': True, # HLS download
},
}, {
'url': 'http://lnkgo.alfa.lt/visi-video/aktualai-pratesimas/ziurek-nerdas-taiso-kompiuteri-2',
'info_dict': {
'id': '47289',
'ext': 'mp4',
'title': 'Nėrdas: Kompiuterio Valymas',
'upload_date': '20150113',
'description': 'md5:7352d113a242a808676ff17e69db6a69',
'age_limit': 18,
'duration': 346,
'thumbnail': r're:^https?://.*\.jpg$'
},
'params': {
'skip_download': True, # HLS download
},
}]
_AGE_LIMITS = {
'N-7': 7,
'N-14': 14,
'S': 18,
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(
url, display_id, 'Downloading player webpage')
video_id = self._search_regex(
r'data-ep="([^"]+)"', webpage, 'video ID')
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
upload_date = unified_strdate(self._search_regex(
r'class="[^"]*meta-item[^"]*air-time[^"]*">.*?<strong>([^<]+)</strong>', webpage, 'upload date', fatal=False))
thumbnail_w = int_or_none(
self._og_search_property('image:width', webpage, 'thumbnail width', fatal=False))
thumbnail_h = int_or_none(
self._og_search_property('image:height', webpage, 'thumbnail height', fatal=False))
thumbnail = {
'url': self._og_search_thumbnail(webpage),
}
if thumbnail_w and thumbnail_h:
thumbnail.update({
'width': thumbnail_w,
'height': thumbnail_h,
})
config = self._parse_json(self._search_regex(
r'episodePlayer\((\{.*?\}),\s*\{', webpage, 'sources'), video_id)
if config.get('pGeo'):
self.report_warning(
'This content might not be available in your country due to copyright reasons')
formats = [{
'format_id': 'hls',
'ext': 'mp4',
'url': config['EpisodeVideoLink_HLS'],
}]
m = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<play_path>.+)$', config['EpisodeVideoLink'])
if m:
formats.append({
'format_id': 'rtmp',
'ext': 'flv',
'url': m.group('url'),
'play_path': m.group('play_path'),
'page_url': url,
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnails': [thumbnail],
'duration': int_or_none(config.get('VideoTime')),
'description': description,
'age_limit': self._AGE_LIMITS.get(config.get('PGRating'), 0),
'upload_date': upload_date,
}
|
sphoebs/rockshell
|
refs/heads/master
|
mapreduce/third_party/__init__.py
|
127
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/util/images.py
|
4
|
# -*- coding: utf-8 -*-
"""
sphinx.util.images
~~~~~~~~~~~~~~~~~~
Image utility functions for Sphinx.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import imghdr
import imagesize
from os import path
try:
from PIL import Image # check for the Python Imaging Library
except ImportError:
try:
import Image
except ImportError:
Image = None
mime_suffixes = {
'.pdf': 'application/pdf',
'.svg': 'image/svg+xml',
'.svgz': 'image/svg+xml',
}
def get_image_size(filename):
try:
size = imagesize.get(filename)
if size[0] == -1:
size = None
if size is None and Image: # fallback to PIL
im = Image.open(filename)
size = im.size
try:
im.fp.close()
except Exception:
pass
return size
    except Exception:
        # be tolerant of unreadable or corrupt image files
        return None
def guess_mimetype(filename):
_, ext = path.splitext(filename)
if ext in mime_suffixes:
return mime_suffixes[ext]
else:
with open(filename, 'rb') as f:
imgtype = imghdr.what(f)
if imgtype:
return 'image/' + imgtype
return None
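# Editor's note: a hedged usage sketch of the two helpers above; the file
# name is hypothetical.
def _image_utils_demo(filename='example.png'):
    size = get_image_size(filename)      # (width, height), or None on failure
    mimetype = guess_mimetype(filename)  # e.g. 'image/png', or None if unknown
    return size, mimetype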
|
OpenWinCon/OpenWinNet
|
refs/heads/master
|
web-gui/connector/version.py
|
13
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""MySQL Connector/Python version information
The file version.py gets installed and is available after installation
as mysql.connector.version.
"""
VERSION = (2, 1, 3, '', 0)
if VERSION[3] and VERSION[4]:
VERSION_TEXT = '{0}.{1}.{2}{3}{4}'.format(*VERSION)
else:
VERSION_TEXT = '{0}.{1}.{2}'.format(*VERSION[0:3])
LICENSE = 'GPLv2 with FOSS License Exception'
EDITION = '' # Added in package names, after the version
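# Editor's note: a hedged illustration of the VERSION_TEXT rule above. The
# current tuple has empty pre-release fields, so only the first three
# components appear; a hypothetical tuple such as (2, 1, 3, 'a', 1) would
# format as '2.1.3a1' instead.
def _version_text_demo():
    return VERSION_TEXT == '2.1.3'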
|
windelbouwman/ppci-mirror
|
refs/heads/master
|
tools/dbgui/dbguigdb.py
|
1
|
#!/usr/bin/python
import sys
import argparse
import logging
from ppci import api
from ppci.common import logformat
from ppci.binutils.dbg import Debugger
from ppci.binutils.dbg.gdb.client import GdbDebugDriver
from ppci.binutils.dbg.gdb.transport import TCP
from dbgui import DebugUi, QtWidgets
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'obj', help='The object file with the debug information')
parser.add_argument('--port', help='gdb port', type=int, default=1234)
args = parser.parse_args()
logging.basicConfig(format=logformat, level=logging.DEBUG)
app = QtWidgets.QApplication(sys.argv)
obj = api.get_object(args.obj)
arch = api.get_arch(obj.arch)
transport = TCP(args.port)
debug_driver = GdbDebugDriver(arch, transport=transport)
debug_driver.connect()
debugger = Debugger(arch, debug_driver)
debugger.load_symbols(obj, validate=False)
ui = DebugUi(debugger)
ui.memview.address = obj.get_image('ram').address
ui.open_all_source_files()
ui.show()
try:
app.exec_()
finally:
debugger.shutdown()
debug_driver.disconnect()
|
secdev/scapy
|
refs/heads/master
|
scapy/contrib/lldp.py
|
2
|
# scapy.contrib.description = Link Layer Discovery Protocol (LLDP)
# scapy.contrib.status = loads
"""
LLDP - Link Layer Discovery Protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: Thomas Tannhaeuser, hecke@naberius.de
:license: GPLv2
This module is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
:description:
This module provides Scapy layers for the LLDP protocol.
normative references:
- IEEE 802.1AB 2016 - LLDP protocol, topology and MIB description
:TODO:
- | organization specific TLV e.g. ProfiNet
| (see LLDPDUGenericOrganisationSpecific for a starting point)
- Ignore everything after EndofLLDPDUTLV
:NOTES:
- you can find the layer configuration options at the end of this file
- default configuration enforces standard conform:
* | frame structure
| (ChassisIDTLV/PortIDTLV/TimeToLiveTLV/...)
* multiplicity of TLVs (if given by the standard)
* min sizes of strings used by the TLVs
- conf.contribs['LLDP'].strict_mode_disable() -> disable strict mode
"""
from scapy.config import conf
from scapy.error import Scapy_Exception
from scapy.layers.l2 import Ether, Dot1Q
from scapy.fields import MACField, IPField, BitField, \
StrLenField, ByteEnumField, BitEnumField, \
EnumField, ThreeBytesField, BitFieldLenField, \
ShortField, XStrLenField, ByteField, ConditionalField, \
MultipleTypeField
from scapy.packet import Packet, bind_layers
from scapy.modules.six.moves import range
from scapy.data import ETHER_TYPES
from scapy.compat import orb
LLDP_NEAREST_BRIDGE_MAC = '01:80:c2:00:00:0e'
LLDP_NEAREST_NON_TPMR_BRIDGE_MAC = '01:80:c2:00:00:03'
LLDP_NEAREST_CUSTOMER_BRIDGE_MAC = '01:80:c2:00:00:00'
LLDP_ETHER_TYPE = 0x88cc
ETHER_TYPES[LLDP_ETHER_TYPE] = 'LLDP'
class LLDPInvalidFrameStructure(Scapy_Exception):
"""
    basic frame structure does not conform to the standard
    (missing TLV, invalid order or multiplicity)
"""
pass
class LLDPMissingLowerLayer(Scapy_Exception):
"""
first layer below first LLDPDU must be Ethernet or Dot1q
"""
pass
class LLDPInvalidTLVCount(Scapy_Exception):
"""
invalid number of entries for a specific TLV type
"""
pass
class LLDPInvalidLengthField(Scapy_Exception):
"""
invalid value of length field
"""
pass
class LLDPDU(Packet):
"""
base class for all LLDP data units
"""
TYPES = {
0x00: 'end of LLDPDU',
0x01: 'chassis id',
0x02: 'port id',
0x03: 'time to live',
0x04: 'port description',
0x05: 'system name',
0x06: 'system description',
0x07: 'system capabilities',
0x08: 'management address',
range(0x09, 0x7e): 'reserved - future standardization',
127: 'organisation specific TLV'
}
DOT1Q_HEADER_LEN = 4
ETHER_HEADER_LEN = 14
ETHER_FSC_LEN = 4
ETHER_FRAME_MIN_LEN = 64
LAYER_STACK = []
LAYER_MULTIPLICITIES = {}
def guess_payload_class(self, payload):
        # The TLV type is a 7-bit field in the upper bits of the first
        # octet, so shifting right by one (integer division by 2) extracts it.
try:
lldpdu_tlv_type = orb(payload[0]) // 2
return LLDPDU_CLASS_TYPES.get(lldpdu_tlv_type, conf.raw_layer)
except IndexError:
return conf.raw_layer
@staticmethod
def _dot1q_headers_size(layer):
"""
calculate size of lower dot1q layers (if present)
:param layer: the layer to start at
:return: size of vlan headers, layer below lowest vlan header
"""
vlan_headers_size = 0
under_layer = layer
while under_layer and isinstance(under_layer, Dot1Q):
vlan_headers_size += LLDPDU.DOT1Q_HEADER_LEN
under_layer = under_layer.underlayer
return vlan_headers_size, under_layer
def post_build(self, pkt, pay):
under_layer = self.underlayer
if under_layer is None:
if conf.contribs['LLDP'].strict_mode():
raise LLDPMissingLowerLayer('No lower layer (Ethernet '
'or Dot1Q) provided.')
else:
return pkt + pay
if isinstance(under_layer, LLDPDU):
return pkt + pay
frame_size, under_layer = LLDPDU._dot1q_headers_size(under_layer)
if not under_layer or not isinstance(under_layer, Ether):
if conf.contribs['LLDP'].strict_mode():
raise LLDPMissingLowerLayer('No Ethernet layer provided.')
else:
return pkt + pay
frame_size += LLDPDU.ETHER_HEADER_LEN
frame_size += len(pkt) + len(pay) + LLDPDU.ETHER_FSC_LEN
if frame_size < LLDPDU.ETHER_FRAME_MIN_LEN:
return pkt + pay + b'\x00' * (LLDPDU.ETHER_FRAME_MIN_LEN - frame_size) # noqa: E501
return pkt + pay
@staticmethod
def _frame_structure_check(structure_description):
"""
        check if the structure of the frame conforms to the basic
        frame structure defined by the standard
        :param structure_description: string list reflecting the LLDP msg structure
"""
standard_frame_structure = [LLDPDUChassisID.__name__,
LLDPDUPortID.__name__,
LLDPDUTimeToLive.__name__,
'<...>']
if len(structure_description) < 3:
raise LLDPInvalidFrameStructure(
'Invalid frame structure.\ngot: {}\nexpected: '
'{}'.format(' '.join(structure_description),
' '.join(standard_frame_structure)))
for idx, layer_name in enumerate(standard_frame_structure):
if layer_name == '<...>':
break
if layer_name != structure_description[idx]:
raise LLDPInvalidFrameStructure(
'Invalid frame structure.\ngot: {}\nexpected: '
'{}'.format(' '.join(structure_description),
' '.join(standard_frame_structure)))
@staticmethod
def _tlv_multiplicities_check(tlv_type_count):
"""
check if multiplicity of present TLVs conforms to the standard
        :param tlv_type_count: dict containing the count per TLV type
"""
# * : 0..n, 1 : one and only one.
standard_multiplicities = {
LLDPDUEndOfLLDPDU.__name__: '*',
LLDPDUChassisID.__name__: 1,
LLDPDUPortID.__name__: 1,
LLDPDUTimeToLive.__name__: 1,
            LLDPDUPortDescription.__name__: '*',
            LLDPDUSystemName.__name__: '*',
            LLDPDUSystemDescription.__name__: '*',
            LLDPDUSystemCapabilities.__name__: '*',
            LLDPDUManagementAddress.__name__: '*'
}
for tlv_type_name in standard_multiplicities:
standard_tlv_multiplicity = \
standard_multiplicities[tlv_type_name]
if standard_tlv_multiplicity == '*':
continue
try:
if tlv_type_count[tlv_type_name] != standard_tlv_multiplicity:
raise LLDPInvalidTLVCount(
'Invalid number of entries for TLV type '
'{} - expected {} entries, got '
'{}'.format(tlv_type_name,
standard_tlv_multiplicity,
tlv_type_count[tlv_type_name]))
except KeyError:
raise LLDPInvalidTLVCount('Missing TLV layer of type '
'{}.'.format(tlv_type_name))
def pre_dissect(self, s):
if conf.contribs['LLDP'].strict_mode():
if self.__class__.__name__ == 'LLDPDU':
LLDPDU.LAYER_STACK = []
LLDPDU.LAYER_MULTIPLICITIES = {}
else:
LLDPDU.LAYER_STACK.append(self.__class__.__name__)
try:
LLDPDU.LAYER_MULTIPLICITIES[self.__class__.__name__] += 1
except KeyError:
LLDPDU.LAYER_MULTIPLICITIES[self.__class__.__name__] = 1
return s
def dissection_done(self, pkt):
if self.__class__.__name__ == 'LLDPDU' and \
conf.contribs['LLDP'].strict_mode():
LLDPDU._frame_structure_check(LLDPDU.LAYER_STACK)
LLDPDU._tlv_multiplicities_check(LLDPDU.LAYER_MULTIPLICITIES)
super(LLDPDU, self).dissection_done(pkt)
def _check(self):
"""Overwrited by LLDPU objects"""
pass
def post_dissect(self, s):
self._check()
return super(LLDPDU, self).post_dissect(s)
def do_build(self):
self._check()
return super(LLDPDU, self).do_build()
def _ldp_id_adjustlen(pkt, x):
"""Return the length of the `id` field,
according to its real encoded type"""
f, v = pkt.getfield_and_val('id')
length = f.i2len(pkt, v) + 1
if (isinstance(pkt, LLDPDUPortID) and pkt.subtype == 0x4) or \
(isinstance(pkt, LLDPDUChassisID) and pkt.subtype == 0x5):
# Take the ConditionalField into account
length += 1
return length
class LLDPDUChassisID(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.2 / p. 26
"""
LLDP_CHASSIS_ID_TLV_SUBTYPES = {
0x00: 'reserved',
0x01: 'chassis component',
0x02: 'interface alias',
0x03: 'port component',
0x04: 'MAC address',
0x05: 'network address',
0x06: 'interface name',
0x07: 'locally assigned',
range(0x08, 0xff): 'reserved'
}
SUBTYPE_RESERVED = 0x00
SUBTYPE_CHASSIS_COMPONENT = 0x01
SUBTYPE_INTERFACE_ALIAS = 0x02
SUBTYPE_PORT_COMPONENT = 0x03
SUBTYPE_MAC_ADDRESS = 0x04
SUBTYPE_NETWORK_ADDRESS = 0x05
SUBTYPE_INTERFACE_NAME = 0x06
SUBTYPE_LOCALLY_ASSIGNED = 0x07
fields_desc = [
BitEnumField('_type', 0x01, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='id',
adjust=lambda pkt, x: _ldp_id_adjustlen(pkt, x)),
ByteEnumField('subtype', 0x00, LLDP_CHASSIS_ID_TLV_SUBTYPES),
ConditionalField(
ByteField('family', 0),
lambda pkt: pkt.subtype == 0x05
),
MultipleTypeField([
(
MACField('id', None),
lambda pkt: pkt.subtype == 0x04
),
(
IPField('id', None),
lambda pkt: pkt.subtype == 0x05
),
], StrLenField('id', '', length_from=lambda pkt: 0 if pkt._length is
None else pkt._length - 1)
)
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and not self.id:
raise LLDPInvalidLengthField('id must be >= 1 characters long')
class LLDPDUPortID(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.3 / p. 26
"""
LLDP_PORT_ID_TLV_SUBTYPES = {
0x00: 'reserved',
0x01: 'interface alias',
0x02: 'port component',
0x03: 'MAC address',
0x04: 'network address',
0x05: 'interface name',
0x06: 'agent circuit ID',
0x07: 'locally assigned',
range(0x08, 0xff): 'reserved'
}
SUBTYPE_RESERVED = 0x00
SUBTYPE_INTERFACE_ALIAS = 0x01
SUBTYPE_PORT_COMPONENT = 0x02
SUBTYPE_MAC_ADDRESS = 0x03
SUBTYPE_NETWORK_ADDRESS = 0x04
SUBTYPE_INTERFACE_NAME = 0x05
SUBTYPE_AGENT_CIRCUIT_ID = 0x06
SUBTYPE_LOCALLY_ASSIGNED = 0x07
fields_desc = [
BitEnumField('_type', 0x02, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='id',
adjust=lambda pkt, x: _ldp_id_adjustlen(pkt, x)),
ByteEnumField('subtype', 0x00, LLDP_PORT_ID_TLV_SUBTYPES),
ConditionalField(
ByteField('family', 0),
lambda pkt: pkt.subtype == 0x04
),
MultipleTypeField([
(
MACField('id', None),
lambda pkt: pkt.subtype == 0x03
),
(
IPField('id', None),
lambda pkt: pkt.subtype == 0x04
),
], StrLenField('id', '', length_from=lambda pkt: 0 if pkt._length is
None else pkt._length - 1)
)
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and not self.id:
raise LLDPInvalidLengthField('id must be >= 1 characters long')
class LLDPDUTimeToLive(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.4 / p. 29
"""
fields_desc = [
BitEnumField('_type', 0x03, 7, LLDPDU.TYPES),
BitField('_length', 0x02, 9),
ShortField('ttl', 20)
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and self._length != 2:
raise LLDPInvalidLengthField('length must be 2 - got '
'{}'.format(self._length))
class LLDPDUEndOfLLDPDU(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.1 / p. 26
"""
fields_desc = [
BitEnumField('_type', 0x00, 7, LLDPDU.TYPES),
BitField('_length', 0x00, 9),
]
def extract_padding(self, s):
return '', s
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and self._length != 0:
raise LLDPInvalidLengthField('length must be 0 - got '
'{}'.format(self._length))
class LLDPDUPortDescription(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.5 / p. 29
"""
fields_desc = [
BitEnumField('_type', 0x04, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='description'),
StrLenField('description', '', length_from=lambda pkt: pkt._length)
]
class LLDPDUSystemName(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.6 / p. 30
"""
fields_desc = [
BitEnumField('_type', 0x05, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='system_name'),
StrLenField('system_name', '', length_from=lambda pkt: pkt._length)
]
class LLDPDUSystemDescription(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.7 / p. 31
"""
fields_desc = [
BitEnumField('_type', 0x06, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='description'),
StrLenField('description', '', length_from=lambda pkt: pkt._length)
]
class LLDPDUSystemCapabilities(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.8 / p. 31
"""
fields_desc = [
BitEnumField('_type', 0x07, 7, LLDPDU.TYPES),
BitFieldLenField('_length', 4, 9),
BitField('reserved_5_available', 0, 1),
BitField('reserved_4_available', 0, 1),
BitField('reserved_3_available', 0, 1),
BitField('reserved_2_available', 0, 1),
BitField('reserved_1_available', 0, 1),
BitField('two_port_mac_relay_available', 0, 1),
BitField('s_vlan_component_available', 0, 1),
BitField('c_vlan_component_available', 0, 1),
BitField('station_only_available', 0, 1),
BitField('docsis_cable_device_available', 0, 1),
BitField('telephone_available', 0, 1),
BitField('router_available', 0, 1),
BitField('wlan_access_point_available', 0, 1),
BitField('mac_bridge_available', 0, 1),
BitField('repeater_available', 0, 1),
BitField('other_available', 0, 1),
BitField('reserved_5_enabled', 0, 1),
BitField('reserved_4_enabled', 0, 1),
BitField('reserved_3_enabled', 0, 1),
BitField('reserved_2_enabled', 0, 1),
BitField('reserved_1_enabled', 0, 1),
BitField('two_port_mac_relay_enabled', 0, 1),
BitField('s_vlan_component_enabled', 0, 1),
BitField('c_vlan_component_enabled', 0, 1),
BitField('station_only_enabled', 0, 1),
BitField('docsis_cable_device_enabled', 0, 1),
BitField('telephone_enabled', 0, 1),
BitField('router_enabled', 0, 1),
BitField('wlan_access_point_enabled', 0, 1),
BitField('mac_bridge_enabled', 0, 1),
BitField('repeater_enabled', 0, 1),
BitField('other_enabled', 0, 1),
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode() and self._length != 4:
raise LLDPInvalidLengthField('length must be 4 - got '
'{}'.format(self._length))
class LLDPDUManagementAddress(LLDPDU):
"""
ieee 802.1ab-2016 - sec. 8.5.9 / p. 32
    currently only 0x00..0x1e are used by standards; no way to
    use anything > 0xff, as the management address subtype is only
    one octet wide
see https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xhtml # noqa: E501
"""
IANA_ADDRESS_FAMILY_NUMBERS = {
0x00: 'other',
0x01: 'IPv4',
0x02: 'IPv6',
0x03: 'NSAP',
0x04: 'HDLC',
0x05: 'BBN',
0x06: '802',
0x07: 'E.163',
0x08: 'E.164',
0x09: 'F.69',
0x0a: 'X.121',
0x0b: 'IPX',
0x0c: 'Appletalk',
0x0d: 'Decnet IV',
0x0e: 'Banyan Vines',
0x0f: 'E.164 with NSAP',
0x10: 'DNS',
0x11: 'Distinguished Name',
0x12: 'AS Number',
0x13: 'XTP over IPv4',
0x14: 'XTP over IPv6',
0x15: 'XTP native mode XTP',
0x16: 'Fiber Channel World-Wide Port Name',
0x17: 'Fiber Channel World-Wide Node Name',
0x18: 'GWID',
0x19: 'AFI for L2VPN',
0x1a: 'MPLS-TP Section Endpoint ID',
0x1b: 'MPLS-TP LSP Endpoint ID',
0x1c: 'MPLS-TP Pseudowire Endpoint ID',
0x1d: 'MT IP Multi-Topology IPv4',
0x1e: 'MT IP Multi-Topology IPv6'
}
SUBTYPE_MANAGEMENT_ADDRESS_OTHER = 0x00
SUBTYPE_MANAGEMENT_ADDRESS_IPV4 = 0x01
SUBTYPE_MANAGEMENT_ADDRESS_IPV6 = 0x02
SUBTYPE_MANAGEMENT_ADDRESS_NSAP = 0x03
SUBTYPE_MANAGEMENT_ADDRESS_HDLC = 0x04
SUBTYPE_MANAGEMENT_ADDRESS_BBN = 0x05
SUBTYPE_MANAGEMENT_ADDRESS_802 = 0x06
SUBTYPE_MANAGEMENT_ADDRESS_E_163 = 0x07
SUBTYPE_MANAGEMENT_ADDRESS_E_164 = 0x08
SUBTYPE_MANAGEMENT_ADDRESS_F_69 = 0x09
SUBTYPE_MANAGEMENT_ADDRESS_X_121 = 0x0A
SUBTYPE_MANAGEMENT_ADDRESS_IPX = 0x0B
SUBTYPE_MANAGEMENT_ADDRESS_APPLETALK = 0x0C
SUBTYPE_MANAGEMENT_ADDRESS_DECNET_IV = 0x0D
SUBTYPE_MANAGEMENT_ADDRESS_BANYAN_VINES = 0x0E
SUBTYPE_MANAGEMENT_ADDRESS_E_164_WITH_NSAP = 0x0F
SUBTYPE_MANAGEMENT_ADDRESS_DNS = 0x10
SUBTYPE_MANAGEMENT_ADDRESS_DISTINGUISHED_NAME = 0x11
SUBTYPE_MANAGEMENT_ADDRESS_AS_NUMBER = 0x12
SUBTYPE_MANAGEMENT_ADDRESS_XTP_OVER_IPV4 = 0x13
SUBTYPE_MANAGEMENT_ADDRESS_XTP_OVER_IPV6 = 0x14
SUBTYPE_MANAGEMENT_ADDRESS_XTP_NATIVE_MODE_XTP = 0x15
SUBTYPE_MANAGEMENT_ADDRESS_FIBER_CHANNEL_WORLD_WIDE_PORT_NAME = 0x16
SUBTYPE_MANAGEMENT_ADDRESS_FIBER_CHANNEL_WORLD_WIDE_NODE_NAME = 0x17
SUBTYPE_MANAGEMENT_ADDRESS_GWID = 0x18
SUBTYPE_MANAGEMENT_ADDRESS_AFI_FOR_L2VPN = 0x19
SUBTYPE_MANAGEMENT_ADDRESS_MPLS_TP_SECTION_ENDPOINT_ID = 0x1A
SUBTYPE_MANAGEMENT_ADDRESS_MPLS_TP_LSP_ENDPOINT_ID = 0x1B
SUBTYPE_MANAGEMENT_ADDRESS_MPLS_TP_PSEUDOWIRE_ENDPOINT_ID = 0x1C
SUBTYPE_MANAGEMENT_ADDRESS_MT_IP_MULTI_TOPOLOGY_IPV4 = 0x1D
SUBTYPE_MANAGEMENT_ADDRESS_MT_IP_MULTI_TOPOLOGY_IPV6 = 0x1E
INTERFACE_NUMBERING_SUBTYPES = {
0x01: 'unknown',
0x02: 'ifIndex',
0x03: 'system port number'
}
SUBTYPE_INTERFACE_NUMBER_UNKNOWN = 0x01
SUBTYPE_INTERFACE_NUMBER_IF_INDEX = 0x02
SUBTYPE_INTERFACE_NUMBER_SYSTEM_PORT_NUMBER = 0x03
'''
Note - calculation of _length field::
_length = 1@_management_address_string_length +
1@management_address_subtype +
management_address.len +
1@interface_numbering_subtype +
4@interface_number +
1@_oid_string_length +
object_id.len
'''
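    # Editor's note (worked example, hedged): for an IPv4 management address
    # (4 octets) and an empty object_id, the formula above gives
    #   _length = 1 + 1 + 4 + 1 + 4 + 1 + 0 = 12,
    # matching the adjust lambda below: 8 + len(addr) + len(oid) = 8 + 4 + 0.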
fields_desc = [
BitEnumField('_type', 0x08, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='management_address',
adjust=lambda pkt, x:
8 + len(pkt.management_address) + len(pkt.object_id)),
BitFieldLenField('_management_address_string_length', None, 8,
length_of='management_address',
adjust=lambda pkt, x: len(pkt.management_address) + 1), # noqa: E501
ByteEnumField('management_address_subtype', 0x00,
IANA_ADDRESS_FAMILY_NUMBERS),
XStrLenField('management_address', '',
length_from=lambda pkt: 0
if pkt._management_address_string_length is None else
pkt._management_address_string_length - 1),
ByteEnumField('interface_numbering_subtype',
SUBTYPE_INTERFACE_NUMBER_UNKNOWN,
INTERFACE_NUMBERING_SUBTYPES),
BitField('interface_number', 0, 32),
BitFieldLenField('_oid_string_length', None, 8, length_of='object_id'),
XStrLenField('object_id', '',
length_from=lambda pkt: pkt._oid_string_length),
]
def _check(self):
"""
run layer specific checks
"""
if conf.contribs['LLDP'].strict_mode():
management_address_len = len(self.management_address)
if management_address_len == 0 or management_address_len > 31:
raise LLDPInvalidLengthField(
'management address must be 1..31 characters long - '
'got string of size {}'.format(management_address_len))
class ThreeBytesEnumField(EnumField, ThreeBytesField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "!I")
class LLDPDUGenericOrganisationSpecific(LLDPDU):
ORG_UNIQUE_CODE_PNO = 0x000ecf
ORG_UNIQUE_CODE_IEEE_802_1 = 0x0080c2
ORG_UNIQUE_CODE_IEEE_802_3 = 0x00120f
ORG_UNIQUE_CODE_TIA_TR_41_MED = 0x0012bb
ORG_UNIQUE_CODE_HYTEC = 0x30b216
ORG_UNIQUE_CODES = {
ORG_UNIQUE_CODE_PNO: "PROFIBUS International (PNO)",
ORG_UNIQUE_CODE_IEEE_802_1: "IEEE 802.1",
ORG_UNIQUE_CODE_IEEE_802_3: "IEEE 802.3",
ORG_UNIQUE_CODE_TIA_TR_41_MED: "TIA TR-41 Committee . Media Endpoint Discovery", # noqa: E501
ORG_UNIQUE_CODE_HYTEC: "Hytec Geraetebau GmbH"
}
fields_desc = [
BitEnumField('_type', 127, 7, LLDPDU.TYPES),
BitFieldLenField('_length', None, 9, length_of='data', adjust=lambda pkt, x: len(pkt.data) + 4), # noqa: E501
ThreeBytesEnumField('org_code', 0, ORG_UNIQUE_CODES),
ByteField('subtype', 0x00),
XStrLenField('data', '',
length_from=lambda pkt: 0 if pkt._length is None else
pkt._length - 4)
]
# 0x09 .. 0x7e is reserved for future standardization and for now treated as Raw() data # noqa: E501
LLDPDU_CLASS_TYPES = {
0x00: LLDPDUEndOfLLDPDU,
0x01: LLDPDUChassisID,
0x02: LLDPDUPortID,
0x03: LLDPDUTimeToLive,
0x04: LLDPDUPortDescription,
0x05: LLDPDUSystemName,
0x06: LLDPDUSystemDescription,
0x07: LLDPDUSystemCapabilities,
0x08: LLDPDUManagementAddress,
127: LLDPDUGenericOrganisationSpecific
}
class LLDPConfiguration(object):
"""
basic configuration for LLDP layer
"""
def __init__(self):
self._strict_mode = True
self.strict_mode_enable()
def strict_mode_enable(self):
"""
enable strict mode and dissector debugging
"""
self._strict_mode = True
def strict_mode_disable(self):
"""
disable strict mode and dissector debugging
"""
self._strict_mode = False
def strict_mode(self):
"""
get current strict mode state
"""
return self._strict_mode
conf.contribs['LLDP'] = LLDPConfiguration()
bind_layers(Ether, LLDPDU, type=LLDP_ETHER_TYPE)
bind_layers(Dot1Q, LLDPDU, type=LLDP_ETHER_TYPE)
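# Editor's note: a hedged sketch of building a minimal, standard-conform LLDP
# frame from the layers above; MAC address and port name are illustrative.
def _lldp_frame_demo():
    frame = (Ether(src='00:11:22:33:44:55', dst=LLDP_NEAREST_BRIDGE_MAC) /
             LLDPDUChassisID(subtype=LLDPDUChassisID.SUBTYPE_MAC_ADDRESS,
                             id='00:11:22:33:44:55') /
             LLDPDUPortID(subtype=LLDPDUPortID.SUBTYPE_INTERFACE_NAME,
                          id='eth0') /
             LLDPDUTimeToLive(ttl=120) /
             LLDPDUEndOfLLDPDU())
    # post_build() pads the result up to the 64-byte Ethernet minimum.
    return bytes(frame)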
|
mitchrule/Miscellaneous
|
refs/heads/master
|
Django_Project/django/Lib/site-packages/wheel/pkginfo.py
|
565
|
"""Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser
try:
unicode
_PY3 = False
except NameError:
_PY3 = True
if not _PY3:
from email.generator import Generator
def read_pkg_info_bytes(bytestr):
return Parser().parsestr(bytestr)
def read_pkg_info(path):
with open(path, "r") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, 'w') as metadata:
Generator(metadata, maxheaderlen=0).flatten(message)
else:
from email.generator import BytesGenerator
def read_pkg_info_bytes(bytestr):
headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
message = Parser().parsestr(headers)
return message
def read_pkg_info(path):
with open(path, "r",
encoding="ascii",
errors="surrogateescape") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, "wb") as out:
BytesGenerator(out, maxheaderlen=0).flatten(message)
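# Editor's note: a hedged usage sketch of the helpers above; the paths are
# hypothetical.
def _pkginfo_roundtrip_demo(src='PKG-INFO', dst='PKG-INFO.out'):
    message = read_pkg_info(src)   # email.message.Message, headers undecoded
    name = message['Name']         # headers read like a mapping
    write_pkg_info(dst, message)   # written back without re-encoding
    return name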
|
AdaEne/CATMAID
|
refs/heads/master
|
django/applications/catmaid/migrations/0042_add_reviewer_whitelist.py
|
3
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ReviewerWhitelist'
db.create_table('reviewer_whitelist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catmaid.Project'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('reviewer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
('accept_after', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(1, 1, 1, 0, 0))),
))
db.send_create_signal(u'catmaid', ['ReviewerWhitelist'])
# Adding unique constraint on 'ReviewerWhitelist', fields ['project', 'user', 'reviewer']
db.create_unique('reviewer_whitelist', ['project_id', 'user_id', 'reviewer_id'])
def backwards(self, orm):
# Removing unique constraint on 'ReviewerWhitelist', fields ['project', 'user', 'reviewer']
db.delete_unique('reviewer_whitelist', ['project_id', 'user_id', 'reviewer_id'])
# Deleting model 'ReviewerWhitelist'
db.delete_table('reviewer_whitelist')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catmaid.apikey': {
'Meta': {'object_name': 'ApiKey'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catmaid.brokenslice': {
'Meta': {'object_name': 'BrokenSlice', 'db_table': "'broken_slice'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"})
},
u'catmaid.cardinalityrestriction': {
'Meta': {'object_name': 'CardinalityRestriction', 'db_table': "'cardinality_restriction'"},
'cardinality_type': ('django.db.models.fields.IntegerField', [], {}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassClass']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
u'catmaid.changerequest': {
'Meta': {'object_name': 'ChangeRequest', 'db_table': "'change_request'"},
'approve_action': ('django.db.models.fields.TextField', [], {}),
'completion_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_recipient'", 'db_column': "'recipient_id'", 'to': u"orm['auth.User']"}),
'reject_action': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'validate_action': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.class': {
'Meta': {'object_name': 'Class', 'db_table': "'class'"},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.classclass': {
'Meta': {'object_name': 'ClassClass', 'db_table': "'class_class'"},
'class_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_a'", 'db_column': "'class_a'", 'to': u"orm['catmaid.Class']"}),
'class_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_b'", 'db_column': "'class_b'", 'to': u"orm['catmaid.Class']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.classinstance': {
'Meta': {'object_name': 'ClassInstance', 'db_table': "'class_instance'"},
'class_column': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Class']", 'db_column': "'class_id'"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.classinstanceclassinstance': {
'Meta': {'object_name': 'ClassInstanceClassInstance', 'db_table': "'class_instance_class_instance'"},
'class_instance_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_a'", 'db_column': "'class_instance_a'", 'to': u"orm['catmaid.ClassInstance']"}),
'class_instance_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_b'", 'db_column': "'class_instance_b'", 'to': u"orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.concept': {
'Meta': {'object_name': 'Concept', 'db_table': "'concept'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.connector': {
'Meta': {'object_name': 'Connector', 'db_table': "'connector'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connector_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.connectorclassinstance': {
'Meta': {'object_name': 'ConnectorClassInstance', 'db_table': "'connector_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.dataview': {
'Meta': {'ordering': "('position',)", 'object_name': 'DataView', 'db_table': "'data_view'"},
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'config': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'data_view_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.DataViewType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.dataviewtype': {
'Meta': {'object_name': 'DataViewType', 'db_table': "'data_view_type'"},
'code_type': ('django.db.models.fields.TextField', [], {}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.deprecatedappliedmigrations': {
'Meta': {'object_name': 'DeprecatedAppliedMigrations', 'db_table': "'applied_migrations'"},
'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
},
u'catmaid.deprecatedsession': {
'Meta': {'object_name': 'DeprecatedSession', 'db_table': "'sessions'"},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '26'})
},
u'catmaid.location': {
'Meta': {'object_name': 'Location', 'db_table': "'location'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.log': {
'Meta': {'object_name': 'Log', 'db_table': "'log'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'freetext': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'operation_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.message': {
'Meta': {'object_name': 'Message', 'db_table': "'message'"},
'action': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'New message'", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.overlay': {
'Meta': {'object_name': 'Overlay', 'db_table': "'overlay'"},
'default_opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'file_extension': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_base': ('django.db.models.fields.TextField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"}),
'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.project': {
'Meta': {'object_name': 'Project', 'db_table': "'project'"},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stacks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catmaid.Stack']", 'through': u"orm['catmaid.ProjectStack']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.projectstack': {
'Meta': {'object_name': 'ProjectStack', 'db_table': "'project_stack'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'orientation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"}),
'translation': ('catmaid.fields.Double3DField', [], {'default': '(0, 0, 0)'})
},
u'catmaid.regionofinterest': {
'Meta': {'object_name': 'RegionOfInterest', 'db_table': "'region_of_interest'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roi_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
'height': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'rotation_cw': ('django.db.models.fields.FloatField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'width': ('django.db.models.fields.FloatField', [], {}),
'zoom_level': ('django.db.models.fields.IntegerField', [], {})
},
u'catmaid.regionofinterestclassinstance': {
'Meta': {'object_name': 'RegionOfInterestClassInstance', 'db_table': "'region_of_interest_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'region_of_interest': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.RegionOfInterest']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.relation': {
'Meta': {'object_name': 'Relation', 'db_table': "'relation'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isreciprocal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.relationinstance': {
'Meta': {'object_name': 'RelationInstance', 'db_table': "'relation_instance'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.restriction': {
'Meta': {'object_name': 'Restriction', 'db_table': "'restriction'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassClass']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.review': {
'Meta': {'object_name': 'Review', 'db_table': "'review'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'review_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"})
},
u'catmaid.reviewerwhitelist': {
'Meta': {'unique_together': "(('project', 'user', 'reviewer'),)", 'object_name': 'ReviewerWhitelist', 'db_table': "'reviewer_whitelist'"},
'accept_after': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.settings': {
'Meta': {'object_name': 'Settings', 'db_table': "'settings'"},
'key': ('django.db.models.fields.TextField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
u'catmaid.stack': {
'Meta': {'object_name': 'Stack', 'db_table': "'stack'"},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('catmaid.fields.Integer3DField', [], {}),
'file_extension': ('django.db.models.fields.TextField', [], {'default': "'jpg'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_base': ('django.db.models.fields.TextField', [], {}),
'metadata': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'num_zoom_levels': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'resolution': ('catmaid.fields.Double3DField', [], {}),
'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
'title': ('django.db.models.fields.TextField', [], {}),
'trakem2_project': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catmaid.textlabel': {
'Meta': {'object_name': 'Textlabel', 'db_table': "'textlabel'"},
'colour': ('catmaid.fields.RGBAField', [], {'default': '(1, 0.5, 0, 1)'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'font_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'font_size': ('django.db.models.fields.FloatField', [], {'default': '32'}),
'font_style': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Edit this text ...'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'catmaid.textlabellocation': {
'Meta': {'object_name': 'TextlabelLocation', 'db_table': "'textlabel_location'"},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'textlabel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Textlabel']"})
},
u'catmaid.treenode': {
'Meta': {'object_name': 'Treenode', 'db_table': "'treenode'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'treenode_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': u"orm['catmaid.Treenode']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'radius': ('django.db.models.fields.FloatField', [], {}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.treenodeclassinstance': {
'Meta': {'object_name': 'TreenodeClassInstance', 'db_table': "'treenode_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.treenodeconnector': {
'Meta': {'object_name': 'TreenodeConnector', 'db_table': "'treenode_connector'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'color': ('catmaid.fields.RGBAField', [], {'default': '(0.8122197914467499, 1.0, 0.9295521795841548, 1)'}),
'display_stack_reference_lines': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'independent_ontology_workspace_is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inverse_mouse_wheel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_cropping_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_ontology_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_roi_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_segmentation_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_tagging_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_text_label_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_tracing_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tracing_overlay_scale': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'tracing_overlay_screen_scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catmaid']
|
seyriz/taiga-contrib-google-auth
|
refs/heads/master
|
back/tests/__init__.py
|
12133432
| |
kageiit/infer
|
refs/heads/master
|
infer/lib/python/inferlib/capture/__init__.py
|
12133432
| |
xfournet/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/ka/__init__.py
|
12133432
| |
vergecurrency/VERGE
|
refs/heads/master
|
contrib/linearize/linearize-hashes.py
|
1
|
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
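# For example (illustrative values): hex_switchEndian('1a2b3c') returns
# '3c2b1a', since the string is split into byte pairs and the pair order is reversed.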
class VERGERPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = httplib.HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{'Authorization': self.authhdr,
'Content-type': 'application/json'})
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = {'version': '1.1',
'method': method,
'id': idx}
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = VERGERPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x, resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x,
': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
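# An illustrative CONFIG-FILE; the keys match the settings parsed below and
# the credential values are hypothetical:
#
#   host=127.0.0.1
#   port=20103
#   rpcuser=alice
#   rpcpassword=secret
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false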
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 20103
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file",
file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
|
dhowland/EasyAVR
|
refs/heads/master
|
keymapper/easykeymap/cfgparse.py
|
1
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2013-2016 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module contains code to parse EasyAVR layout modification .cfg files.
The .cfg files are similar to ini files, but contain a very restrictive syntax
consisting of only headers and three functions.
"""
import re
regex_header = re.compile(r"^\[(.+)\]$")
regex_command = re.compile(r"^(MAKE_[SPACERKYBLN]+)\(([ ,\d-]+)\)$")
def parse(cfg_path):
"""Parse the file at `cfg_path` as a layout mod .cfg file. It returns
a map of (row, col) coordinates to (width, height) or width attributes.
"""
cfg = {}
with open(cfg_path, 'r') as infd:
mod_map = {}
mod_name = None
        for i, line in enumerate(infd, 1):  # 1-based line numbers in error messages
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
m = regex_header.match(line)
if m:
if mod_name:
cfg[mod_name] = mod_map
mod_name = m.group(1)
mod_map = {}
continue
m = regex_command.match(line)
if m:
try:
args = m.group(2).split(',')
if m.group(1) == "MAKE_KEY":
row = int(args[0].strip())
col = int(args[1].strip())
width = int(args[2].strip())
height = int(args[3].strip())
mod_map[(row, col)] = (width, height)
elif m.group(1) == "MAKE_SPACER":
row = int(args[0].strip())
col = int(args[1].strip())
width = int(args[2].strip())
mod_map[(row, col)] = width
elif m.group(1) == "MAKE_BLANK":
row = int(args[0].strip())
col = int(args[1].strip())
width = int(args[2].strip()) * -1
mod_map[(row, col)] = width
else:
raise Exception("%s:%d: invalid command %s" %
(cfg_path, i, m.group(1)))
except IndexError:
raise Exception("%s:%d: Not enough arguments" %
(cfg_path, i))
except ValueError:
raise Exception("%s:%d: Argument is not integer" %
(cfg_path, i))
continue
raise Exception("%s:%d: Unrecognized: %s" %
(cfg_path, i, line))
if mod_name:
cfg[mod_name] = mod_map
return cfg
|
redhat-openstack/nova
|
refs/heads/f22-patches
|
nova/api/openstack/compute/contrib/server_list_multi_status.py
|
97
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Server_list_multi_status(extensions.ExtensionDescriptor):
"""Allow to specify multiple status values concurrently in the servers
list API..
"""
name = "ServerListMultiStatus"
alias = "os-server-list-multi-status"
namespace = ("http://docs.openstack.org/compute/ext/"
"os-server-list-multi-status/api/v2")
updated = "2014-05-11T00:00:00Z"
|
dmick/autokey
|
refs/heads/master
|
src/lib/gtkui/popupmenu.py
|
47
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time, logging
from gi.repository import Gtk, Gdk
from autokey.configmanager import *
from autokey.model import Folder # TODO remove later
_logger = logging.getLogger("phrase-menu")
class PopupMenu(Gtk.Menu):
"""
A popup menu that allows the user to select a phrase.
"""
def __init__(self, service, folders=[], items=[], onDesktop=True, title=None):
Gtk.Menu.__init__(self)
#self.set_take_focus(ConfigManager.SETTINGS[MENU_TAKES_FOCUS])
self.__i = 1
self.service = service
if ConfigManager.SETTINGS[SORT_BY_USAGE_COUNT]:
_logger.debug("Sorting phrase menu by usage count")
folders.sort(key=lambda obj: obj.usageCount, reverse=True)
items.sort(key=lambda obj: obj.usageCount, reverse=True)
else:
_logger.debug("Sorting phrase menu by item name/title")
folders.sort(key=lambda obj: str(obj))
items.sort(key=lambda obj: str(obj))
if len(folders) == 1 and len(items) == 0 and onDesktop:
# Only one folder - create menu with just its folders and items
for folder in folders[0].folders:
menuItem = Gtk.MenuItem(label=self.__getMnemonic(folder.title, onDesktop))
menuItem.set_submenu(PopupMenu(service, folder.folders, folder.items, onDesktop))
menuItem.set_use_underline(True)
self.append(menuItem)
if len(folders[0].folders) > 0:
self.append(Gtk.SeparatorMenuItem())
self.__addItemsToSelf(folders[0].items, service, onDesktop)
else:
# Create phrase folder section
for folder in folders:
menuItem = Gtk.MenuItem(label=self.__getMnemonic(folder.title, onDesktop))
menuItem.set_submenu(PopupMenu(service, folder.folders, folder.items, False))
menuItem.set_use_underline(True)
self.append(menuItem)
if len(folders) > 0:
self.append(Gtk.SeparatorMenuItem())
self.__addItemsToSelf(items, service, onDesktop)
self.show_all()
def __getMnemonic(self, desc, onDesktop):
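        # Prefixes the first nine entries with an access-key mnemonic,
        # producing e.g. "_1 - My phrase" (illustrative title).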
        if self.__i < 10 and '_' not in desc and onDesktop:
ret = "_%d - %s" % (self.__i, desc)
self.__i += 1
return ret
else:
return desc
def show_on_desktop(self):
Gdk.threads_enter()
time.sleep(0.2)
self.popup(None, None, None, None, 1, 0)
Gdk.threads_leave()
def remove_from_desktop(self):
Gdk.threads_enter()
self.popdown()
Gdk.threads_leave()
def __addItemsToSelf(self, items, service, onDesktop):
# Create phrase section
if ConfigManager.SETTINGS[SORT_BY_USAGE_COUNT]:
items.sort(key=lambda obj: obj.usageCount, reverse=True)
else:
items.sort(key=lambda obj: str(obj))
i = 1
for item in items:
#if onDesktop:
# menuItem = Gtk.MenuItem(item.get_description(service.lastStackState), False)
#else:
menuItem = Gtk.MenuItem(label=self.__getMnemonic(item.description, onDesktop))
menuItem.connect("activate", self.__itemSelected, item)
menuItem.set_use_underline(True)
self.append(menuItem)
def __itemSelected(self, widget, item):
self.service.item_selected(item)
# Testing stuff - remove later ----
class MockPhrase:
def __init__(self, description):
self.description = description
class MockExpansionService:
def phrase_selected(self, event, phrase):
        print(phrase.description)
if __name__ == "__main__":
Gdk.threads_init()
    myFolder = Folder("Some phrases")
myFolder.add_phrase(MockPhrase("phrase 1"))
myFolder.add_phrase(MockPhrase("phrase 2"))
myFolder.add_phrase(MockPhrase("phrase 3"))
myPhrases = []
myPhrases.append(MockPhrase("phrase 1"))
myPhrases.append(MockPhrase("phrase 2"))
myPhrases.append(MockPhrase("phrase 3"))
    menu = PopupMenu(MockExpansionService(), [myFolder], myPhrases)
menu.show_on_desktop()
Gtk.main()
|
EmanueleCannizzaro/scons
|
refs/heads/master
|
test/MSVS/vs-7.0-variant_dir.py
|
1
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVS/vs-7.0-variant_dir.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that we can generate Visual Studio 7.0 project (.vcproj) and
solution (.sln) files that look correct when using a variant_dir.
"""
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
host_arch = test.get_vs_host_arch()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['7.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_7_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_7_0
SConscript_contents = TestSConsMSVS.SConscript_contents_7_0
test.subdir('src')
test.write('SConstruct', """\
SConscript('src/SConscript', variant_dir='build')
""")
test.write(['src', 'SConscript'], SConscript_contents%{'HOST_ARCH': host_arch})
test.run(arguments=".")
project_guid = "{25F6CE89-8E22-2910-8B6E-FFE6DC1E2792}"
vcproj = test.read(['src', 'Test.vcproj'], 'r')
expect = test.msvs_substitute(expected_vcprojfile, '7.0', None, 'SConstruct',
project_guid=project_guid)
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)
test.must_exist(test.workpath('src', 'Test.sln'))
sln = test.read(['src', 'Test.sln'], 'r')
expect = test.msvs_substitute(expected_slnfile, '7.0', 'src',
project_guid=project_guid)
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.must_match(['build', 'Test.vcproj'], """\
This is just a placeholder file.
The real project file is here:
%s
""" % test.workpath('src', 'Test.vcproj'),
mode='r')
test.must_match(['build', 'Test.sln'], """\
This is just a placeholder file.
The real workspace file is here:
%s
""" % test.workpath('src', 'Test.sln'),
mode='r')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
schana/scheduling-python
|
refs/heads/master
|
scheduling/model/relationships.py
|
2
|
from scheduling import db
certifications_members = db.Table('certifications_members', db.Model.metadata,
db.Column('cert_id', db.Integer, db.ForeignKey('certification.id')),
db.Column('member_id', db.Integer, db.ForeignKey('member.id')))
certifications_site_types = db.Table('certifications_site_types', db.Model.metadata,
db.Column('cert_id', db.Integer, db.ForeignKey('certification.id')),
db.Column('site_type_id', db.Integer, db.ForeignKey('site_type.id')))
events_members = db.Table('events_members', db.Model.metadata,
db.Column('event_id', db.Integer, db.ForeignKey('event.id')),
db.Column('member_id', db.Integer, db.ForeignKey('member.id')))
class Certification(db.Model):
__tablename__ = 'certification'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
def __repr__(self):
return '<{} {}>'.format(self.__tablename__, self.name)
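# A hypothetical sketch of how a related model could consume the association
# tables above via relationship(secondary=...); Member and its columns are
# assumptions, not part of this module:
#
# class Member(db.Model):
#     __tablename__ = 'member'
#     id = db.Column(db.Integer, primary_key=True)
#     certifications = db.relationship(
#         'Certification', secondary=certifications_members,
#         backref='members')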
|
ByteInternet/libcloud
|
refs/heads/byte
|
integration/driver/test.py
|
16
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.base import NodeDriver, Node
from libcloud.utils.py3 import b
class TestResponseType(JsonResponse):
pass
class TestConnection(ConnectionUserAndKey):
host = 'localhost'
secure = True
responseCls = TestResponseType
allow_insecure = True
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None,
api_version=None, **conn_kwargs):
super(TestConnection, self).__init__(
user_id=user_id,
key=key,
secure=secure,
host=host, port=port,
url=url, timeout=timeout,
proxy_url=proxy_url)
def add_default_headers(self, headers):
user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
        headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8'))
return headers
class TestNodeDriver(NodeDriver):
connectionCls = TestConnection
type = 'testing'
api_name = 'testing'
name = 'Test Compute Driver'
website = 'http://libcloud.apache.org'
features = {'create_node': ['ssh_key', 'password']}
def __init__(self, key, secret=None, secure=True,
host=None, port=None, **kwargs):
super(TestNodeDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port,
**kwargs)
def list_nodes(self):
r = self.connection.request('/compute/nodes')
nodes = []
for node in r.object:
nodes.append(Node(driver=self, **node))
return nodes
def ex_report_data(self):
r = self.connection.request('/compute/report_data', raw=True)
return r.response.read()
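# Illustrative use (a sketch; the credentials and endpoint are hypothetical):
#   driver = TestNodeDriver('user', 'secret', host='localhost', port=8881)
#   print([n.name for n in driver.list_nodes()])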
|
sabi0/intellij-community
|
refs/heads/master
|
python/testData/types/NotMatchedOverloadsAndImplementationInImportedClass/b/__init__.py
|
87
|
from typing import overload
class A:
@overload
def foo(self, value: int) -> int:
pass
@overload
def foo(self, value: str) -> str:
pass
def foo(self, value):
return None
|
barnsnake351/nova
|
refs/heads/master
|
nova/api/openstack/compute/schemas/aggregates.py
|
59
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
availability_zone = copy.deepcopy(parameter_types.name)
availability_zone['type'] = ['string', 'null']
create = {
'type': 'object',
'properties': {
'aggregate': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'availability_zone': availability_zone,
},
'required': ['name'],
'additionalProperties': False,
},
},
'required': ['aggregate'],
'additionalProperties': False,
}
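# An illustrative request body accepted by the create schema above:
# {"aggregate": {"name": "test-aggregate", "availability_zone": "nova"}}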
update = {
'type': 'object',
'properties': {
'aggregate': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'availability_zone': availability_zone
},
'additionalProperties': False,
'anyOf': [
{'required': ['name']},
{'required': ['availability_zone']}
]
},
},
'required': ['aggregate'],
'additionalProperties': False,
}
add_host = {
'type': 'object',
'properties': {
'add_host': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
},
'required': ['host'],
'additionalProperties': False,
},
},
'required': ['add_host'],
'additionalProperties': False,
}
remove_host = {
'type': 'object',
'properties': {
'remove_host': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
},
'required': ['host'],
'additionalProperties': False,
},
},
'required': ['remove_host'],
'additionalProperties': False,
}
set_metadata = {
'type': 'object',
'properties': {
'set_metadata': {
'type': 'object',
'properties': {
'metadata': parameter_types.metadata_with_null
},
'required': ['metadata'],
'additionalProperties': False,
},
},
'required': ['set_metadata'],
'additionalProperties': False,
}
|
andxeg/heat-templates
|
refs/heads/master
|
tests/software_config/test_heat_config.py
|
7
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
import tempfile
import fixtures
from testtools import matchers
from tests.software_config import common
class HeatConfigTest(common.RunScriptTest):
fake_hooks = ['cfn-init', 'chef', 'puppet', 'salt', 'script']
data = [
{
'id': '1111',
'group': 'chef',
'inputs': [{
'name': 'deploy_signal_id',
'value': 'mock://192.0.2.2/foo'
}],
'config': 'one'
}, {
'id': '2222',
'group': 'cfn-init',
'inputs': [],
'config': 'two'
}, {
'id': '3333',
'group': 'salt',
'inputs': [{'name': 'foo', 'value': 'bar'}],
'outputs': [{'name': 'foo'}],
'config': 'three'
}, {
'id': '4444',
'group': 'puppet',
'inputs': [],
'config': 'four'
}, {
'id': '5555',
'group': 'script',
'inputs': [{
'name': 'deploy_status_code', 'value': '-1'
}, {
'name': 'deploy_stderr', 'value': 'A bad thing happened'
}, {
'name': 'deploy_signal_id',
'value': 'mock://192.0.2.3/foo'
}],
'config': 'five'
}, {
'id': '6666',
'group': 'no-such-hook',
'inputs': [],
'config': 'six'
}]
outputs = {
'chef': {
'deploy_status_code': '0',
'deploy_stderr': 'stderr',
'deploy_stdout': 'stdout'
},
'cfn-init': {
'deploy_status_code': '0',
'deploy_stderr': 'stderr',
'deploy_stdout': 'stdout'
},
'salt': {
'deploy_status_code': '0',
'deploy_stderr': 'stderr',
'deploy_stdout': 'stdout',
'foo': 'bar'
},
'puppet': {
'deploy_status_code': '0',
'deploy_stderr': 'stderr',
'deploy_stdout': 'stdout'
},
'script': {
'deploy_status_code': '-1',
'deploy_stderr': 'A bad thing happened',
'deploy_stdout': 'stdout'
}
}
def setUp(self):
super(HeatConfigTest, self).setUp()
self.fake_hook_path = self.relative_path(__file__, 'hook-fake.py')
self.heat_config_path = self.relative_path(
__file__,
'../..',
'hot/software-config/elements',
'heat-config/os-refresh-config/configure.d/55-heat-config')
self.hooks_dir = self.useFixture(fixtures.TempDir())
self.deployed_dir = self.useFixture(fixtures.TempDir())
with open(self.fake_hook_path) as f:
fake_hook = f.read()
for hook in self.fake_hooks:
hook_name = self.hooks_dir.join(hook)
with open(hook_name, 'w') as f:
os.utime(hook_name, None)
f.write(fake_hook)
f.flush()
os.chmod(hook_name, 0o755)
def write_config_file(self, data):
config_file = tempfile.NamedTemporaryFile()
config_file.write(json.dumps(data))
config_file.flush()
return config_file
def run_heat_config(self, data):
with self.write_config_file(data) as config_file:
env = os.environ.copy()
env.update({
'HEAT_CONFIG_HOOKS': self.hooks_dir.join(),
'HEAT_CONFIG_DEPLOYED': self.deployed_dir.join(),
'HEAT_SHELL_CONFIG': config_file.name
})
returncode, stdout, stderr = self.run_cmd(
[self.heat_config_path], env)
self.assertEqual(0, returncode, stderr)
def test_hooks_exist(self):
self.assertThat(
self.hooks_dir.join('no-such-hook'),
matchers.Not(matchers.FileExists()))
for hook in self.fake_hooks:
hook_path = self.hooks_dir.join(hook)
self.assertThat(hook_path, matchers.FileExists())
def test_run_heat_config(self):
self.run_heat_config(self.data)
for config in self.data:
hook = config['group']
stdin_path = self.hooks_dir.join('%s.stdin' % hook)
stdout_path = self.hooks_dir.join('%s.stdout' % hook)
deployed_file = self.deployed_dir.join('%s.json' % config['id'])
if hook == 'no-such-hook':
self.assertThat(
stdin_path, matchers.Not(matchers.FileExists()))
self.assertThat(
stdout_path, matchers.Not(matchers.FileExists()))
continue
self.assertThat(stdin_path, matchers.FileExists())
self.assertThat(stdout_path, matchers.FileExists())
# parsed stdin should match the config item
self.assertEqual(config,
self.json_from_file(stdin_path))
# parsed stdin should match the written deployed file
self.assertEqual(config,
self.json_from_file(deployed_file))
self.assertEqual(self.outputs[hook],
self.json_from_file(stdout_path))
            # clean up files in preparation for second run
os.remove(stdin_path)
os.remove(stdout_path)
# run again with no changes, assert no new files
self.run_heat_config(self.data)
for config in self.data:
hook = config['group']
stdin_path = self.hooks_dir.join('%s.stdin' % hook)
stdout_path = self.hooks_dir.join('%s.stdout' % hook)
self.assertThat(
stdin_path, matchers.Not(matchers.FileExists()))
self.assertThat(
stdout_path, matchers.Not(matchers.FileExists()))
# run again changing the puppet config
data = copy.deepcopy(self.data)
for config in data:
if config['id'] == '4444':
config['id'] = '44444444'
self.run_heat_config(data)
for config in self.data:
hook = config['group']
stdin_path = self.hooks_dir.join('%s.stdin' % hook)
stdout_path = self.hooks_dir.join('%s.stdout' % hook)
if hook == 'puppet':
self.assertThat(stdin_path, matchers.FileExists())
self.assertThat(stdout_path, matchers.FileExists())
else:
self.assertThat(
stdin_path, matchers.Not(matchers.FileExists()))
self.assertThat(
stdout_path, matchers.Not(matchers.FileExists()))
|
raschuetz/foundations-homework
|
refs/heads/master
|
07/data-analysis/lib/python3.5/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
|
1731
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class Filter(_base.Filter):
def __iter__(self):
for token in _base.Filter.__iter__(self):
if token["type"] in ("StartTag", "EmptyTag"):
attrs = OrderedDict()
for name, value in sorted(token["data"].items(),
key=lambda x: x[0]):
attrs[name] = value
token["data"] = attrs
yield token
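# Illustrative use (a sketch): wrap a tree walker's token stream so attributes
# serialize in alphabetical order, e.g.:
#
#   import html5lib
#   from html5lib import serializer, treewalkers
#   walker = treewalkers.getTreeWalker("etree")
#   tokens = Filter(walker(html5lib.parse('<a title="t" href="h">x</a>')))
#   print(serializer.HTMLSerializer().render(tokens))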
|
steny138/PythonTaipeiOpendata
|
refs/heads/master
|
manage.py
|
2
|
import os
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from app import app, db
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
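# Typical invocations (assuming APP_SETTINGS names a valid config class), e.g.:
#   python manage.py db init
#   python manage.py db migrate
#   python manage.py db upgrade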
if __name__ == '__main__':
manager.run()
|
ivrson9/cdol
|
refs/heads/master
|
contents/img_google/.eggs/py2app-0.10-py2.7.egg/py2app/bootstrap/ctypes_setup.py
|
10
|
def _setup_ctypes():
from ctypes.macholib import dyld
import os
frameworks = os.path.join(os.environ['RESOURCEPATH'], '..', 'Frameworks')
dyld.DEFAULT_FRAMEWORK_FALLBACK.insert(0, frameworks)
dyld.DEFAULT_LIBRARY_FALLBACK.insert(0, frameworks)
_setup_ctypes()
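# Illustrative effect (a sketch): inside a py2app bundle this lets
# ctypes.util.find_library() resolve dylibs shipped in the bundle's
# ../Frameworks directory, since macholib's fallback search paths now include it.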
|
hohanhb285/chrome-app-samples
|
refs/heads/master
|
in-app-payments-with-server-validation/backend-appengine/main.py
|
9
|
# Copyright 2012 Google Inc. All Rights Reserved.
# pylint: disable-msg=C6409,C6203
"""In-App Payments - Online Store Python Sample"""
# standard library imports
from cgi import escape
import os
import json
import time
import logging
# third-party imports
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import jwt
# application-specific imports
from sellerinfo import SELLER_ID
from sellerinfo import SELLER_SECRET
def postbacks_requestkey(request_name=None):
return db.Key.from_path('Postbacks', request_name or 'default_queue')
class Postbacks(db.Model):
jwtPostback = db.TextProperty()
orderId = db.StringProperty()
saleType = db.StringProperty()
price = db.StringProperty()
currencyCode = db.StringProperty()
sellerData = db.StringProperty()
itemName = db.StringProperty()
recurrenceFrequency = db.StringProperty()
recurrencePrice = db.StringProperty()
class MainHandler(webapp.RequestHandler):
"""Handles /"""
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write("Hello, World.")
class GenerateJWTSingleItemHandler(webapp.RequestHandler):
"""Generates a single item JWT - handles /generate"""
def post(self):
currTime = int(time.time())
expTime = currTime + (60 * 60 * 24 * 365)
jwtInfo = {'iss': SELLER_ID,
'aud': 'Google',
'typ': 'google/payments/inapp/item/v1',
'iat': currTime,
'exp': expTime,
'request': {'name': self.request.get('itemName', None),
'description': self.request.get('description', None),
'price': self.request.get('price', '1.00'),
'currencyCode': 'USD',
'sellerData': self.request.get('sellerData', None)
}
}
token = jwt.encode(jwtInfo, SELLER_SECRET)
result = {'encodedJWT': token}
result['jwt'] = jwtInfo
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(result))
class GenerateJWTSubscriptionHandler(webapp.RequestHandler):
"""Generates a subscription JWT - handles /generateSubscription"""
def post(self):
currTime = int(time.time())
expTime = currTime + (60 * 60 * 24 * 365)
jwtInfo = {'iss': SELLER_ID,
'aud': 'Google',
'typ': 'google/payments/inapp/subscription/v1',
'iat': currTime,
'exp': expTime,
'request': {'name': self.request.get('itemName', None),
'description': self.request.get('description', None),
'sellerData': self.request.get('sellerData', None),
'initialPayment': {
'price': self.request.get('initialPrice', None),
'currencyCode': 'USD',
'paymentType': 'prorated'
},
'recurrence': {
'price': self.request.get('recurringPrice', None),
'currencyCode': 'USD',
'frequency': 'monthly',
'startTime': int(time.time() + 2600000)
}
}
}
token = jwt.encode(jwtInfo, SELLER_SECRET)
result = {'encodedJWT': token}
result['jwt'] = jwtInfo
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(result))
class PostbackHandler(webapp.RequestHandler):
"""Handles server postback from Wallet - received at /postback"""
def post(self):
"""Handles post request."""
encodedJWT = self.request.get('jwt', None)
if encodedJWT is not None:
# jwt.decode won't accept unicode, cast to str
# http://github.com/progrium/pyjwt/issues/4
decodedJWT = jwt.decode(str(encodedJWT), SELLER_SECRET)
logging.info("Postback Handler")
logging.info("Encoded JWT: " + str(encodedJWT))
logging.info("Decoded JWT: " + str(decodedJWT))
# validate the payment request and respond back to Google
if decodedJWT['iss'] == 'Google' and decodedJWT['aud'] == SELLER_ID:
if ('response' in decodedJWT and
'orderId' in decodedJWT['response'] and
'request' in decodedJWT):
orderId = decodedJWT['response']['orderId']
requestInfo = decodedJWT['request']
pb = Postbacks(parent=postbacks_requestkey())
pb.jwtPostback = encodedJWT
pb.orderId = orderId
pb.sellerData = requestInfo.get('sellerData')
pb.itemName = requestInfo.get('name')
pb.saleType = decodedJWT['typ']
if (decodedJWT['typ'] == 'google/payments/inapp/item/v1/postback/buy'):
pb.price = requestInfo['price']
pb.currencyCode = requestInfo['currencyCode']
elif (decodedJWT['typ'] == 'google/payments/inapp/subscription/v1/postback/buy'):
pb.price = requestInfo['initialPayment']['price']
pb.currencyCode = requestInfo['initialPayment']['currencyCode']
pb.recurrencePrice = requestInfo['recurrence']['price']
pb.recurrenceFrequency = requestInfo['recurrence']['frequency']
pb.put()
# respond back to complete payment
self.response.out.write(orderId)
else:
self.error(404)
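# For reference, an illustrative (hypothetical) decoded postback JWT for a
# single-item purchase, matching the fields read above:
#   {"iss": "Google", "aud": "<SELLER_ID>",
#    "typ": "google/payments/inapp/item/v1/postback/buy",
#    "request": {"name": "Item", "price": "1.00", "currencyCode": "USD",
#                "sellerData": "abc"},
#    "response": {"orderId": "1234-5678-90"}}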
class VerifyPurchaseHandler(webapp.RequestHandler):
"""Verifies a purchase was made - received at /verify"""
def get(self):
orderId = self.request.get('orderId', None)
orderQuery = db.GqlQuery('SELECT * from Postbacks where orderId=:1', orderId)
order = orderQuery.get()
if order is not None:
result = {'success': True,
'orderId': order.orderId,
'itemName': order.itemName}
else:
result = {'success': False}
self.response.headers['Access-Control-Allow-Origin'] = "*"
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(result))
application = webapp.WSGIApplication([
('/', MainHandler),
('/generate', GenerateJWTSingleItemHandler),
('/generateSubscription', GenerateJWTSubscriptionHandler),
('/verify', VerifyPurchaseHandler),
('/postback', PostbackHandler),
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
EduardoRG/sdn_ryu
|
refs/heads/master
|
dragon/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
sestrella/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_compute_url_map_info.py
|
4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_url_map_info
description:
- Gather info for GCP UrlMap
short_description: Gather info for GCP UrlMap
version_added: '2.7'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
type: list
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on an URL map
gcp_compute_url_map_info:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
defaultService:
description:
- A reference to BackendService resource if none of the hostRules match.
returned: success
type: dict
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
hostRules:
description:
- The list of HostRules to use against the URL.
returned: success
type: complex
contains:
description:
description:
- An optional description of this HostRule. Provide this property when you
create the resource.
returned: success
type: str
hosts:
description:
- The list of host patterns to match. They must be valid hostnames, except
* will match any string of ([a-z0-9-.]*). In that case, * must be the
first character and must be followed in the pattern by either - or .
returned: success
type: list
pathMatcher:
description:
- The name of the PathMatcher to use to match the path portion of the URL
if the hostRule matches the URL's host portion.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
fingerprint:
description:
- Fingerprint of this resource. This field is used internally during updates
of this resource.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
pathMatchers:
description:
- The list of named PathMatchers to use against the URL.
returned: success
type: complex
contains:
defaultService:
description:
- A reference to a BackendService resource. This will be used if none of
the pathRules defined by this PathMatcher is matched by the URL's path
portion.
returned: success
type: dict
description:
description:
- An optional description of this resource.
returned: success
type: str
name:
description:
- The name to which this PathMatcher is referred by the HostRule.
returned: success
type: str
pathRules:
description:
- The list of path rules.
returned: success
type: complex
contains:
paths:
description:
- 'The list of path patterns to match. Each must start with / and the
only place a * is allowed is at the end following a /. The string
fed to the path matcher does not include any text after the first
? or #, and those chars are not allowed here.'
returned: success
type: list
service:
description:
- A reference to the BackendService resource if this rule is matched.
returned: success
type: dict
tests:
description:
- The list of expected URL mappings. Requests to update this UrlMap will succeed
only if all of the test cases pass.
returned: success
type: complex
contains:
description:
description:
- Description of this test case.
returned: success
type: str
host:
description:
- Host portion of the URL.
returned: success
type: str
path:
description:
- Path portion of the URL.
returned: success
type: str
service:
description:
- A reference to expected BackendService resource the given URL should be
mapped to.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/urlMaps".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
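# Illustrative behaviour of query_options (hypothetical filter strings, shown
# as comments so nothing runs at import time):
#   query_options([])                            -> ''
#   query_options(['name = my-map'])             -> 'name = my-map'
#   query_options(['name = my-map', '(a = b)'])  -> '(name = my-map) (a = b)'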
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
gruunday/useradm
|
refs/heads/master
|
scripts/vote.py
|
1
|
#!/usr/bin/python
import os
import re
import sys
from ..useradm.rberror import RBError
from ..useradm.rbuser import RBUser
from ..useradm.rbuserdb import RBUserDB
voteregister = 'voted.txt'
def main():
"""Program entry function."""
voted = {}
if os.path.exists(voteregister):
fd = open(voteregister, 'r')
for line in fd.readlines():
voted[line.rstrip()] = 1
fd.close()
fd = open(voteregister, 'a')
udb = RBUserDB()
udb.connect()
while 1:
usr = RBUser()
tmp = None
while not tmp:
tmp = input("Please enter Username/Student ID/Student Card: ")
res = re.search(r'\D*\d{2}(\d{8})\d{3}\D*', tmp)
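        # Card tracks carry 13 digits: 2 leading digits, the 8-digit student
        # ID, then 3 trailing digits; e.g. '9912345678123' yields the ID
        # 12345678 (illustrative value).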
if res:
usr.id = int(res.group(1))
print('CARD', usr.id)
else:
res = re.search(r'^(\d{8})$', tmp)
if res:
usr.id = int(tmp)
print('ID', usr.id)
try:
if usr.id:
udb.get_user_byid(usr)
udb.show(usr)
else:
usr.uid = tmp
udb.get_user_byname(usr)
udb.show(usr)
except RBError:
            print('\033[31;1mNO SUCH USER YOU FUCKING DICKHEAD\033[0m')
else:
if usr.uid in voted:
                print('\n\033[31;1mGO FUCK YOURSELF YOU TWO-VOTING PRICK\033[0m\n')
continue
if usr.usertype not in ('member', 'committe', 'staff'):
                print('''
\n
\033[31;1mTELL THE COCKMUCH TO GET A REAL MEMBER ACCOUNT\033[0m
\n
''')
elif usr.yearsPaid <= 0:
                print('\n\033[31;1mTELL THE SCABBY BASTARD TO PISS OFF\033[0m\n')
else:
fd.write('%s\n' % usr.uid)
fd.flush()
voted[usr.uid] = 1
                print('\n\033[32;1mBIG VOTE GO NOW!\033[0m\n')
fd.close()
sys.exit(0)
if __name__ == "__main__":
main()
|
odoobgorg/odoo
|
refs/heads/9.0
|
addons/website_blog/controllers/main.py
|
12
|
# -*- coding: utf-8 -*-
import datetime
import json
import werkzeug
from openerp import tools
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug, unslug
from openerp.exceptions import UserError
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
class QueryURL(object):
def __init__(self, path='', path_args=None, **args):
self.path = path
self.args = args
self.path_args = set(path_args or [])
def __call__(self, path=None, path_args=None, **kw):
path = path or self.path
for k, v in self.args.items():
kw.setdefault(k, v)
path_args = set(path_args or []).union(self.path_args)
paths, fragments = [], []
for key, value in kw.items():
if value and key in path_args:
if isinstance(value, browse_record):
paths.append((key, slug(value)))
else:
paths.append((key, value))
elif value:
if isinstance(value, list) or isinstance(value, set):
fragments.append(werkzeug.url_encode([(key, item) for item in value]))
else:
fragments.append(werkzeug.url_encode([(key, value)]))
for key, value in paths:
path += '/' + key + '/%s' % value
if fragments:
path += '?' + '&'.join(fragments)
return path
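# Illustrative use of QueryURL (hypothetical values): with
#   url = QueryURL('/blog', ['tag'], tag='news', date_begin='2016-01-01')
# calling url() yields '/blog/tag/news?date_begin=2016-01-01' -- 'tag' is a
# path argument while 'date_begin' becomes a query-string fragment.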
class WebsiteBlog(http.Controller):
_blog_post_per_page = 20
_post_comment_per_page = 10
def nav_list(self, blog=None):
blog_post_obj = request.registry['blog.post']
domain = blog and [('blog_id', '=', blog.id)] or []
groups = blog_post_obj.read_group(
request.cr, request.uid, domain, ['name', 'create_date'],
groupby="create_date", orderby="create_date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
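            # each bucket now carries plain date strings, e.g.
            # date_begin='2016-05-01' for a May 2016 group (illustrative value)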
return groups
@http.route([
'/blog',
'/blog/page/<int:page>',
], type='http', auth="public", website=True)
def blogs(self, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_obj = request.registry['blog.post']
total = blog_obj.search(cr, uid, [], count=True, context=context)
pager = request.website.pager(
url='/blog',
total=total,
page=page,
step=self._blog_post_per_page,
)
post_ids = blog_obj.search(cr, uid, [], offset=(page-1)*self._blog_post_per_page, limit=self._blog_post_per_page, context=context)
posts = blog_obj.browse(cr, uid, post_ids, context=context)
blog_url = QueryURL('', ['blog', 'tag'])
return request.website.render("website_blog.latest_blogs", {
'posts': posts,
'pager': pager,
'blog_url': blog_url,
})
@http.route([
'/blog/<model("blog.blog"):blog>',
'/blog/<model("blog.blog"):blog>/page/<int:page>',
'/blog/<model("blog.blog"):blog>/tag/<string:tag>',
'/blog/<model("blog.blog"):blog>/tag/<string:tag>/page/<int:page>',
], type='http', auth="public", website=True)
def blog(self, blog=None, tag=None, page=1, **opt):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog': current blog
- 'blogs': all blogs for navigation
- 'pager': pager of posts
          - 'active_tag_ids': list of active tag ids,
          - 'tags_list': function to build the comma-separated tag list ids (for the url),
          - 'tags': all tags, for navigation
          - 'nav_list': a dict [year][month] for archives navigation
          - 'date': date_begin optional parameter, used in archives navigation
          - 'blog_url': helper object to create URLs
"""
date_begin, date_end = opt.get('date_begin'), opt.get('date_end')
cr, uid, context = request.cr, request.uid, request.context
blog_post_obj = request.registry['blog.post']
blog_obj = request.registry['blog.blog']
blog_ids = blog_obj.search(cr, uid, [], order="create_date asc", context=context)
blogs = blog_obj.browse(cr, uid, blog_ids, context=context)
# build the domain for blog post to display
domain = []
# retrocompatibility to accept tag as slug
active_tag_ids = tag and map(int, [unslug(t)[1] for t in tag.split(',')]) or []
if active_tag_ids:
domain += [('tag_ids', 'in', active_tag_ids)]
if blog:
domain += [('blog_id', '=', blog.id)]
if date_begin and date_end:
domain += [("create_date", ">=", date_begin), ("create_date", "<=", date_end)]
blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)
blog_post_ids = blog_post_obj.search(cr, uid, domain, order="create_date desc", context=context)
blog_posts = blog_post_obj.browse(cr, uid, blog_post_ids, context=context)
pager = request.website.pager(
url=request.httprequest.path.partition('/page/')[0],
total=len(blog_posts),
page=page,
step=self._blog_post_per_page,
url_args=opt,
)
pager_begin = (page - 1) * self._blog_post_per_page
pager_end = page * self._blog_post_per_page
blog_posts = blog_posts[pager_begin:pager_end]
all_tags = blog.all_tags()[blog.id]
# function to create the string list of tag ids, and toggle a given one.
# used in the 'Tags Cloud' template.
def tags_list(tag_ids, current_tag):
tag_ids = list(tag_ids) # required to avoid using the same list
if current_tag in tag_ids:
tag_ids.remove(current_tag)
else:
tag_ids.append(current_tag)
tag_ids = request.registry['blog.tag'].browse(cr, uid, tag_ids, context=context).exists()
return ','.join(map(slug, tag_ids))
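        # e.g. browsing with tags [1, 2]: tags_list([1, 2], 2) drops tag 2 and
        # returns the slugs of [1], while tags_list([1, 2], 3) adds tag 3
        # (illustrative ids only).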
values = {
'blog': blog,
'blogs': blogs,
'main_object': blog,
'tags': all_tags,
'active_tag_ids': active_tag_ids,
'tags_list' : tags_list,
'blog_posts': blog_posts,
'pager': pager,
'nav_list': self.nav_list(blog),
'blog_url': blog_url,
'date': date_begin,
}
response = request.website.render("website_blog.blog_post_short", values)
return response
@http.route(['/blog/<model("blog.blog"):blog>/feed'], type='http', auth="public")
def blog_feed(self, blog, limit='15'):
v = {}
v['blog'] = blog
v['base_url'] = request.env['ir.config_parameter'].get_param('web.base.url')
v['posts'] = request.env['blog.post'].search([('blog_id','=', blog.id)], limit=min(int(limit), 50))
r = request.render("website_blog.blog_feed", v, headers=[('Content-Type', 'application/atom+xml')])
return r
@http.route([
'''/blog/<model("blog.blog"):blog>/post/<model("blog.post", "[('blog_id','=',blog[0])]"):blog_post>''',
], type='http', auth="public", website=True)
def blog_post(self, blog, blog_post, tag_id=None, page=1, enable_editor=None, **post):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog_post': browse of the current post
- 'blog': browse of the current blog
- 'blogs': list of browse records of blogs
- 'tag': current tag, if tag_id in parameters
- 'tags': all tags, for tag-based navigation
- 'pager': a pager on the comments
- 'nav_list': a dict [year][month] for archives navigation
- 'next_post': next blog post, to direct the user towards the next interesting post
"""
cr, uid, context = request.cr, request.uid, request.context
tag_obj = request.registry['blog.tag']
blog_post_obj = request.registry['blog.post']
date_begin, date_end = post.get('date_begin'), post.get('date_end')
pager_url = "/blogpost/%s" % blog_post.id
pager = request.website.pager(
url=pager_url,
total=len(blog_post.website_message_ids),
page=page,
step=self._post_comment_per_page,
scope=7
)
pager_begin = (page - 1) * self._post_comment_per_page
pager_end = page * self._post_comment_per_page
comments = blog_post.website_message_ids[pager_begin:pager_end]
tag = None
if tag_id:
tag = request.registry['blog.tag'].browse(request.cr, request.uid, int(tag_id), context=request.context)
blog_url = QueryURL('', ['blog', 'tag'], blog=blog_post.blog_id, tag=tag, date_begin=date_begin, date_end=date_end)
if not blog_post.blog_id.id == blog.id:
return request.redirect("/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post)))
tags = tag_obj.browse(cr, uid, tag_obj.search(cr, uid, [], context=context), context=context)
# Find next Post
all_post_ids = blog_post_obj.search(cr, uid, [('blog_id', '=', blog.id)], context=context)
# should always return at least the current post
current_blog_post_index = all_post_ids.index(blog_post.id)
nb_posts = len(all_post_ids)
next_post_id = all_post_ids[(current_blog_post_index + 1) % nb_posts] if nb_posts > 1 else None
next_post = next_post_id and blog_post_obj.browse(cr, uid, next_post_id, context=context) or False
values = {
'tags': tags,
'tag': tag,
'blog': blog,
'blog_post': blog_post,
'blog_post_cover_properties': json.loads(blog_post.cover_properties),
'main_object': blog_post,
'nav_list': self.nav_list(blog),
'enable_editor': enable_editor,
'next_post': next_post,
'next_post_cover_properties': json.loads(next_post.cover_properties) if next_post else {},
'date': date_begin,
'blog_url': blog_url,
'pager': pager,
'comments': comments,
}
response = request.website.render("website_blog.blog_post_complete", values)
request.session[request.session_id] = request.session.get(request.session_id, [])
if not (blog_post.id in request.session[request.session_id]):
request.session[request.session_id].append(blog_post.id)
# Increase counter
blog_post_obj.write(cr, SUPERUSER_ID, [blog_post.id], {
'visits': blog_post.visits+1,
},context=context)
return response
def _blog_post_message(self, blog_post_id, message_content, **post):
cr, uid, context = request.cr, request.uid, request.context
BlogPost = request.registry['blog.post']
User = request.registry['res.users']
        # for now, only portal and internal users can post comments on blog posts.
if uid == request.website.user_id.id:
raise UserError(_('Public user cannot post comments on blog post.'))
# get the partner of the current user
user = User.browse(cr, uid, uid, context=context)
partner_id = user.partner_id.id
message_id = BlogPost.message_post(
cr, uid, int(blog_post_id),
body=message_content,
message_type='comment',
subtype='mt_comment',
author_id=partner_id,
path=post.get('path', False),
context=context)
return message_id
def _get_discussion_detail(self, ids, publish=False, **post):
cr, uid, context = request.cr, request.uid, request.context
values = []
mail_obj = request.registry.get('mail.message')
for message in mail_obj.browse(cr, SUPERUSER_ID, ids, context=context):
values.append({
"id": message.id,
"author_name": message.author_id.name,
"author_image": message.author_id.image and \
("data:image/png;base64,%s" % message.author_id.image) or \
'/website_blog/static/src/img/anonymous.png',
"date": message.date,
'body': html2plaintext(message.body),
'website_published' : message.website_published,
'publish' : publish,
})
return values
@http.route(['/blog/post_discussion'], type='json', auth="public", website=True)
def post_discussion(self, blog_post_id, **post):
cr, uid = request.cr, request.uid
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
        message_id = self._blog_post_message(blog_post_id, post.get('comment'), **post)
        return self._get_discussion_detail([message_id], publish, **post)
@http.route('/blog/<int:blog_id>/post/new', type='http', auth="public", website=True)
def blog_post_create(self, blog_id, **post):
cr, uid, context = request.cr, request.uid, request.context
new_blog_post_id = request.registry['blog.post'].create(cr, uid, {
'blog_id': blog_id,
'website_published': False,
}, context=context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, new_blog_post_id, context=context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))
@http.route('/blog/post_duplicate', type='http', auth="public", website=True, methods=['POST'])
def blog_post_copy(self, blog_post_id, **post):
""" Duplicate a blog.
:param blog_post_id: id of the blog post currently browsed.
:return redirect to the new blog created
"""
cr, uid, context = request.cr, request.uid, request.context
create_context = dict(context, mail_create_nosubscribe=True)
        nid = request.registry['blog.post'].copy(cr, uid, int(blog_post_id), {}, context=create_context)
        new_blog_post = request.registry['blog.post'].browse(cr, uid, nid, context=context)
        return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))
@http.route('/blog/post_get_discussion/', type='json', auth="public", website=True)
def discussion(self, post_id=0, path=None, count=False, **post):
cr, uid, context = request.cr, request.uid, request.context
mail_obj = request.registry.get('mail.message')
domain = [('res_id', '=', int(post_id)), ('model', '=', 'blog.post'), ('path', '=', path)]
#check current user belongs to website publisher group
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
if not publish:
domain.append(('website_published', '=', True))
ids = mail_obj.search(cr, SUPERUSER_ID, domain, count=count)
if count:
return ids
return self._get_discussion_detail(ids, publish, **post)
@http.route('/blog/post_get_discussions/', type='json', auth="public", website=True)
def discussions(self, post_id=0, paths=None, count=False, **post):
ret = []
for path in paths:
result = self.discussion(post_id=post_id, path=path, count=count, **post)
ret.append({"path": path, "val": result})
return ret
@http.route('/blog/post_change_background', type='json', auth="public", website=True)
    def change_bg(self, post_id=0, cover_properties=None, **post):
        # avoid the mutable default argument; fall back to an empty dict
        cover_properties = cover_properties or {}
        if not post_id:
            return False
        return request.registry['blog.post'].write(request.cr, request.uid, [int(post_id)], {'cover_properties': json.dumps(cover_properties)}, request.context)
@http.route('/blog/get_user/', type='json', auth="public", website=True)
def get_user(self, **post):
        return [not request.session.uid]
|
MyRobotLab/myrobotlab
|
refs/heads/develop
|
src/main/resources/resource/VirtualDevice/Arduino.py
|
1
|
#############################################
# This is a basic script to emulate the hardware of
# an Arduino microcontroller. The VirtualDevice
# service will execute this script when
# createVirtualArduino(port) is called
import time
import math
import threading
from random import randint
from org.myrobotlab.codec.serial import ArduinoMsgCodec
working = False
worker = None
analogReadPollingPins = []
digitalReadPollingPins = []
def work():
"""thread worker function"""
global working, analogReadPollingPins
x = 0
working = True
while(working):
x = x + 0.09
y = int(math.cos(x) * 100 + 150)
# retcmd = "publishPin/" + str(pin) + "/3/"+ str(y) +"\n"
# uart.write(codec.encode(retcmd))
for pinx in digitalReadPollingPins:
retcmd = "publishPin/" + str(pinx) + "/0/"+str(randint(0,1))+"\n"
uart.write(codec.encode(retcmd))
for pinx in analogReadPollingPins:
#retcmd = "publishPin/" + str(pinx) + "/4/"+ str(y) +"\n"
#retcmd = "publishPin/" + str(pinx) + "/" + str(int(pinx)%4) + "/"+ str(y) +"\n"
retcmd = "publishPin/" + str(pinx) + "/1/"+ str(y) +"\n"
uart.write(codec.encode(retcmd))
sleep(0.001)
#print (y)
# TODO -------
# if (digitalReadPollingPins.length() == 0 && analogReadPollingPins.length() == 0
# working = False
print("I am done !")
codec = ArduinoMsgCodec()
virtual = Runtime.start("virtual", "VirtualDevice")
logic = virtual.getLogic()
# get uarts and subscribe to them
for uartName in virtual.getUarts().keySet():
uart = virtual.getUart(uartName)
logic.subscribe(uart.getName(), "publishRX")
logic.subscribe(uart.getName(), "onConnect")
logic.subscribe(uart.getName(), "onPortNames")
logic.subscribe(uart.getName(), "onDisconnect")
def onRX(b):
global working, worker, analogReadPollingPins
print("onByte", b)
command = codec.decode(b)
if command != None and len(command) > 0 :
print("decoded", command)
# rstrip strips the \n from the record
command = command.rstrip()
clist = command.split('/')
if command == "getVersion":
uart.write(codec.encode("publishVersion/"+ str(ArduinoMsgCodec.MRLCOMM_VERSION) +"\n"))
elif command.startswith("digitalReadPollingStart"):
print("digitalReadPollingStart")
pin = clist[1]
digitalReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("digitalReadPollingStop"):
print("digitalReadPollingStop")
pin = clist[1]
digitalReadPollingPins.remove(pin)
elif command.startswith("analogReadPollingStart"):
print("analogReadPollingStart")
pin = clist[1]
analogReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("analogReadPollingStop"):
print("analogReadPollingStop")
pin = clist[1]
analogReadPollingPins.remove(pin)
def off():
    # without the global statement these assignments would only create locals
    global working, worker
    working = False
    worker = None
def onConnect(portName):
print("onConnect to ", portName)
# FIXME ??? is this a bad algorithm to determine the callback method name ?
# seems somebody is expecting it this way
def onOnConnect(portName):
print("onOnConnect connected to ", portName)
def onPortNames(portName):
print("onPortNames TODO - list portNames")
def onOnPortNames(portName):
print("onOnPortNames TODO - list portNames")
def onDisconnect(portName):
print("onDisconnect from ", portName)
def onOnDisconnect(portName):
print("onOnDisconnect from ", portName)
# WHAT THE HECK IS THIS ABOUT ?
# TODO - find out
def serial1RX(data):
print("serial1RX ", data)
def serial2RX(data):
print("serial2RX ", data)
def serial3RX(data):
print("serial3RX ", data)
def serial4RX(data):
print("serial4RX ", data)
def serial5RX(data):
print("serial5RX ", data)
def serial6RX(data):
print("serial6RX ", data)
def serial7RX(data):
print("serial7RX ", data)
def serial8RX(data):
print("serial8RX ", data)
|
openstack/barbican
|
refs/heads/master
|
bin/versionbuild.py
|
4
|
#!/usr/bin/env python
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Version build stamping script.
This module generates and inserts a patch component of the semantic version
stamp for Barbican, intended to ensure that a strictly monotonically increasing
version is produced for consecutive development releases. Some repositories
such as yum use this increasing semantic version to select the latest
package for installations.
This process may not be required if a bug in the 'pbr' library is fixed:
https://bugs.launchpad.net/pbr/+bug/1206730
"""
import os
import re
from datetime import datetime
from time import mktime
# Determine version of this application.
SETUP_FILE = 'setup.cfg'
VERSIONFILE = SETUP_FILE
current_dir = os.getcwd()
if current_dir.endswith('bin'):
VERSIONFILE = os.path.join('..', SETUP_FILE)
def get_patch():
"""Return a strictly monotonically increasing version patch.
    This method provides the 'patch' component of the semantic version
    stamp for Barbican. It currently returns an epoch in seconds, but
    could use a build id from the build system.
"""
dt = datetime.now()
return int(mktime(dt.timetuple()))
def update_versionfile(patch):
"""Update the version information in setup.cfg per the provided patch.
PBR will generate a version stamp based on the version attribute in the
setup.cfg file, appending information such as git SHA code to it. To make
this generated version friendly to packaging systems such as YUM, this
function appends the provided patch to the base version. This function
assumes the base version in setup.cfg is of the form 'xx.yy' such as
'2014.2'. It will replace a third element found after this base with the
provided patch.
"""
version_regex = re.compile(r'(^\s*version\s*=\s*\w*\.\w*)(.*)')
temp_name = VERSIONFILE + '~'
with open(VERSIONFILE, 'r') as file_old:
with open(temp_name, 'w') as file_new:
for line in file_old:
match = version_regex.match(line)
if match:
file_new.write(''.join(
[match.group(1).strip(), '.', str(patch), '\n']))
else:
file_new.write(line)
# Replace the original setup.cfg with the modified one.
os.rename(temp_name, VERSIONFILE)
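# Illustrative effect of the rewrite above (hypothetical patch value):
#   'version = 2014.2'   -> 'version = 2014.2.1401234567'
#   'version = 2014.2.9' -> 'version = 2014.2.1401234567' (old patch replaced)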
if __name__ == '__main__':
patch = get_patch()
update_versionfile(patch)
|
dblia/nosql-ganeti
|
refs/heads/couch
|
lib/client/gnt_group.py
|
3
|
#
#
# Copyright (C) 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Node group related commands"""
# pylint: disable=W0401,W0614
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
from ganeti.cli import *
from ganeti import constants
from ganeti import opcodes
from ganeti import utils
from ganeti import compat
from cStringIO import StringIO
#: default list of fields for L{ListGroups}
_LIST_DEF_FIELDS = ["name", "node_cnt", "pinst_cnt", "alloc_policy", "ndparams"]
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
def AddGroup(opts, args):
"""Add a node group to the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: a list of length 1 with the name of the group to create
@rtype: int
@return: the desired exit code
"""
ipolicy = CreateIPolicyFromOpts(
ispecs_mem_size=opts.ispecs_mem_size,
ispecs_cpu_count=opts.ispecs_cpu_count,
ispecs_disk_count=opts.ispecs_disk_count,
ispecs_disk_size=opts.ispecs_disk_size,
ispecs_nic_count=opts.ispecs_nic_count,
ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
group_ipolicy=True)
(group_name,) = args
diskparams = dict(opts.diskparams)
if opts.disk_state:
disk_state = utils.FlatToDict(opts.disk_state)
else:
disk_state = {}
hv_state = dict(opts.hv_state)
op = opcodes.OpGroupAdd(group_name=group_name, ndparams=opts.ndparams,
alloc_policy=opts.alloc_policy,
diskparams=diskparams, ipolicy=ipolicy,
hv_state=hv_state,
disk_state=disk_state)
SubmitOrSend(op, opts)
def AssignNodes(opts, args):
"""Assign nodes to a group.
@param opts: the command line options selected by the user
@type args: list
@param args: args[0]: group to assign nodes to; args[1:]: nodes to assign
@rtype: int
@return: the desired exit code
"""
group_name = args[0]
node_names = args[1:]
op = opcodes.OpGroupAssignNodes(group_name=group_name, nodes=node_names,
force=opts.force)
SubmitOrSend(op, opts)
def _FmtDict(data):
"""Format dict data into command-line format.
@param data: The input dict to be formatted
@return: The formatted dict
"""
if not data:
return "(empty)"
return utils.CommaJoin(["%s=%s" % (key, value)
for key, value in data.items()])
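# e.g. _FmtDict({}) -> "(empty)", and _FmtDict({"a": 1, "b": 2}) -> "a=1, b=2"
# (illustrative values; ordering follows dict iteration order)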
def ListGroups(opts, args):
"""List node groups and their properties.
@param opts: the command line options selected by the user
@type args: list
@param args: groups to list, or empty for all
@rtype: int
@return: the desired exit code
"""
desired_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
fmtoverride = {
"node_list": (",".join, False),
"pinst_list": (",".join, False),
"ndparams": (_FmtDict, False),
}
cl = GetClient(query=True)
return GenericList(constants.QR_GROUP, desired_fields, args, None,
opts.separator, not opts.no_headers,
format_override=fmtoverride, verbose=opts.verbose,
force_filter=opts.force_filter, cl=cl)
def ListGroupFields(opts, args):
"""List node fields.
@param opts: the command line options selected by the user
@type args: list
@param args: fields to list, or empty for all
@rtype: int
@return: the desired exit code
"""
cl = GetClient(query=True)
return GenericListFields(constants.QR_GROUP, args, opts.separator,
not opts.no_headers, cl=cl)
def SetGroupParams(opts, args):
"""Modifies a node group's parameters.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the node group name
@rtype: int
@return: the desired exit code
"""
  allmods = [opts.ndparams, opts.alloc_policy, opts.diskparams, opts.hv_state,
             opts.disk_state, opts.ispecs_mem_size, opts.ispecs_cpu_count,
             opts.ispecs_disk_count, opts.ispecs_disk_size,
             opts.ispecs_nic_count, opts.ipolicy_vcpu_ratio,
             opts.ipolicy_spindle_ratio]
if allmods.count(None) == len(allmods):
ToStderr("Please give at least one of the parameters.")
return 1
if opts.disk_state:
disk_state = utils.FlatToDict(opts.disk_state)
else:
disk_state = {}
hv_state = dict(opts.hv_state)
diskparams = dict(opts.diskparams)
# set the default values
to_ipolicy = [
opts.ispecs_mem_size,
opts.ispecs_cpu_count,
opts.ispecs_disk_count,
opts.ispecs_disk_size,
opts.ispecs_nic_count,
]
for ispec in to_ipolicy:
for param in ispec:
if isinstance(ispec[param], basestring):
if ispec[param].lower() == "default":
ispec[param] = constants.VALUE_DEFAULT
# create ipolicy object
ipolicy = CreateIPolicyFromOpts(
ispecs_mem_size=opts.ispecs_mem_size,
ispecs_cpu_count=opts.ispecs_cpu_count,
ispecs_disk_count=opts.ispecs_disk_count,
ispecs_disk_size=opts.ispecs_disk_size,
ispecs_nic_count=opts.ispecs_nic_count,
ipolicy_disk_templates=opts.ipolicy_disk_templates,
ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
group_ipolicy=True,
allowed_values=[constants.VALUE_DEFAULT])
op = opcodes.OpGroupSetParams(group_name=args[0],
ndparams=opts.ndparams,
alloc_policy=opts.alloc_policy,
hv_state=hv_state,
disk_state=disk_state,
diskparams=diskparams,
ipolicy=ipolicy)
result = SubmitOrSend(op, opts)
if result:
ToStdout("Modified node group %s", args[0])
for param, data in result:
ToStdout(" - %-5s -> %s", param, data)
return 0
def RemoveGroup(opts, args):
"""Remove a node group from the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: a list of length 1 with the name of the group to remove
@rtype: int
@return: the desired exit code
"""
(group_name,) = args
op = opcodes.OpGroupRemove(group_name=group_name)
SubmitOrSend(op, opts)
def RenameGroup(opts, args):
"""Rename a node group.
@param opts: the command line options selected by the user
@type args: list
@param args: a list of length 2, [old_name, new_name]
@rtype: int
@return: the desired exit code
"""
group_name, new_name = args
op = opcodes.OpGroupRename(group_name=group_name, new_name=new_name)
SubmitOrSend(op, opts)
def EvacuateGroup(opts, args):
"""Evacuate a node group.
"""
(group_name, ) = args
cl = GetClient()
op = opcodes.OpGroupEvacuate(group_name=group_name,
iallocator=opts.iallocator,
target_groups=opts.to,
early_release=opts.early_release)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("All instances evacuated successfully.")
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors during the evacuation.", bad_cnt)
rcode = constants.EXIT_FAILURE
return rcode
def _FormatDict(custom, actual, level=2):
"""Helper function to L{cli.FormatParameterDict}.
@param custom: The customized dict
@param actual: The fully filled dict
"""
buf = StringIO()
FormatParameterDict(buf, custom, actual, level=level)
return buf.getvalue().rstrip("\n")
def GroupInfo(_, args):
"""Shows info about node group.
"""
cl = GetClient(query=True)
selected_fields = ["name",
"ndparams", "custom_ndparams",
"diskparams", "custom_diskparams",
"ipolicy", "custom_ipolicy"]
result = cl.QueryGroups(names=args, fields=selected_fields,
use_locking=False)
for (name,
ndparams, custom_ndparams,
diskparams, custom_diskparams,
ipolicy, custom_ipolicy) in result:
ToStdout("Node group: %s" % name)
ToStdout(" Node parameters:")
ToStdout(_FormatDict(custom_ndparams, ndparams))
ToStdout(" Disk parameters:")
ToStdout(_FormatDict(custom_diskparams, diskparams))
ToStdout(" Instance policy:")
ToStdout(_FormatDict(custom_ipolicy, ipolicy))
commands = {
"add": (
AddGroup, ARGS_ONE_GROUP,
[DRY_RUN_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT, DISK_PARAMS_OPT,
HV_STATE_OPT, DISK_STATE_OPT, PRIORITY_OPT,
SUBMIT_OPT] + INSTANCE_POLICY_OPTS,
"<group_name>", "Add a new node group to the cluster"),
"assign-nodes": (
AssignNodes, ARGS_ONE_GROUP + ARGS_MANY_NODES,
[DRY_RUN_OPT, FORCE_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<group_name> <node>...", "Assign nodes to a group"),
"list": (
ListGroups, ARGS_MANY_GROUPS,
[NOHDR_OPT, SEP_OPT, FIELDS_OPT, VERBOSE_OPT, FORCE_FILTER_OPT],
"[<group_name>...]",
"Lists the node groups in the cluster. The available fields can be shown"
" using the \"list-fields\" command (see the man page for details)."
" The default list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS)),
"list-fields": (
ListGroupFields, [ArgUnknown()], [NOHDR_OPT, SEP_OPT], "[fields...]",
"Lists all available fields for node groups"),
"modify": (
SetGroupParams, ARGS_ONE_GROUP,
[DRY_RUN_OPT, SUBMIT_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT, HV_STATE_OPT,
DISK_STATE_OPT, DISK_PARAMS_OPT, PRIORITY_OPT] + INSTANCE_POLICY_OPTS,
"<group_name>", "Alters the parameters of a node group"),
"remove": (
RemoveGroup, ARGS_ONE_GROUP, [DRY_RUN_OPT, PRIORITY_OPT, SUBMIT_OPT],
"[--dry-run] <group-name>",
"Remove an (empty) node group from the cluster"),
"rename": (
RenameGroup, [ArgGroup(min=2, max=2)],
[DRY_RUN_OPT, SUBMIT_OPT, PRIORITY_OPT],
"[--dry-run] <group-name> <new-name>", "Rename a node group"),
"evacuate": (
EvacuateGroup, [ArgGroup(min=1, max=1)],
[TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, SUBMIT_OPT, PRIORITY_OPT],
"[-I <iallocator>] [--to <group>]",
"Evacuate all instances within a group"),
"list-tags": (
ListTags, ARGS_ONE_GROUP, [],
"<group_name>", "List the tags of the given group"),
"add-tags": (
AddTags, [ArgGroup(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<group_name> tag...", "Add tags to the given group"),
"remove-tags": (
RemoveTags, [ArgGroup(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<group_name> tag...", "Remove tags from the given group"),
"info": (
GroupInfo, ARGS_MANY_GROUPS, [], "<group_name>", "Show group information"),
}
def Main():
return GenericMain(commands,
override={"tag_type": constants.TAG_NODEGROUP},
env_override=_ENV_OVERRIDE)
|
jnewland/home-assistant
|
refs/heads/ci
|
tests/components/config/test_init.py
|
8
|
"""Test config init."""
import asyncio
from unittest.mock import patch
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.setup import async_setup_component, ATTR_COMPONENT
from homeassistant.components import config
from tests.common import mock_coro, mock_component
@asyncio.coroutine
def test_config_setup(hass, loop):
"""Test it sets up hassbian."""
yield from async_setup_component(hass, 'config', {})
assert 'config' in hass.config.components
@asyncio.coroutine
def test_load_on_demand_already_loaded(hass, aiohttp_client):
"""Test getting suites."""
mock_component(hass, 'zwave')
with patch.object(config, 'SECTIONS', []), \
patch.object(config, 'ON_DEMAND', ['zwave']), \
patch('homeassistant.components.config.zwave.async_setup') as stp:
stp.return_value = mock_coro(True)
yield from async_setup_component(hass, 'config', {})
yield from hass.async_block_till_done()
assert stp.called
@asyncio.coroutine
def test_load_on_demand_on_load(hass, aiohttp_client):
"""Test getting suites."""
with patch.object(config, 'SECTIONS', []), \
patch.object(config, 'ON_DEMAND', ['zwave']):
yield from async_setup_component(hass, 'config', {})
assert 'config.zwave' not in hass.config.components
with patch('homeassistant.components.config.zwave.async_setup') as stp:
stp.return_value = mock_coro(True)
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: 'zwave'})
yield from hass.async_block_till_done()
assert stp.called
|
CARocha/ciat_plataforma
|
refs/heads/master
|
mapeo/migrations/0002_auto__chg_field_organizaciones_departamento__chg_field_organizaciones_.py
|
3
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Organizaciones.departamento'
db.alter_column(u'mapeo_organizaciones', 'departamento_id', self.gf('smart_selects.db_fields.ChainedForeignKey')(to=orm['lugar.Departamento'], null=True))
# Changing field 'Organizaciones.siglas'
db.alter_column(u'mapeo_organizaciones', 'siglas', self.gf('django.db.models.fields.CharField')(default=2, max_length=200))
# Changing field 'Organizaciones.municipio'
db.alter_column(u'mapeo_organizaciones', 'municipio_id', self.gf('smart_selects.db_fields.ChainedForeignKey')(to=orm['lugar.Municipio'], null=True))
def backwards(self, orm):
# Changing field 'Organizaciones.departamento'
db.alter_column(u'mapeo_organizaciones', 'departamento_id', self.gf('smart_selects.db_fields.ChainedForeignKey')(default=2, to=orm['lugar.Departamento']))
# Changing field 'Organizaciones.siglas'
db.alter_column(u'mapeo_organizaciones', 'siglas', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Changing field 'Organizaciones.municipio'
db.alter_column(u'mapeo_organizaciones', 'municipio_id', self.gf('smart_selects.db_fields.ChainedForeignKey')(default=2, to=orm['lugar.Municipio']))
models = {
u'configuracion.areaaccion': {
'Meta': {'object_name': 'AreaAccion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'configuracion.plataforma': {
'Meta': {'object_name': 'Plataforma'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'sitio_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.SitioAccion']"})
},
u'configuracion.sector': {
'Meta': {'object_name': 'Sector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'configuracion.sitioaccion': {
'Meta': {'object_name': 'SitioAccion'},
'area_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.AreaAccion']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'lugar.comunidad': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Departamento'},
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.municipio': {
'Meta': {'ordering': "['departamento__nombre', 'nombre']", 'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.pais': {
'Meta': {'object_name': 'Pais'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'mapeo.organizaciones': {
'Meta': {'ordering': "[u'nombre']", 'unique_together': "((u'font_color', u'nombre'),)", 'object_name': 'Organizaciones'},
'area_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.AreaAccion']"}),
'contacto': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'correo_electronico': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']", 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'font_color': ('mapeo.models.ColorField', [], {'unique': 'True', 'max_length': '10', 'blank': 'True'}),
'fundacion': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'generalidades': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']", 'null': 'True', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'plataforma': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Plataforma']"}),
'rss': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Sector']"}),
'siglas': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sitio_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.SitioAccion']"}),
'sitio_web': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'temas': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'})
},
u'mapeo.persona': {
'Meta': {'object_name': 'Persona'},
'cedula': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'edad': ('django.db.models.fields.IntegerField', [], {}),
'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nivel_educacion': ('django.db.models.fields.IntegerField', [], {}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizacion': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'org'", 'symmetrical': 'False', 'to': u"orm['mapeo.Organizaciones']"}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'sexo': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['mapeo']
|
mdworks2016/work_development
|
refs/heads/master
|
Python/20_Third_Certification/venv/lib/python3.7/site-packages/billiard/__init__.py
|
1
|
"""Python multiprocessing fork with improvements and bugfixes"""
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import sys
from . import context
VERSION = (3, 6, 3, 0)
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = 'python-dev@python.org'
__maintainer__ = 'Asif Saif Uddin'
__contact__ = "auvipy@gmail.com"
__homepage__ = "https://github.com/celery/billiard"
__docformat__ = "restructuredtext"
# -eof meta-
#
# Copy stuff from default context
#
globals().update((name, getattr(context._default_context, name))
for name in context._default_context.__all__)
__all__ = context._default_context.__all__
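# After the update above the package re-exports the default context's public
# API at top level, so e.g. billiard.Process, billiard.Pool and billiard.Queue
# resolve to the context-backed classes.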
#
# XXX These should not really be documented or public.
#
SUBDEBUG = 5
SUBWARNING = 25
#
# Alias for main module -- will be reset by bootstrapping child processes
#
if '__main__' in sys.modules:
sys.modules['__mp_main__'] = sys.modules['__main__']
def ensure_multiprocessing():
from ._ext import ensure_multiprocessing
return ensure_multiprocessing()
|