repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
qrsforever/workspace | python/learn/base/atexit/autorm.py | Python | mit | 190 | 0.015789 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import atexit
import time
import os
def cleanup(fi | le):
print(file)
atexit.register(cleanup, "/tmp/a.txt")
time.sleep(100)
os.waitpi | d(0,0)
|
Shaswat27/sympy | sympy/stats/tests/test_finite_rv.py | Python | bsd-3-clause | 8,667 | 0.001961 | from sympy.core.compatibility import range
from sympy import (FiniteSet, S, Symbol, sqrt,
symbols, simplify, Eq, cos, And, Tuple, Or, Dict, sympify, binomial,
cancel, KroneckerDelta)
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.matrices import Matrix
from sympy.stats import (DiscreteUniform, Die, Bernoulli, Coin, Binomial,
Hypergeometric, Rademacher, P, E, variance, covariance, skewness, sample,
density, where, FiniteRV, pspace, cdf,
correlation, moment, cmoment, smoment)
from sympy.stats.frv_types import DieDistribution
from sympy.utilities.pytest import raises, slow
from sympy.abc import p, x, i
oo = S.Infinity
def BayesTest(A, B):
assert P(A, B) == P(And(A, B)) / P(B)
assert P(A, B) == P(B, A) * P(A) / P(B)
def test_discreteuniform():
# Symbolic
a, b, c = symbols('a b c')
X = DiscreteUniform('X', [a, b, c])
assert E(X) == (a + b + c)/3
assert simplify(variance(X)
- ((a**2 + b**2 + c**2)/3 - (a/3 + b/3 + c/3)**2)) == 0
assert P(Eq(X, a)) == P(Eq(X, b)) == P(Eq(X, c)) == S('1/3')
Y = DiscreteUniform('Y', range(-5, 5))
# Numeric
assert E(Y) == S('-1/2')
assert variance(Y) == S('33/4')
for x in range(-5, 5):
assert P(Eq(Y, x)) == S('1/10')
assert P(Y <= x) == S(x + 6)/10
assert P(Y >= x) == S(5 - x)/10
assert dict(density(Die('D', 6)).items()) == \
dict(density(DiscreteUniform('U', range(1, 7))).items())
def test_dice():
# TODO: Make iid method!
X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
a, b = symbols('a b')
assert E(X) == 3 + S.Half
assert variance(X) == S(35)/12
assert E(X + Y) == 7
assert E(X + X) == 7
assert E(a*X + b) == a*E(X) + b
assert variance(X + Y) == variance(X) + variance(Y) == cmoment(X + Y, 2)
assert variance(X + X) == 4 * variance(X) == cmoment(X + X, 2)
assert cmoment(X, 0) == 1
assert cmoment(4*X, 3) == 64*cmoment(X, 3)
assert covariance(X, Y) == S.Zero
assert covariance(X, X + Y) == variance(X)
assert density(Eq(cos(X*S.Pi), 1))[True] == S.Half
assert correlation(X, Y) == 0
assert correlation(X, Y) == correlation(Y, X)
assert smoment(X + Y, 3) == skewness(X + Y)
assert smoment(X, 0) == 1
assert P(X > 3) == S.Half
assert P(2*X > 6) == S.Half
assert P(X > Y) == S(5)/12
assert P(Eq(X, Y)) == P(Eq(X, 1))
assert E(X, X > 3) == 5 == moment(X, 1, 0, X > 3)
assert E(X, Y > 3) == E(X) == moment(X, 1, 0, Y > 3)
assert E(X + Y, Eq(X, Y)) == E(2*X)
assert moment(X, 0) == 1
assert moment(5*X, 2) == 25*moment(X, 2)
assert P(X > 3, X > 3) == S.One
assert P(X > Y, Eq(Y, 6)) == S.Zero
assert P(Eq(X + Y, 12)) == S.One/36
assert P(Eq(X + Y, 12), Eq(X, 6)) == S.One/6
assert density(X + Y) == density(Y + Z) != density(X + X)
d = density(2*X + Y**Z)
assert d[S(22)] == S.One/108 and d[S(4100)] == S.One/216 and S(3130) not in d
assert pspace(X).domain.as_boolean() == Or(
*[Eq(X.symbol, i) for i in [1, 2, 3, 4, 5, 6]])
assert where(X > 3).set == FiniteSet(4, 5, 6)
def test_given():
X = Die('X', 6)
assert density(X, X > 5) == {S(6): S(1)}
assert where(X > 2, X > 5).as_boolean() == Eq(X.symbol, 6)
assert sample(X, X > 5) == 6
def test_domains():
X, Y = Die('x', 6), Die('y', 6)
x, | y = X.symbol, Y.symbol
# Domains
d = where(X > Y)
assert d.condition == (x > y)
d = where(And(X > Y, Y > 3))
assert d.as_boolean() == Or(And(Eq(x, 5), Eq(y, 4)), And(Eq(x, 6),
Eq(y, 5)), And(Eq(x, 6), Eq(y, 4)))
assert len(d.elements) == 3
assert len(pspace(X + Y).domain.elements) == 36
Z = Die('x', 4)
raises(ValueError, lambda: P(X > Z)) # Two domains with same internal symbol
assert pspace(X + Y).domain.set | == FiniteSet(1, 2, 3, 4, 5, 6)**2
assert where(X > 3).set == FiniteSet(4, 5, 6)
assert X.pspace.domain.dict == FiniteSet(
*[Dict({X.symbol: i}) for i in range(1, 7)])
assert where(X > Y).dict == FiniteSet(*[Dict({X.symbol: i, Y.symbol: j})
for i in range(1, 7) for j in range(1, 7) if i > j])
def test_dice_bayes():
X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
BayesTest(X > 3, X + Y < 5)
BayesTest(Eq(X - Y, Z), Z > Y)
BayesTest(X > 3, X > 2)
def test_die_args():
raises(ValueError, lambda: Die('X', -1)) # issue 8105: negative sides.
raises(ValueError, lambda: Die('X', 0))
raises(ValueError, lambda: Die('X', 1.5)) # issue 8103: non integer sides.
k = Symbol('k')
sym_die = Die('X', k)
raises(ValueError, lambda: density(sym_die).dict)
def test_bernoulli():
p, a, b = symbols('p a b')
X = Bernoulli('B', p, a, b)
assert E(X) == a*p + b*(-p + 1)
assert density(X)[a] == p
assert density(X)[b] == 1 - p
X = Bernoulli('B', p, 1, 0)
assert E(X) == p
assert simplify(variance(X)) == p*(1 - p)
assert E(a*X + b) == a*E(X) + b
assert simplify(variance(a*X + b)) == simplify(a**2 * variance(X))
def test_cdf():
D = Die('D', 6)
o = S.One
assert cdf(
D) == sympify({1: o/6, 2: o/3, 3: o/2, 4: 2*o/3, 5: 5*o/6, 6: o})
def test_coins():
C, D = Coin('C'), Coin('D')
H, T = symbols('H, T')
assert P(Eq(C, D)) == S.Half
assert density(Tuple(C, D)) == {(H, H): S.One/4, (H, T): S.One/4,
(T, H): S.One/4, (T, T): S.One/4}
assert dict(density(C).items()) == {H: S.Half, T: S.Half}
F = Coin('F', S.One/10)
assert P(Eq(F, H)) == S(1)/10
d = pspace(C).domain
assert d.as_boolean() == Or(Eq(C.symbol, H), Eq(C.symbol, T))
raises(ValueError, lambda: P(C > D)) # Can't intelligently compare H to T
def test_binomial_verify_parameters():
raises(ValueError, lambda: Binomial('b', .2, .5))
raises(ValueError, lambda: Binomial('b', 3, 1.5))
def test_binomial_numeric():
nvals = range(5)
pvals = [0, S(1)/4, S.Half, S(3)/4, 1]
for n in nvals:
for p in pvals:
X = Binomial('X', n, p)
assert E(X) == n*p
assert variance(X) == n*p*(1 - p)
if n > 0 and 0 < p < 1:
assert skewness(X) == (1 - 2*p)/sqrt(n*p*(1 - p))
for k in range(n + 1):
assert P(Eq(X, k)) == binomial(n, k)*p**k*(1 - p)**(n - k)
@slow
def test_binomial_symbolic():
n = 10 # Because we're using for loops, can't do symbolic n
p = symbols('p', positive=True)
X = Binomial('X', n, p)
assert simplify(E(X)) == n*p == simplify(moment(X, 1))
assert simplify(variance(X)) == n*p*(1 - p) == simplify(cmoment(X, 2))
assert cancel((skewness(X) - (1-2*p)/sqrt(n*p*(1-p)))) == 0
# Test ability to change success/failure winnings
H, T = symbols('H T')
Y = Binomial('Y', n, p, succ=H, fail=T)
assert simplify(E(Y) - (n*(H*p + T*(1 - p)))) == 0
def test_hypergeometric_numeric():
for N in range(1, 5):
for m in range(0, N + 1):
for n in range(1, N + 1):
X = Hypergeometric('X', N, m, n)
N, m, n = map(sympify, (N, m, n))
assert sum(density(X).values()) == 1
assert E(X) == n * m / N
if N > 1:
assert variance(X) == n*(m/N)*(N - m)/N*(N - n)/(N - 1)
# Only test for skewness when defined
if N > 2 and 0 < m < N and n < N:
assert skewness(X) == simplify((N - 2*m)*sqrt(N - 1)*(N - 2*n)
/ (sqrt(n*m*(N - m)*(N - n))*(N - 2)))
def test_rademacher():
X = Rademacher('X')
assert E(X) == 0
assert variance(X) == 1
assert density(X)[-1] == S.Half
assert density(X)[1] == S.Half
def test_FiniteRV():
F = FiniteRV('F', {1: S.Half, 2: S.One/4, 3: S.One/4})
assert dict(density(F).items()) == {S(1): S.Half, S(2): S.One/4, S(3): S.One/4}
assert P(F >= 2) == S.Half
assert pspace(F).domain.as_boolean() == Or(
*[Eq(F.symbol, i) for i in [1, 2, 3]])
def test_density_call():
x = Bernoulli('x', p)
d = density(x)
assert d(0) == 1 - p
assert d(S.Zero) == 1 - p
|
jiaphuan/models | research/object_detection/utils/np_mask_ops.py | Python | apache-2.0 | 4,214 | 0.005458 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, height, width] numpy arrays representing masks.
Example mask operations that are supported:
* Areas: compute mask areas
* IOU: pairwise intersection-over-union scores
"""
import numpy as np
EPSILON = 1e-7
def area(masks):
"""Computes area of masks.
Args:
masks: Numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N*1] representing mask areas.
Raises:
ValueError: If masks.dtype is not np.uint8
"""
if masks.dtype != np.uint8:
raise ValueError('Masks type should be np.uint8')
return np.sum(masks, axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N*M] representing pairwise intersection area.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)
return answer
def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmet | ric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
| Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
|
pombredanne/zero-install | zeroinstall/0launch-gui/browser.py | Python | lgpl-2.1 | 419 | 0.031026 | # Copyright (C) 2009, Thomas Leonard
# See | the README file for details | , or visit http://0install.net.
import os, sys
def open_in_browser(link):
browser = os.environ.get('BROWSER', 'firefox')
child = os.fork()
if child == 0:
# We are the child
try:
os.spawnlp(os.P_NOWAIT, browser, browser, link)
os._exit(0)
except Exception, ex:
print >>sys.stderr, "Error", ex
os._exit(1)
os.waitpid(child, 0)
|
RagtagOpen/bidwire | bidwire/alembic/versions/b73811be5f44_add_all_basic_bid_fields.py | Python | mit | 1,068 | 0 | """add all basic bid fields
Revision ID: b73811be5f44
Revises: 72beaff4cf57
Create Date: 2017-04-03 00:53:50.692376
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b73811be5f44'
down_revision = '72beaff4cf57'
branch_label | s = None
depends_on = None
def upgrade():
op.add_column('bids', sa.Column('description', sa.Text))
op.add_column('bids', sa.Column('department', sa.Text))
op.add_column('bids', sa.Column('organization', sa.Text))
op.add_column('bids', sa.Column('location', sa.Text))
op.add_column('bids', sa.Column('open_date', sa.DateTime))
op.add_column('bids', sa.Column('items', sa.JSON))
def downgrade():
op.drop_column('bids', sa.Column('description', sa.Text))
op | .drop_column('bids', sa.Column('department', sa.Text))
op.drop_column('bids', sa.Column('organization', sa.Text))
op.drop_column('bids', sa.Column('location', sa.Text))
op.drop_column('bids', sa.Column('open_date', sa.DateTime))
op.drop_column('bids', sa.Column('items', sa.JSON))
|
fiete201/qutebrowser | qutebrowser/__init__.py | Python | gpl-3.0 | 1,276 | 0 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along | with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""A | keyboard-driven, vim-like browser based on PyQt5."""
import os.path
__author__ = "Florian Bruhin"
__copyright__ = "Copyright 2014-2021 Florian Bruhin (The Compiler)"
__license__ = "GPL"
__maintainer__ = __author__
__email__ = "mail@qutebrowser.org"
__version__ = "2.0.0"
__version_info__ = tuple(int(part) for part in __version__.split('.'))
__description__ = "A keyboard-driven, vim-like browser based on PyQt5."
basedir = os.path.dirname(os.path.realpath(__file__))
|
HiSPARC/station-software | user/python/Lib/test/test_int.py | Python | gpl-3.0 | 20,013 | 0.000999 | import sys
import unittest
from test import test_support
from test.test_support import run_unittest, have_unicode
import math
L = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxint), sys.maxint),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError)
]
if have_unicode:
L += [
(unicode('0'), 0),
(unicode('1'), 1),
(unicode('9'), 9),
(unicode('10'), 10),
(unicode('99'), 99),
(unicode('100'), 100),
(unicode('314'), 314),
(unicode(' 314'), 314),
(unicode('\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(unicode(' \t\t 314 \t\t '), 314),
(unicode(' 1x'), ValueError),
(unicode(' 1 '), 1),
(unicode(' 1\02 '), ValueError),
(unicode(''), ValueError),
(unicode(' '), ValueError),
(unicode(' \t\t '), ValueError),
(unichr(0x200), ValueError),
]
class IntSubclass(int):
pass
class IntLongCommonTests(object):
"""Mixin of test cases to share between both test_int and test_long."""
# Change to int or long in the TestCase subclass.
ntype = None
def test_no_args(self):
self.assertEqual(self.ntype(), 0)
def test_keyword_args(self):
# Test invoking constructor using keyword arguments.
self.assertEqual(self.ntype(x=1.2), 1)
self.assertEqual(self.ntype('100', base=2), 4)
self.assertEqual(self.ntype(x='100', base=2), 4)
self.assertRaises(TypeError, self.ntype, base=10)
self.assertRaises(TypeError, self.ntype, base=0)
class IntTestCases(IntLongCommonTests, unittest.TestCase):
ntype = int
def test_basic(self):
self.assertEqual(int(314), 314)
self.assertEqual(int(3.14), 3)
self.assertEqual(int(314L), 314)
# Check that conversion from float truncates towards zero
self.assertEqual(int(-3.14), -3)
self.assertEqual(int(3.9), 3)
self.assertEqual(int(-3.9), -3)
self.assertEqual(int(3.5), 3)
self.assertEqual(int(-3.5), -3)
# Different base:
self.assertEqual(int("10",16), 16L)
if have_unicode:
self.assertEqual(int(unicode("10"),16), 16L)
# Test conversion from strings and various anomalies
for s, v in L:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), vv)
except v:
pass
s = repr(-1-sys.maxint)
x = int(s)
self.assertEqual(x+1, -sys.maxint)
self.assertIsInstance(x, int)
# should return long
self.assertEqual(int(s[1:]), sys.maxint+1)
# should return long
x = int(1e100)
self.assertIsInstance(x, lon | g)
x = int(-1e100)
self.assertIsInstance(x, long)
# SF bug 434186: 0x80000000/2 != 0x80000000>>1.
# Worked by accident in Windows release build, but failed in debug build.
# Failed in all Linux builds.
x = -1-sys.maxint
self.assertEqual(x >> 1, x//2)
self.assertRaises(ValueError, int, '123\0')
| self.assertRaises(ValueError, int, '53', 40)
# SF bug 1545497: embedded NULs were not detected with
# explicit base
self.assertRaises(ValueError, int, '123\0', 10)
self.assertRaises(ValueError, int, '123\x00 245', 20)
x = int('1' * 600)
self.assertIsInstance(x, long)
if have_unicode:
x = int(unichr(0x661) * 600)
self.assertIsInstance(x, long)
self.assertRaises(TypeError, int, 1, 12)
self.assertEqual(int('0123', 0), 83)
self.assertEqual(int('0x123', 16), 291)
# Bug 1679: "0x" is not a valid hex literal
self.assertRaises(ValueError, int, "0x", 16)
self.assertRaises(ValueError, int, "0x", 0)
self.assertRaises(ValueError, int, "0o", 8)
self.assertRaises(ValueError, int, "0o", 0)
self.assertRaises(ValueError, int, "0b", 2)
self.assertRaises(ValueError, int, "0b", 0)
# SF bug 1334662: int(string, base) wrong answers
# Various representations of 2**32 evaluated to 0
# rather than 2**32 in previous versions
self.assertEqual(int('100000000000000000000000000000000', 2), 4294967296L)
self.assertEqual(int('102002022201221111211', 3), 4294967296L)
self.assertEqual(int('10000000000000000', 4), 4294967296L)
self.assertEqual(int('32244002423141', 5), 4294967296L)
self.assertEqual(int('1550104015504', 6), 4294967296L)
self.assertEqual(int('211301422354', 7), 4294967296L)
self.assertEqual(int('40000000000', 8), 4294967296L)
self.assertEqual(int('12068657454', 9), 4294967296L)
self.assertEqual(int('4294967296', 10), 4294967296L)
self.assertEqual(int('1904440554', 11), 4294967296L)
self.assertEqual(int('9ba461594', 12), 4294967296L)
self.assertEqual(int('535a79889', 13), 4294967296L)
self.assertEqual(int('2ca5b7464', 14), 4294967296L)
self.assertEqual(int('1a20dcd81', 15), 4294967296L)
self.assertEqual(int('100000000', 16), 4294967296L)
self.assertEqual(int('a7ffda91', 17), 4294967296L)
self.assertEqual(int('704he7g4', 18), 4294967296L)
self.assertEqual(int('4f5aff66', 19), 4294967296L)
self.assertEqual(int('3723ai4g', 20), 4294967296L)
self.assertEqual(int('281d55i4', 21), 4294967296L)
self.assertEqual(int('1fj8b184', 22), 4294967296L)
self.assertEqual(int('1606k7ic', 23), 4294967296L)
self.assertEqual(int('mb994ag', 24), 4294967296L)
self.assertEqual(int('hek2mgl', 25), 4294967296L)
self.assertEqual(int('dnchbnm', 26), 4294967296L)
self.assertEqual(int('b28jpdm', 27), 4294967296L)
self.assertEqual(int('8pfgih4', 28), 4294967296L)
self.assertEqual(int('76beigg', 29), 4294967296L)
self.assertEqual(int('5qmcpqg', 30), 4294967296L)
self.assertEqual(int('4q0jto4', 31), 4294967296L)
self.assertEqual(int('4000000', 32), 4294967296L)
self.assertEqual(int('3aokq94', 33), 4294967296L)
self.assertEqual(int('2qhxjli', 34), 4294967296L)
self.assertEqual(int('2br45qb', 35), 4294967296L)
self.assertEqual(int('1z141z4', 36), 4294967296L)
# tests with base 0
# this fails on 3.0, but in 2.x the old octal syntax is allowed
self.assertEqual(int(' 0123 ', 0), 83)
self.assertEqual(int(' 0123 ', 0), 83)
self.assertEqual(int('000', 0), 0)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 0), 291)
self.assertEqual(int('0b100', 0), 4)
self.assertEqual(int(' 0O123 ', 0), 83)
self.assertEqual(int(' 0X123 ', 0), 291)
self.assertEqual(int(' 0B100 ', 0), 4)
self.assertEqual(int('0', 0), 0)
self.assertEqual(int('+0', 0), 0)
self.assertEqual(int('-0', 0), 0)
self.assertEqual(int('00', 0), 0)
self.assertRaises(ValueError, int, '08', 0)
self.assertRaises(ValueError, int, '-012395', 0)
# without base still base 10
self.assertEqual(int('0123'), 123)
self.assertEqual(int('0123', 10), 123)
# tests with prefix and base != 0
self.assertEqual(int('0x123', 16), 291)
self.assertEqual(int('0o123', 8), 83)
self.assertEqual(int('0b100', 2), 4)
self.assertEqual(int('0X123', 16), 291)
self.assertEqual(int('0O123', 8), 83)
self.assertEqual(int('0B100', 2), 4)
# the code has special checks f |
robinandeer/puzzle | puzzle/plugins/gemini/__init__.py | Python | mit | 78 | 0.012821 | f | rom .mixins import (CaseMixin, VariantMixin)
from .plugin import GeminiPlug | in |
Jaaga/mooc-tracker | web/mooctracker/mooctracker/settings.py | Python | mit | 3,176 | 0.001889 | """
Django settings for mooctracker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h7eiqg(&tly=ollt*axnw!7f8k(zc6y0u-)%$t$y78+ilwjy)('
# SECU | RITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# installed apps
'rest_framework',
'social_auth',
# our own apps
'api',
'students',
'pages',
| 'courses',
'projects',
'academics',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mooctracker.urls'
WSGI_APPLICATION = 'mooctracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.google.GoogleOAuthBackend',
'social_auth.backends.google.GoogleOAuth2Backend',
'social_auth.backends.google.GoogleBackend',
'social_auth.backends.contrib.readability.ReadabilityBackend',
'social_auth.backends.contrib.fedora.FedoraBackend',
'social_auth.backends.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
GOOGLE_OAUTH2_CLIENT_ID = '325221067434-uh0hp886c498dnda8b9u2dep54gdmr1c'
GOOGLE_OAUTH2_CLIENT_SECRET = 'F4gVKxkp9fw6foZreLfnpGO6'
LOGIN_URL = '/login-form/'
LOGIN_REDIRECT_URL = '/api/students/'
LOGIN_ERROR_URL = '/login-error/'
SOCIAL_AUTH_DEFAULT_USERNAME = 'new_social_auth_user'
SOCIAL_AUTH_UID_LENGTH = 16
SOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH = 16
SOCIAL_AUTH_NONCE_SERVER_URL_LENGTH = 16
SOCIAL_AUTH_ASSOCIATION_SERVER_URL_LENGTH = 16
SOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH = 16
SOCIAL_AUTH_ENABLED_BACKENDS = ('google') |
spotify/pyeos | test/unit/config.py | Python | apache-2.0 | 730 | 0 | # Copyright 2014 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy | of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is | distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
hostname = '192.168.76.10'
username = 'dbarroso'
password = 'this_is_not_a_secure_password'
use_ssl = True
|
previtus/MGR-Project-Code | Settings/set2_dataset-expansion/expand_dataset_forserver_minlen30_kfold_forcomparison.py | Python | mit | 4,101 | 0.014874 | def Setup(Settings,DefaultModel):
# set2_for_results/expand_dataset_forserver_minlen30_kfold_forcomparison.py
Settings["experiment_name"] = "ExpandDataset_5556x_minlen30_640px_kfold10_originalComparison"
Settings["graph_histories"] = ['together']
n=0
from keras.preprocessing.image import ImageDataGenerator
from DatasetHandler.custom_image import ImageDataGenerator as custom_ImageDataGenerator
image_generator = custom_ImageDataGenerator(
featurewise_center = False, # set input mean to 0 over the dataset
samplewise_center = False, # set each sample mean to 0
featurewise_std_normalization = False, # divide inputs by std of the dataset
samplewise_std_normalization = False, # divide each input by its std
zca_whitening = False, # apply ZCA whitening
rotation_range = 0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range = 0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range = 0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip = True, # randomly flip images
vertical_flip = False, # randomly flip images
)
defaults = custom_ImageDataGenerator(
featurewise_center=False, # Set input mean to 0 over the dataset, feature-wise.
samplewise_center=False, # Set each sample mean to 0.
featurewise_std_normalization=False, # Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization=False, # Divide each input by its std.
zca_whitening=False, # Apply ZCA whitening.
rotation_range=0., # Degree range for random rotations.
width_shift_range=0., # Range for random horizontal shifts (fraction of total width).
height_shift_range=0., # Range for random vertical shifts (fraction of total height).
s | hear_range=0., # Shear Intensity (Shear angle in counter-clockwise direction as radians)
zoom_range=0., # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
channel_shift_range=0., # Range for random channel shifts.
fill_mode='nearest', # One of {"constant", "nearest", "reflect" or | "wrap"}. Points outside the boundaries of the input are filled according to the given mode.
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
horizontal_flip=False, # Randomly flip inputs horizontally.
vertical_flip=False, # Randomly flip inputs vertically.
rescale=None, # rescaling factor. Defaults to None. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation).
preprocessing_function=None,
# function that will be implied on each input. The function will run before any other modification on it.
# The function should take one argument: one image (Numpy tensor with rank 3), and should output a Numpy tensor with the same shape.
)
# Set these values:
number_of_images_from_one = 2
source_dataset = "5556x_minlen30_640px"
target_dataset = "5556x_minlen30_640px_2x_expanded"
pixels = 640
epochs = 500
use_dump_file = 'SegmentsData_marked_R100_4Tables.dump' # -> * new XYZ_expanded.dump
model_type = 'img_osm_mix'
Settings["models"][n]["model_type"] = model_type
Settings["models"][n]["dataset_name"] = source_dataset
Settings["models"][n]["pixels"] = pixels
Settings["models"][n]["cnn_model"] = 'resnet50'
Settings["models"][n]["unique_id"] = 'original: ' + source_dataset
Settings["models"][n]["cooking_method"] = 'generators' # 'direct' or 'generators'
Settings["models"][n]["epochs"] = epochs
Settings["models"][n]["dump_file_override"] = use_dump_file
Settings["models"][n]["k_fold_crossvalidation"] = True
Settings["models"][n]["crossvalidation_k"] = 10
Settings["graph_histories"] = []
return Settings
|
tiangolo/fastapi | docs_src/query_params/tutorial006b.py | Python | mit | 301 | 0 | from typing import Union |
from fastapi import FastAPI
app = FastAPI()
@app.get("/items/{item_id | }")
async def read_user_item(
item_id: str, needy: str, skip: int = 0, limit: Union[int, None] = None
):
item = {"item_id": item_id, "needy": needy, "skip": skip, "limit": limit}
return item
|
TheOtherOtherOperation/vdbsetup | vdbsetup.py | Python | mit | 34,423 | 0.003166 | #!/usr/bin/env python3
#
# vdbsetup.py - script for building Vdbench configurations
#
# Author: Ramon A. Lovato (ramonalovato.com)
# For: DeepStorage, LLC (deepstorage.net)
#
import argparse
import os.path
import os
import re
import statistics
import textwrap
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pylab
from collections import OrderedDict
DEFAULT_MAJOR_DELIMITER = " *= *"
DEFAULT_MINOR_DELIMITER = " *, *"
DEFAULT_MONTE_CARLO_SAMPLES = 200000
DEFAULT_SAMPLE_SCALE = 10000
MAX_RANGE_RETRIES = 10
INPUT_TEMPLATE_CONTENT = """#
# vdbsetup input file example
#
#
# General
#
dedupratio=2
dedupunit=4k
compratio=1.5
#
# SDs
#
luns=lun1,lun2,lun3
# Optional: o_direct provided by default
# openflags=
#
# WDs
#
wdcount=1
xfersize=4k
seekpct=100
rdpct=75
percentdisk=100.0
#
# RDs
#
iorate=1000
format=yes
elapsed=60
interval=1
threads=2
#
# Distribution
#
hotspotnum=10
hotspotcap=25
hotspotiopct=10
disttype=gaussian
# Note: only required if disttype=gaussian
distribution=0.75,0.5
"""
#
# Helper dictionaries for single-entry input parsing.
#
# Dictionary of validation lambdas.
validators = {
"dedupratio": lambda v: float(v) >= 0,
"compratio": lambda v: float(v) >= 0,
"wdcount": lambda v: float(v) > 0,
"seekpct": lambda v: 0 <= float(v) <= 100,
"rdpct": lambda v: 0 <= float(v) <= 100,
"percentdisk": lambda v: 0 <= float(v) <= 100,
"iorate": lambda v: float(v) > 0,
"format": lambda v: re.match("(yes)|(no)", v.lower()),
"threads": lambda v: int(v) > 0,
"elapsed": lambda v: int(v) > 0,
"interval": lambda v: int(v) > 0,
"hotspotnum": lambda v: int(v) >= 0,
"hotspotcap": lambda v: 0 <= float(v) <= 100,
"hotspotiopct": lambda v: 0 <= float(v) <= 100,
"disttype": lambda v: re.match("(even)|(gaussian)|(uniform)", v.lower())
}
# Dictionary of processing lambdas.
processors = {
"dedupratio": lambda v: float(v),
"compratio": lambda v: float(v),
"wdcount": lambda v: int(v),
"seekpct": lambda v: float(v),
"rdpct": lambda v: float(v),
"percentdisk": lambda v: float(v),
"iorate": lambda v: float(v),
"format": lambda v: v.lower(),
"threads": lambda v: int(v),
"elapsed": lambda v: int(v),
"interval": lambda v: int(v),
"hotspotnum": lambda v: int(v),
"hotspotcap": lambda v: float(v),
"hotspotiopct": lambda v: float(v),
"disttype": lambda v: v.lower()
}
# Dictionary of custom usage messages.
messages = {
"dedupratio": 'Key "dedupratio" requires nonnegative value.',
"compratio": 'Key "compratio" requires nonnegative value.',
"wdcount": 'Key "wdcount" requires positive integer value.',
"seekpct": 'Key "seekpct" requires percentage in range [0, 100].',
"rdpct": 'Key "rdpct" requires percentage in range [0, 100].',
"percentdisk": 'Key "percentdisk" requires single percentage in range [0, 100].',
"iorate": 'Key "iorate" requires positive IOPS value.',
"format": 'Key "format" must be one of "yes", "no".',
"threads": 'Key "threads" requires positive integer queue depth.',
"elapsed": 'Key "elapsed" requires positive integer number of seconds.',
"interval": 'Key "interval" requires positive integer number of seconds.',
"hotspotnum": 'Key "hotspotnum" requires nonnegative integer number of hotspots.',
"hotspotcap": 'Key "hotspotcap" requires percentage in range [0, 100].',
"hotspotiopct": 'Key "hotspotiopct" requires percentage in range [0, 100].',
"disttype": 'Key "disttype" must be one of "even", "gaussian", "uniform".'
}
multiValidators = {
"luns": lambda v: len(v) > 0,
"openflags": lambda v: len(v) > 0,
"distribution": lambda v:
config["disttype"] == "gaussian" and
len(v) == 2 and
len(list(filter(lambda w: float(w) >= 0, v))) == 2
}
multiProcessors = {
"luns": lambda v: v,
"openflags": lambda v: v,
"distribution": lambda v: list(map(float, v))
}
multiMessages = {
"luns": 'Key "luns" requires at least one LUN',
"openflags": 'Key "openflags" requires at least one flag.',
'"min,max", such that 0 <= min <= max <= 100.'
"distribution": 'Key "distribution" is only valid for Gaussian '
'distributions, and keys "hotspotnum" and "disttype" must '
'be set first. Values must be of form '
'"SKEW_STD_DEV,RANGE_STD_DEV", where both standard '
'deviations are nonnegative floating point values.'
}
# Uses an OrderedDict because certain parameters must be specified before
# other parameters.
config = OrderedDict({
# General
"dedupratio": None, # Deduplication ratio
"dedupunit": None, # Deduplication unit
"compratio": None, # Compression ratio
# SDs
"luns": None, # Luns, list OK
# WDs
"wdcount": None, # Number of workloads
"xfersize": None, # Block size
"seekpct": None, # Percent random
"rdpct": None, # Percent read (vs. write)
"percentdisk": None, # How much of the total disk to use
# RDs
"iorate": None, # IOPS
"format": None, # Pre-format lun
"threads | ": None, # Qeueue depth
"elapsed": None, # | Duration
"interval": None, # Update frequency
# Distribution
"hotspotnum": None, # Number of hotspots
"hotspotcap": None, # Total capacity percent for all hotspots
"hotspotiopct": None, # Percent of IO for ALL hotspots
"disttype": None # Distribution type: even, gaussian, uniform
#
# Added inline only if needed
#
# "openflags": []
# - open flags for SDs
# - o_direct provided by default as block devices require it
# "distribution": []
# - parameters for the distribution
#
# Type | Params
# ------------------------------------------
# - gaussian | STANDARD_DEVIATION_SKEW, STANDARD_DEVIATION_RANGE
})
#
# Factories.
#
# Parent class.
class Factory:
def __init__(self, name_type="name", name=None, keys=None):
self.name_type=name_type
self.params = OrderedDict()
self.params[name_type] = name
for key in keys:
self.addKey(key)
def addKey(self, key):
self.params[key] = None
def set(self, key, *values):
if not key in self.params:
self.addKey(key)
if len(values) < 1:
raise ValueError('Error: no values passed for key "{}".'.format(
key))
elif len(values) == 1:
self.params[key] = values[0]
else:
self.params[key] = values
def setName(self, name):
self.set(self.name_type, name)
def append(self, key, *values):
if len(values) == 0:
return
if not key in self.params:
self.set(key, values)
else:
if not isinstance(self.params[key], list):
self.params[key] = [self.params[key]]
for v in values:
if isinstance(v, list):
for w in v:
self.params[key].append(w)
else:
self.params[key].append(v)
def toString(self):
partials = []
for k, v in self.params.items():
if v == None:
raise Exception('Error: key "{}" not assigned (value None).'.format(k))
if isinstance(v, list) or isinstance(v, tuple):
if len(v) == 0:
raise Exception('Error: key {} has length 0.'.format(k))
if len(v) == 1:
partial = "{}={}".format(k, truncate(v[0]))
else:
partial = "{}=({})".format(
k, ",".join([str(truncate(w)) for w in v]))
else:
partial = "{}={}".format(k, truncate(v))
partials.append(partial)
return ",".join(partials)
class SDFactory(Factory) |
neumerance/deploy | .venv/bin/rst2xml.py | Python | apache-2.0 | 619 | 0.001616 | #!/var/www/horizon/.venv/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the publi | c domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + defa | ult_description)
publish_cmdline(writer_name='xml', description=description)
|
9929105/KEEP | keep_backend/api/serializers.py | Python | mit | 4,452 | 0.024259 | import re
import StringIO
import unicodecsv
from django.utils.text import slugify
from tastypie.serializers import Serializer
class CSVSerializer( Serializer ):
formats = [ | 'json', 'jsonp', 'csv' ]
content_types = {
'json': 'application/json',
'jsonp': 'text/javascript',
'csv': 'text/csv',
}
FLOAT_TYPE = re.compile(r'^(\d+\.\d*|\d*\.\d+)$')
INT_TYPE = re.compile(r'^\d+$')
LOCATION_TYPE = re.compile(r'^(\-?\d+(\. | \d+)?) \s*(\-?\d+(\.\d+)?) \s*(\d+) \s*(\d+)$')
def _sniff_type( self, val ):
'''
A really stupid and bare-bones approach to data type detection.
'''
if self.FLOAT_TYPE.match( val ):
return 'decimal'
elif self.INT_TYPE.match( val ):
return 'int'
elif self.LOCATION_TYPE.match( val ):
return 'geopoint'
else:
return 'text'
def _format_data( self, field_type, field_value ):
'''
Given a certain data type, format the value into a string that could
possibly be parsed later by our same code.
Params
------
field_type : string
A string representation the type of data this field holds.
field_value : Object
A python object ( string, dict, etc ) that is our internal
representation of this data. This will be converted to a
standard string format that can be later parsed back into the
system if necessary.
'''
# If the field_value is None, simply return an empty string.
if field_value is None:
return ''
if field_type == 'geopoint':
# Converts geopoints into an X,Y coordinate string
coords = field_value.get( 'coordinates' )
return '%s, %s' % ( str( coords[0] ), str( coords[1] ) )
elif 'select all' in field_type:
# Converts a list into a comma-seperated list of values
return ','.join( field_value )
else:
return str( field_value )
def to_csv( self, data, options=None ):
'''
Converts the JSON representation from the data API into a CSV file.
'''
options = options or {}
data = self.to_simple( data, options )
raw_data = StringIO.StringIO()
writer = unicodecsv.DictWriter( raw_data,
[ x[ 'name' ] for x in data[ 'meta' ][ 'fields' ] ],
extrasaction='ignore')
writer.writeheader()
for item in data.get( 'data', [] ):
# Loops through the field list and format each data value according to
# the type.
row = {}
for field in data[ 'meta' ][ 'fields' ]:
# Grab the field details and convert the field into a string.
field_name = field.get( 'name' )
field_type = field.get( 'type' )
field_value = item.get( 'data' ).get( field_name, None )
row[ field_name ] = self._format_data( field_type, field_value )
writer.writerow( row )
return raw_data.getvalue()
def from_csv( self, csv_data ):
fields, data = ( [], [] )
csv_file = unicodecsv.DictReader( csv_data )
# Grab the headers and create fields for the new repo
headers = csv_file.fieldnames
# Used to sniff the data types
sniff = csv_file.next()
# Go through each field name and attempt to parse the corresponding
# data type.
for idx, header in enumerate( headers ):
if len( header ) == 0:
raise Exception( 'Column headers can not be blank!' )
fields.append({
'name': slugify( unicode( header ) ),
'label': header,
'type': self._sniff_type( sniff[ header ] )
})
# Parse the first data row
datum = {}
for field in fields:
datum[ field[ 'name' ] ] = sniff.get( field[ 'label' ], None )
data.append( datum )
# Now parse the rest of them
for item in csv_file:
datum = {}
for field in fields:
datum[ field[ 'name' ] ] = item.get( field[ 'label' ], None )
data.append( datum )
return ( fields, data )
|
scraperwiki/stock-tool | tests.py | Python | agpl-3.0 | 5,818 | 0 | #!/usr/bin/env python
# encoding: utf-8
import datetime
import unittest
import mock
import pandas as pd
import pandas_finance
from pandas.util.testing import assert_frame_equal
from nose.tools import assert_equal
from freezegun import freeze_time
class GetStockTestCase(unittest.TestCase):
@mock.patch('pandas_finance.web.DataReader')
def test_get_stock_called_correctly(self, mock_datareader):
start = datetime.datetime(1999, 4, 3, 0, 0)
end = datetime.datetime(2005, 2, 5, 0, 0)
pandas_finance.get_stock('AAPL', start, end)
mock_datareader.assert_called_with('AAPL', 'yahoo', start, end)
def test_get_required_tickers_parses_tickers_with_newline(self):
m = mock.mock_open(read_data='TWTR,FB,AAPL,MSFT\n')
textfile = 'tickers.txt'
with mock.patch('pandas_finance.open', m, create=True):
result = pandas_finance.get_required_tickers(textfile)
assert_equal('TWTR,FB,AAPL,MSFT', result)
class ScrapeStockTestCase(unittest.TestCase):
def setUp(self):
"""Run once before each test in this test class."""
self.start = datetime.datetime(2014, 04, 29).date()
self.end = self.start
input_values = {'Volume': [12033400],
'Adj Close': [592.33],
'High': [595.98],
'Low': [589.51],
'Close': [592.33],
'Open': [593.74]}
index_label = [self.start]
input_columns = ['Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
self.input_frame = pd.DataFrame(input_values,
columns=input_columns,
index=index_label)
self.input_frame.index.name = 'Date'
output_values = input_values
# get_stock converts datetime to | isoformat string.
output_values['Date'] = '2014-04-29'
output_values['Stock'] = 'AAPL'
output_columns = ['Date'] + input_columns + ['Stock']
self.output_frame = pd.DataFrame(output_values, columns=output_columns)
@mock.patch('pandas_finance.write_frame_to_sql')
@mock.patch('pandas_finance.get_stock')
def test_scrape_stock_gives_a_valid_frame(self, mock_get_stock,
| mock_write_frame):
mock_get_stock.return_value = self.input_frame
pandas_finance.scrape_stock('AAPL', self.start, self.end)
# Hacky workaround:
# Can't seem to use mock.assert_called_with; problem when comparing
# dataframes, grab argument directly and compare it to expected frame.
frame_called_with = mock_write_frame.call_args_list[0][0][0]
assert_frame_equal(self.output_frame, frame_called_with)
class InstallCrontabTestCase(unittest.TestCase):
pass
class GetDatesTestCase(unittest.TestCase):
@freeze_time('2014-04-10 15:05:05')
def test_get_dates(self):
start, end = pandas_finance.get_dates()
assert_equal(datetime.datetime(1900, 1, 1, 0, 0), start)
assert_equal(datetime.datetime.today(), end)
class MainTestCase(unittest.TestCase):
def setUp(self):
patcher1 = mock.patch('pandas_finance.sqlite3.connect')
self.mock_sqlite3_connect = patcher1.start()
self.addCleanup(patcher1.stop)
patcher2 = mock.patch('pandas_finance.get_required_tickers')
self.mock_get_required_tickers = patcher2.start()
self.mock_get_required_tickers.return_value = 'TWTR,FB,AAPL'
self.addCleanup(patcher2.stop)
patcher3 = mock.patch('pandas_finance.scrape_stock')
self.mock_scrape_stock = patcher3.start()
self.addCleanup(patcher3.stop)
patcher4 = mock.patch('pandas_finance.scraperwiki.status')
self.mock_status = patcher4.start()
self.addCleanup(patcher4.stop)
patcher5 = mock.patch('pandas_finance.install_crontab')
self.mock_install_crontab = patcher5.start()
self.addCleanup(patcher5.stop)
patcher6 = mock.patch('pandas_finance.get_dates')
self.mock_get_dates = patcher6.start()
self.addCleanup(patcher6.stop)
self.start_date = datetime.datetime(1900, 1, 1, 0, 0)
self.end_date = datetime.datetime(2013, 10, 20, 10, 20, 23, 1234)
self.mock_get_dates.return_value = self.start_date, self.end_date
def test_get_dates_call(self):
pandas_finance.main('tickers.txt')
self.mock_get_dates.assert_called_with()
def test_sqlite3_connect_call(self):
pandas_finance.main('tickers.txt')
self.mock_sqlite3_connect.assert_called_with('scraperwiki.sqlite')
def test_database_execute_call(self):
sqlite_execute_mock = mock.Mock(name='RETURNMOCK')
self.mock_sqlite3_connect.return_value = sqlite_execute_mock
pandas_finance.main('tickers.txt')
expected_call = 'drop table if exists stocks;'
sqlite_execute_mock.execute.assert_called_with(expected_call)
def test_get_required_tickers_call(self):
pandas_finance.main('tickers.txt')
self.mock_get_required_tickers.assert_called_with('tickers.txt')
def test_scrape_stock_calls(self):
pandas_finance.main('tickers.txt')
expected_calls = []
for stock in ['TWTR', 'FB', 'AAPL']:
expected_calls.append(mock.call(stock, self.start_date,
self.end_date))
self.mock_scrape_stock.assert_has_calls(expected_calls)
def test_scraperwiki_status_called(self):
pandas_finance.main('tickers.txt')
self.mock_status.assert_called_with('ok')
def test_install_crontab_called(self):
pandas_finance.main('tickers.txt')
self.mock_install_crontab.assert_called_with()
|
steenzout/python-storj | tests/unit/exception_test.py | Python | mit | 2,882 | 0 | # -*- coding: utf-8 -*-
"""Test cases for the storj.exception module."""
from .. import AbstractTestCase
from storj.exception import \
BridgeError, \
ClientError, \
FarmerError, \
HashMismatchError, \
SuppliedTokenNotAcceptedError
class BridgeErrorTestCase(AbstractTestCase):
"""Test case for the BridgeError."""
@staticmethod
def assertBridgeError(error, code, message):
"""Assert BridgeError expected conditions.
Args:
error (:py:class:`storj.exception.BridgeError`): result.
code (int): expected error code.
message (str): expected error message.
"""
assert code == error.code
assert message == error.message
assert '[%d] %s' % (code, message) == str(error)
def test(se | lf):
expected_code = 0
expected_message = 'error'
error = BridgeError(expected_code, expected_message)
self.assertBridgeError(error, expected_code, expected_message)
class ClientErrorTestCase(AbstractTestCase):
"""Test case for the C | lientError."""
@staticmethod
def assertClientError(error, message):
"""Assert ClientError expected conditions.
Args:
error (:py:class:`storj.exception.BridgeError`): result.
message (str): expected error message.
"""
assert message == error.message
assert message == str(error)
def test(self):
expected = 'error'
error = ClientError(expected)
self.assertClientError(error, expected)
class FarmerErrorTestCase(AbstractTestCase):
"""Test case for the FarmerError."""
@staticmethod
def assertFarmerError(error, code, message):
"""Assert FarmerError expected conditions.
Args:
error (:py:class:`storj.exception.FarmerError`): result.
code (int): expected error code.
message (str): expected error message.
"""
assert code == error.code
assert message == error.message
assert '[%d] %s' % (code, message) == str(error)
def test(self):
expected_code = 0
expected_message = 'error'
error = FarmerError(expected_code, expected_message)
self.assertFarmerError(error, expected_code, expected_message)
class HashMismatchErrorErrorTestCase(FarmerErrorTestCase):
"""Test case for the HashMismatchError."""
def test(self):
expected = ''
error = HashMismatchError()
self.assertFarmerError(error, HashMismatchError.CODE, expected)
class SuppliedTokenNotAcceptedErrorTestCase(FarmerErrorTestCase):
"""Test case for the SuppliedTokenNotAcceptedError."""
def test(self):
expected = ''
error = SuppliedTokenNotAcceptedError()
self.assertFarmerError(
error, SuppliedTokenNotAcceptedError.CODE, expected)
|
eoogbe/api-client-staging | generated/python/gapic-google-cloud-spanner-v1/google/cloud/gapic/spanner/v1/spanner_client.py | Python | bsd-3-clause | 38,513 | 0.001792 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/spanner/v1/spanner.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.spanner.v1 Spanner API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
from google.gax.utils import oneof
import google.gax
from google.cloud.gapic.spanner.v1 import enums
from google.cloud.proto.spanner.v1 import keys_pb | 2
from google.cloud.proto.sp | anner.v1 import mutation_pb2
from google.cloud.proto.spanner.v1 import spanner_pb2
from google.cloud.proto.spanner.v1 import transaction_pb2
from google.protobuf import struct_pb2
class SpannerClient(object):
"""
Cloud Spanner API
The Cloud Spanner API can be used to manage sessions and execute
transactions on data stored in Cloud Spanner databases.
"""
SERVICE_ADDRESS = 'spanner.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data', )
_DATABASE_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/instances/{instance}/databases/{database}')
_SESSION_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/instances/{instance}/databases/{database}/sessions/{session}'
)
@classmethod
def database_path(cls, project, instance, database):
"""Returns a fully-qualified database resource name string."""
return cls._DATABASE_PATH_TEMPLATE.render({
'project': project,
'instance': instance,
'database': database,
})
@classmethod
def session_path(cls, project, instance, database, session):
"""Returns a fully-qualified session resource name string."""
return cls._SESSION_PATH_TEMPLATE.render({
'project': project,
'instance': instance,
'database': database,
'session': session,
})
@classmethod
def match_project_from_database_name(cls, database_name):
"""Parses the project from a database resource.
Args:
database_name (string): A fully-qualified path representing a database
resource.
Returns:
A string representing the project.
"""
return cls._DATABASE_PATH_TEMPLATE.match(database_name).get('project')
@classmethod
def match_instance_from_database_name(cls, database_name):
"""Parses the instance from a database resource.
Args:
database_name (string): A fully-qualified path representing a database
resource.
Returns:
A string representing the instance.
"""
return cls._DATABASE_PATH_TEMPLATE.match(database_name).get('instance')
@classmethod
def match_database_from_database_name(cls, database_name):
"""Parses the database from a database resource.
Args:
database_name (string): A fully-qualified path representing a database
resource.
Returns:
A string representing the database.
"""
return cls._DATABASE_PATH_TEMPLATE.match(database_name).get('database')
@classmethod
def match_project_from_session_name(cls, session_name):
"""Parses the project from a session resource.
Args:
session_name (string): A fully-qualified path representing a session
resource.
Returns:
A string representing the project.
"""
return cls._SESSION_PATH_TEMPLATE.match(session_name).get('project')
@classmethod
def match_instance_from_session_name(cls, session_name):
"""Parses the instance from a session resource.
Args:
session_name (string): A fully-qualified path representing a session
resource.
Returns:
A string representing the instance.
"""
return cls._SESSION_PATH_TEMPLATE.match(session_name).get('instance')
@classmethod
def match_database_from_session_name(cls, session_name):
"""Parses the database from a session resource.
Args:
session_name (string): A fully-qualified path representing a session
resource.
Returns:
A string representing the database.
"""
return cls._SESSION_PATH_TEMPLATE.match(session_name).get('database')
@classmethod
def match_session_from_session_name(cls, session_name):
"""Parses the session from a session resource.
Args:
session_name (string): A fully-qualified path representing a session
resource.
Returns:
A string representing the session.
"""
return cls._SESSION_PATH_TEMPLATE.match(session_name).get('session')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
c |
ctuning/ck | ck/repo/module/soft.template/module.py | Python | bsd-3-clause | 826 | 0.001211 | #
# Collective Knowledge (Base soft templates to be reused via inheritance mechanism)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
##
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
cfg = {} # Will be updated | by CK (meta description of this module)
work = {} # Will be updated by CK (temporal data)
ck = None # Will be updated by CK (initialized CK kernel)
# Local settings
#################################################################### | ##########
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return': 0}
|
enovance/hardware | hardware/tests/test_matcher.py | Python | apache-2.0 | 21,360 | 0 | # Copyright (C) 2013-2015 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from hardware import matcher
class TestMatcher(unittest.TestCase):
def test_equal(self):
lines = [('system', 'product', 'serial', 'CZJ31402CD')]
spec = ('system', 'product', 'serial', 'CZJ31402CD')
arr = {}
self.assertTrue(matcher.match_spec(spec, lines, arr))
def test_not_equal(self):
lines = [('system', 'product', 'serial', 'CZJ31402CD')]
spec = ('system', 'product', 'serial', 'CZJ31402CE')
arr = {}
self.assertFalse(matcher.match_spec(spec, lines, arr))
def test_var(self):
lines = [('disk', '1I:1:1', 'size', '1000GB')]
spec = ('disk', '$disk8', 'size', '1000GB')
arr = {}
self.assertTrue(matcher.match_spec(spec, lines, arr))
self.assertEqual(arr, {'disk8': '1I:1:1'})
def test_vars(self):
lines = [
('system', 'product', 'serial', 'CZJ31402CD'),
('disk', '1I:1:1', 'size', '1000GB'),
('disk', '1I:1:1', 'type', 'SATA'),
('disk', '1I:1:1', 'control', 'hpa'),
('disk', '1I:1:2', 'size', '1000GB'),
('disk', '1I:1:2', 'type', 'SATA'),
('disk', '1I:1:2', 'control', 'hpa'),
('disk', '1I:1:3', 'size', '1000GB'),
('disk', '1I:1:3', 'type', 'SATA'),
('disk', '1I:1:3', 'control', 'hpa'),
('disk', '1I:1:4', 'size', '1000GB'),
('disk', '1I:1:4', 'type', 'SATA'),
('disk', '1I:1:4', 'control', 'hpa'),
('disk', '2I:1:5', 'size', '1000GB'),
('disk', '2I:1:5', 'type', 'SATA'),
('disk', '2I:1:5', 'control', 'hpa'),
('disk', '2I:1:6', 'size', '1000GB'),
('disk', '2I:1:6', 'type', 'SATA'),
('disk', '2I:1:6', 'control', 'hpa'),
('disk', '2I:1:7', 'size', '100GB'),
('disk', '2I:1:7', 'type', 'SSDSATA'),
('disk', '2I:1:7', 'control', 'hpa'),
('disk', '2I:1:8', 'size', '100GB'),
('disk', '2I:1:8', 'type', 'SSDSATA'),
('disk', '2I:1:8', 'control', 'hpa'),
]
specs = [('system', 'product', 'serial', 'CZJ31402CD'),
('disk', '$disk1', 'size', '100GB'),
('disk', '$disk2', 'size', '100GB'),
('disk', '$disk3', 'size', '1000GB'),
('disk', '$disk4', 'size', '1000GB'),
('disk', '$disk5', 'size', '1000GB'),
('disk', '$disk6', 'size', '1000 | GB'),
('disk', '$disk7', 'size', '1000GB'),
('disk', '$disk8', 'size', '1000GB')]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr,
{'disk1': '2I:1:7',
'disk2': '2I:1:8',
'disk3': '1I:1:1',
'disk4': '1I:1:2',
'disk5': '1I:1:3',
'disk6': | '1I:1:4',
'disk7': '2I:1:5',
'disk8': '2I:1:6',
}
)
def test_already_bound(self):
lines = [
('disk', '1I:1:2', 'size', '100GB'),
('disk', '1I:1:1', 'size', '1000GB'),
('disk', '1I:1:1', 'control', 'hpa'),
('disk', '1I:1:2', 'control', 'hpa'),
]
specs = [
('disk', '$disk1', 'size', '100GB'),
('disk', '$disk1', 'control', 'hpa'),
('disk', '$disk2', 'size', '1000GB'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr,
{'disk1': '1I:1:2',
'disk2': '1I:1:1',
})
def test_order(self):
specs = [
('disk', '$disk1', 'size', '100'),
('disk', '$disk1', 'slot', '$slot1'),
('disk', '$disk2', 'size', '1000'),
('disk', '$disk2', 'slot', '$slot2'),
]
lines = [
('disk', '1I:1:1', 'size', '1000'),
('disk', '1I:1:1', 'control', 'hpa'),
('disk', '1I:1:1', 'slot', '2'),
('disk', '2I:1:8', 'size', '100'),
('disk', '2I:1:8', 'control', 'hpa'),
('disk', '2I:1:8', 'slot', '2'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
def test_2vars(self):
specs = [
('disk', '$disk', 'size', '$size'),
]
lines = [
('disk', 'vda', 'size', '8'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr,
{'size': '8',
'disk': 'vda',
})
def test_2dollars(self):
specs = [
('disk', '$$disk', 'size', '$size'),
]
lines = [
('disk', 'vda', 'size', '8'),
]
arr = {}
arr2 = {}
self.assertTrue(matcher.match_all(lines, specs, arr, arr2))
self.assertEqual(arr,
{'size': '8',
'disk': 'vda',
})
self.assertEqual(arr2,
{'disk': 'vda',
})
def test_multiple_vars(self):
specs = [
('disk', 'vda', 'size', '8'),
('disk', 'vdb', 'size', '16'),
]
specs2 = [
('disk', 'vda', 'size', '8'),
('disk', 'vdb', 'size', '8'),
]
lines = [
('disk', 'vda', 'size', '8'),
('disk', 'vdb', 'size', '8'),
]
arr = {}
self.assertFalse(matcher.match_all(lines, specs, arr, {}))
self.assertTrue(matcher.match_all(lines, specs2, arr, {}), lines)
def test_multiple(self):
spec = ('disk', '$disk', 'size', '8')
lines = [
('disk', 'vda', 'size', '8'),
('disk', 'vdb', 'size', '8'),
]
arr = {}
self.assertTrue(matcher.match_multiple(lines, spec, arr))
self.assertEqual(arr['disk'], ['vda', 'vdb'])
def test_gt(self):
specs = [('disk', '$disk', 'size', 'gt(10)')]
lines = [
('disk', 'vda', 'size', '20'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr['disk'], 'vda')
def test_ge(self):
specs = [('disk', '$disk', 'size', 'ge(10.1)')]
lines = [
('disk', 'vda', 'size', '10.5'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr['disk'], 'vda')
def test_lt(self):
specs = [('disk', '$disk', 'size', 'lt(30)')]
lines = [
('disk', 'vda', 'size', '20'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr['disk'], 'vda')
def test_le(self):
specs = [('disk', '$disk', 'size', 'le(20)')]
lines = [
('disk', 'vda', 'size', '20'),
]
arr = {}
self.assertTrue(matcher.match_all(lines, specs, arr, {}))
self.assertEqual(arr['disk'], 'vda')
def test_not(self):
specs = [('network', '$eth', 'serial', '$mac=not(regexp(^28:d2:))')]
lines = [('network', 'eth0', 'serial', '20:d2:44:1b:0a:8b')]
arr = {}
self.assertTrue(matcher.ma |
oscm/devops | docker/docker.py | Python | mit | 942 | 0.005308 | #!/usr/bin/env python3
import sys
sys.path.insert(0, '/Users/neo/workspace/devops')
from netkiller.docker import *
# from environment.experiment import experiment
# from environment.development import development
# from environment.production import production
from compose.devops import devops
from compose.demo import demo
# from libexec.portainer import portainer
# print(test)
# exit()
if __name__ == "__main__":
    # Entry point: register the compose environments with the netkiller
    # Docker CLI dispatcher, then hand over control to it.
    try:
        cli = Docker()
        # Register each compose environment the CLI should expose.
        for compose_env in (devops, demo):
            cli.environment(compose_env)
        cli.main()
    except KeyboardInterrupt:
        # Message kept byte-for-byte from the original (including the
        # "Crtl" typo) so observable output is unchanged.
        print("Crtl+C Pressed. Shutting down.")
AsherYang/ThreeLine | server/ffstore/mgrsys/RandomPwd.py | Python | apache-2.0 | 580 | 0 | #! /usr/bin/python
# -*- coding:utf-8 -*-
| """
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2018/6/29
Desc: 产生6位随机短信验证码类
"""
import random
class RandomPwd:
    """Generator for 6-digit numeric SMS verification codes.

    Fixes over the original:
    - removed the Python-2-only ``print a_list`` debug statement (a syntax
      error under Python 3) and the debug leak of the generated digits;
    - replaced the manual accumulate-then-join loop with a direct join.

    NOTE(review): ``random`` is not cryptographically secure; for real
    verification codes the ``secrets`` module should be preferred.
    """

    # Number of digits in a generated code.
    CODE_LENGTH = 6

    def genPwd(self):
        """Return a string of 6 random decimal digits (repeats allowed)."""
        return ''.join(
            str(random.randint(0, 9)) for _ in range(self.CODE_LENGTH))
if __name__ == '__main__':
randomPwd = RandomPwd()
print randomPwd.genPwd()
|
skibyte/gdblib | gdblib/util.py | Python | lgpl-3.0 | 864 | 0.006944 | #
# GdbLib - A Gdb python library.
# Copyright (C) 2012 Fernando Castillo
#
| # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Found | ation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
def change(values):
    # Placeholder: intentionally a no-op. Presumably meant to apply a set of
    # value changes in a later revision -- TODO confirm intended contract.
    pass
def removeFile(path):
    """Return *path* with its final component (the file name) stripped.

    Bug fix: the original applied ``path[:path.rfind(os.sep)]``
    unconditionally, so a path with no separator returned the input minus
    its last character (``rfind`` yields -1).  A bare file name now yields
    '' (empty directory part), consistent with a leading-separator path.
    """
    index = path.rfind(os.sep)
    if index < 0:
        # No directory component present.
        return ''
    return path[:index]
|
vansky/meg_playground | scripts/tileRegressionSeeRSquared_EpochsBack.py | Python | gpl-2.0 | 11,071 | 0.029988 | import scipy
import pickle
import numpy
import sklearn.linear_model
import matplotlib #needed to avoid having to run an X-server just to output to a png
matplotlib.use('Agg') #needed to avoid having to run an X-server just to output to a png
import pylab
import re
def adjustR2(R2, numFeatures, numSamples):
    """Return the adjusted R^2 for a fit with ``numFeatures`` regressors.

    Standard small-sample penalty: R2_adj = R2 - (1 - R2) * p / (n - p - 1).
    Removed the leftover debug lines (``#1/0``, ``#return R2``) from the
    original.  Raises ZeroDivisionError when numSamples == numFeatures + 1,
    as before -- callers must supply n > p + 1.
    """
    dof = numSamples - numFeatures - 1
    return R2 - (1 - R2) * (float(numFeatures) / dof)
def mynormalise(A):
    """Z-score *A* (per scipy.stats.zscore defaults), mapping NaNs to zero.

    Constant input has zero variance, so zscore produces NaNs there; those
    entries are replaced with 0 to keep downstream regression finite.
    """
    standardised = scipy.stats.zscore(A)
    return numpy.where(numpy.isnan(standardised), 0, standardised)
wordPropsFile = '/home/corpora/original/english/meg_hod_corpus/projects/heartOfDarkness/wordTimesAnnots/hod_JoeTimes_LoadsaFeaturesV3.tab' # best yet! have automatic Stanford tagging, several word length and freq measures, and also the 2 and 3 token back-grams
wordProps = scipy.genfromtxt(wordPropsFile,delimiter='\t',names=True,dtype="i4,f4,f4,S50,S50,i2,i2,i2,S10,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4")
epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/home/corpora/original/english/meg_hod_corpus/usr/data/meg/heartOfDarkness/meanPartsMeanChannelsLeftTempPNPgradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#dimensions of original input file: (14595 epochs, 375 channels)
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanChannelsLeftTempPNPgradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanChannelsLeftTempNPNgradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanChannelsLeftAntTempMagsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanLeftTempMagsGradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
wordTrials = numpy.array([p != '' for p in wordProps['stanfPOS']])
#epochTimeSelection = range(750) #range(125,625) # -0.5 to +1.5s
epochStart = -1; # 1s before onset
epochEnd = 2; # 2s after onset
samplingRate = 125
epochSamples = epochedGroupMeanAreasensorsMeanSensors.shape[1]
epochLength = epochEnd-epochStart;
zeroSample = (abs(epochStart)/float(epochLength)*epochSamples)
wordEpochs = epochedGroupMeanAreasensorsMeanSensors[wordTrials]
wordFeatures = wordProps[wordTrials]
# TEMP, make task smaller
wordEpochs = wordEpochs[:]
#wordFeatures =
#wordFeatures[:100]['surprisal2back_COCA']
#regParam = [0.001,0.01, 0.1, 1, 10, 1e+2, 1e+3]
regParam = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 1e+2, 2e+2, 5e+2, 1e+3, 2e+3, 5e+3]
#epochHistory=0
features = [
# 'random',
# 'track',
# 'sentenceSerial', # will be perfectly correlated with one from previous epoch; see below, first one gets re-introduced
# 'runSerial',
# 'storySerial', # is linear combination of track and runserial
# 'stanfPOS',
# 'duration',
# 'lenLett',
# 'lenPhonCMU',
# 'lenSyllCMU',
'logFreq_ANC',
# 'tokenPrimed',
# 'tokenPosPrimed',
'surprisal2back_COCA',
# 'bigramLogProbBack_COCA',
# 'trigramLogProbBack_COCA',
# 'bigramEntropy_COCA_previous', # will be perfectly correlated with one from previous epoch;
'bigramEntropy_COCA_here',
]
labelMap = {
'logFreq_ANC': 'freq',
'surprisal2back_COCA': 'surprisal',
'bigramEntropy_COCA_here': 'entropy',
'sentenceSerial': 'position',
}
legendLabels = features
explanatoryFeatures = numpy.zeros((wordFeatures.shape)) # dummy
#explanatoryFeatures = numpy.array([])
for feature in features:
if feature == 'duration':
explanatoryFeatures = numpy.vstack((explanatoryFeatures, wordFeatures['offTime']-wordFeatures['onTime']))
elif feature == 'random':
explanatoryFeatures = numpy.vstack((explanatoryFeatures, numpy.random.random((wordFeatures.shape))))
else:
explanatoryFeatures = numpy.vstack((explanatoryFeatures, wordFeatures[feature]))
#print '\tlearning semantic dimension',dim,'non-zeros, sum',len(trainY)-sum(trainY==0),sum(trainY), time.ctime()
#explanatoryFeatures[-1,:] = (explanatoryFeatures[-1,:] - numpy.mean(explanatoryFeatures[-1,:]))/numpy.std(explanatoryFeatures[-1,:])
explanatoryFeatures = explanatoryFeatures[1:].T # strip zeros out again
#explanatoryFeatures = explanatoryFeatures.T # strip zeros out again
#features.insert(0,'dummyConstant')
def commonPlotProps():
    """Apply shared axis labels/ticks/limits to the current pylab figure.

    Relies on module-level globals (epochSamples, epochStart, samplingRate);
    call order matters because pylab operates on implicit global state.
    """
    #pylab.plot((0,epochSamples),(0,0),'k--')
    #pylab.ylim((-2.5e-13,2.5e-13)) #((-5e-14,5e-14)) # better way just to get the ones it choose itself?
    #pylab.plot((zeroSample,zeroSample),(0,0.01),'k--')
    # Convert sample indices on the x axis into seconds relative to onset.
    pylab.xticks(numpy.linspace(0,epochSamples,7),epochStart+(numpy.linspace(0,epochSamples,7)/samplingRate))
    pylab.xlabel('time (s) relative to auditory onset') #+refEvent)
    # NOTE(review): hard-coded sample window (62..313) -- presumably the
    # region of interest at 125 Hz; confirm against the epoch settings.
    pylab.xlim((62,313))
    pylab.show()
    # Dashed guides: y=0 baseline and the stimulus-onset sample (125).
    pylab.axhline(0, color='k', linestyle='--')
    pylab.axvline(125, color='k', linestyle='--')
#if epochsBack > 0:
# historyFeatures = numpy.zeros((explanatoryFeatures.shape[0], explanatoryFeatures.shape[1]*(epochsBack+1)))
for epochHistory in [3]: #range(10):
modelTrainingFit = []
modelTestCorrelation = []
modelParameters = []
legendLabels = features
tmpFeatures = explanatoryFeatures.copy()
tmpLegend = legendLabels[:]
for epochsBack in range(1,epochHistory+1):
epochFeatures = numpy.zeros(tmpFeatures.shape)
epochFeatures[epochsBack:,:] = tmpFeatures[:-epochsBack,:]
explanatoryFeatures = numpy.hstack((explanatoryFeatures,epochFeatures))
legendLabels = legendLabels + [l+'-'+str(epochsBack) for l in tmpLegend]
# put in sentence serial - can't leave in history, cos is too highly correlated...
explanatoryFeatures = numpy.vstack((explanatoryFeatures.T, wordFeatures['sentenceSerial'])).T
features.append('sentenceSerial')
legendLabels.append('sentenceSerial')
#explanatoryFeatures = mynormalise(explanatoryFeatures)
#pylab.figure(); pylab.imshow(explanatoryFeatures,interpolation='nearest', aspect='auto'); pylab.show()
#1/0
for t in range(epochSamples):
#print 'fitting at timepoint',t
# NOTES # tried a load of different versions, and straight linear regression does as well as any of them, measured in terms of R^2
#lm = sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=True)
#lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=regParam) #, 10000, 100000])
lm = sklearn.linear_model.LassoLars(alpha=0.0001) #(alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.2204460492503131e-16, copy_X=True)
#lm = sklearn.linear_model.Ridge(fit_intercept=True, normalize=True)
#lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=[1000000000]) #, 10000, 100000]) # with one test, and 9 explanatory variables, found that got alpha down to .01 for the times when we have a big R^2
#lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=[1]) #, 10000, 100000])
#lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=[1e-6,1e-4,1e-2,1e+0,1e+2, 1e+4, 1e+6]) #, 10000, 100000])
#lm = sklearn.linear_model.ElasticNetCV(0.5) # rho (l1/l2 balance) parameter range suggested by doc pages
#lm = sklearn.linear_model.ElasticNetCV([.1, .5, .7, .9, .95, .99, 1]) # rho (l1/l2 balance) parameter range suggested by doc pages
# found that ended up taking rho of 1, and zero betas, so trying more L2 biased rho (documentation seems to be contradictory about l1/l2 | aka lasso/ridge)
trainX = mynormalise(explanatoryFeatures)
trainY = mynormalise(wordEpochs[:,t])
#trainX = explanatoryFeatures
#trainY = wordEpochs[:,t]
trainedLM = lm.fit(trainX,trainY)
modelParameters.append(lm)
#guessY = lm.predict(testX)
#guessTestSemantics[:,dim] = guessY
modelTrainingFit.append(adjustR2(lm.score(trainX,trainY), trainX.shape[1], trainX.shape[0]))
#modelTestCorrelation.append(numpy.corrcoef(guessTestSemantics[:,dim],real | TestSemantics[:,dim])[1,0])
#print '\t\tdone, explaining R^2 of',modelTrainingFit[-1],'reg param',trainedLM.alpha_,'betas',modelParameters[-1].coef_
#print '\t\tdone, explaining R^2 of',modelTrainingFit[-1],'betas',modelParameters[-1].coef_, lm.alpha_ #'chose reg param of',lm.best_alpha # lm.alphas_[0] # lm.best_alpha for ridge; lm.alphas_[0] f |
ericremoreynolds/fsiox | server.py | Python | mit | 559 | 0.007156 | import gevent
import gevent.monkey
gevent.monkey.patch_all()
from time import sleep
import flask
from flask_socketio import SocketIO, emit
app = flask.Fl | ask(__name__)
app.config['SECRET_KEY'] = 'secret!'
io = SocketIO(app, resource="/api/v1/socket")
@io.on('connect')
def on_connect():
    """Socket.IO connect handler: log that a client attached."""
    print("Client connected")
@io.on('hello_from_client')
def on_hello_from_client():
    """Reply to a client's hello with three numbered messages, 1s apart.

    sleep() is cooperative here because gevent.monkey.patch_all() ran at
    import time, so other events are still served between sends.
    """
    for i in range(3):
        emit('message_from_server', str(i))
        print("Sent %s" % i)
        sleep(1)
if __name__ == '__main__':
io.run(app, port=9090, debug=True) |
mattmcd/PyAnalysis | mda/tutorial/__init__.py | Python | apache-2.0 | 107 | 0.009346 | # List | of modules exported by mda.tutorial
# from mda.tutorial import *
__all__ = ['t | imes2', 'pandastut']
|
torchingloom/edx-platform | pavelib/assets.py | Python | agpl-3.0 | 6,729 | 0 | """
Asset compilation and collection.
"""
from __future__ import print_function
import argparse
from paver.easy import sh, path, task, cmdopts, needs, consume_args, call_task
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import glob
import traceback
from .utils.envs import Env
from .utils.cmd import cmd, django_cmd
COFFEE_DIRS = ['lms', 'cms', 'common']
SASS_LOAD_PATHS = ['./common/static/sass']
SASS_UPDATE_DIRS = ['*/static']
SASS_CACHE_PATH = '/tmp/sass-cache'
class CoffeeScriptWatcher(PatternMatchingEventHandler):
    """Recompile CoffeeScript whenever a *.coffee file changes."""
    ignore_directories = True
    patterns = ['*.coffee']

    def register(self, observer):
        """Schedule this handler on every directory holding coffee sources."""
        watched_dirs = {
            path(filename).dirname()
            for filename in sh(coffeescript_files(), capture=True).splitlines()
        }
        for dirname in watched_dirs:
            observer.schedule(self, dirname)

    def on_modified(self, event):
        """Recompile just the changed file; report (not raise) failures."""
        print('\tCHANGED:', event.src_path)
        try:
            compile_coffeescript(event.src_path)
        except Exception:  # pylint: disable=W0703
            # Keep watching even if one compile blows up.
            traceback.print_exc()
class SassWatcher(PatternMatchingEventHandler):
    """Recompile Sass whenever a *.scss file changes."""
    ignore_directories = True
    patterns = ['*.scss']
    ignore_patterns = ['common/static/xmodule/*']

    def register(self, observer):
        """Schedule this handler (recursively) on every sass directory."""
        for dirname in SASS_LOAD_PATHS + SASS_UPDATE_DIRS + theme_sass_paths():
            # A glob pattern such as '*/static' expands to several paths.
            expanded = glob.glob(dirname) if '*' in dirname else [dirname]
            for directory in expanded:
                observer.schedule(self, directory, recursive=True)

    def on_modified(self, event):
        """Rebuild all sass output; report (not raise) failures."""
        print('\tCHANGED:', event.src_path)
        try:
            compile_sass()
        except Exception:  # pylint: disable=W0703
            # Keep watching even if one compile blows up.
            traceback.print_exc()
class XModuleSassWatcher(SassWatcher):
    """Rebuild XModule assets when anything under the xmodule lib changes."""
    ignore_directories = True
    ignore_patterns = []

    def register(self, observer):
        """Watch the whole xmodule source tree recursively."""
        observer.schedule(self, 'common/lib/xmodule/', recursive=True)

    def on_modified(self, event):
        """Reprocess xmodule assets; report (not raise) failures."""
        print('\tCHANGED:', event.src_path)
        try:
            process_xmodule_assets()
        except Exception:  # pylint: disable=W0703
            # Keep watching even if one rebuild blows up.
            traceback.print_exc()
def theme_sass_paths():
    """
    Return the list of paths to the theme's sass assets,
    or an empty list if no custom theme is configured.
    """
    edxapp_env = Env()
    if not edxapp_env.feature_flags.get('USE_CUSTOM_THEME', False):
        return []
    theme_name = edxapp_env.env_tokens.get('THEME_NAME', '')
    # Themes live in a sibling directory of the repo root.
    parent_dir = path(edxapp_env.REPO_ROOT).abspath().parent
    theme_root = parent_dir / "themes" / theme_name
    return [theme_root / "static" / "sass"]
def coffeescript_files():
    """
    Build the shell `find` command listing all *.coffee files in the repo.
    """
    search_roots = " ".join([Env.REPO_ROOT / coffee_dir for coffee_dir in COFFEE_DIRS])
    return cmd('find', search_roots, '-type f', '-name \"*.coffee\"')
def compile_coffeescript(*files):
    """
    Compile the given CoffeeScript files to JavaScript
    (default: every coffee file found in the repo).
    """
    targets = files or ("`{}`".format(coffeescript_files()),)
    sh(cmd("node_modules/.bin/coffee", "--compile", *targets))
def compile_sass(debug=False):
    """
    Compile Sass to CSS; output is uncompressed when *debug* is true.
    """
    theme_paths = theme_sass_paths()
    style_flag = '' if debug else '--style compressed'
    sh(cmd(
        'sass', style_flag,
        "--cache-location {cache}".format(cache=SASS_CACHE_PATH),
        "--load-path", " ".join(SASS_LOAD_PATHS + theme_paths),
        "--update", "-E", "utf-8", " ".join(SASS_UPDATE_DIRS + theme_paths)
    ))
def compile_templated_sass(systems, settings):
    """
    Render Mako templates for Sass files.

    `systems` is a list of systems (e.g. 'lms' or 'studio' or both);
    `settings` is the Django settings module to use.
    """
    for system in systems:
        sh(django_cmd(system, settings, 'preprocess_assets'))
def process_xmodule_assets():
    """
    Process XModule static assets.
    """
    # Delegates to the `xmodule_assets` console script -- presumably
    # installed by the xmodule package; confirm it is on PATH in this env.
    sh('xmodule_assets common/static/xmodule')
def collect_assets(systems, settings):
    """
    Run Django collectstatic (with pipeline processing) for each system.

    `systems` is a list of systems (e.g. 'lms' or 'studio' or both);
    `settings` is the Django settings module to use.
    """
    collect_cmd = "collectstatic --noinput > /dev/null"
    for system in systems:
        sh(django_cmd(system, settings, collect_cmd))
@task
@cmdopts([('background', 'b', 'Background mode')])
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css.

    With --background the observer thread is started and this task returns;
    otherwise the main thread loops until interrupted with Ctrl-C.
    """
    observer = Observer()
    # One handler per asset family; each schedules its own directories.
    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer)
    XModuleSassWatcher().register(observer)
    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by ctrl-c
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def update_assets(args):
    """
    Compile CoffeeScript and Sass, then collect static assets.

    Accepts the system(s) to build ('lms', 'studio') plus flags for the
    Django settings module, debug (uncompressed) Sass, skipping the
    collectstatic step, and watching files for changes afterwards.
    """
    parser = argparse.ArgumentParser(prog='paver update_assets')
    parser.add_argument(
        'system', type=str, nargs='*', default=['lms', 'studio'],
        help="lms or studio",
    )
    parser.add_argument(
        '--settings', type=str, default="dev",
        help="Django settings module",
    )
    parser.add_argument(
        '--debug', action='store_true', default=False,
        help="Disable Sass compression",
    )
    parser.add_argument(
        '--skip-collect', dest='collect', action='store_false', default=True,
        help="Skip collection of static assets",
    )
    parser.add_argument(
        '--watch', action='store_true', default=False,
        help="Watch files for changes",
    )
    args = parser.parse_args(args)
    # Build steps run in dependency order: templated sass -> xmodule
    # assets -> coffeescript -> sass.
    compile_templated_sass(args.system, args.settings)
    process_xmodule_assets()
    compile_coffeescript()
    compile_sass(args.debug)
    if args.collect:
        collect_assets(args.system, args.settings)
    if args.watch:
        # In debug mode the watcher runs in the foreground so Ctrl-C stops it.
        call_task('watch_assets', options={'background': not args.debug})
|
hyphenliu/cnminlangwebcollect | src/gui/about.py | Python | gpl-3.0 | 935 | 0.010989 | # -*- coding:utf-8 -*-
'''
Created on 2013年12月31日
@author: Hyphen.Liu
'''
import wx
import wx.html
import globalvar.guiGlobalVar as ggv
class About(wx.Dialog):
    """Modal "about" dialog showing the application description as HTML."""

    # HTML body displayed in the dialog (provided by the global GUI state).
    text = ggv.aboutText

    def __init__(self, parent):
        """Build the dialog: an HTML pane above a centred OK button.

        :param parent: the owning top-level window
        """
        wx.Dialog.__init__(self, parent, -1, '关于本软件',
                           size=(330, 290))
        html_view = wx.html.HtmlWindow(self)
        html_view.SetPage(self.text)
        ok_button = wx.Button(self, wx.ID_OK, u"确认")
        layout = wx.BoxSizer(wx.VERTICAL)  # stack widgets vertically
        layout.Add(html_view, 1, wx.EXPAND | wx.ALL, 0)
        layout.Add(ok_button, 0, wx.ALIGN_CENTER | wx.ALL, 5)
        self.SetSizer(layout)  # attach the sizer to the dialog
        self.Layout()
1986ks/chainer | chainer/functions/activation/clipped_relu.py | Python | mit | 1,839 | 0.002175 | from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
import numpy
class ClippedReLU(function.Function):

    """Clipped Rectifier Unit function.

    Clipped ReLU is written as :math:`ClippedReLU(x, z) = \min(\max(0, x), z)`,
    where :math:`z(>0)` is a parameter to cap return value of ReLU.
    """

    def __init__(self, z):
        # z is the cap applied to the ReLU output; must be a positive float.
        if not isinstance(z, float):
            raise TypeError('z must be float value')
        # z must be positive.
        assert z > 0
        self.cap = z

    def check_type_forward(self, in_types):
        # Exactly one float32 input array is accepted.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(x_type.dtype == numpy.float32)

    def forward_cpu(self, x):
        # Elementwise y = min(max(0, x), cap) on the single input array.
        return utils.force_array(numpy.minimum(
            numpy.maximum(0, x[0]), self.cap)).astype(numpy.float32),

    def backward_cpu(self, x, gy):
        # Gradient passes through only on the open interval 0 < x < cap.
        return utils.force_array(
            gy[0] * (0 < x[0]) * (x[0] < self.cap)).astype(numpy.float32),

    def forward_gpu(self, x):
        # Same clipping computed by an elementwise CUDA kernel.
        return cuda.elementwise(
            'T x, T cap', 'T y', 'y = min(max(x, (T)0), cap)',
            'clipped_relu_fwd')(x[0], self.cap),

    def backward_gpu(self, x, gy):
        # Zero gradient outside (0, cap), matching the CPU backward pass.
        gx = cuda.elementwise(
            'T x, T gy, T z', 'T gx',
            'gx = ((x > 0) & (x < z))? gy : 0',
            'clipped_relu_bwd')(x[0], gy[0], self.cap)
        return gx,
def clipped_relu(x, z=20.0):
    """Clipped Rectifier Unit function.

    This function is expressed as :math:`ClippedReLU(x, z)
    = \min(\max(0, x), z)`, where :math:`z(>0)` is a clipping value.

    Args:
        x (~chainer.Variable): Input variable.
        z (float): Clipping value. (default = 20.0)

    Returns:
        ~chainer.Variable: Output variable.

    """
    # z's type and positivity are validated by ClippedReLU.__init__.
    return ClippedReLU(z)(x)
|
nkraft/educollections | test_educollections.py | Python | bsd-2-clause | 1,009 | 0.000991 | from educollections import ArrayList
def print_list_state(lst):
    """Print the list's size and contents, followed by a blank line."""
    current_size = lst.size()
    print('Size is', current_size)
    print('Contents are', lst)
    print()
arr = ArrayList(10)
print('Capacity is', arr.capacity())
print_list_state(arr)
for i in range(10):
print('Prepend', i)
arr.prepend(i)
print_list_state(arr)
for i in range(10):
print('Item at index', i, 'is', arr.get(i))
print_list_state(arr)
for i in range(10):
print('Assign index', i, 'with', 10 - i)
arr.set(i, 10 - i)
print_list_state(arr)
arr.clear()
print_list_state(arr)
for i in range(10):
print('Append', i)
arr.append(i)
print_list_state(arr)
for i in [9, 4, 1, 6, 3, 0, 0, 0, 1, 0]:
item = arr.remove(i);
print('Removed', item, | 'from index', i)
print_list_state(arr)
arr.clear()
pr | int_list_state(arr)
for i in range(5):
print('Append', i)
arr.append(i)
print_list_state(arr)
for i in [2, 3, 0, 7, 8]:
print('Insert', i + 10, 'at index', i)
arr.insert(i, i + 10)
print_list_state(arr)
|
ULHPC/modules | easybuild/easybuild-easyblocks/easybuild/easyblocks/q/quantumespresso.py | Python | mit | 14,580 | 0.003704 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
"""Support for building and installing Quantum ESPRESSO."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Quantum ESPRESSO."""
extra_vars = {
'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
    def __init__(self, *args, **kwargs):
        """Add extra config options specific to Quantum ESPRESSO."""
        super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
        # Sources are unpacked and built directly inside the install prefix.
        self.build_in_installdir = True
        # Top-level source directory name inside the release tarball.
        self.install_subdir = "espresso-%s" % self.version
    def patch_step(self):
        """Patch files from build dir (not start dir)."""
        # beginpath overrides the default start dir because
        # build_in_installdir is enabled in __init__.
        super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)
def configure_step(self):
"""Custom configuration procedure for Quantum ESPRESSO."""
if self.cfg['hybrid']:
self.cfg.update('configopts', '--enable-openmp')
if not self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--disable-parallel')
if not self.cfg['with_scalapack']:
self.cfg.update('configopts', '--without-scalapack')
repls = []
if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
# set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
cpp = "%s -E -C" % os.getenv('CC')
repls.append(('CPP', cpp, False))
env.setvar('CPP', cpp)
super(EB_QuantumESPRESSO, self).configure_step()
# compose list of DFLAGS (flag, value, keep_stuff)
# for guidelines, see include/defs.h.README in sources
dflags = []
comp_fam_dflags = {
toolchain.INTELCOMP: '-D__INTEL',
toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
}
dflags.append(comp_fam_dflags[self.toolchain.comp_family()])
libfft = os.getenv('LIBFFT')
if libfft:
if "fftw3" in libfft:
dflags.append('-D__FFTW3')
else:
dflags.append('-D__FFTW')
env.setvar('FFTW_LIBS', libfft)
if get_software_root('ACML'):
dflags.append('-D__ACML')
if self.toolchain.options.get('usempi', None):
dflags.append('-D__MPI -D__PARA')
if self.cfg['hybrid']:
dflags.append(" -D__OPENMP")
if self.cfg['with_scalapack']:
dflags.append(" -D__SCALAPACK")
# always include -w to supress warnings
dflags.append('-w')
repls.append(('DFLAGS', ' '.join(dflags), False))
# complete C/Fortran compiler and LD flags
if self.cfg['hybrid']:
repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
# obtain library settings
libs = []
for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
val = os.getenv('LIB%s' % lib) |
repls.append(('%s_LIBS' % lib, val, False))
libs.append(val)
libs = ' '.join(libs)
repls.append(('BLAS_LIBS_SWITCH', 'external', False))
repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
repls.append(('LD_LIBS', | os.getenv('LIBS'), False))
self.log.debug("List of replacements to perform: %s" % repls)
# patch make.sys file
fn = os.path.join(self.cfg['start_dir'], 'make.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
for (k, v, keep) in repls:
# need to use [ \t]* instead of \s*, because vars may be undefined as empty,
# and we don't want to include newlines
if keep:
line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
else:
line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90.sys')
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
if len(wantdirs) != 0:
fn = os.path.join(self.builddir, wantdirs[0], 'conf', 'make.sys.in')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("@f90rule@",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
# move non-espresso directories to where they |
spahan/unixdmoain | lib/test/dbmcache.py | Python | bsd-3-clause | 164 | 0.012195 |
import UniDomain.UniDomain as UniDomain
import UniDomain.dbmcache as dbmcache
host = UniDomain.host()
db = dbmcache | .dbmNode(dbpath=host.config["cachedir"])
| |
ales-erjavec/orange-bio | orangecontrib/bio/widgets3/OWMAPlot.py | Python | gpl-3.0 | 18,714 | 0.000641 | import sys
import os
from functools import partial, reduce
import numpy
from AnyQt.QtGui import QPainter
from AnyQt.QtCore import QObject, QSize, QThread, QThreadPool, Slot
import pyqtgraph as pg
import Orange.data
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import concurrent
from ..utils import expression
def is_discrete(var):
    # True when `var` is a categorical (discrete) Orange variable.
    return isinstance(var, Orange.data.DiscreteVariable)
def group_mask_columns(data, key, values):
    """
    Return a boolean mask over `data`'s columns (variables).

    A column is selected when its variable's `attributes[key]` equals one
    of `values`.

    Parameters
    ----------
    data : Orange.data.Table
        Source data table.
    key : str
        The variable label key (where `values` are looked for)
    values : sequence of str
        The values (for the corresponding `key`) selecting the columns.
    """
    wanted = {(key, value) for value in values}
    mask = []
    for var in data.domain.attributes:
        # Selected iff the variable carries at least one wanted (key, value).
        mask.append(bool(wanted & set(var.attributes.items())))
    return numpy.array(mask, dtype=bool)
def group_mask_rows(data, var, values):
    """
    Return a boolean mask over `data`'s rows (instances).

    A row is selected when its entry for `var` equals one of `values`.

    Parameters
    ----------
    data : Orange.data.Table
        Source data table.
    var : Orange.data.DiscreteVariable
        The variable/column on which to match `values`.
    values : sequence of str
        The values to select (must be a subset of `var.values`)
    """
    variable = data.domain[var]
    column, _ = data.get_column_view(variable)
    # Translate value names into their integer codes within the variable.
    selected_codes = [variable.values.index(v) for v in values]
    mask = numpy.zeros_like(column, dtype=bool)
    for code in selected_codes:
        mask = mask | (column == code)
    return mask
def group_mask(data, key, values, axis=1):
    """Dispatch to the column (axis=1) or row (axis=0) mask builder."""
    if axis == 0:
        return group_mask_rows(data, key, values)
    if axis == 1:
        return group_mask_columns(data, key, values)
    raise ValueError("0 <= axis < 2")
def table_take(data, indices, axis=0):
    """
    Take rows (axis=0) or columns (axis=1) from `data` by `indices`.

    Fix: an invalid axis previously fell off the end of the if/elif and
    silently returned None; it now raises ValueError, matching the axis
    validation in `group_mask`.
    """
    if axis == 0:
        return data[indices]
    elif axis == 1:
        return data[:, indices]
    else:
        raise ValueError("0 <= axis < 2")
class ScatterPlotItem(pg.ScatterPlotItem):
    # Scatter item that honours its 'antialias'/'pxMode' options by setting
    # the matching QPainter render hints before delegating to pyqtgraph.
    def paint(self, painter, option, widget):
        if self.opts["antialias"]:
            painter.setRenderHint(QPainter.Antialiasing, True)
        if self.opts["pxMode"]:
            painter.setRenderHint(QPainter.SmoothPixmapTransform, True)
        super().paint(painter, option, widget)
class ProgressBarDiscard(QObject):
    """Progress-bar proxy that drops re-entrant progress updates.

    OWBaseWidget.progressBarSet pumps the Qt event queue, so another
    advance signal can arrive while the first is still being handled;
    forwarding it would recurse ('event queue climbing').  A value is
    forwarded only when no forward is already in flight.
    """

    def __init__(self, parent, redirect):
        QObject.__init__(self, parent)
        self.redirect = redirect
        self._delay = False  # True while a forwarded call is in progress

    @Slot(float)
    def progressBarSet(self, value):
        if self._delay:
            return  # a forward is in flight -- discard this update
        try:
            self._delay = True
            self.redirect.progressBarSet(value)
        finally:
            self._delay = False
def withexcepthook(func):
    """Wrap `func` so exceptions are reported via sys.excepthook, then re-raised."""
    def reporting_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except BaseException:
            # Report through the installed hook but let the caller see it too.
            sys.excepthook(*sys.exc_info())
            raise
    return reporting_wrapper
class OWMAPlot(widget.OWWidget):
name = "MA Plot"
description = "Normalize expression array data on a MA - plot"
icon = "../widgets/icons/MAPlot.svg"
priority = 5000
inputs = [("Expression array", Orange.data.Table, "setData")]
outputs = [("Normalized expression array", Orange.data.Table,
widget.Default),
("Filtered expression array", Orange.data.Table)]
CENTER_METHODS = [("Average", expression.MA_center_average),
("Lowess (fast - interpolated)", expression.MA_center_lowess_fast),
("Lowess", expression.MA_center_lowess)]
MERGE_METHODS = [("Average", numpy.ma.average),
("Median", numpy.ma.median),
("Geometric mean", expression.geometric_mean)]
settingsHandler = settings.DomainContextHandler()
appendZScore = settings.Setting(False)
appendRIValues = settings.Setting(False)
selectedGroup = settings.ContextSetting(0)
selectedCenterMethod = settings.Setting(0)
selectedMergeMethod = settings.Setting(0)
zCutoff = settings.Setting(1.96)
autoCommit = settings.Setting(False)
    def __init__(self, parent=None):
        """Build the control panel, plot area and initial widget state."""
        super().__init__(parent)
        # GUI
        # Info box showing the current input summary.
        box = gui.widgetBox(self.controlArea, "Info", addSpace=True)
        self.infoBox = gui.widgetLabel(box, "No data on input.")
        # Group (split) selection; items are filled when data arrives.
        box = gui.widgetBox(self.controlArea, "Split by", addSpace=True)
        self.groupCombo = gui.comboBox(
            box, self, "selectedGroup", callback=self.onGroupSelection)
        # Fold-change centering and replicate-merge method selectors.
        gui.comboBox(self.controlArea, self, "selectedCenterMethod",
                     box="Center Fold-change Using",
                     items=[name for name, _ in self.CENTER_METHODS],
                     callback=self.onCenterMethodChange,
                     addSpace=True)
        gui.comboBox(self.controlArea, self, "selectedMergeMethod",
                     box="Merge Replicates",
                     items=[name for name, _ in self.MERGE_METHODS],
                     tooltip="Select the method for replicate merging",
                     callback=self.onMergeMethodChange,
                     addSpace=True)
        box = gui.doubleSpin(self.controlArea, self, "zCutoff", 0.0, 3.0, 0.01,
                             box="Z-Score Cutoff",
                             callback=[self.replotMA, self.commitIf])
        gui.separator(self.controlArea)
        # Output options.  NOTE(review): "Ouput" is a typo in the user-visible
        # label; left untouched here since this edit changes comments only.
        box = gui.widgetBox(self.controlArea, "Ouput")
        gui.checkBox(box, self, "appendZScore", "Append Z-Scores",
                     tooltip="Append calculated Z-Scores to output",
                     callback=self.commitIf)
        gui.checkBox(box, self, "appendRIValues",
                     "Append Log Ratio and Intensity values",
                     tooltip="Append calculated Log Ratio and Intensity "
                             "values to output data",
                     callback=self.commitIf)
        gui.rubber(self.controlArea)
        gui.auto_commit(self.controlArea, self, "autoCommit", "Commit")
        # MA plot area.
        self.graph = pg.PlotWidget(background="w")
        self.graph.getAxis("bottom").setLabel("Intensity: log<sub>10</sub>(R*G)")
        self.graph.getAxis("left").setLabel("Log ratio: log<sub>2</sub>(R/G)")
        self.mainArea.layout().addWidget(self.graph)
        # Internal state: current groups, split/merged/centered data caches.
        self.groups = []
        self.split_data = None, None
        self.merged_splits = None, None
        self.centered = None, None
        self.changedFlag = False
        self.data = None
        # Single-threaded executor so normalization runs serially off the GUI thread.
        self._executor = concurrent.ThreadExecutor(
            threadPool=QThreadPool(maxThreadCount=1))
    def sizeHint(self):
        """Preferred default size of the widget window."""
        return QSize(800, 600)
def onGroupSelection(self):
if self.data:
self.updateInfoBox()
self.splitData()
self.runNormalization()
def onCenterMethodChange(self):
if self.data:
self.runNormalization()
def onMergeMethodChange(self):
if self.data:
self.splitData()
self.runNormalization()
def proposeGroups(self, data):
col_labels = [attr.attributes.items()
for attr in data.domain.attributes]
col_labels = sorted(reduce(set.union, col_labels, set()))
col_labels = [(key, value, 1) for key, value in col_labels]
attrs = [attr for attr in data.domain.variables + data.domain.metas
if is_discrete(attr)]
row_labels = [(attr.name, value, 0)
for attr in attrs for value in attr.values]
def filterSingleValues(labels):
ret = []
for name, value, axis in labels:
match = |
barseghyanartur/django-rest-framework-hstore | tests/drf_hstore_tests/urls.py | Python | mit | 492 | 0.010163 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin

admin.autodiscover()

# Admin site URLs.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
)

# Test views are appended after the admin URLs.
from .views import urlpatterns as test_urlpatterns
urlpatterns += test_urlpatterns

# Optional grappelli admin skin, only when it is installed.
if 'grappelli' in settings.INSTALLED_APPS:
    urlpatterns += patterns(
        '',
        url(r'^grappelli/', include('grappelli.urls')),
    )
mhbu50/frappe | frappe/core/doctype/communication/communication.py | Python | mit | 17,315 | 0.027317 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from collections import Counter
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import validate_email_address, strip_html, cstr, time_diff_in_seconds
from frappe.core.doctype.communication.email import validate_email
from frappe.core.doctype.communication.mixins import CommunicationEmailMixin
from frappe.core.utils import get_parent_doc
from frappe.utils.bot import BotReply
from frappe.utils import parse_addr, split_emails
from frappe.core.doctype.comment.comment import update_comment_in_doc
from email.utils import parseaddr
from urllib.parse import unquote
from frappe.utils.user import is_system_user
from frappe.contacts.doctype.contact.contact import get_contact_name
from frappe.automation.doctype.assignment_rule.assignment_rule import apply as apply_assignment_rule
exclude_from_linked_with = True
class Communication(Document, CommunicationEmailMixin):
"""Communication represents an external communication like Email.
"""
no_feed_on_delete = True
DOCTYPE = 'Communication'
	def onload(self):
		"""On load, queue marking the source IMAP message as read.

		Applies only to received email Communications carrying a valid UID;
		skipped if an unprocessed Email Flag Queue entry already exists.
		"""
		if self.communication_type == "Communication" and self.communication_medium == "Email" \
			and self.sent_or_received == "Received" and self.uid and self.uid != -1:
			# Nothing to do if a pending (not completed) entry already exists.
			email_flag_queue = frappe.db.get_value("Email Flag Queue", {
				"communication": self.name,
				"is_completed": 0})
			if email_flag_queue:
				return
			frappe.get_doc({
				"doctype": "Email Flag Queue",
				"action": "Read",
				"communication": self.name,
				"uid": self.uid,
				"email_account": self.email_account
			}).insert(ignore_permissions=True)
			frappe.db.commit()
	def validate(self):
		"""Fill defaults and validate linked data before saving.

		Sets user/subject/direction defaults, validates recipients, and for
		e-mails keeps timeline links and the sender's full name up to date.
		"""
		self.validate_reference()
		if not self.user:
			self.user = frappe.session.user
		if not self.subject:
			# Derive a subject from the first ~140 characters of the content.
			self.subject = strip_html((self.content or "")[:141])
		if not self.sent_or_received:
			# Default direction is outgoing; outgoing mail is implicitly seen.
			self.seen = 1
			self.sent_or_received = "Sent"
		self.set_status()
		validate_email(self)
		if self.communication_medium == "Email":
			self.parse_email_for_timeline_links()
			self.set_timeline_links()
			self.deduplicate_timeline_links()
		self.set_sender_full_name()
	def validate_reference(self):
		"""Validate the (reference_doctype, reference_name) link.

		Caches the reference owner, rejects links to child-table rows and
		circular chains of Communication documents.
		"""
		if self.reference_doctype and self.reference_name:
			if not self.reference_owner:
				self.reference_owner = frappe.db.get_value(self.reference_doctype, self.reference_name, "owner")
			# prevent communication against a child table
			if frappe.get_meta(self.reference_doctype).istable:
				frappe.throw(_("Cannot create a {0} against a child document: {1}")
					.format(_(self.communication_type), _(self.reference_doctype)))
			# Prevent circular linking of Communication DocTypes
			if self.reference_doctype == "Communication":
				circular_linking = False
				doc = get_parent_doc(self)
				# Walk up the chain of referenced Communications; a cycle
				# exists if we ever come back to this document.
				while doc.reference_doctype == "Communication":
					if get_parent_doc(doc).name==self.name:
						circular_linking = True
						break
					doc = get_parent_doc(doc)
				if circular_linking:
					frappe.throw(_("Please make sure the Reference Communication Docs are not circularly linked."), frappe.CircularLinkingError)
	def after_insert(self):
		"""Post-insert hooks: mark replied status and notify listeners."""
		if not (self.reference_doctype and self.reference_name):
			return
		if self.reference_doctype == "Communication" and self.sent_or_received == "Sent":
			# Sending a reply marks the referenced Communication as replied.
			frappe.db.set_value("Communication", self.reference_name, "status", "Replied")
		if self.communication_type == "Communication":
			self.notify_change('add')
		elif self.communication_type in ("Chat", "Notification", "Bot"):
			if self.reference_name == frappe.session.user:
				message = self.as_dict()
				message['broadcast'] = True
				frappe.publish_realtime('new_message', message, after_commit=True)
			else:
				# reference_name contains the user who is addressed in the
				# message's page comment
				frappe.publish_realtime('new_message', self.as_dict(),
					user=self.reference_name, after_commit=True)
	def on_update(self):
		"""After-save hook: refresh comment counts, parent document and bot reply."""
		# add to _comment property of the doctype, so it shows up in
		# comments count for the list view
		update_comment_in_doc(self)
		if self.comment_type != 'Updated':
			update_parent_document_on_communication(self)
		self.bot_reply()
def on_trash(self):
if self.communication_type == "Communication":
self.notify_change('delete')
@property
def sende | r_mailid(self):
return parse_addr(self.sender)[1] if self.sender else ""
@staticmethod
def _get_emails_list(emails=None, exclude_displayname = False):
"""Returns list of emails from given email string.
* Removes duplicate mailids
* Removes display name from email address if exclude_displayname is True
"""
emails = split_emails(emails) if isinstance(emails, str) else (emails or [])
if exclude_displayname:
return [email.lower() for email in set([parse_addr(email)[1] for email in emails]) if email]
return [email.lower() for email in set(emails) if email]
	def to_list(self, exclude_displayname = True):
		"""Return the de-duplicated "To" recipient list.
		"""
		return self._get_emails_list(self.recipients, exclude_displayname=exclude_displayname)
	def cc_list(self, exclude_displayname = True):
		"""Return the de-duplicated CC recipient list.
		"""
		return self._get_emails_list(self.cc, exclude_displayname=exclude_displayname)
	def bcc_list(self, exclude_displayname = True):
		"""Return the de-duplicated BCC recipient list.
		"""
		return self._get_emails_list(self.bcc, exclude_displayname=exclude_displayname)
def get_attachments(self):
attachments = frappe.get_all(
"File",
fields=["name", "file_name", "file_url", "is_private"],
filters = {"attached_to_name": self.name, "attached_to_doctype": self.DOCTYPE}
)
return attachments
def notify_change(self, action):
frappe.publish_realtime('update_docinfo_for_{}_{}'.format(self.reference_doctype, self.reference_name), {
'doc': self.as_dict(),
'key': 'communications',
'action': action
}, after_commit=True)
	def set_status(self):
		"""Set the initial status for a new document.

		Linked (has a reference) > Open (plain communications) > Closed.
		Additionally flags mail whose sender matches a spam Email Rule.
		"""
		if not self.is_new():
			return
		if self.reference_doctype and self.reference_name:
			self.status = "Linked"
		elif self.communication_type=="Communication":
			self.status = "Open"
		else:
			self.status = "Closed"
		# set email status to spam
		email_rule = frappe.db.get_value("Email Rule", { "email_id": self.sender, "is_spam":1 })
		# NOTE(review): the spam check runs when sent_or_received == "Sent";
		# a sender-based spam rule looks like it should apply to "Received"
		# mail instead — confirm against the email inbox code path.
		if self.communication_type == "Communication" and self.communication_medium == "Email" \
			and self.sent_or_received == "Sent" and email_rule:
			self.email_status = "Spam"
@classmethod
def find(cls, name, ignore_error=False):
try:
return frappe.get_doc(cls.DOCTYPE, name)
except frappe.DoesNotExistError:
if ignore_error:
return
raise
	@classmethod
	def find_one_by_filters(cls, *, order_by=None, **kwargs):
		"""Return the first Communication matching the given filters, or None."""
		name = frappe.db.get_value(cls.DOCTYPE, kwargs, order_by=order_by)
		return cls.find(name) if name else None
	def update_db(self, **kwargs):
		# Write the given field values directly to this document's DB row.
		frappe.db.set_value(self.DOCTYPE, self.name, kwargs)
	def set_sender_full_name(self):
		"""Resolve the sender's display name when it is not set yet.

		Administrator/Guest are special-cased; otherwise the name is taken
		from the address itself, then the User record, then a Contact with
		the same e-mail, and finally falls back to the bare address.
		"""
		if not self.sender_full_name and self.sender:
			if self.sender == "Administrator":
				self.sender_full_name = frappe.db.get_value("User", "Administrator", "full_name")
				self.sender = frappe.db.get_value("User", "Administrator", "email")
			elif self.sender == "Guest":
				self.sender_full_name = self.sender
				self.sender = None
			else:
				if self.sent_or_received=='Sent':
					# Outgoing mail must have a syntactically valid sender.
					validate_email_address(self.sender, throw=True)
				sender_name, sender_email = parse_addr(self.sender)
				if sender_name == sender_email:
					sender_name = None
				self.sender = sender_email
				self.sender_full_name = sender_name
				if not self.sender_full_name:
					self.sender_full_name = frappe.db.get_value('User', self.sender, 'full_name')
				if not self.sender_full_name:
					# Fall back to a Contact with this e-mail address.
					first_name, last_name = frappe.db.get_value('Contact',
						filters={'email_id': sender_email},
						fieldname=['first_name', 'last_name']
					) or [None, None]
					self.sender_full_name = (first_name or '') + (last_name or '')
				if not self.sender_full_name:
					self.sender_full_name = sender_email
def bot_reply(self):
if self.comment_type == 'Bot' and self.communication_type == 'Chat':
reply = BotReply().get_reply |
ministryofjustice/addressfinder | manage.py | Python | mit | 256 | 0 | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "addressfinder.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
JasonLC506/CollaborativeFiltering | traintestSplit.py | Python | mit | 1,241 | 0.016116 | import numpy as np
def split(frac_training, frac_valid, datafile, file_train, file_valid, file_test):
    """Randomly partition the lines of *datafile* into train/valid/test files.

    Each line is independently assigned to the training split with probability
    *frac_training*, the validation split with probability *frac_valid*, and
    the test split otherwise.  The per-split line counts are printed.

    Fixes over the original: the three output files are now closed (they were
    left open), and Python 2 ``print`` statements were replaced with calls.
    """
    probs = [frac_training, frac_valid, 1 - frac_training - frac_valid]
    counts = [0, 0, 0]
    with open(file_train, "w") as f_train, \
         open(file_valid, "w") as f_valid, \
         open(file_test, "w") as f_test:
        outputs = (f_train, f_valid, f_test)
        with open(datafile, "r") as f:
            for line in f:
                # One multinomial draw picks the destination split (0/1/2).
                set_id = np.argmax(np.random.multinomial(1, probs, size=1))
                outputs[set_id].write(line)
                counts[set_id] += 1
    for count in counts:
        print(count)
if __name__ == "__main__":
    frac_training = 0.7
    frac_valid = 0.1
    datafile = "data/reaction_NYTWaPoWSJ_K10"

    # Output file names encode the split fraction, e.g. "..._0.7train".
    def out_name(frac, tag):
        return datafile + "_" + str(frac) + tag

    split(frac_training=frac_training, frac_valid=frac_valid,
          datafile=datafile,
          file_train=out_name(frac_training, "train"),
          file_valid=out_name(frac_valid, "valid"),
          file_test=out_name(1 - frac_training - frac_valid, "test"))
kaushik94/sympy | sympy/core/decorators.py | Python | bsd-3-clause | 4,618 | 0.000217 | """
SymPy core decorators.
The purpose of this module is to expose decorators without any other
dependencies, so that they can be easily imported anywhere in sympy/core.
"""
from __future__ import print_function, division
from functools import wraps
from .sympify import SympifyError, sympify
from sympy.core.compatibility import get_function_code
def deprecated(**decorator_kwargs):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used."""
    from sympy.utilities.exceptions import SymPyDeprecationWarning

    def _warn_deprecation(wrapped, stacklevel):
        # Default the warning's "feature" to the wrapped object's name.
        decorator_kwargs.setdefault('feature', wrapped.__name__)
        SymPyDeprecationWarning(**decorator_kwargs).warn(stacklevel=stacklevel)

    def deprecated_decorator(wrapped):
        if hasattr(wrapped, '__mro__'):  # wrapped is actually a class
            # Subclass the deprecated class so every instantiation warns,
            # while keeping its public metadata intact.
            class wrapper(wrapped):
                __doc__ = wrapped.__doc__
                __name__ = wrapped.__name__
                __module__ = wrapped.__module__
                _sympy_deprecated_func = wrapped
                def __init__(self, *args, **kwargs):
                    _warn_deprecation(wrapped, 4)
                    super(wrapper, self).__init__(*args, **kwargs)
        else:
            @wraps(wrapped)
            def wrapper(*args, **kwargs):
                _warn_deprecation(wrapped, 3)
                return wrapped(*args, **kwargs)
            wrapper._sympy_deprecated_func = wrapped
        return wrapper
    return deprecated_decorator
def _sympifyit(arg, retval=None):
    """decorator to smartly _sympify function arguments

    @_sympifyit('other', NotImplemented)
    def add(self, other):
        ...

    In add, other can be thought of as already being a SymPy object.
    If it is not, the code is likely to catch an exception, then other will
    be explicitly _sympified, and the whole code restarted.
    if _sympify(arg) fails, *retval* (e.g. NotImplemented) will be returned.

    see: __sympifyit
    """
    return lambda func: __sympifyit(func, arg, retval)
def __sympifyit(func, arg, retval=None):
    """Wrap *func* so its second argument is sympified before the call.

    Internal helper -- use ``_sympifyit`` instead.
    """
    code = get_function_code(func)
    # Only two-argument callables f(a, b) are supported.
    if not code.co_argcount:
        raise LookupError("func not found")
    # The sympified parameter must be the second positional argument.
    assert code.co_varnames[1] == arg
    if retval is None:
        @wraps(func)
        def __sympifyit_wrapper(a, b):
            return func(a, sympify(b, strict=True))
    else:
        @wraps(func)
        def __sympifyit_wrapper(a, b):
            try:
                # If an external class has _op_priority, it knows how to deal
                # with sympy objects. Otherwise, it must be converted.
                if not hasattr(b, '_op_priority'):
                    b = sympify(b, strict=True)
                return func(a, b)
            except SympifyError:
                return retval
    return __sympifyit_wrapper
def call_highest_priority(method_name):
    """Decorator for binary special methods honouring ``_op_priority``.

    If ``other`` carries a higher ``_op_priority`` than ``self``, the
    reflected method named *method_name* on ``other`` handles the operation
    instead of the decorated one::

        # Call other.__rmul__ if other._op_priority > self._op_priority
        @call_highest_priority('__rmul__')
        def __mul__(self, other):
            ...
    """
    def priority_decorator(func):
        @wraps(func)
        def binary_op_wrapper(self, other):
            if hasattr(other, '_op_priority') and \
                    other._op_priority > self._op_priority:
                reflected = getattr(other, method_name, None)
                if reflected is not None:
                    return reflected(self)
            return func(self, other)
        return binary_op_wrapper
    return priority_decorator
|
ma-renaud/Basement_Monitoring_GUI | state_machine.py | Python | gpl-3.0 | 218 | 0 | class StateMachine:
    def __init__(self, initial_state):
        # The state object currently handling inputs; must expose run(input).
        self.currentState = initial_state
# Template method:
def proces | s(self, inputs):
for i in | inputs:
self.currentState.run(i)
|
Arno-Nymous/pyload | module/database/StorageDatabase.py | Python | gpl-3.0 | 1,952 | 0.005123 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from module.database import style
from module.database import DatabaseBackend
class StorageMethods():
    """Key/value storage helpers mixed into the pyLoad database backend.

    Values live in the ``storage`` table, namespaced by ``identifier``.
    All methods run on the database thread via @style.queue, so the first
    parameter is the backend instance (``db``) rather than ``self``.
    """

    @style.queue
    def setStorage(db, identifier, key, value):
        """Insert or update the value stored under (identifier, key)."""
        db.c.execute("SELECT id FROM storage WHERE identifier=? AND key=?", (identifier, key))
        if db.c.fetchone() is None:
            db.c.execute("INSERT INTO storage (identifier, key, value) VALUES (?, ?, ?)", (identifier, key, value))
        else:
            db.c.execute("UPDATE storage SET value=? WHERE identifier=? AND key=?", (value, identifier, key))

    @style.queue
    def getStorage(db, identifier, key=None):
        """Return the value for (identifier, key); with key=None, a dict of all pairs."""
        if key is None:
            values = {}
            db.c.execute("SELECT key, value FROM storage WHERE identifier=?", (identifier, ))
            for row in db.c:
                values[row[0]] = row[1]
            return values
        db.c.execute("SELECT value FROM storage WHERE identifier=? AND key=?", (identifier, key))
        row = db.c.fetchone()
        if row is not None:
            return row[0]

    @style.queue
    def delStorage(db, identifier, key):
        """Delete the value stored under (identifier, key)."""
        db.c.execute("DELETE FROM storage WHERE identifier=? AND key=?", (identifier, key))
DatabaseBackend.registerSub(StorageMethods)
|
mattrobenolt/django-casscache | example/example/settings.py | Python | bsd-3-clause | 5,718 | 0.001224 | # Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_@k=t)ziw_ff(s)i@w6x!tprya%=t*k=63nr&y4h)duw5(&txx'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.a | pplication'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django. | contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_casscache',
)
CACHES = {
'default': {
'BACKEND': 'django_casscache.backend.CasscacheCache',
'LOCATION': ['127.0.0.1:9042'],
'TIMEOUT': 0,
'OPTIONS': {
'keyspace': 'threads',
'columnfamily': 'threads',
}
}
}
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
JiscPER/magnificent-octopus | octopus/modules/account/authorise.py | Python | apache-2.0 | 679 | 0.010309 | from octopus.core import app
class Authorise(object):
    """Role-based authorisation checks."""

    @classmethod
    def has_role(cls, role, reference):
        """True if *reference* (the user's role list) grants *role*.

        The configured super-user role is always authorised.  Role
        expansion (inherited roles) is not implemented yet, so anything
        else fails to authorise.
        """
        super_role = app.config["ACCOUNT_SUPER_USER_ROLE"]
        return super_role in reference or role in reference
|
hanya/MRI | pythonpath/mytools_Mri/web.py | Python | apache-2.0 | 12,480 | 0.005849 | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uno
try:
from subprocess import Popen
def execute(path, url):
Popen([path, url]).pid
except:
import os
try:
import thread
except:
import _thread as thread
def execute(path, url):
if os.sep == '\\':
ctx = uno.getComponentContext()
systemexecute = ctx.getServiceManager().createInstanceWithContext(
"com.sun.star.system.SystemShellExecute", ctx)
systemexecute.execute(
path, url, 1)
else:
thread.start_new_thread(
lambda path, url: os.system("%s %s" % (path, url)), (path, url)
)
class Web(object):
    """Open URLs in a configurable external browser."""

    def __init__(self, browser):
        """*browser* is the path of the browser executable."""
        self.set_browser(browser)

    def set_browser(self, browser):
        """Set the browser executable path used by open_url."""
        self.browser = browser

    def open_url(self, url):
        """Open *url* with the configured browser.

        Launch errors propagate to the caller; the previous
        ``try: ... except: raise`` was a no-op and has been removed.
        """
        execute(self.browser, url)
class IDL(Web):
    """Web helper that opens pages of the UNO IDL reference from the SDK."""

    def __init__(self, cast, config):
        self.set_browser(config.browser)
        self.cast = cast
        self.set_sdk_path(config.sdk_path)

    def set_sdk_path(self, sdk_path):
        """Remember the SDK directory, normalised to end with '/'."""
        if not sdk_path.endswith('/'):
            sdk_path = "%s/" % sdk_path
        self.sdk_path = sdk_path

    def set_browser(self, browser):
        """Store the browser path, converting its file URL to a system path."""
        Web.set_browser(self, uno.fileUrlToSystemPath(browser).replace("\\", '/'))

    def open_url(self, url):
        try:
            Web.open_url(self, url)
        except:
            # Report any launch failure through the host application.
            self.cast.error("Fix your browser configuration.")

    def open_idl_reference(self, idltarget, word=''):
        """Open the reference page for *idltarget*, optionally at anchor *word*."""
        if not idltarget:
            self.cast.status('IDL target was not found.')
            return
        page = idltarget.replace('.', '/')
        if word:
            idlurl = "%sdocs/common/ref/%s.html#%s" % (self.sdk_path, page, word)
        else:
            idlurl = "%sdocs/common/ref/%s.html" % (self.sdk_path, page)
        self.open_url(idlurl)
class DoxygenIDLRef(IDL):
""" Link to Doxygen based IDL reference. """
    class UnableToResolveAnchorException(Exception):
        """Raised when the anchor cannot be derived directly, e.g. because the
        type name is abbreviated in its IDL definition; open_idl_reference then
        falls back to the Doxygen search index."""
    def __init__(self, cast, config):
        super(DoxygenIDLRef, self).__init__(cast, config)
        # Cache of parsed Doxygen search-index files, loaded lazily by
        # _search_in_index from docs/idl/ref/search/*_XX.js.
        self.index = {}  # file_name: list, from search/*_XX.js
        # ToDo: Use index file to get link always? Convert keyword list into dict.
    def open_idl_reference(self, idltarget, word=""):
        """Open the Doxygen-generated reference page for *idltarget*.

        With *word*, link to the member's anchor; if the anchor cannot be
        resolved directly, fall back to the Doxygen search index.
        """
        if idltarget:
            # ToDo support link to anchor
            if word:
                template = "{BASE}docs/idl/ref/{TYPE}{NAME}.html#{ANCHOR}"
                try:
                    type, target = self._get_target(idltarget)
                except Exception as e:
                    self.cast.status("Error: " + str(e))
                    return
                try:
                    # NOTE(review): _get_anchor is not defined in this chunk;
                    # presumably declared elsewhere in the class — confirm.
                    anchor = self._get_anchor(idltarget, word, type)
                    idlurl = template.format(BASE=self.sdk_path,
                                TYPE=type, NAME=target, ANCHOR=anchor)
                except self.UnableToResolveAnchorException:
                    idlurl = self._get_idlurl_from_index(idltarget, word, type)
            else:
                template = "%sdocs/idl/ref/%s%s.html"
                type, target = self._get_target(idltarget)
                idlurl = template % (self.sdk_path, type, target)
            self.open_url(idlurl)
        else:
            self.cast.status("IDL target was not found.")
    def _get_idlurl_from_index(self, idltarget, word, type):
        """ Find entry in index files generated by Doxygen. """
        target = idltarget
        if type in ("service", "struct", "exception"):
            pass
        else:
            # For other types the member itself is the lookup target.
            target = idltarget + "::" + word
        try:
            idl = self.cast.engine.tdm.getByHierarchicalName(target)
        except:
            idl = None
        if idl:
            idl_type_name = idl.getTypeClass().value
            full_name = idl.getName()
            # NOTE(review): prefix/declared_class stay unbound for any other
            # idl_type_name and the .replace below would raise — confirm that
            # only these three kinds can reach this method.
            if idl_type_name == "INTERFACE_METHOD":
                prefix = "functions"
                declared_class = full_name[0:full_name.rfind("::")]
            elif idl_type_name == "INTERFACE_ATTRIBUTE" or \
                idl_type_name == "SERVICE":
                prefix = "variables"
                declared_class = self._find_property_declearing_type(idl, idltarget, word)
            elif idl_type_name in ("STRUCT", "EXCEPTION"):
                prefix = "variables"
                declared_class = idltarget
            declared_class = declared_class.replace(".", "::")
            path = self._search_in_index(prefix, word, declared_class)
            return "{BASE}docs/idl/ref/{}".format(path, BASE=self.sdk_path)
def _search_in_index(self, prefix, word, declared_class):
import json
index_file = "{}_{}.js".format(prefix, hex(ord(word[0].lower()))[2:].lower())
if not index_file in self.index:
index_path = "{BASE}docs/idl/ref/search/{}".format(index_file, BASE=self.sdk_path)
if index_path.startswith("file://"):
with open(uno.fileUrlToSystemPath(index_path), "r", encoding="utf-8") as f:
s = f.read()
elif index_path.startswith("http://"):
import urllib.request
f = urllib.request.urlopen(index_path)
s = f.read().decode("utf-8")
s = s[16:-2].replace("'", '"')
j = json.loads(s, "utf-8")
self.index[index_file] = j
j = self.index[index_file]
_word = word.lower()
found = None
for item in j:
if item[0] == _word:
entries = item[1]
if entries[0] == word:
if len(entries) == 2:
if entries[1][2] == declared_class:
found = entries[1][0]
break
else:
_name = "{}::{}()".format(declared_class, word)
for entry in entries[1:]:
if entry[2] == _name:
found = entry[0]
break
return found.lstrip("../")
def _get_target(self, idltarget):
idl_type = self.cast.engine.get_module_type(idltarget)
if idl_type in ("CONSTANTS", "TYPEDEF"):
return "namespace", idltarget.replace(".", "_1_1")
elif idl_type == "ENUM":
return "namespace", idltarget[0:idltarget.rfind(".")].replace(".", "_1_1")
return idl_type.lower(), idltarget.replace(".", "_1_1")
def _find_property_declearing_type(self, idl, idltarget, word):
full_name = "{BASE}.{NAME}".format(BASE=idltarget, NAME=word)
found = None
for prop in idl.getProperties():
if prop.getName() == full_name:
found = prop
break
if found:
return self._check_type_name(found.getPropertyTypeDescription(), idlta |
aeklant/scipy | pavement.py | Python | bsd-3-clause | 23,552 | 0.003397 | """
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g., to make sure the sphinx doc
is built against the built SciPy, not an installed one).
The release is assumed to be done on OS X. See release.sh for a script that
employs the Paver tasks defined in this file, and builds everything required
for a release at once.
Building a Windows installer from Wine
======================================
The Python version the installer is for can be specified with the ``-p 2.6``
switch (this works for all installer tasks). To build a simple (no SSE
instructions) installer, do::
paver bdist_wininst_simple
This assumes that blas/lapack are in c:\local\lib inside drive_c. You will
have to make sure your Wine python locations (WINE_PYS) are configured
correctly.
The superpack requires all the Atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building an installer for OS X
==============================
For a simple installer, which is just an mpkg inside a dmg, do::
paver simple_dmg
For a more fancy installer, which includes documentation and looks better, do::
paver pdf # needs to be done only once
paver dmg
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release_and_log
This automatically puts the checksum into NOTES.txt and writes the Changelog,
which can be uploaded to Github Releases (and maybe sourceforge for historical
reasons, see gh-4939).
TODO
====
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying Python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import subprocess
import re
import shutil
import warnings
from hashlib import md5
from hashlib import sha256
try:
from paver.tasks import VERSION as _PVER
if not _PVER >= '1.0':
raise RuntimeError("paver version >= 1.0 required (was %s)" % _PVER)
except ImportError:
raise RuntimeError("paver version >= 1.0 required")
import paver
import paver.doctools
import paver.path
from paver.easy import options, Bunch, task, needs, dry, sh, call_task, cmdopts
sys.path.insert(0, os.path.dirname(__file__))
try:
setup_py = __import__("setup")
FULLVERSION = setup_py.VERSION
# This is duplicated from setup.py
if os.path.exists('.git'):
GIT_REVISION = setup_py.git_version()
else:
GIT_REVISION = "Unknown"
if not setup_py.ISRELEASED:
if GIT_REVISION == "Unknown":
FULLVERSION += '.dev0+Unknown'
else:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
sys.path.pop(0)
try:
# Ensure sensible file permissions
os.umask(0o022)
except AttributeError:
# No umask on non-posix
pass
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
# Source of the release notes
RELEASE = 'doc/release/1.5.0-notes.rst'
# Start/end of the log (from git)
LOG_START = 'v1.4.0'
LOG_END = 'master'
#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
# Default Python version
PYVER="3.6"
# Paver options object, holds all default dirs
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
virtualenv=Bunch(packages_to_install=["sphinx==1.8.5", "numpydoc"],
no_site_packages=False),
sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
superpack=Bunch(builddir="build-superpack",
bindir=os.path.join("build-superpack","binaries")),
| installers=Bunch(releasedir="release",
installersdir=os.path.join("release", "installers")),
doc=Bunch(doc_root="doc",
sdir=os.path.join("doc", "source"),
bdir=os.path.join("doc", "build"),
bdir_latex=os.path.join("doc", "build", "latex"),
destdir_pdf=os.pat | h.join("build_doc", "pdf")),
html=Bunch(builddir=os.path.join("build", "html")),
dmg=Bunch(python_version=PYVER),
bdist_wininst_simple=Bunch(python_version=PYVER),)
# Where we can find BLAS/LAPACK/ATLAS on Windows/Wine
SITECFG = {"sse3" : {'BLAS': 'None', 'LAPACK': 'None',
'ATLAS': r'C:\local\lib\atlas\sse3'},
"sse2" : {'BLAS': 'None', 'LAPACK': 'None',
'ATLAS': r'C:\local\lib\atlas\sse2'},
"nosse" : {'ATLAS': 'None', 'BLAS': r'C:\local\lib\atlas\nosse',
'LAPACK': r'C:\local\lib\atlas\nosse'}}
# Wine config for win32 builds
if sys.platform == "win32":
WINE_PY35 = [r"C:\Python35\python.exe"]
WINDOWS_ENV = os.environ
MAKENSIS = ["makensis"]
elif sys.platform == "darwin":
WINE_PY35 = ["wine", os.environ['HOME'] + "/.wine/drive_c/Python35/python.exe"]
WINDOWS_ENV = os.environ
WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
MAKENSIS = ["wine", "makensis"]
else:
WINE_PY35 = [os.environ['HOME'] + "/.wine/drive_c/Python35/python.exe"]
WINDOWS_ENV = os.environ
MAKENSIS = ["wine", "makensis"]
WINE_PYS = {'3.5':WINE_PY35}
# Framework Python locations on OS X
MPKG_PYTHON = {
"3.5": "/Library/Frameworks/Python.framework/Versions/3.5/bin/python3"
}
# Full path to the *static* gfortran runtime
LIBGFORTRAN_A_PATH = "/usr/local/lib/libgfortran.a"
#--------------------------------------
# Utility functions and bootstrap stuff
#--------------------------------------
def parse_numpy_version(pyexec):
if isinstance(pyexec, str):
cmd = [pyexec, "-c", "'import numpy; print(numpy.version.version)'"]
else:
# sequence for pyexec
cmd = pyexec + ["-c", "'import numpy; print(numpy.version.version)'"]
# Execute in shell because launching python from python does not work
# (hangs)
p = subprocess.Popen(" ".join(cmd), stdout=subprocess.PIPE, shell=True)
out = p.communicate()[0]
if p.returncode:
raise RuntimeError("Command %s failed" % " ".join(cmd))
a = re.compile("^([0-9]+)\.([0-9]+)\.([0-9]+)")
if a:
return tuple([int(i) for i in a.match(out).groups()[:3]])
else:
raise ValueError("Could not parse version (%s)" % out)
@task
def bootstrap():
"""create virtualenv in ./install"""
try:
import virtualenv
except ImportError:
raise RuntimeError("virtualenv is needed for bootstrap")
bdir = options.bootstrap_dir
if not os.path.exists(bdir):
os.makedirs(bdir)
bscript = "bootstrap.py"
options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
bscript)
options.bootstrap.no_site_packages = False
call_task('paver.virtual.bootstrap')
sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
"""Remove build, dist, egg-info garbage."""
d = ['build', 'dist', 'scipy.egg-info']
for i in d:
if os.path.exists(i):
shutil.rmtree(i)
bdir = os.path.join('doc', options.sphinx.builddir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
def clean_bootstrap():
bdir = os.path.join(options.bootstrap.bootstrap_dir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
@needs('clean', 'clean_bootstrap')
def nuke():
"""Remove everything: build dir, installers, bootstrap dirs, etc..."""
for d in [options.superpack.builddir, options.installers.releasedir]:
if os.path.exists(d):
shutil.rmtree(d)
#--------------------
# Documentation tasks
#--------------------
@task
def html(options):
"""Build SciPy documentation and put it into build/docs"""
# Don't use paver html target because of scipy bootstrapping pro |
jiaphuan/models | research/real_nvp/imnet_formatting.py | Python | apache-2.0 | 3,296 | 0 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""LSUN dataset formatting.
Download and format the Imagenet dataset as follow:
mkdir [IMAGENET_PATH]
cd [IMAGENET_PATH]
for FILENAME in train_32x32.tar valid_32x32.tar trai | n_64x64.tar valid_64x64.tar
do
curl -O http://image-net.org/small/$FILENAME
tar -xvf $FILENAME
done
Then use the script as follow:
for DIRNAME in train_32x32 valid_32x32 train_64x64 valid_64x64
do
python imnet_formatting.py \
--file_out $DIRNAME \
--fn_root $DIRNAME
done
"""
from __future__ import print_function
import os
import os.path
import scipy.io
import scipy.io.wavfile
import scipy.ndimage
import tensorflow as tf
tf.flags.DE | FINE_string("file_out", "",
"Filename of the output .tfrecords file.")
tf.flags.DEFINE_string("fn_root", "", "Name of root file path.")
FLAGS = tf.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def main():
"""Main converter function."""
# LSUN
fn_root = FLAGS.fn_root
img_fn_list = os.listdir(fn_root)
img_fn_list = [img_fn for img_fn in img_fn_list
if img_fn.endswith('.png')]
num_examples = len(img_fn_list)
n_examples_per_file = 10000
for example_idx, img_fn in enumerate(img_fn_list):
if example_idx % n_examples_per_file == 0:
file_out = "%s_%05d.tfrecords"
file_out = file_out % (FLAGS.file_out,
example_idx // n_examples_per_file)
print("Writing on:", file_out)
writer = tf.python_io.TFRecordWriter(file_out)
if example_idx % 1000 == 0:
print(example_idx, "/", num_examples)
image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
rows = image_raw.shape[0]
cols = image_raw.shape[1]
depth = image_raw.shape[2]
image_raw = image_raw.astype("uint8")
image_raw = image_raw.tostring()
example = tf.train.Example(
features=tf.train.Features(
feature={
"height": _int64_feature(rows),
"width": _int64_feature(cols),
"depth": _int64_feature(depth),
"image_raw": _bytes_feature(image_raw)
}
)
)
writer.write(example.SerializeToString())
if example_idx % n_examples_per_file == (n_examples_per_file - 1):
writer.close()
writer.close()
if __name__ == "__main__":
main()
|
jdrusso/last_letter | uav_utils/setup.py | Python | gpl-3.0 | 307 | 0.003257 | ## | ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
| packages=['uav_utils'],
package_dir={'': 'src'})
setup(**setup_args)
|
laonawuli/addrest | web2py/applications/admin/controllers/default.py | Python | mit | 73,035 | 0.003711 | # -*- coding: utf-8 -*-
EXPERIMENTAL_STUFF = True
MAXNFILES = 1000
if EXPERIMENTAL_STUFF:
if is_mobile:
response.view = response.view.replace('default/', 'default.mobile/')
response.menu = []
import re
from gluon.admin import *
from gluon.fileutils import abspath, read_file, write_file
from gluon.utils import web2py_uuid
from gluon.tools import Config
from gluon.compileapp import find_exposed_functions
from glob import glob
import shutil
import platform
try:
import git
if git.__version__ < '0.3.1':
raise ImportError("Your version of git is %s. Upgrade to 0.3.1 or better." % git.__version__)
have_git = True
except ImportError, e:
have_git = False
GIT_MISSING = 'Requires gitpython module, but not installed or incompatible version: %s' % e
from gluon.languages import (read_possible_languages, read_dict, write_dict,
read_plural_dict, write_plural_dict)
if DEMO_MODE and request.function in ['change_password', 'pack',
'pack_custom','pack_plugin', 'upgrade_web2py', 'uninstall',
'cleanup', 'compile_app', 'remove_compiled_app', 'delete',
'delete_plugin', 'create_file', 'upload_file', 'update_languages',
'reload_routes', 'git_push', 'git_pull', 'install_plugin']:
session.flash = T('disabled in demo mode')
redirect(URL('site'))
if is_gae and request.function in ('edit', 'edit_language',
'edit_plurals', 'update_languages', 'create_file', 'install_plugin'):
session.flash = T('disabled in GAE mode')
redirect(URL('site'))
if not is_manager() and request.function in ['change_password', 'upgrade_web2py']:
session.flash = T('disabled in multi user mode')
redirect(URL('site'))
if FILTER_APPS and request.args(0) and not request.args(0) in FILTER_APPS:
session.flash = T('disabled in demo mode')
redirect(URL('site'))
if not session.token:
session.token = web2py_uuid()
def count_lines(data):
return len([line for line in data.split('\n') if line.strip() and not line.startswith('#')])
def log_progress(app, mode='EDIT', filename=None, progress=0):
progress_file = os.path.join(apath(app, r=request), 'progress.log')
now = str(request.now)[:19]
if not os.path.exists(progress_file):
safe_open(progress_file, 'w').write('[%s] START\n' % now)
if filename:
safe_open(progress_file, 'a').write(
'[%s] %s %s: %s\n' % (now, mode, filename, progress))
def safe_open(a, b):
if (DEMO_MODE or is_gae) and ('w' in b or 'a' in b):
class tmp:
def write(self, data):
pass
def close(self):
pass
return tmp()
return open(a, b)
def safe_read(a, b='r'):
safe_file = safe_open(a, b)
try:
return safe_file.read()
finally:
safe_file.close()
def safe_write(a, value, b='w'):
safe_file = safe_open(a, b)
try:
safe_file.write(value)
finally:
safe_file.close()
def get_app(name=None):
app = name or request.args(0)
if (app and os.path.exists(apath(app, r=request)) and
(not MULTI_USER_MODE or is_manager() or
db(db.app.name == app)(db.app.owner == auth.user.id).count())):
return app
session.flash = T('App does not exist or you are not authorized')
redirect(URL('site'))
def index():
""" Index handler """
send = request.vars.send
if DEMO_MODE:
session.authorized = True
session.last_time = t0
if not send:
send = URL('site')
if session.authorized:
redirect(send)
elif request.vars.password:
if verify_password(request.vars.password[:1024]):
session.authorized = True
login_record(True)
if CHECK_VERSION:
session.check_version = True
else:
session.check_version = False
session.last_time = t0
if isinstance(send, list): # ## why does this happen?
send = str(send[0])
redirect(send)
else:
times_denied = login_record(False)
if times_denied >= allowed_number_of_attempts:
response.flash = \
T('admin disabled because too many invalid login attempts')
elif times_denied == allowed_number_of_attempts - 1:
response.flash = \
T('You have one more login attempt before you are locked out')
else:
response.flash = T('invalid password.')
return dict(send=send)
def check_version():
""" Checks if web2py is up to date """
session.forget()
session._unlock(response)
new_version, version = check_new_version(request.env.web2py_version,
WEB2PY_VERSION_URL)
if new_version == -1:
return A(T('Unable to check for upgrades'), _href=WEB2PY_URL)
elif new_version != True:
return A(T('web2py is up to date'), _href=WEB2PY_URL)
elif platform.system().lower() in ('windows', 'win32', 'win64') and os.path.exists("web2py.exe"):
return SPAN('You should upgrade to %s' % version.split('(')[0])
else:
return sp_button(URL('upgrade_web2py'), T('upgrade now to %s') % version.split('(')[0])
def logout():
""" Logout handler """
session.authorized = None
if MULTI_USER_MODE:
redirect(URL('user/logout'))
redirect(URL('index'))
def change_password():
if session.pam_user:
session.flash = T(
'PAM authenticated user, cannot change password here')
redirect(URL('site'))
form = SQLFORM.factory(Field('current_admin_password', 'password'),
Field('new_admin_password',
'password', requires=IS_STRONG()),
Field('new_admin_password_again', 'password'),
_class="span4 well")
if form.accepts(request.vars):
if not verify_password(request.vars.current_admin_password):
form.errors.current_admin_password = T('invalid password')
elif form.vars.new_admin_password != form.vars.new_admin_password_again:
form.errors.new_admin_password_again = T('no match')
else:
path = abspath('parameters_%s.py' % request.env.server_port)
safe_write(path, 'password="%s"' % CRYPT()(
request.vars.new_admin_password)[0])
session.flash = T('password changed')
redirect(URL('site'))
return dict(form=form)
def site():
""" Site handler """
myversion = request.env.web2py_version
# Shortcut to make the elif statements more legible
file_or_appurl = 'file' in request.vars or 'appurl' in request.vars
class IS_VALID_APPNAME(object):
def __call__(self, value):
if not re.compile('^\w+$').match(value):
return (value, T('Invalid application name'))
if not request.vars.overwrite and \
os.path.exists(os.path.join(apath(r=request), value)):
return (value, T('Application exists already'))
return (value, None)
is_appname = IS_VALID_APPNAME()
form_create = SQLFORM.factory(Field('name', requires=is_appname),
table_name='appcreate')
form_update = SQLFORM.factory(Field('name', requires=is_appname),
Field('file', 'upload', uploadfield=False),
Field('url'),
Field('overwrite', 'boolean'),
table_name='appupdate')
form_create.process()
form_update.process()
if DEMO_MODE:
pass
elif form_create.accepted:
# create a new application
appname = cleanpath(form_create.vars.name)
created, error = app_create(appname, request, info=True)
if created:
if MULTI_USER_MODE:
| db.app.insert(name=appname, owner=auth.user.id)
log_progress(appname)
session.flash = T('ne | w application "%s" created', appname)
redirect(URL('design' |
MakeHer/edx-platform | common/test/acceptance/tests/lms/test_lms_matlab_problem.py | Python | agpl-3.0 | 3,475 | 0.00259 | # -*- coding: utf-8 -*-
"""
Test for matlab problems
"""
import time
from ...pages.lms.matlab_problem import MatlabProblemPage
from ...fixtures.course import XBlockFixtureDesc
from ...fixtures.xqueue import XQueueResponseFixture
from .test_lms_problems import ProblemsTest
from textwrap import dedent
class MatlabProblemTest(ProblemsTest):
"""
| Tests that verify matlab problem "Run Code".
"""
def get_problem(self):
"""
Create a matlab problem for the test.
"""
problem_data = dedent("""
<problem markdown="null">
<text>
<p>
Write MATLAB code to create the following row vector and store it in a variable named <code>V</code>.
| </p>
<table id="a0000000466" class="equation" width="100%" cellspacing="0" cellpadding="7" style="table-layout:auto">
<tr>
<td class="equation">[1 1 2 3 5 8 13]</td>
</tr>
</table>
<p>
<coderesponse queuename="matlab">
<matlabinput rows="10" cols="40" mode="" tabsize="4">
<plot_payload>
</plot_payload>
</matlabinput>
<codeparam>
<initial_display/>
<answer_display>
</answer_display>
<grader_payload>
</grader_payload>
</codeparam>
</coderesponse>
</p>
</text>
</problem>
""")
return XBlockFixtureDesc('problem', 'Test Matlab Problem', data=problem_data)
def _goto_matlab_problem_page(self):
"""
Open matlab problem page with assertion.
"""
self.courseware_page.visit()
matlab_problem_page = MatlabProblemPage(self.browser)
self.assertEqual(matlab_problem_page.problem_name, 'TEST MATLAB PROBLEM')
return matlab_problem_page
def test_run_code(self):
"""
Test "Run Code" button functionality.
"""
# Enter a submission, which will trigger a pre-defined response from the XQueue stub.
self.submission = "a=1" + self.unique_id[0:5]
self.xqueue_grade_response = {'msg': self.submission}
matlab_problem_page = self._goto_matlab_problem_page()
# Configure the XQueue stub's response for the text we will submit
if self.xqueue_grade_response is not None:
XQueueResponseFixture(self.submission, self.xqueue_grade_response).install()
matlab_problem_page.set_response(self.submission)
matlab_problem_page.click_run_code()
self.assertEqual(
u'Submitted. As soon as a response is returned, this message will be replaced by that feedback.',
matlab_problem_page.get_grader_msg(".external-grader-message")[0]
)
# Wait 5 seconds for xqueue stub server grader response sent back to lms.
time.sleep(5)
self.assertEqual(u'', matlab_problem_page.get_grader_msg(".external-grader-message")[0])
self.assertEqual(
self.xqueue_grade_response.get("msg"),
matlab_problem_page.get_grader_msg(".ungraded-matlab-result")[0]
)
|
the-duck/launcher-next | src/duck/launcher/defaultConfig.py | Python | gpl-2.0 | 1,166 | 0.031732 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#########
#Copyright (C) 2014-2015 Mark Spurgeon <theduck.dev@gmail.com>
# This program is free software: you can redistribute it and/or | modify
| # it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########
Dict={
"r":255,
"g":92,
"b":36,
"r2":40,
"g2":40,
"b2":40,
"alpha":200,
"font":"Droid Sans",
"font-r":255,
"font-g":255,
"font-b":255,
"animation-speed":1.5,
"size":40,
"dock-apps":["Firefox Web Browser"],
"icon-size":95,
"blocks":"(lp0\n(dp1\nS'apps'\np2\n(lp3\nS'Firefox Web Browser'\np4\nasS'name'\np5\nS'Example'\np6\nsa.",
"init-manager":"systemd"
}
|
LLNL/spack | var/spack/repos/builtin/packages/sirius/package.py | Python | lgpl-2.1 | 11,824 | 0.003298 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Sirius(CMakePackage, CudaPackage):
"""Domain specific library for electronic structure calculations"""
homepage = "https://github.com/electronic-structure/SIRIUS"
url = "https://github.com/electronic-structure/SIRIUS/archive/v6.1.5.tar.gz"
list_url = "https://github.com/electronic-structure/SIRIUS/releases"
git = "https://github.com/electronic-structure/SIRIUS.git"
maintainers = ['simonpintarelli', 'haampie', 'dev-zero', 'AdhocMan', 'toxa81']
version('develop', branch='develop')
version('master', branch='master')
version('7.3.0', sha256='69b5cf356adbe181be6c919032859c4e0160901ff42a885d7e7ea0f38cc772e2')
version('7.2.7', sha256='929bf7f131a4847624858b9c4295532c24b0c06f6dcef5453c0dfc33fb78eb03')
version('7.2.6', sha256='e751fd46cdc7c481ab23b0839d3f27fb00b75dc61dc22a650c92fe8e35336e3a')
version('7.2.5', sha256='794e03d4da91025f77542d3d593d87a8c74e980394f658a0210a4fd91c011f22')
version('7.2.4', sha256='aeed0e83b80c3a79a9469e7f3fe10d80ad331795e38dbc3c49cb0308e2bd084d')
version('7.2.3', sha256='6c10f0e87e50fcc7cdb4d1b2d35e91dba6144de8f111e36c7d08912e5942a906')
version('7.2.1', sha256='01bf6c9893ff471473e13351ca7fdc2ed6c1f4b1bb7afa151909ea7cd6fa0de7')
version('7.2.0', sha256='537800459db8a7553d7aa251c19f3a31f911930194b068bc5bca2dfb2c9b71db')
version('7.0.2', sha256='ee613607ce3be0b2c3f69b560b2415ce1b0e015179002aa90739430dbfaa0389')
version('7.0.1', sha256='cca11433f86e7f4921f7956d6589f27bf0fd5539f3e8f96e66a3a6f274888595')
version('7.0.0', sha256='da783df11e7b65668e29ba8d55c8a6827e2216ad6d88040f84f42ac20fd1bb99')
version('6.5.7', sha256='d886c3066163c43666ebac2ea50351df03907b5686671e514a75f131ba51b43c')
version('6.5.6', sha256='c8120100bde4477545eae489ea7f9140d264a3f88696ec92728616d78f214cae')
version('6.5.5', sha256='0b23d3a8512682eea67aec57271031c65f465b61853a165015b38f7477651dd1')
version('6.5.4', sha256='5f731926b882a567d117afa5e0ed33291f1db887fce52f371ba51f014209b85d')
version('6.5.3', sha256='eae0c303f332425a8c792d4455dca62557931b28a5df8b4c242652d5ffddd580')
version('6.5.2', sha256='c18adc45b069ebae03f94eeeeed031ee99b3d8171fa6ee73c7c6fb1e42397fe7')
version('6.5.1', sha256='599dd0fa25a4e83db2a359257a125e855d4259188cf5b0065b8e7e66378eacf3')
version('6.5.0', sha256='5544f3abbb71dcd6aa08d18aceaf53c38373de4cbd0c3af44fbb39c20cfeb7cc')
version('6.4.4', sha256='1c5de9565781847658c3cc11edcb404e6e6d1c5a9dfc81e977de7a9a7a162c8a')
version('6.4.3', sha256='4d1effeadb84b3e1efd7d9ac88018ef567aa2e0aa72e1112f0abf2e493e2a189')
version('6.4.2', sha256='40b9b66deebb6538fc0f4cd802554d0d763ea6426b9b2f0e8db8dc617e494479')
version('6.4.1', sha256='86f25c71517952a63e92e0a9bcf66d27e4afb2b0d67cf84af480f116b8e7f53c')
version('6.4.0', sha256='bc61758b71dd2996e2ff515b8c3560b2c69c00931cb2811a163a31bcfea4436e')
version('6.3.4', sha256='8839e988b4bb6ef99b6180f7fba03a5537e31fce51bb3e4c2298b513d6a07e0a')
version('6.3.3', sha256='7ba30a4e5c9a545433251211454ec0d59b74ba8941346057bc7de11e7f6886f7')
version('6.3.2', sha256='1723e5ad338dad9a816369a6957101b2cae7214425406b12e8712c82447a7ee5')
version('6.1.5', sha256='379f0a2e5208fd6d91c2bd4939c3a5c40002975fb97652946fa1bfe4a3ef97cb')
amdgpu_targets = (
'gfx701', 'gfx801', 'gfx802', 'gfx803',
'gfx900', 'gfx906', 'gfx908', 'gfx1010',
'gfx1011', 'gfx1012'
)
variant('shared', default=True, description="Build shared libraries")
variant('openmp', default=True, description="Build with OpenMP support")
variant('boost_filesystem', default=False,
description="Use Boost filesystem for self-consistent field method "
"mini-app. Only required when the compiler does not "
"support std::experimental::filesystem nor std::filesystem")
variant('fortran', default=False, description="Build Fortran bindings")
variant('python', default=False, description="Build Python bindings")
variant('memory_pool', default=True, description="Build with memory pool")
variant('elpa', default=False, description="Use ELPA")
variant('vdwxc', default=False, description="Enable libvdwxc support")
variant('scalapack', default=False, description="Enable scalapack support")
variant('magma', default=False, description="Enable MAGMA support")
variant('nlcglib', default=False, description="enable robust wave function optimization")
variant('rocm', default=False, description='Use ROCm GPU support')
variant('amdgpu_target', default='gfx803,gfx900,gfx906', multi=True, values=amdgpu_targets, when='+rocm')
variant('build_type', default='Release',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo'))
variant('apps', default=True, description="Build applications")
variant('tests', default=False, description="Build tests")
variant('single_precision', default=False, description="Use single precision arithmetics")
variant('profiler', default=True, description="Use internal profiler to measure execution time")
depends_on('python', type=('build', 'run'))
depends_on('mpi')
depends_on('gsl')
depends_on('lapack')
depends_on('fftw-api@3')
depends_on('libxc@3.0.0:')
depends_on('libxc@4.0.0:', when='@7.2.0:')
depends_on('spglib')
depends_on('hdf5+hl')
depends_on('pkgconfig', type='build')
depends_on('py-numpy', when='+python', type=('build', 'run'))
depends_on('py-scipy', when='+python', type=('build', 'run'))
depends_on('py-h5py', when='+python', type=('build', 'run'))
depends_on('py-mpi4py', when='+python', type=('build', 'run'))
depends_on('py-pyyaml', when='+python', type=('build', 'run'))
depends_on('py-mpi4py', when='+python', type=('build', 'run'))
depends_on('py-voluptuous', when='+python', type=('build', 'run'))
depends_on('py-pybind11', when='+python', type=('build', 'run'))
depends_on('magma', when='+magma')
depends_on('boost cxxstd=14 +filesystem', when='+boost_filesystem')
depends_on('spfft@0.9.6: +mpi', when='@6.4.0:')
depends_on('spfft@0.9.13:', when='@7.0.1:')
depends_on('spfft+single_precision', when='+single_precision ^spfft')
depends_on('spfft+cuda', when='+cuda ^spfft')
depends_on('spfft+rocm', when='+rocm ^spfft')
depends_on('spfft+openmp', when='+openmp ^spfft')
depends_on('spla@1.1.0:', when='@7.0.2:')
depends_on('spla+cuda', when='+cuda ^spla')
depends_on('spla+rocm', when='+rocm ^spla')
depends_on('spla+openmp', when='+openmp ^spla')
depends_on('nlcglib', when='+nlcglib')
depends_on('libvdwxc@0.3.0:+mpi', when='+vdwxc')
depends_on('scalapack', when='+scalapack')
# rocm
depends_on('hip', when='+rocm')
depends_on('rocblas', when='+rocm')
# FindHIP cmake script only works for < 4.1
depends_on('hip@:4.0', when='@:7.2.0 +rocm')
extends('python', when='+python')
conflicts('+shared', when='@6.3.0:6.4')
conflicts('+boost_filesystem', when='~apps')
conflicts('^libxc@5.0.0') # known to produce incorrect results
conflicts('+single_precision', when='@:7.2.4')
# Propagate openmp to blas
depends_on('openblas threads=openmp', when='+openmp ^openbl | as')
depends_on('amdblis threads=openmp', when='+openmp ^amdblis')
depends_on('blis threads=openmp', when='+openmp ^blis')
depends_on('intel-mkl threads=openmp', when='+openmp ^intel-mkl')
depends_on('elpa+openmp', when='+elpa+openmp')
depends_on('elpa~openmp', when='+elpa~openmp')
# TODO:
# add support | for CRAY_LIBSCI, testing
patch("strip-spglib-include-subfolder.patch", when='@6.1.5')
patch("link-libraries-fortran.patch", when='@6.1.5')
patch("cmake-fix-shared-library-installation.patch", when='@6.1.5')
patch("mpi_datatypes.patch", when="@:7.2.6")
@property
def libs(self):
libraries |
static-code-generators/taskzilla | taskzilla/migrations/0001_initial.py | Python | mit | 949 | 0.003161 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('title', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='User',
fields=[ |
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
],
),
migrations.AddField(
model_name='task',
name='subscribers',
field=models.ManyToManyField(to='taskz | illa.User'),
),
]
|
openevacmap/openevac-back | src/routes/stat.py | Python | agpl-3.0 | 1,126 | 0.005329 | # -*- coding: utf-8 -*-
'''
Created on 21 janv. 2016
@author: christian
'''
import falcon
import os
import config
import db
class Stat(object):
'''
Get global statistics
'''
def on_get(self, req, resp):
'''Return global statistics
'''
dbc = db.connect()
cur = dbc.cursor()
query = """select format('{" | nb_maps":%s,"nb_addr":%s,"last_map":"%s"}',
count(*),
count(distinct(address)),
left(max(time)::text,19)) as stats from maps;"""
cur.execute(query)
stats = cur.fetchone()[0]
resp.set_header('X-Powered-By', 'OpenEvacMap')
if stats is None:
resp.status = falcon.HTTP_404
| else:
resp.status = falcon.HTTP_200
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Headers',
'X-Requested-With')
resp.body = (stats)
cur.close()
dbc.close()
|
pierg75/pier-sosreport | sos/plugins/nss.py | Python | gpl-2.0 | 1,276 | 0 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, | Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class NSS(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Network Security Services configuration
"""
plugin_name = "nss"
profiles = ('network', 'security')
packages = ('nss',)
verify_packages = | ('nss.*',)
def setup(self):
self.add_forbidden_path([
"/etc/pki/nssdb/cert*",
"/etc/pki/nssdb/key*",
"/etc/pki/nssdb/secmod.db"
])
self.add_copy_spec("/etc/pki/nssdb/pkcs11.txt")
# vim: set et ts=4 sw=4 :
|
makerbot/conveyor | virtualenv.py | Python | agpl-3.0 | 101,937 | 0.002953 | #!/usr/bin/env python
"""Crea | te a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
virtualenv_version = "1.7.1.2"
import base64
import sys
import os
import optparse
import re
| import shutil
import logging
import tempfile
import zlib
import errno
import distutils.sysconfig
from distutils.util import strtobool
try:
import subprocess
except ImportError:
if sys.version_info <= (2, 3):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.')
print('If you copy subprocess.py from a newer version of Python this script will probably work')
sys.exit(101)
else:
raise
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join  # shorthand used pervasively below
# Interpreter fingerprint, e.g. "python2.7"; used to locate stdlib paths.
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
# ABI flag suffix on CPython >= 3.2 ("d", "m", ...); empty elsewhere.
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
    user_dir = os.environ.get('APPDATA', user_dir)  # Use %APPDATA% for roaming
    default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
    default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
# Basename of the interpreter executable expected inside the new venv.
if is_pypy:
    expected_exe = 'pypy'
elif is_jython:
    expected_exe = 'jython'
else:
    expected_exe = 'python'
# Minimal stdlib modules that must be importable from any virtualenv.
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
                    'fnmatch', 'locale', 'encodings', 'codecs',
                    'stat', 'UserDict', 'readline', 'copy_reg', 'types',
                    're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
                    'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
#"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consu |
feigaochn/leetcode | p659_split_array_into_consecutive_subsequences.py | Python | mit | 1,129 | 0 | #!/usr/bin/env python
# coding: utf-8
class Solution:
    """Greedy checker for LeetCode 659 (split array into consecutive runs)."""

    def isPossible(self, nums):
        """
        Decide whether sorted ``nums`` can be split into subsequences of
        consecutive integers, each of length at least 3.

        :type nums: List[int]
        :rtype: bool
        """
        if not nums:
            return False

        # Run-length encode the sorted input: [[value, count], ...].
        runs = []
        for value in nums:
            if runs and runs[-1][0] == value:
                runs[-1][1] += 1
            else:
                runs.append([value, 1])

        # state = [value, one, two, three] where `one`/`two`/`three` count
        # open subsequences of length 1 / 2 / >=3 ending exactly at `value`.
        first_value, first_count = runs[0]
        state = [first_value, first_count, 0, 0]
        for value, count in runs[1:]:
            if value > state[0] + 1:
                # A gap: any open run shorter than 3 can never be finished.
                if state[1] or state[2]:
                    return False
                state = [value, count, 0, 0]
            elif value == state[0] + 1:
                short = state[1] + state[2]
                if count < short:
                    # Too few copies of `value` to extend every short run.
                    return False
                extended_long = min(state[3], count - short)
                state = [value,
                         max(0, count - (state[1] + state[2] + state[3])),
                         state[1],
                         state[2] + extended_long]
        # Valid iff no run of length 1 or 2 is left open at the end.
        return state[1] == 0 and state[2] == 0
if __name__ == '__main__':
    # Smoke-test the checker on three known cases (True, False, True).
    check = Solution().isPossible
    for case in ([1, 2, 3, 3, 4, 5],
                 [1, 2, 3, 4, 4, 5],
                 [1, 2, 3, 3, 4, 4, 5, 5]):
        print(check(case))
|
avian2/unidecode | setup.py | Python | gpl-2.0 | 1,387 | 0.002163 | #!/usr/bin/python
# vi:tabstop=4:expandtab:sw=4
import os
from setuptools import setup
def get_long_description():
    """Return the package long description read from the adjacent README.rst."""
    readme_path = os.path.join(os.path.dirname(__file__), "README.rst")
    with open(readme_path, encoding='utf-8') as readme:
        return readme.read()
# Package metadata. The console_scripts entry installs the `unidecode` CLI.
setup(
    name='Unidecode',
    version='1.3.3',
    description='ASCII transliterations of Unicode text',
    license='GPL',
    long_description=get_long_description(),
    author='Tomaz Solc',
    author_email='tomaz.solc@tablix.org',
    packages=['unidecode'],
    package_data={'unidecode': ['py.typed']},
    python_requires=">=3.5",
    test_suite='tests',
    entry_points={
        'console_scripts': [
            # fixed: entry point spec was garbled by extraction
            'unidecode = unidecode.util:main'
        ]
    },
    classifiers=[
        # fixed: license classifier text was garbled by extraction
        "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Text Processing",
        "Topic :: Text Processing :: Filters",
    ],
)
|
addition-it-solutions/project-all | addons/point_of_sale/controllers/main.py | Python | agpl-3.0 | 1,015 | 0.010837 | # -*- coding: utf-8 -*-
import logging
import werkzeug.utils
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import login_redirect, abort_and_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
    """HTTP controller serving the Point of Sale web client."""

    @http.route('/pos/web', type='http', auth='user')
    def a(self, debug=False, **k):
        # NOTE(review): the handler name 'a' is arbitrary; the URL is fixed
        # by the decorator above, so renaming it would be safe but is left
        # unchanged here.
        cr, uid, context, session = request.cr, request.uid, request.context, request.session
        # if user not logged in, log him in
        if not session.uid:
            # fixed: call was garbled by extraction ("login_redirec | t()")
            return login_redirect()
        PosSession = request.registry['pos.session']
        # Find a POS session already opened by the current user.
        # fixed: domain tuple was garbled by extraction ("('u | ser_id'...")
        pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context)
        if not pos_session_ids:
            # No open session: redirect to the session-opening action.
            return werkzeug.utils.redirect('/web#action=point_of_sale.action_pos_session_opening')
        PosSession.login(cr, uid, pos_session_ids, context=context)
        return request.render('point_of_sale.index')
|
slaff/attachix | server/core/provider/storages/meta/db.py | Python | mit | 8,100 | 0.005802 | # @todo: Refactor this method to work as the HBase meta class !!!
from copy import copy
from core.pattern import ActiveRecord, TableDataGateway
import time
# Node type discriminator values (Node._definition maps 'collection' -> 'type').
NODE_COLLECTION = 1  # directory-like node
NODE_FILE = 0        # plain file node
# Depth selectors understood by Search/Node traversal helpers.
DEPTH_NODE = 0       # only the node itself
DEPTH_CHILDREN = 1   # the node's direct children
DEPTH_INFINITY = -1  # the entire subtree
class Meta(ActiveRecord):
    """Reference-counted metadata record backing a stored file.

    ``ref_count`` tracks how many Node rows share this record; the
    physical file may only be removed once it drops below 1.
    """
    _definition = {
        'location': 'location',
        'ref_count': 'ref_count'
    }
    def save(self):
        # A brand-new record starts at zero, so the unconditional increment
        # below stores ref_count == 1 on first save.
        if self.new:
            self.ref_count = 0
        self.ref_count += 1
        ActiveRecord.save(self)
    def delete(self):
        """
        Decrease the ref count by 1.
        @return int Returns the current ref count. If it is below 1 then
                    the physical file has to be removed also
        """
        try:
            self.getPrimaryKey()
        except ValueError:
            raise ValueError('The id key must be set in order to delete Meta record')
        self.ref_count = self.ref_count - 1
        # Only drop the row once nothing references it any more; otherwise
        # just persist the decremented counter.
        if self.ref_count < 1:
            ActiveRecord.delete(self)
        else:
            ActiveRecord.save(self)
        return self.ref_count
class Node(ActiveRecord):
    """
    Virtual File System node (a file or a collection).

    Besides its display ``name``, every node persists a materialised
    ``path`` of ancestor ids (e.g. "/12/34/56/") which Search uses for
    fast subtree lookups, plus a reference to a shared Meta record.
    """
    _definition = {
        'name'      : 'name',
        'parent'    : 'parent',
        'reference' : 'reference',
        'collection': 'type',
        'path'      : 'path',
        'owner'     : 'owner',
        'user'      : 'user',
        'permission': 'permission',
        'group'     : 'group',
        'mime_major': 'mime_major',
        'mime_minor': 'mime_minor',
        'size'      : 'size',
        'modified'  : 'modified',
        'created'   : 'created',
        # reference keys
        'meta'      : 'meta'
    }
    _dependent = {
        'meta': {
            'class': Meta,
            'refKey': 'meta'
        }
    }
    _alias = {
        # field : dependent table
        'location': 'meta',
        'ref_count': 'meta'
    }

    def save(self):
        """Persist the node; on first save also build its materialised id path."""
        new = self.new
        ActiveRecord.save(self)
        # A new record only receives its id during the first save, so the
        # id path can only be assembled (and re-saved) afterwards.
        if new:
            self.created = time.time()
            if self.parent:
                parentNode = Node(self.db)
                parentNode.id = self.parent
                parentNode.load(True)
                self.path = '%s%s/' % (parentNode.path, self.id)
            else:
                self.path = '/%s/' % self.id
            ActiveRecord.save(self)

    def getChildren(self, depth=1):
        """Return this node's descendants down to ``depth`` levels."""
        # Fixed: Search requires the db handle -- calling Search() raised
        # TypeError on every invocation.  Also, self._data['path'] holds the
        # materialised *id* path, so it must be resolved with findByIdPath
        # (exactly as delete() below already does), not the name-based
        # findByPath.
        finder = Search(self.db)
        nodes = finder.findByIdPath(self._data['path'], depth)
        return nodes

    def copy(self):
        """Return an unsaved duplicate of this node (the id is omitted)."""
        node = Node(self.db)
        for (name, value) in self._data.items():
            if name == 'id':
                continue
            setattr(node, name, value)
        return node

    def link(self, name, parentNode):
        """Create a new node named ``name`` under ``parentNode`` referencing self."""
        targetNode = self.copy()
        # change the db to be the db of the parentNode
        targetNode.setDb(parentNode.db)
        targetNode.name = name
        targetNode.parent = parentNode.id
        targetNode.reference = self.id
        targetNode.save()

    def delete(self, recursively=False):
        """Delete this node; with ``recursively`` remove the whole subtree.

        @return int number of deleted nodes when recursive, otherwise the
                    result of ActiveRecord.delete()
        """
        if recursively:
            finder = Search(self.db)
            if not self._data.get('path', None):
                self.load(True)
            if not self._data['path']:
                raise ValueError('Not enough data given')
            # findByIdPath orders by path DESC, so children are deleted
            # before their parents.
            nodes = finder.findByIdPath(self._data['path'], DEPTH_INFINITY)
            count = 0
            for node in nodes:
                node.delete()
                count += 1
            return count
        return ActiveRecord.delete(self)

    def setProperty(self, Property):
        # Unimplemented stub.
        pass

    def getProperty(self):
        """
        @return Property
        """
        pass

    def setGroup(self):
        # Unimplemented stub.
        pass

    def getGroup(self):
        """
        @return VFS.Group instance
        """
        pass
class Property():
    # NOTE(review): placeholder for per-node properties; every method below
    # is an unimplemented stub.
    def set(self):
        pass
    def update(self):
        pass
    def delete(self):
        pass
    def load(self, nodeId):
        pass
    def save(self):
        pass
class Group():
    """
    Make it extend a hash and have
    __getiter__
    __len__
    __get__
    __set__
    __del__
    the key is the name and the value is 1
    """
    # NOTE(review): every method below is an unimplemented stub; the class
    # docstring above describes the intended dict-like design.
    def __init__(self):
        """
        Creates new group or loads existing one
        """
        pass
    def load(self, id):
        """
        Loads information about existing group
        """
        pass
    def addUser(self):
        pass
    def removeUser(self):
        pass
    def getUsers(self):
        pass
    def delete(self):
        pass
    def save(self):
        """
        Saves any changes added to the group
        """
        pass
class Search():
    """
    Fast discovery of nodes by name path or by materialised id path.

    NOTE(review): WHERE clauses are assembled by string interpolation and
    rely entirely on DB_HANDLER.quote() for escaping -- confirm quote()
    is injection-safe for untrusted path segments.
    """
    DB_HANDLER = TableDataGateway

    def __init__(self, db):
        self.dbConnection = Search.DB_HANDLER(db, 'node')

    # @todo: implement some caching here
    def findByPath(self, path, depth=0):
        """
        Finds nodes matching the url path
        Example: /this/folder/file -> /12/34/45/5
        @return {Node} or list of Nodes depending on the depth or None if no node is found
        """
        try:
            names = path.strip('/').split('/')
            lenNames = len(names)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; a malformed path still yields None.
            return None
        if lenNames == 0:
            return None
        safeNames = []
        for part in names:
            safeNames.append(self.dbConnection.quote(part))
        fields = ['id', 'name', 'path', 'parent']
        columns = {}
        for (index, name) in enumerate(fields):
            columns[name] = index
        (_, rows) = self.dbConnection.select(fields,
            " name in (%s)" % ','.join(safeNames))
        # Resolve one path segment at a time: a row matches when it carries
        # the next expected name and its parent is the node resolved in the
        # previous step; the scan restarts from row 0 after each match.
        lastParent = None
        idParts = []
        lookupTable = {}
        count = len(rows)
        i = 0
        while i < count:
            row = rows[i]
            if len(names) > 0 and names[0] == row[columns['name']] \
                    and row[columns['parent']] == lastParent:
                idParts.append('%s' % row[columns['id']])
                lastParent = row[columns['id']]
                names.pop(0)
                i = 0
            else:
                i += 1
            lookupTable[row[columns['path']]] = 1
        if lenNames != len(idParts):
            return None
        idPath = '/' + '/'.join(idParts) + '/'
        # Fixed: dict.has_key() was removed in Python 3; ``in`` works on both.
        if idPath not in lookupTable:
            return None
        return self.findByIdPath(idPath, depth)

    def findByIdPath(self, path, depth=0):
        """
        Finds the id of a node based on its idpath.
        Example: /usr/local/bin -> 121
        @param string idpath
        @param int depth if the depth parameter is positive then returns list of nodes
               ordered by path DESC (docstring fixed: the query below orders DESC)
        @return {Node} or list of {Node} or None if no node is found
        """
        if depth == DEPTH_INFINITY:
            where = "path LIKE %s" % self.dbConnection.quote(path + '%')
        else:
            where = "path=%s" % self.dbConnection.quote(path)
        if depth == 1:
            # NOTE(review): assumes the node exists -- rows[0] raises
            # IndexError for an unknown path; confirm callers guarantee it.
            [fields, rows] = self.dbConnection.select(['id'], where)
            where = "parent=%d" % rows[0][0]
        where += ' ORDER BY path DESC '
        [fields, rows] = self.dbConnection.select([], where)
        if not len(rows):
            if depth == 0:
                return None
            else:
                return []
        results = []
        for row in rows:
            node = Node(self.dbConnection.db)
            node.preLoad(fields, row)
            results.append(copy(node))
        if depth == DEPTH_NODE:
            return results[0]
        return results

    def findById(self, id):
        """
        Finds the node by id
        @param int id
        @return {Node} or None if the node cannot be found
        """
        node = Node(self.dbConnection.db)
        node.id = id
        try:
            node.load(True)
        except Exception:
            # Narrowed from a bare ``except:``; a missing or corrupt record
            # still maps to None without hiding interpreter-level exits.
            return None
        return node
overxfl0w/Grampus-Forensic-Utils | Metadata/Image/XMP/GIF/gifxmp.py | Python | gpl-2.0 | 1,117 | 0.046553 | from mmap import mmap
## exiftool -xmp-dc:subject=Metadatos XMP de prueba ./test.gif ##
class gifxmp:
def __init__(self,filename):
self.mmaped = self.__mmap(filename)
self.fst = self. | __checkxmp()
if self.fst != -1:
self.xml = self.__loadxmppacket()
print self.xml
## dict = ModuleXML.parsexmp(self.xml) ##
def __mmap(self,filename):
with open(filename,"r+b") as fd:
_ = mmap(fd.fileno(),0)
fd.close()
return _
## Comprueba que el header es correcto, solo se comprobara la existencia de
## unos cuantos caracteres clave en el hader, si es correcto, devuelve el indice
## de la primera aparicio | n de XMP en la cabecera. Se pivotara a partir de aqui
def __checkxmp(self):
return self.mmaped.find("XMP Data")
## Leemos el paquete, boundary primera ocurrencia del header + 12 bytes de la comprobacion, hasta el mismo + 256
## 256 -> (258 - 2 bytes del block terminate)
def __loadxmppacket(self):
blcktrmt = self.mmaped.find("\x00\x00",self.fst,self.mmaped.size())
return self.mmaped[self.fst+11:blcktrmt-256]
if __name__ == "__main__":
inst = gifxmp("test.gif")
|
vrai/gfmviewer-wx | setup.py | Python | gpl-2.0 | 953 | 0.023085 | from distutils.core import setup
# Package metadata; long_description doubles as the usage overview.
# fixed: two spans of the description string were garbled by extraction.
setup (
    name = 'gfmviewer',
    version = '0.1.0',
    description = 'View a Github Formatted Markdown file as formatted HTML',
    scripts = [ 'gfmviewer' ],
    author = 'Vrai Stacey',
    author_email = 'vrai.stacey@gmail.com',
    url = 'http://github.com/vrai/gfmviewer-wx',
    license = 'GPL',
    long_description = """\
Converts a Github Flavoured Markdown file in to HTML - using the Github API -
and displays it. The source file is monitored and will be reformatted /
displayed on any change.
By default a proper file notification library will be used to watch the file.
However if this is not available, or it is disabled with the --poll option,
the file's modification time will be checked once a second.
On most platforms (current not including OS X) the --fork option will cause
the viewer to run in the background, detached from the terminal used to start
it. """ )
# vim: ft=python:sw=4:ts=4:et
|
"""Project Euler problem 5: smallest positive number evenly divisible by
every number from 1 to 20, i.e. lcm(1..20)."""
from math import gcd


def smallest_multiple(limit=20):
    """Return lcm(1..limit), folding lcm(a, b) = a * b // gcd(a, b).

    Generalizes the original hard-coded prime-power product
    16*9*5*7*11*13*17*19 (= 232792560 for limit=20).
    """
    result = 1
    for k in range(2, limit + 1):
        result = result * k // gcd(result, k)
    return result


if __name__ == '__main__':
    # Fixed: the original mixed a Python 2 print statement with a garbled
    # extraction line; this version is valid Python 3 and computes the
    # answer instead of hard-coding it.
    print("project euler problem 5 find the smallest number divisible by each number smaller than twenty")
    print(smallest_multiple())
|
matheuskiser/pdx_code_guild | django/tango_with_django_project/rango/migrations/0003_category_slug.py | Python | mit | 444 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Category.slug as a unique,
    # initially empty SlugField.  Prefer regenerating over hand-editing.

    dependencies = [
        ('rango', '0002_auto_20150401_0132'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='slug',
            # fixed: keyword list was garbled by extraction
            # ("default='', | unique=True")
            field=models.SlugField(default='', unique=True),
            preserve_default=False,
        ),
    ]
|
z-jason/anki | aqt/forms/setgroup.py | Python | agpl-3.0 | 2,025 | 0.003951 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/setgroup.ui'
#
# Created: Sun Mar 30 10:19:30 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic: fall back to identity / legacy
# signatures on Qt/PyQt builds that lack these APIs.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    # fixed: this def line was garbled by extraction ("| def _translate...")
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    """Generated by pyuic from designer/setgroup.ui -- regenerate rather
    than edit by hand (manual changes will be lost)."""

    def setupUi(self, Dialog):
        # fixed: call was garbled by extraction ("Dialog. | setObjectName")
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(433, 143)
        self.verticalLayout_2 = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout_2.addWidget(self.label)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout_2.addWidget(self.buttonBox)

        self.retranslateUi(Dialog)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_("Anki"))
        self.label.setText(_("Move cards to deck:"))
|
geopython/pywps | tests/test_execute.py | Python | mit | 30,624 | 0.001404 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import unittest
import pytest
from pywps import xml_util as etree
import json
import tempfile
import os.path
from pywps import Service, Process, LiteralOutput, LiteralInput,\
BoundingBoxOutput, BoundingBoxInput, Format, ComplexInput, ComplexOutput, FORMATS
from pywps.validator.base import emptyvalidator
from pywps.validator.complexvalidator import validategml
from pywps.exceptions import InvalidParameterValue
from pywps import get_inputs_from_xml
from pywps import E, get_ElementMakerForVersion
from pywps.app.basic import get_xpath_ns
from pywps.tests import client_for, assert_response_success, assert_response_success_json
from pywps import configuration
from io import StringIO
# Optional dependency: a subset of the tests requires netCDF4 support.
try:
    import netCDF4
except ImportError:
    WITH_NC4 = False
else:
    WITH_NC4 = True
# Fixtures directory next to this test module.
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
# WPS protocol version exercised by this module; element makers and the
# namespaced-xpath helper are bound to it once here.
VERSION = "1.0.0"
WPS, OWS = get_ElementMakerForVersion(VERSION)
xpath_ns = get_xpath_ns(VERSION)
def create_ultimate_question():
    """Build a trivial WPS process that always answers '42'."""
    def run(request, response):
        # The answer to life, the universe and everything.
        response.outputs['outvalue'].data = '42'
        return response

    return Process(
        handler=run,
        identifier='ultimate_question',
        title='Ultimate Question',
        outputs=[LiteralOutput('outvalue', 'Output Value', data_type='string')],
    )
def create_greeter():
    """Build a WPS process greeting the supplied literal string input."""
    def run(request, response):
        who = request.inputs['name'][0].data
        assert isinstance(who, str)
        response.outputs['message'].data = "Hello {}!".format(who)
        return response

    return Process(
        handler=run,
        identifier='greeter',
        title='Greeter',
        inputs=[LiteralInput('name', 'Input name', data_type='string')],
        outputs=[LiteralOutput('message', 'Output message', data_type='string')],
    )
def create_translated_greeter():
    """Build a greeter Process carrying fr-CA translations for the process,
    its input and its output metadata (used by translation tests)."""
    def greeter(request, response):
        name = request.inputs['name'][0].data
        response.outputs['message'].data = "Hello {}!".format(name)
        return response
    return Process(
        handler=greeter,
        identifier='greeter',
        title='Greeter',
        abstract='Say hello',
        inputs=[
            LiteralInput(
                'name',
                'Input name',
                data_type='string',
                abstract='Input description',
                translations={"fr-CA": {"title": "Nom", "abstract": "Description"}},
            )
        ],
        outputs=[
            LiteralOutput(
                'message',
                'Output message',
                data_type='string',
                abstract='Output description',
                translations={"fr-CA": {"title": "Message de retour", "abstract": "Description"}},
            )
        ],
        translations={"fr-CA": {"title": "Salutations", "abstract": "Dire allô"}},
    )
def create_bbox_process():
    """Build a Process that echoes a WGS84 bounding box; the asserts in the
    handler double as checks on the parsed coordinate list."""
    def bbox_process(request, response):
        coords = request.inputs['mybbox'][0].data
        assert isinstance(coords, list)
        assert len(coords) == 4
        assert coords[0] == 15.0
        response.outputs['outbbox'].data = coords
        return response

    # fixed: the outputs= continuation line was garbled by extraction.
    return Process(handler=bbox_process,
                   identifier='my_bbox_process',
                   title='Bbox process',
                   inputs=[BoundingBoxInput('mybbox', 'Input name', ["EPSG:4326"])],
                   outputs=[BoundingBoxOutput('outbbox', 'Output message', ["EPSG:4326"])])
def create_complex_proces(mime_type: str = 'gml'):
    """Build an echo Process with one optional complex input and output.

    :param mime_type: 'gml' (a deliberately unknown mimetype) or 'geojson'.
    :raises ValueError: for any other mime_type.
    """
    def complex_proces(request, response):
        response.outputs['complex'].data = request.inputs['complex'][0].data_as_json()
        return response

    if mime_type == 'gml':
        frmt = Format(mime_type='application/gml', extension=".gml")  # this is unknown mimetype
    elif mime_type == 'geojson':
        frmt = FORMATS.GEOJSON
    else:
        # ValueError is the conventional exception for a bad argument value;
        # it subclasses Exception, so existing broad handlers keep working.
        raise ValueError(f'Unknown mime type {mime_type}')

    return Process(handler=complex_proces,
                   identifier='my_complex_process',
                   title='Complex process',
                   inputs=[
                       ComplexInput(
                           'complex',
                           'Complex input',
                           min_occurs=0,
                           default="DEFAULT COMPLEX DATA",
                           supported_formats=[frmt])
                   ],
                   outputs=[
                       ComplexOutput(
                           'complex',
                           'Complex output',
                           supported_formats=[frmt])
                   ])
def create_complex_nc_process():
    """Build a Process reading an OPeNDAP/NetCDF input and returning the
    dataset's Conventions attribute plus the data by reference and inline."""
    def complex_proces(request, response):
        # Imported lazily from pywps.dependencies so the module loads even
        # when netCDF4 is unavailable (see WITH_NC4 above).
        from pywps.dependencies import netCDF4 as nc
        url = request.inputs['dods'][0].url
        with nc.Dataset(url) as D:
            response.outputs['conventions'].data = D.Conventions
        response.outputs['outdods'].url = url
        response.outputs['ncraw'].file = os.path.join(DATA_DIR, 'netcdf', 'time.nc')
        response.outputs['ncraw'].data_format = FORMATS.NETCDF
        return response
    return Process(handler=complex_proces,
                   identifier='my_opendap_process',
                   title='Opendap process',
                   inputs=[
                       ComplexInput(
                           'dods',
                           'Opendap input',
                           supported_formats=[Format('DODS'), Format('NETCDF')],
                           # mode=MODE.STRICT
                       )
                   ],
                   outputs=[
                       LiteralOutput(
                           'conventions',
                           'NetCDF convention',
                       ),
                       ComplexOutput('outdods', 'Opendap output',
                                     supported_formats=[FORMATS.DODS, ],
                                     as_reference=True),
                       ComplexOutput('ncraw', 'NetCDF raw data output',
                                     supported_formats=[FORMATS.NETCDF, ],
                                     as_reference=False)
                   ])
def create_mimetype_process():
    """Build a Process whose single output reports its own negotiated
    output format's mime type (two text formats are offered)."""
    def _handler(request, response):
        response.outputs['mimetype'].data = response.outputs['mimetype'].data_format.mime_type
        return response
    frmt_txt = Format(mime_type='text/plain')
    frmt_txt2 = Format(mime_type='text/plain+test')
    return Process(handler=_handler,
                   identifier='get_mimetype_process',
                   title='Get mimeType process',
                   inputs=[],
                   outputs=[
                       ComplexOutput(
                           'mimetype',
                           'mimetype of requested output',
                           supported_formats=[frmt_txt, frmt_txt2])
                   ])
def create_metalink_process():
    """Instantiate the multiple-outputs (metalink) fixture process."""
    # NOTE(review): imported inside the function, presumably to defer the
    # relative import until actually needed -- confirm.
    from .processes.metalinkprocess import MultipleOutputs
    return MultipleOutputs()
def get_output(doc):
"""Return the content of LiteralData, Reference or ComplexData."""
output = {}
for output_el in xpath_ns(doc, '/wps:ExecuteResponse'
'/wps:ProcessOutputs/wps:Output'):
[identifier_el] = xpath_ns(output_el, './ows:Identifier')
lit_el = xpath_ns(output_el, './wps:Data/wps:LiteralData')
if lit_el != []:
output[identifier_el.text] = lit_el[0].text
ref_el = xpath_ns(output_el, './wps:Reference')
if ref_el != []:
output[identifier_el.text] = ref_el[0].attrib['href']
data_el = xpath_ns(output_el, './wps:Data/wps:ComplexData')
if data_el != []:
if data_el[0].text:
output[identifier_el.text] = data_el[0].text
else: # XML children
ch = list(data_el[0])[0]
|
samowitsch/bCNC | bCNC/lib/python_utils/import_.py | Python | gpl-2.0 | 2,797 | 0 |
from __future__ import absolute_import
class DummyException(Exception):
    # Sentinel exception type: as the default for ``exceptions`` it ensures
    # no real exception raised during import is swallowed unless the caller
    # opts in.
    pass


def import_global(
        name, modules=None, exceptions=DummyException, locals_=None,
        globals_=None, level=-1):
    '''Import the requested items into the global scope

    WARNING! this method _will_ overwrite your global scope
    If you have a variable named "path" and you call import_global('sys')
    it will be overwritten with sys.path

    Args:
        name (str): the name of the module to import, e.g. sys
        modules (str): the modules to import, use None for everything
        exceptions (Exception): the exception(s) to catch, e.g. ImportError
        `locals_`: the `locals()` method (in case you need a different scope)
        `globals_`: the `globals()` method (in case you need a different scope)
        level (int): the level to import from, this can be used for
            relative imports

    Returns the caught exception on failure, None on success.
    '''
    frame = None
    try:
        # If locals_ or globals_ are not given, autodetect them by inspecting
        # the current stack.  NOTE: stack()[1] only works when called
        # directly by the target scope -- do not wrap this call.
        if locals_ is None or globals_ is None:
            import inspect
            frame = inspect.stack()[1][0]
            if locals_ is None:
                locals_ = frame.f_locals
            if globals_ is None:
                globals_ = frame.f_globals
        try:
            name = name.split('.')
            # Relative imports are supported (from .spam import eggs)
            if not name[0]:
                name = name[1:]
                level = 1
            module = __import__(
                name=name[0] or '.',
                globals=globals_,
                locals=locals_,
                fromlist=name[1:],
                level=max(level, 0),
            )
            # Make sure we get the right part of a dotted import (i.e.
            # spam.eggs should return eggs, not spam)
            try:
                for attr in name[1:]:
                    module = getattr(module, attr)
            except AttributeError:
                raise ImportError('No module named ' + '.'.join(name))
            # If no list of modules is given, autodetect from either __all__
            # or a dir() of the module
            if not modules:
                modules = getattr(module, '__all__', dir(module))
            else:
                modules = set(modules).intersection(dir(module))
            # Add all public items in modules to the global scope.
            # fixed: this loop body was garbled by extraction ("| globals_[k]...")
            for k in set(dir(module)).intersection(modules):
                if k and k[0] != '_':
                    globals_[k] = getattr(module, k)
        except exceptions as e:
            # fixed: garbled by extraction ("| return e"); by design the
            # caught exception is returned instead of raised.
            return e
    finally:
        # Clean up, just to be sure the frame reference cannot leak.
        del name, modules, exceptions, locals_, globals_, frame
|
teedoo/dotfiles | .sublime/Packages/PackageDev/completions_dev.py | Python | mit | 921 | 0.001086 | import sys
import s | ublime_plugin
if sys.version_info < (3,):
    from sublime_lib.path import root_at_packages, get_package_name
else:
from .sublime_lib.path import root_at_packages, get_package_name
PLUGIN_NAME = get_package_name()
COMPLETIONS_SYNTAX_DEF = ("Packages/%s/Syntax Definitions/Sublime Completions.tmLanguage"
% PLUGIN_NAME)
TPL = """{
"scope": "source.${1:off}",
"completions": [
{ "trigger": "${2:some_trigger}", "contents": "${3:Hint: Use f, ff and fff plus Tab inside here.}" }$0
]
}""".replace(" ", "\t") # NOQA - line length
class NewCompletionsCommand(sublime_plugin.WindowCommand):
    """Open a new buffer pre-filled with a .sublime-completions skeleton."""
    def run(self):
        v = self.window.new_file()
        v.run_command('insert_snippet', {"contents": TPL})
        v.set_syntax_file(COMPLETIONS_SYNTAX_DEF)
        # Default save location: the user's Packages/User directory.
        v.settings().set('default_dir', root_at_packages('User'))
|
tomv564/LSP | plugin/hover.py | Python | mit | 11,187 | 0.002414 | import mdpopups
import sublime
import sublime_plugin
import webbrowser
import os
from html import escape
from .code_actions import actions_manager, run_code_action_or_command
from .code_actions import CodeActionOrCommand
from .core.configurations import is_supported_syntax
from .core.popups import popups
from .core.protocol import Request, DiagnosticSeverity, Diagnostic, DiagnosticRelatedInformation, Point
from .core.registry import session_for_view, LspTextCommand, windows
from .core.settings import client_configs, settings
from .core.typing import List, Optional, Any, Dict
from .core.views import make_link
from .core.views import text_document_position_params
from .diagnostics import filter_by_point, view_diagnostics
# Bitmask passed to View.classify() to decide whether the caret/hover point
# sits on a word.  NOTE(review): 515 == 0b1000000011 -- confirm which
# sublime.CLASS_* flags this is meant to combine.
SUBLIME_WORD_MASK = 515
class HoverHandler(sublime_plugin.ViewEventListener):
    """Per-view listener that forwards mouse hovers to the lsp_hover command."""
    def __init__(self, view: sublime.View) -> None:
        self.view = view

    @classmethod
    def is_applicable(cls, view_settings: dict) -> bool:
        # Attach only when hover is enabled and the view's syntax is handled
        # by at least one configured language client.
        if 'hover' in settings.disabled_capabilities:
            return False
        syntax = view_settings.get('syntax')
        if syntax:
            return is_supported_syntax(syntax, client_configs.all)
        else:
            return False

    def on_hover(self, point: int, hover_zone: int) -> None:
        # React only to hovers over text (not gutter/margin), and do not
        # re-trigger while a popup is already showing.
        if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():
            return
        self.view.run_command("lsp_hover", {"point": point})
# NOTE(review): module-level list, apparently used by tests to capture popup
# content -- not referenced in this chunk; confirm before removing.
_test_contents = []  # type: List[str]
# CSS class applied in the popup for each diagnostic severity.
class_for_severity = {
    DiagnosticSeverity.Error: 'errors',
    DiagnosticSeverity.Warning: 'warnings',
    DiagnosticSeverity.Information: 'info',
    DiagnosticSeverity.Hint: 'hints'
}
class GotoKind:
    """Names describing one LSP "goto" capability.

    ``lsp_name`` is the capability/request name on the LSP side, ``label``
    the human-readable text, and ``subl_cmd_name`` the suffix of the
    corresponding Sublime command.
    """
    __slots__ = ("lsp_name", "label", "subl_cmd_name")

    def __init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:
        self.subl_cmd_name = subl_cmd_name
        self.label = label
        self.lsp_name = lsp_name
# The goto flavours offered in the hover popup, in display order.
goto_kinds = [
    GotoKind("definition", "Definition", "definition"),
    GotoKind("typeDefinition", "Type Definition", "type_definition"),
    GotoKind("declaration", "Declaration", "declaration"),
    GotoKind("implementation", "Implementation", "implementation")
]
class LspHoverCommand(LspTextCommand):
    def __init__(self, view: sublime.View) -> None:
        super().__init__(view)
        # Project root of the hovered file; resolved lazily in run().
        self._base_dir = None  # type: Optional[str]

    def is_likely_at_symbol(self, point: int) -> bool:
        # True when Sublime classifies `point` as word-like (see
        # SUBLIME_WORD_MASK above), i.e. a symbol hover may be worthwhile.
        word_at_sel = self.view.classify(point)
        return bool(word_at_sel & SUBLIME_WORD_MASK)
def run(self, edit: sublime.Edit, point: Optional[int] = None) -> None:
hover_point = point or self.view.sel()[0].begin()
self._base_dir = windows.lookup(self.view.window()).get_project_path(self.view.file_name() or "")
self._hover = None # type: Optional[Any]
self._actions_by_config = {} # type: Dict[str, List[CodeActionOrCommand]]
self._diagnostics_by_config = {} # type: Dict[str, List[Diagnostic]]
if self.is_likely_at_symbol(hover_point):
self.request_symbol_hover(hover_point)
| self._diagnostics_by_config = filter_by_point(view_diagnostics(self.view),
| Point(*self.view.rowcol(hover_point)))
if self._diagnostics_by_config:
self.request_code_actions(hover_point)
self.request_show_hover(hover_point)
    def request_symbol_hover(self, point: int) -> None:
        """Send an async textDocument/hover request; handle_response shows it."""
        # todo: session_for_view looks up windowmanager twice (config and for sessions)
        # can we memoize some part (eg. where no point is provided?)
        session = session_for_view(self.view, 'hoverProvider', point)
        if session:
            document_position = text_document_position_params(self.view, point)
            if session.client:
                session.client.send_request(
                    Request.hover(document_position),
                    lambda response: self.handle_response(response, point))
    def request_code_actions(self, point: int) -> None:
        # Async request; the callback stores the actions and re-renders.
        actions_manager.request(self.view, point, lambda response: self.handle_code_actions(response, point))

    def handle_code_actions(self, responses: Dict[str, List[CodeActionOrCommand]], point: int) -> None:
        # Store actions per client config, then (re-)request the popup.
        self._actions_by_config = responses
        self.request_show_hover(point)

    def handle_response(self, response: Optional[Any], point: int) -> None:
        # Store the raw hover payload, then (re-)request the popup.
        self._hover = response
        self.request_show_hover(point)
    def symbol_actions_content(self) -> str:
        """Render the action-links line (goto/references/rename) as HTML,
        including only actions some attached client is capable of."""
        actions = []
        for goto_kind in goto_kinds:
            if self.has_client_with_capability(goto_kind.lsp_name + "Provider"):
                actions.append(make_link(goto_kind.lsp_name, goto_kind.label))
        if self.has_client_with_capability('referencesProvider'):
            actions.append(make_link('references', 'References'))
        if self.has_client_with_capability('renameProvider'):
            actions.append(make_link('rename', 'Rename'))
        return "<p class='actions'>" + " | ".join(actions) + "</p>"
def format_diagnostic_related_info(self, info: DiagnosticRelatedInformation) -> str:
file_path = info.location.file_path
if self._base_dir and file_path.startswith(self._base_dir):
file_path = os.path.relpath(file_path, self._base_dir)
location = "{}:{}:{}".format(file_path, info.location.range.start.row+1, info.location.range.start.col+1)
link = make_link("location:{}".format(location), location)
return "{}: {}".format(link, escape(info.message))
def format_diagnostic(self, diagnostic: 'Diagnostic') -> str:
diagnostic_message = escape(diagnostic.message, False).replace('\n', '<br>')
related_infos = [self.format_diagnostic_related_info(info) for info in diagnostic.related_info]
related_content = "<pre class='related_info'>" + "<br>".join(related_infos) + "</pre>" if related_infos else ""
if diagnostic.source:
return "<pre class=\"{}\">[{}] {}{}</pre>".format(class_for_severity[diagnostic.severity],
diagnostic.source, diagnostic_message, related_content)
else:
return "<pre class=\"{}\">{}{}</pre>".format(class_for_severity[diagnostic.severity], diagnostic_message,
related_content)
def diagnostics_content(self) -> str:
formatted = []
for config_name in self._diagnostics_by_config:
by_severity = {} # type: Dict[int, List[str]]
formatted.append("<div class='diagnostics'>")
for diagnostic in self._diagnostics_by_config[config_name]:
by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))
for severity, items in by_severity.items():
formatted.append("<div>")
formatted.extend(items)
formatted.append("</div>")
if config_name in self._actions_by_config:
action_count = len(self._actions_by_config[config_name])
if action_count > 0:
href = "{}:{}".format('code-actions', config_name)
text = "{} ({})".format('Code Actions', action_count)
formatted.append("<div class=\"actions\">{}</div>".format(make_link(href, text)))
formatted.append("</div>")
return "".join(formatted)
def hover_content(self) -> str:
contents = [] # type: List[Any]
if isinstance(self._hover, dict):
response_content = self._hover.get('contents')
if response_content:
if isinstance(response_content, list):
contents = response_content
else:
contents = [response_content]
formatted = []
for item in contents:
value = ""
language = None
if isinstance(item, str):
value = item
else:
value = item.get("value")
language = item.get("language")
|
cbernet/cpyroot | tools/fitter2d.py | Python | gpl-2.0 | 1,550 | 0.008387 | from ROOT import gDirectory, TH2F, TH1F, TFile
class Fitter2D(object):
    """Wrap a ROOT TH2F and fit gaussians to its y-projections, bin by bin.

    Reconstructed two extraction-corrupted tokens (SetXTitle / SetDirectory);
    no behavioral change otherwise.
    """

    def __init__(self, *args):
        # Arguments are forwarded verbatim to the ROOT TH2F constructor.
        self.h2d = TH2F(*args)

    def draw2D(self, *args):
        """Draw the 2D histogram, overlaying the fitted mean profile."""
        self.h2d.Draw(*args)
        self.hmean.Draw('psame')

    def fit(self, bin, opt='0'):
        """Fit a gaussian to the y-projection of x-bin *bin*.

        Returns (x, dx, mean, dmean, sigma, dsigma); all zeros when the
        slice is empty. *opt* is passed to TH1::Fit ('0' = do not draw).
        """
        hslice = self.h2d.ProjectionY("", bin, bin, "")
        if not hslice.GetEntries():
            return 0., 0., 0., 0., 0., 0.
        hslice.Fit('gaus', opt)
        func = hslice.GetFunction('gaus')
        x = self.h2d.GetXaxis().GetBinCenter(bin)
        dx = self.h2d.GetXaxis().GetBinWidth(bin)
        mean = func.GetParameter(1)
        dmean = func.GetParError(1)
        sigma = func.GetParameter(2)
        dsigma = func.GetParError(2)
        return x, dx, mean, dmean, sigma, dsigma

    def fit_slices(self):
        """Run ROOT's FitSlicesY and pick up the mean/sigma/chi2 histograms
        it deposits in the current directory."""
        self.h2d.FitSlicesY()
        self.hmean = gDirectory.Get(self.h2d.GetName() + '_1')
        self.hsigma = gDirectory.Get(self.h2d.GetName() + '_2')
        # self.hsigma.SetYTitle('#sigma(MET_{x,y})')
        self.hchi2 = gDirectory.Get(self.h2d.GetName() + '_chi2')

    def format(self, style, xtitle):
        """Apply *style* to the fit histograms and set their x-axis title."""
        for hist in [self.hmean, self.hsigma, self.hchi2]:
            style.format(hist)
            hist.SetTitle('')
            hist.SetXTitle(xtitle)

    def write(self):
        """Write the fit histograms and the 2D histogram to '<name>.root'."""
        outfile = TFile(self.h2d.GetName() + '.root', 'recreate')
        for hist in [self.hmean, self.hsigma, self.hchi2, self.h2d]:
            # NOTE(review): the Clone() result is discarded — presumably meant
            # to keep an in-memory copy before SetDirectory moves the
            # histogram into the file; confirm intent.
            hist.Clone()
            hist.SetDirectory(outfile)
        outfile.Write()
        outfile.Close()
|
ichung/karaoke-chan | kchan/timedtext.py | Python | mit | 2,694 | 0.00297 | #! /usr/bin/python2
import re
from kchan.lyrics import Lyrics
def load(lyricsData):
    """Parse timestamped lyric text into a Lyrics instance.

    Args:
        lyricsData (str): Text interleaved with [mm:ss] or [mm:ss.xx]
            timestamps. Consecutive timestamps with nothing between them all
            apply to the next nonempty phrase (the phrase repeats at several
            times in the song). Text before the first timestamp is treated as
            starting at [00:00.00].

    Returns:
        Lyrics instance holding the phrases and their timings, no metadata.
    """
    result = Lyrics()
    pieces = re.split(r"(\[\d\d:\d\d\]|\[\d\d:\d\d\.\d\d\])", lyricsData)
    if pieces[0]:
        result.addPhrase(pieces[0], [0])
    pending = []  # timestamps waiting for their phrase, in hundredths of a second
    for stamp, phrase in zip(pieces[1::2], pieces[2::2]):
        match = re.match(r"\[(\d\d):(\d\d)\.?(\d\d)?\]", stamp)
        minutes, seconds, hundredths = match.groups('00')
        pending.append(int(minutes) * 6000 + int(seconds) * 100 + int(hundredths))
        if phrase:
            result.addPhrase(phrase, pending)
            pending = []
    if pending:
        # Trailing timestamps with no following text map to an empty phrase.
        result.addPhrase("", pending)
    return result
def dump(lyrics, frac=False, crlf=False):
    """Serialize timing data from a Lyrics instance into timestamped text.

    Args:
        lyrics (Lyrics): Lyrics instance to be converted to text

    Kwargs:
        frac (bool): whether to include hundredths of a second in timestamps.
            If True, timestamps are of the form [mm:ss.xx], otherwise they
            are just [mm:ss] (rounded to the nearest second). Defaults to
            False.
        crlf (bool): whether to use CRLF newlines. Defaults to False.

    Returns:
        str. A string containing phrases and timestamps.
    """
    phrases = [[phrase.replace('\n', '\r\n') if crlf else phrase]
               for phrase in lyrics.getPhrases()]
    times = lyrics.getTimes()
    for (time, idx) in times:
        # time is in hundredths of a second; floor division keeps the
        # arithmetic integral under both Python 2 and Python 3 (plain '/'
        # would produce floats on Python 3 and break the {:02} formatting).
        minutes = time // 6000
        seconds = (time // 100) % 60
        hundredths = time % 100
        if frac:
            phrases[idx].insert(-1, "[{:02}:{:02}.{:02}]".format(minutes, seconds, hundredths))
        else:
            phrases[idx].insert(-1, "[{:02}:{:02}]".format(minutes, seconds + (1 if hundredths >= 50 else 0)))
    return ''.join(''.join(l) for l in phrases)
|
agepoly/mezzanine | mezzanine/pages/migrations/0015_auto__add_field_page_title_fr_CH__add_field_page_title_en__add_field_p.py | Python | bsd-2-clause | 8,423 | 0.006174 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Page.title_fr_CH'
db.add_column(u'pages_page', 'title_fr_CH',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
# Adding field 'Page.title_en'
db.add_column(u'pages_page', 'title_en',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
# Adding field 'Page._meta_title_fr_CH'
db.add_column(u'pages_page', '_meta_title_fr_CH',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
# Adding field 'Page._meta_title_en'
db.add_column(u'pages_page', '_meta_title_en',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
# Adding field 'Page.description_fr_CH'
db.add_column(u'pages_page', 'description_fr_CH',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'Page.description_en'
db.add_column(u'pages_page', 'description_en',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'Page.titles_fr_CH'
db.add_column(u'pages_page', 'titles_fr_CH',
self.gf('django.db.models.fields.CharField')(max_length=1000, null=True, blank=True),
keep_default=False)
# Adding field 'Page.titles_en'
db.add_column(u'pages_page', 'titles_en',
self.gf('django.db.models.fields.CharField')(max_length=1000, null=True, blank=True),
keep_default=False)
# Adding field 'RichTextPage.content_fr_CH'
db.add_column(u'pages_richtextpage', 'content_fr_CH',
self.gf('mezzanine.core.fields.RichTextField')(null=True, blank=True),
keep_default=False)
# Adding field 'RichTextPage.content_en'
db.add_column(u'pages_richtextpage', 'content_en',
self.gf('mezzanine.core.fields.RichTextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Page.title_fr_CH'
db.delete_column(u'pages_page', 'title_fr_CH')
# Deleting field 'Page.title_en'
db.delete_column(u'pages_page', 'title_en')
# Deleting field 'Page._meta_title_fr_CH'
db.delete_column(u'pages_page', '_meta_title_fr_CH')
# Deleting field 'Page._meta_title_en'
db.delete_column(u'pages | _page', '_meta_title_en')
# Deleting field 'Page.description_fr_CH'
db.delete_column(u'pages_page', 'description_fr_CH')
# Deleting field 'Page.description_en'
db.delete_column(u'pages_page', 'description_en')
# Deleting field 'Page.titles_fr_CH'
db.delete_column(u'pages_page', 'titles_fr_CH')
# Deleting field 'Page.titles_en'
db.delete_column(u'pages_page', 'titles_en')
# Deleting field 'RichTextPage.content_fr_CH'
| db.delete_column(u'pages_richtextpage', 'content_fr_CH')
# Deleting field 'RichTextPage.content_en'
db.delete_column(u'pages_richtextpage', 'content_en')
models = {
u'pages.link': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Link', '_ormbases': [u'pages.Page']},
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'pages.page': {
'Meta': {'ordering': "(u'titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_meta_title_en': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_meta_title_fr_CH': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr_CH': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title_fr_CH': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'titles_en': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'titles_fr_CH': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'pages.richtextpage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'RichTextPage', '_ormbases': [u'pages.Page']},
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'content_en': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'content_fr_CH': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [] |
barrazamiguel/pydrez | modelo/tuple.py | Python | gpl-3.0 | 157 | 0.025478 | # -*- encoding: utf-8 -*-
def tupleToString(t):
    """Join the elements of *t* into a single space-separated string.

    Unlike the original reduce()-based implementation this also handles an
    empty tuple (returns ""), and works unchanged on Python 3 where reduce
    is no longer a builtin.
    """
    return " ".join(str(item) for item in t)
def stringTo | Tuple(s):
return tuple(s.split(" ")) |
chromium/chromium | tools/grit/grit/node/node_io_unittest.py | Python | bsd-3-clause | 6,562 | 0.004267 | #!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for node_io.FileNode'''
from __future__ import print_function
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from six import StringIO
from grit.node import misc
from grit.node import node_io
from grit.node import empty
from grit import grd_reader
from grit import util
def _GetAllCliques(root_node):
"""Return all cliques in the |root_node| tree."""
ret = []
for node in root_node:
ret.extend(node.GetCliques())
return ret
class FileNodeUnittest(unittest.TestCase):
  """Unit tests for node_io.FileNode and <translations> handling.

  Fixes: reconstructed two extraction-corrupted identifiers, and replaced
  the deprecated failUnless aliases (removed in Python 3.12) with
  assertTrue/assertEqual/assertIn equivalents.
  """

  def testGetPath(self):
    root = misc.GritNode()
    root.StartParsing(u'grit', None)
    root.HandleAttribute(u'latest_public_release', u'0')
    root.HandleAttribute(u'current_release', u'1')
    root.HandleAttribute(u'base_dir', r'..\resource')
    translations = empty.TranslationsNode()
    translations.StartParsing(u'translations', root)
    root.AddChild(translations)
    file_node = node_io.FileNode()
    file_node.StartParsing(u'file', translations)
    file_node.HandleAttribute(u'path', r'flugel\kugel.pdf')
    translations.AddChild(file_node)
    root.EndParsing()
    self.assertEqual(root.ToRealPath(file_node.GetInputPath()),
                     util.normpath(
                         os.path.join(r'../resource', r'flugel/kugel.pdf')))

  def VerifyCliquesContainEnglishAndFrenchAndNothingElse(self, cliques):
    # Helper: exactly two cliques, each translated into en and fr only.
    self.assertEqual(2, len(cliques))
    for clique in cliques:
      self.assertEqual({'en', 'fr'}, set(clique.clique.keys()))

  def testLoadTranslations(self):
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <file path="generated_resources_fr.xtb" lang="fr" />
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(_GetAllCliques(grd))

  def testIffyness(self):
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <if expr="lang == 'fr'">
            <file path="generated_resources_fr.xtb" lang="fr" />
          </if>
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''), util.PathFromRoot('grit/testdata'))
    # With output language 'en' the conditional xtb must not be loaded.
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    cliques = _GetAllCliques(grd)
    self.assertEqual(2, len(cliques))
    for clique in cliques:
      self.assertEqual({'en'}, set(clique.clique.keys()))

    # Switching to 'fr' activates the <if> and loads the translations.
    grd.SetOutputLanguage('fr')
    grd.RunGatherers()
    self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(_GetAllCliques(grd))

  def testConditionalLoadTranslations(self):
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3"
          base_dir=".">
        <translations>
          <if expr="True">
            <file path="generated_resources_fr.xtb" lang="fr" />
          </if>
          <if expr="False">
            <file path="no_such_file.xtb" lang="de" />
          </if>
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
              Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(_GetAllCliques(grd))

  def testConditionalOutput(self):
    xml = '''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3"
          base_dir=".">
        <outputs>
          <output filename="resource.h" type="rc_header" />
          <output filename="en/generated_resources.rc" type="rc_all"
                  lang="en" />
          <if expr="pp_if('NOT_TRUE')">
            <output filename="de/generated_resources.rc" type="rc_all"
                    lang="de" />
          </if>
        </outputs>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
          </messages>
        </release>
      </grit>'''
    grd = grd_reader.Parse(StringIO(xml),
                           util.PathFromRoot('grit/test/data'),
                           defines={})
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    outputs = grd.GetChildrenOfType(node_io.OutputNode)
    active = set(grd.ActiveDescendants())
    self.assertIn(outputs[0], active)
    self.assertEqual(outputs[0].GetType(), 'rc_header')
    self.assertIn(outputs[1], active)
    self.assertEqual(outputs[1].GetType(), 'rc_all')
    self.assertNotIn(outputs[2], active)
    self.assertEqual(outputs[2].GetType(), 'rc_all')

  # Verify that 'iw' and 'no' language codes in xtb files are mapped to 'he'
  # and 'nb'.
  def testLangCodeMapping(self):
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <file path="generated_resources_no.xtb" lang="nb" />
          <file path="generated_resources_iw.xtb" lang="he" />
        </translations>
        <release seq="3">
          <messages></messages>
        </release>
      </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    self.assertEqual([], _GetAllCliques(grd))
if __name__ == '__main__':
unittest.main()
|
mhugent/Quantum-GIS | python/plugins/processing/ProcessingPlugin.py | Python | gpl-2.0 | 6,571 | 0.002587 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingPlugin.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing import interface
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import shutil
import inspect
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from processing.core.Processing import Processing
from processing.gui.ProcessingToolbox import ProcessingToolbox
from processing.gui.HistoryDialog import HistoryDialog
from processing.gui.ConfigDialog import ConfigDialog
from processing.gui.ResultsDialog import ResultsDialog
from processing.modeler.ModelerDialog import ModelerDialog
from processing.gui.CommanderWindow import CommanderWindow
from processing.tools import dataobjects
from processing.tools.system import *
import processing.resources_rc
cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
class ProcessingPlugin:
    """QGIS plugin entry point: wires the Processing framework into the GUI.

    Reconstructed two extraction-corrupted tokens in __init__/initGui;
    behavior is otherwise unchanged.
    """

    def __init__(self, iface):
        # Publish the QGIS interface object for the rest of the plugin and
        # initialise the Processing framework (providers, algorithm lists).
        interface.iface = iface
        Processing.initialize()

    def initGui(self):
        """Create the toolbox dock widget, the 'Processing' menu and actions."""
        self.commander = None
        self.toolbox = ProcessingToolbox()
        interface.iface.addDockWidget(Qt.RightDockWidgetArea, self.toolbox)
        self.toolbox.hide()
        #Processing.addAlgListListener(self.toolbox)

        self.menu = QMenu(interface.iface.mainWindow().menuBar())
        self.menu.setObjectName('processing')
        self.menu.setTitle(QCoreApplication.translate('Processing',
                                                      'Processing'))

        self.toolboxAction = self.toolbox.toggleViewAction()
        self.toolboxAction.setObjectName('toolboxAction')
        self.toolboxAction.setIcon(QIcon(':/processing/images/alg.png'))
        self.toolboxAction.setText(QCoreApplication.translate('Processing',
                                                              'Toolbox'))
        self.menu.addAction(self.toolboxAction)

        self.modelerAction = QAction(QIcon(':/processing/images/model.png'),
                                     QCoreApplication.translate('Processing',
                                                                'Graphical modeler'),
                                     interface.iface.mainWindow())
        self.modelerAction.setObjectName('modelerAction')
        self.modelerAction.triggered.connect(self.openModeler)
        self.menu.addAction(self.modelerAction)

        self.historyAction = QAction(QIcon(':/processing/images/history.gif'),
                                     QCoreApplication.translate('Processing',
                                                                'History and log'),
                                     interface.iface.mainWindow())
        self.historyAction.setObjectName('historyAction')
        self.historyAction.triggered.connect(self.openHistory)
        self.menu.addAction(self.historyAction)

        self.configAction = QAction(QIcon(':/processing/images/config.png'),
                                    QCoreApplication.translate('Processing',
                                                               'Options and configuration'),
                                    interface.iface.mainWindow())
        self.configAction.setObjectName('configAction')
        self.configAction.triggered.connect(self.openConfig)
        self.menu.addAction(self.configAction)

        self.resultsAction = QAction(QIcon(':/processing/images/results.png'),
                                     QCoreApplication.translate('Processing',
                                                                '&Results viewer'),
                                     interface.iface.mainWindow())
        self.resultsAction.setObjectName('resultsAction')
        self.resultsAction.triggered.connect(self.openResults)
        self.menu.addAction(self.resultsAction)

        menuBar = interface.iface.mainWindow().menuBar()
        menuBar.insertMenu(
            interface.iface.firstRightStandardMenu().menuAction(), self.menu)

        self.commanderAction = QAction(
            QIcon(':/processing/images/commander.png'),
            QCoreApplication.translate('Processing', '&Commander'),
            interface.iface.mainWindow())
        self.commanderAction.setObjectName('commanderAction')
        self.commanderAction.triggered.connect(self.openCommander)
        self.menu.addAction(self.commanderAction)
        interface.iface.registerMainWindowAction(self.commanderAction,
                                                 'Ctrl+Alt+M')

    def unload(self):
        """Remove the GUI elements and wipe the temporary output folder."""
        self.toolbox.setVisible(False)
        self.menu.deleteLater()
        # delete temporary output files
        folder = tempFolder()
        if QDir(folder).exists():
            shutil.rmtree(folder, True)
        interface.iface.unregisterMainWindowAction(self.commanderAction)

    def openCommander(self):
        """Show the commander window, creating it lazily on first use."""
        if self.commander is None:
            self.commander = CommanderWindow(interface.iface.mainWindow(),
                                             interface.iface.mapCanvas())
            Processing.addAlgListListener(self.commander)
        self.commander.prepareGui()
        self.commander.show()

    def openToolbox(self):
        """Toggle visibility of the toolbox dock widget."""
        if self.toolbox.isVisible():
            self.toolbox.hide()
        else:
            self.toolbox.show()

    def openModeler(self):
        """Open the graphical modeler; refresh the alg list if models changed."""
        dlg = ModelerDialog()
        dlg.show()
        dlg.exec_()
        if dlg.update:
            Processing.updateAlgsList()
            self.toolbox.updateProvider('model')

    def openResults(self):
        """Open the results viewer dialog."""
        dlg = ResultsDialog()
        dlg.show()
        dlg.exec_()

    def openHistory(self):
        """Open the history and log dialog."""
        dlg = HistoryDialog()
        dlg.exec_()

    def openConfig(self):
        """Open the Processing options/configuration dialog."""
        dlg = ConfigDialog(self.toolbox)
        dlg.exec_()
|
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/galaxy/api/group_roles.py | Python | gpl-3.0 | 5,199 | 0.027313 | """
API operations on Group objects.
"""
import logging
from galaxy.web.base.controller import BaseAPIController, url_for
from galaxy import web
log = logging.getLogger( __name__ )
class GroupRolesAPIController( BaseAPIController ):
    """API operations on the roles associated with a group.

    Fixes: `delete` never initialised `item`, so the "role not in group"
    branch raised NameError (masked by the broad except); a bare `except:`
    in `index` is narrowed; py2-only `except E, e` syntax replaced with
    `except E as e` (valid on Python 2.6+ and Python 3).
    """

    @web.expose_api
    @web.require_admin
    def index( self, trans, group_id, **kwd ):
        """
        GET /api/groups/{encoded_group_id}/roles
        Displays a collection (list) of the group's roles.
        """
        decoded_group_id = trans.security.decode_id( group_id )
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
        except Exception:
            # Any lookup failure is treated the same as "group not found".
            group = None
        if not group:
            trans.response.status = 400
            return "Invalid group id ( %s ) specified." % str( group_id )
        rval = []
        try:
            for gra in group.roles:
                role = gra.role
                encoded_id = trans.security.encode_id( role.id )
                rval.append( dict( id = encoded_id,
                                   name = role.name,
                                   url = url_for( 'group_role', group_id=group_id, id=encoded_id, ) ) )
        except Exception as e:
            rval = "Error in group API at listing roles"
            log.error( rval + ": %s" % str(e) )
            trans.response.status = 500
        return rval

    @web.expose_api
    @web.require_admin
    def show( self, trans, id, group_id, **kwd ):
        """
        GET /api/groups/{encoded_group_id}/roles/{encoded_role_id}
        Displays information about a group role.
        """
        role_id = id
        decoded_group_id = trans.security.decode_id( group_id )
        decoded_role_id = trans.security.decode_id( role_id )
        item = None
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
            role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
            for gra in group.roles:
                if gra.role == role:
                    item = dict( id = role_id,
                                 name = role.name,
                                 url = url_for( 'group_role', group_id=group_id, id=role_id) ) # TODO Fix This
            if not item:
                item = "role %s not in group %s" % ( role.name, group.name )
        except Exception as e:
            item = "Error in group_role API group %s role %s" % ( group.name, role.name )
            log.error( item + ": %s" % str(e) )
        return item

    @web.expose_api
    @web.require_admin
    def update( self, trans, id, group_id, **kwd ):
        """
        PUT /api/groups/{encoded_group_id}/roles/{encoded_role_id}
        Adds a role to a group
        """
        role_id = id
        decoded_group_id = trans.security.decode_id( group_id )
        decoded_role_id = trans.security.decode_id( role_id )
        item = None
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
            role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
            for gra in group.roles:
                if gra.role == role:
                    # Already associated: report it without creating a duplicate.
                    item = dict( id = role_id,
                                 name = role.name,
                                 url = url_for( 'group_role', group_id=group_id, id=role_id) )
            if not item:
                gra = trans.app.model.GroupRoleAssociation( group, role )
                # Add GroupRoleAssociation
                trans.sa_session.add( gra )
                trans.sa_session.flush()
                item = dict( id = role_id,
                             name = role.name,
                             url = url_for( 'group_role', group_id=group_id, id=role_id) )
        except Exception as e:
            item = "Error in group_role API Adding role %s to group %s" % ( role.name, group.name )
            log.error( item + ": %s" % str(e) )
        return item

    @web.expose_api
    @web.require_admin
    def delete( self, trans, id, group_id, **kwd ):
        """
        DELETE /api/groups/{encoded_group_id}/roles/{encoded_role_id}
        Removes a role from a group
        """
        role_id = id
        decoded_group_id = trans.security.decode_id( group_id )
        decoded_role_id = trans.security.decode_id( role_id )
        # Bug fix: item must be initialised, otherwise the "role not in
        # group" branch below raised NameError (silently caught).
        item = None
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
            role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
            for gra in group.roles:
                if gra.role == role:
                    trans.sa_session.delete( gra )
                    trans.sa_session.flush()
                    item = dict( id = role_id,
                                 name = role.name,
                                 url = url_for( 'group_role', group_id=group_id, id=role_id) )
            if not item:
                item = "role %s not in group %s" % ( role.name, group.name )
        except Exception as e:
            item = "Error in group_role API Removing role %s from group %s" % ( role.name, group.name )
            log.error( item + ": %s" % str(e) )
        return item
|
dknlght/dkodi | src/addons_xml_generator3.py | Python | gpl-2.0 | 8,998 | 0.005001 | #!/usr/bin/python
# *
# * Copyright (C) 2012-2013 Garrett Brown
# * Copyright (C) 2010 j48antialias
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with XBMC; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# * Based on code by tknorris:
# * https://offshoregit.com/tknorris/tknorris-release-repo/raw/master/addons_xml_generator2.py
# * Based on code by j48antialias:
# * https://anarchintosh-projects.googlecode.com/files/addons_xml_generator.py
# *
# * Changes since v2:
# * - (assumed) zips reside in folder "download"
# * - md5 checksum creation added for zips
# * - Skip moving files and zip creation if zip file for the same version already exists
# * - alphabetical sorting
""" addons.xml generator """
import os
import sys
import time
import re
import xml.etree.ElementTree as ET
try:
    import shutil, zipfile
except Exception as e:
    print('An error occurred importing module!\n%s\n' % e)
# Compatibility with 3.0, 3.1 and 3.2 not supporting u"" literals
print(sys.version)
if sys.version < '3':
    import codecs
    def u(x):
        # Python 2: decode escape sequences so u("...") behaves like a
        # unicode literal.
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        # Python 3: str is already unicode; identity function.
        return x
class Generator:
    """
    Generates a new addons.xml file from each addons addon.xml file
    and a new addons.xml.md5 hash file. Must be run from the root of
    the checked-out repo. Only handles single depth folder structure.
    """
    def __init__(self):
        """Build addons.xml and its md5 companion, then report completion."""
        # generate files
        self._generate_addons_file()
        self._generate_md5_file()
        # notify user
        print("Finished updating addons xml and md5 files\n")
    def _generate_addons_file(self):
        """Concatenate every addon's addon.xml into a single addons.xml."""
        # addon list
        addons = sorted(os.listdir("."))
        # final addons text
        addons_xml = u("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n")
        # loop thru and add each addons addon.xml file
        for addon in addons:
            try:
                # skip any file or .svn folder or .git folder
                if (not os.path.isdir(addon) or addon == ".svn" or addon == ".git" or addon == ".github" or addon == "download"): continue
                # create path
                _path = os.path.join(addon, "addon.xml")
                # split lines for stripping
                xml_lines = open(_path, "r").read().splitlines()
                # new addon
                addon_xml = ""
                # loop thru cleaning each line
                for line in xml_lines:
                    # skip encoding format line
                    if (line.find("<?xml") >= 0): continue
                    # add line (decode explicitly on Python 2)
                    if sys.version < '3':
                        addon_xml += unicode(line.rstrip() + "\n", "UTF-8")
                    else:
                        addon_xml += line.rstrip() + "\n"
                # we succeeded so add to our final addons.xml text
                addons_xml += addon_xml.rstrip() + "\n\n"
            except Exception as e:
                # missing or poorly formatted addon.xml: skip this addon
                print("Excluding %s for %s" % (_path, e))
        # clean and add closing tag
        addons_xml = addons_xml.strip() + u("\n</addons>\n")
        # save file
        self._save_file(addons_xml.encode("UTF-8"), file="addons.xml")
    def _generate_md5_file(self):
        """Write addons.xml.md5 containing the md5 hex digest of addons.xml."""
        # create a new md5 hash (md5 module on Python 2, hashlib on Python 3)
        try:
            import md5
            m = md5.new(open("addons.xml", "r").read()).hexdigest()
        except ImportError:
            import hashlib
            m = hashlib.md5(open("addons.xml", "r", encoding="UTF-8").read().encode("UTF-8")).hexdigest()
        # save file
        try:
            self._save_file(m.encode("UTF-8"), file="addons.xml.md5")
        except Exception as e:
            # oops
            print("An error occurred creating addons.xml.md5 file!\n%s" % e)
    def _save_file(self, data, file):
        """Write *data* (bytes) to *file*, reporting failures to stdout."""
        try:
            # write data to the file (use b for Python 3)
            open(file, "wb").write(data)
        except Exception as e:
            # oops
            print("An error occurred saving %s file!\n%s" % (file, e))
def zipfolder(foldername, target_dir, zips_dir, addon_dir):
zipobj = zipfile.ZipFile(zips_dir + foldername, 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
for base, dirs, files in os.walk(target_dir):
for f in files:
fn = os.path.join(base, f)
zipobj.write(fn, os.path.join(addon_dir, fn[rootlen:]))
zipobj.close()
if (__name__ == "__main__"):
# start
Generator()
# rezip files and move
try:
print('Starting zip file creation...')
rootdir = sys.path[0]
zipsdir = rootdir + os.sep + 'download'
filesinrootdir = sorted(os.listdir(rootdir))
for x in filesinrootdir:
if re.search("^(context|plugin|script|service|skin|repository|docker)" , x) and not re.search('.zip', x):
zipfilename = x + '.zip'
zipfilenamefirstpart = zipfilename[:-4]
zipfilenamelastpart = zipfilename[len(zipfilename) - 4:]
zipsfolder = os.path.normpath(os.path.join('download', x)) + os.sep
foldertozip = rootdir + os.sep + x
filesinfoldertozip = sorted(os.listdir(foldertozip))
# #check if download folder exists
if not os.path.exists(zipsfolder):
os.makedirs(zipsfolder)
print('Directory doesn\'t exist, creating: ' + zipsfolder)
# #get addon version number
if "addon.xml" in filesinfoldertozip:
tree = ET.parse(os.path.join(rootdir, x, "addon.xml"))
root = tree.getroot()
for elem in root.iter('addon'):
print('%s %s version: %s' % (x, elem.tag, elem.attrib['version']))
version = '-' + elem.attrib['version']
# # #check for existing zips
if not os.path.exists(zipsfolder + x + version + '.zip'):
# #check if and move addon, changelog, fanart and icon to zipdir
for y in filesinfoldertozip:
# print('processing file: ' + os.path.join(rootdir,x,y))
if re.search("addon|changelog|icon|fanart", y):
shutil.copyfile(os.path.join(rootdir, x, y), os.path.join(zipsfolder, y))
print('Copying %s to %s' % (y, zipsfolder))
# #check for and zip the folders
print('Zipping %s and moving to %s\n' % (x, zipsfolder))
try:
zipfolder(zipfilenamefirstpart + version + zipfilenamelastpart, foldertozip, zipsfolder, x)
print('zipped with zipfolder')
# # #create md5 checksum for zips
import hashlib
try:
m = hashlib.md5(open("%s" % (zipsfolder + x + version + '.zip'), "rb").read()).hexdigest()
open("%s" % (zipsfolder + x + version + '.zip.md5'), "wb").write(m.encode("UTF-8"))
print("zip.md5 file created\n")
except Exception as e:
print("An error occurred creating zip.md5 file!\n%s" % e) |
fab13n/caracole | floreal/signals/__init__.py | Python | mit | 97 | 0 | imp | ort | django.dispatch
task_generate_pre_save = django.dispatch.Signal(providing_args=["task"])
|
Maistho/CouchPotatoServer | couchpotato/core/_base/_core.py | Python | gpl-3.0 | 13,193 | 0.00523 | from uuid import uuid4
import os
import platform
import signal
import time
import traceback
import webbrowser
import sys
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder, compareVersions
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from tornado.ioloop import IOLoop
log = CPLog(__name__)
autoload = 'Core'
class Core(Plugin):
ignore_restart = [
'Core.restart', 'Core.shutdown',
'Updater.check', 'Updater.autoUpdate',
]
shutdown_started = False
def __init__(self):
addApiView('app.shutdown', self.shutdown, docs = {
'desc': 'Shutdown the app.',
'return': {'type': 'string: shutdown'}
})
addApiView('app.restart', self.restart, docs = {
'desc': 'Restart the app.',
'return': {'type': 'string: restart'}
})
addApiView('app.available', self.available, docs = {
'desc': 'Check if app available.'
})
addApiView('app.version', self.versionView, docs = {
'desc': 'Get version.'
})
addEvent('app.shutdown', self.shutdown)
addEvent('app.restart', self.restart)
addEvent('app.load', self.launchBrowser, priority = 1)
addEvent('app.base_url', self.createBaseUrl)
addEvent('app.api_url', self.createApiUrl)
addEvent('app.version', self.version)
addEvent('app.load', self.checkDataDir)
addEvent('app.load', self.cleanUpFolders)
addEvent('app.load.after', self.dependencies)
addEvent('setting.save.core.password', self.md5Password)
addEvent('setting.save.core.api_key', self.checkApikey)
# Make sure we can close-down with ctrl+c properly
if not Env.get('desktop'):
self.signalHandler()
# Set default urlopen timeout
import socket
socket.setdefaulttimeout(30)
# Don't check ssl by default
try:
if sys.version_info >= (2, 7, 9):
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except:
log.debug('Failed setting default ssl context: %s', traceback.format_exc())
def dependencies(self):
# Check if lxml is available
try: from lxml import etree
except: log.error('LXML not available, please install for better/faster scraping support: `http://lxml.de/installation.html`')
try:
import OpenSSL
v = OpenSSL.__version__
v_needed = '0.15'
if compareVersions(OpenSSL.__version__, v_needed) < 0:
log.error('OpenSSL installed but %s is needed while %s is installed. Run `pip install pyopenssl --upgrade`', (v_needed, v))
try:
import ssl
log.debug('OpenSSL detected: pyopenssl (%s) using OpenSSL (%s)', (v, ssl.OPENSSL_VERSION))
except:
pass
except:
log.error('OpenSSL not available, please install for better requests validation: `https://pyopenssl.readthedocs.org/en/latest/install.html`: %s', traceback.format_exc())
def md5Password(self, value):
return md5(value) if value else ''
def checkApikey(self, value):
return value if value and len(value) > 3 else uuid4().hex
def checkDataDir(self):
if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
log.error('You | should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
return True
def cleanUpFolders(self):
only_clean = ['couchpotato', 'libs', 'init']
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
def available(self, **kwargs):
return {
'success': True
}
def shutdown(self, **kwargs):
if self.shutdown_started:
return Fa | lse
def shutdown():
self.initShutdown()
if IOLoop.current()._closing:
shutdown()
else:
IOLoop.current().add_callback(shutdown)
return 'shutdown'
def restart(self, **kwargs):
if self.shutdown_started:
return False
def restart():
self.initShutdown(restart = True)
IOLoop.current().add_callback(restart)
return 'restarting'
def initShutdown(self, restart = False):
if self.shutdown_started:
log.info('Already shutting down')
return
log.info('Shutting down' if not restart else 'Restarting')
self.shutdown_started = True
fireEvent('app.do_shutdown', restart = restart)
log.debug('Every plugin got shutdown event')
loop = True
starttime = time.time()
while loop:
log.debug('Asking who is running')
still_running = fireEvent('plugin.running', merge = True)
log.debug('Still running: %s', still_running)
if len(still_running) == 0:
break
elif starttime < time.time() - 30: # Always force break after 30s wait
break
running = list(set(still_running) - set(self.ignore_restart))
if len(running) > 0:
log.info('Waiting on plugins to finish: %s', running)
else:
loop = False
time.sleep(1)
log.debug('Safe to shutdown/restart')
loop = IOLoop.current()
try:
if not loop._closing:
loop.stop()
except RuntimeError:
pass
except:
log.error('Failed shutting down the server: %s', traceback.format_exc())
fireEvent('app.after_shutdown', restart = restart)
def launchBrowser(self):
if Env.setting('launch_browser'):
log.info('Launching browser')
url = self.createBaseUrl()
try:
webbrowser.open(url, 2, 1)
except:
try:
webbrowser.open(url, 1, 1)
except:
log.error('Could not launch a browser.')
def createBaseUrl(self):
host = Env.setting('host')
if host == '0.0.0.0' or host == '':
host = 'localhost'
port = Env.setting('port')
ssl = Env.setting('ssl_cert') and Env.setting('ssl_key')
return '%s:%d%s' % (cleanHost(host, ssl = ssl).rstrip('/'), int(port), Env.get('web_base'))
def createApiUrl(self):
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self):
ver = fireEvent('updater.info', single = True) or {'version': {}}
if os.name == 'nt': platf = 'windows'
elif 'Darwin' in platform.platform(): platf = 'osx'
else: platf = 'linux'
return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown')
def versionView(self, **kwargs):
return {
'version': self.version()
}
def signalHandler(self):
if Env.get('daemonized'): return
def signal_handler(*args, **kwargs):
fireEvent('app.shutdown', single = True)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
config = [{
'name': 'core',
'order': 1,
'groups': [
{
'tab': 'general',
'name': 'basics',
'description': 'Needs restart before changes take effect.',
'wizard': True,
'options': [
{
'name': 'username',
'default': '',
'ui-meta' : 'rw',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'port',
|
briandrawert/molnsutil | run_ensemble_map_aggregate.py | Python | gpl-3.0 | 3,303 | 0.002725 | import pickle
def run_ensemble_map_and_aggregate(model_class, parameters, param_set_id, seed_base, number_of_trajectories, mapper,
aggregator=None, cluster_import=False):
""" Generate an ensemble, then run the mappers are aggregator. This will not store the results. """
if cluster_import is True:
from molnsutil.molns_exceptions import MolnsUtilException
from molnsutil.utils import builtin_aggregator_list_append, create_model
else:
from molns_exceptions import MolnsUtilException
from utils import builtin_aggregator_list_append, create_model
if aggregator is None:
aggregator = builtin_aggregator_list_append
# Create the model
model = create_model(model_class, parameters)
# Run the solver
res = None
num_processed = 0
results = model.run(seed=seed_base, number_of_trajectories=number_of_trajectories)
if not isinstance(results, list):
results = [results]
for result in results:
try:
mapres = mapper(result)
res = aggregator(mapres, res)
num_processed += 1
except Exception as e:
import traceback
notes = "Error running mapper and aggregator, caught | {0}: {1}\n".format(type(e), e)
notes += "type(mapper) = {0}\n".format(type(mapper))
notes += "type(aggregator) = {0}\n".format(type(aggregator))
notes += "dir={0}\n{1}".format(dir(), traceback.format_exc())
raise MolnsUtilException(notes)
return {'result': res, 'param_set_id': param_set_ | id, 'num_successful': num_processed,
'num_failed': number_of_trajectories - num_processed}
if __name__ == "__main__":
try:
import molnsutil.constants as constants
import molnsutil.molns_cloudpickle as cloudpickle
with open(constants.job_input_file_name, "rb") as inp:
unpickled_list = pickle.load(inp)
num_of_trajectories = unpickled_list['chunk_size']
seed = unpickled_list['seed']
params = unpickled_list['pset']
param_set_id_ = unpickled_list['pndx']
if not unpickled_list.get('model_class', False):
with open(constants.pickled_cluster_input_file, "rb") as inp:
unpickled_cluster_input = pickle.load(inp)
model_cls = unpickled_cluster_input['model_class']
mapper_fn = unpickled_cluster_input['mapper']
aggregator_fn = unpickled_cluster_input['aggregator']
else:
model_cls = unpickled_list['model_class']
mapper_fn = unpickled_list['mapper']
aggregator_fn = unpickled_list['aggregator']
result = run_ensemble_map_and_aggregate(model_class=model_cls, parameters=params, param_set_id=param_set_id_,
seed_base=seed, number_of_trajectories=num_of_trajectories,
mapper=mapper_fn, aggregator=aggregator_fn, cluster_import=True)
with open(constants.job_output_file_name, "wb") as output:
cloudpickle.dump(result, output)
except Exception as errors:
with open(constants.job_error_file_name, "wb") as error:
error.write(str(errors))
|
jainaman224/zenodo | tests/unit/deposit/test_api_buckets.py | Python | gpl-2.0 | 9,093 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test Zenodo deposit REST API."""
from __future__ import absolute_import, print_function
import json
from invenio_search import current_search
from six import BytesIO
def test_bucket_create_delete(api_client, deposit, json_auth_headers,
deposit_url, get_json, license_record,
auth_headers, minimal_deposit):
"""Test bucket creation/deletion of bucket with each deposit."""
client = api_client
headers = json_auth_headers
auth = auth_headers
# Create deposit
res = client.post(
deposit_url, data=json.dumps(minimal_deposit), headers=headers)
links = get_json(res, code=201)['links']
current_search.flush_and_refresh(index='deposits')
# Assert bucket was created and accessible
assert 'bucket' in links
res = client.get(links['bucket'], headers=auth)
assert res.status_code == 200
res = client.get(links['bucket'])
assert res.status_code == 404
# Upload object via files-rest.
object_url = links['bucket'] + '/viafilesrest'
res = client.put(
object_url,
input_stream=BytesIO(b'viafilesrest'),
headers=auth,
)
assert res.status_code == 200
# Get object via files-rest
res = client.get(object_url, headers=auth)
assert res.status_code == 200
# List files in deposit.
res = client.get(links['self'], headers=headers)
data = get_json(res, code=200)
assert len(data['files']) == 1
# Get file via deposit.
res = client.get(data['files'][0]['links']['self'], headers=headers)
data = get_json(res, code=200)
# Delete deposit
res = client.delete(links['self'], headers=auth)
assert res.status_code == 204
# Assert bucket no longer exists
res = client.get(links['bucket'], headers=auth)
assert res.status_code == 404
res = client.get(object_url, headers=auth)
assert res.status_code == 404
def test_bucket_create_publish(api_client, deposit, json_auth_headers,
deposit_url, get_json, license_record,
auth_headers, minimal_deposit):
"""Test bucket features on deposit publish."""
client = api_client
headers = json_auth_headers
auth = auth_headers
# Create deposit
res = client.post(
deposit_url, data=json.dumps(minimal_deposit), headers=headers)
links = get_json(res, code=201)['links']
current_search.flush_and_refresh(index='deposits')
# Upload file
res = client.put(
links['bucket'] + '/test.txt',
input_stream=BytesIO(b'testfile'),
headers=auth,
)
assert res.status_code == 200
# Publish deposit
res = client.post(links['publish'], headers=auth)
data = get_json(res, code=202)
# Bucket should be locked.
res = client.put(
links['bucket'] + '/newfile.txt',
input_stream=BytesIO(b'testfile'),
headers=auth,
)
assert res.status_code == 403
# Get deposit.
res = client.get(links['self'], headers=auth)
assert res.status_code == 200
# Get record.
res = client.get(data['links']['record'])
data = get_json(res, code=200)
# Assert record and deposit bucket is not identical.
assert data['links']['bucket'] != links['bucket']
# Get record bucket.
res = client.get(data['links']['bucket'])
assert res.status_code == 200
# Get file in bucket.
res = client.get(data['links']['bucket'] + '/test.txt')
assert res.status_code == 200
# Record bucket is also locked.
res = client.put(
data['links']['bucket'] + '/newfile.txt',
input_stream=BytesIO(b'testfile'),
headers=auth,
)
assert res.status_code == 404
# Delete deposit not allowed
res = client.delete(links['self'], headers=auth)
assert res.status_code == 403
def test_bucket_new_version(api_client, deposit, json_auth_headers,
deposit_url, get_json, license_record,
auth_headers, minimal_deposit):
"""Test bucket features on record new version."""
client = api_client
headers = json_auth_headers
auth = auth_headers
# Create deposit
res = client.post(
deposit_url, data=json.dumps(minimal_deposit), headers=headers)
links = get_json(res, code=201)['links']
current_search.flush_and_refresh(index='deposits')
# Upload file
res = client.put(
links['bucket'] + '/test.txt',
input_stream=BytesIO(b'testfile'),
headers=auth,
)
assert res.status_code == 200
# Publish deposit
res = client.post(links['publish'], headers=auth)
data = get_json(res, code=202)
# Get record
res = client.get(data['links']['record'])
data = get_json(res, code=200)
rec_v1_bucket = data['links']['bucket']
# Get deposit
res = client.get(links['self'], headers=auth)
links = get_json(res, code=200)['links']
dep_v1_bucket = links['bucket']
# Create new version
res = client.post(links['newversion'], headers=auth)
data = get_json(res, code=201)
# Get new version deposit
res = client.get(data['links']['latest_draft'], headers=auth)
data = get_json(res, code=200)
dep_v2_publish = data['links']['publish']
dep_v2_bucket = data['links']['bucket']
# Assert t | hat all the buckets are different
assert len(set([rec_v1_bucket, dep_v1_bucket, dep_v2_bucket])) == 3
# Get file from old version deposit bucket
res = client.get(dep_v1_bucket + '/test.txt', headers=auth)
dep_v1_file_data = res.get_data(as_text=True)
# Get file from old version record bucket
res = client.get(rec_v1_bucket + '/test.txt')
rec_v1_file_data = res.get_data(as_text=True)
# Get file from new v | ersion deposit bucket
res = client.get(dep_v2_bucket + '/test.txt', headers=auth)
dep_v2_file_data = res.get_data(as_text=True)
# Assert that the file is the same in the new version
assert rec_v1_file_data == dep_v1_file_data == dep_v2_file_data
# Record bucket is unlocked.
res = client.put(
dep_v2_bucket + '/newfile.txt',
input_stream=BytesIO(b'testfile2'),
headers=auth,
)
assert res.status_code == 200
# Deleting files in new version deposit bucket is allowed
res = client.delete(dep_v2_bucket + '/newfile.txt', headers=auth)
assert res.status_code == 204
# Try to publish the new version
# Should fail (400), since the bucket contents is the same
res = client.post(dep_v2_publish, headers=auth)
data = get_json(res, code=400)
# Add another file, so that the bucket has a different content
res = client.put(
dep_v2_bucket + '/newfile2.txt',
input_stream=BytesIO(b'testfile3'),
headers=auth,
)
assert res.status_code == 200
# Publish new version deposit
res = client.post(dep_v2_publish, headers=auth)
data = get_json(res, code=202)
# Get record
res = client.get(data['links']['record'])
data = get_json(res, code=200)
rec_v2_bucket = data['links']['bucket']
# Assert that all the buckets are different
assert len(set(
[rec_v1_bucket, |
bitcraft/pyglet | contrib/experimental/buffer/bars.py | Python | bsd-3-clause | 2,705 | 0 | #!/usr/bin/python
# $Id:$
import random
import sys
from pyglet.gl import *
from pyglet import clock
from pyglet import font
from pyglet import graphics
from pyglet import window
BARS = 100
if len(sys.argv) > 1:
BARS = int(sys.argv[1])
MIN_BAR_LENGTH = 4
MAX_BAR_LENGTH = 100
BAR_SEGMENT_HEIGHT = 10
UPDATE_PERIOD = 0.01
win = window.Window(vsync=False)
batch = graphics.Batch()
bars = list()
colors = [
[170, 0, 0],
[0, 255, 100],
[80, 100, 255],
[40, 180, 180],
[200, 255, 100],
[255, 70, 200],
]
def create_bars():
width = win.width / float(BARS)
for i in range(BARS):
position = [i * width, 0, # degenerate
i * width, 0,
(i + 1) * width, 0,
(i + 1) * width, 0 # degenerate
]
color = colors[i % len(colors)] * 4
bar = batch.add(4, GL_TRIANGLE_STRIP, None,
('v2f/dynamic', position),
('c3B/dynamic', color))
bars.append(bar)
def update_bars():
for bar in bars:
old_length = bar.count
length = random.randint(MIN_BAR_LENGTH, MAX_BAR_LENGTH)
bar.resize(length)
verti | ces = bar.vertices
# Update new vertices (overwrite old degenerate)
for i in range((old_length - 1) * 2, length * 2):
if i & 1: # y
vertices[i] = BAR_SEGMENT_HEIGHT * (i // 4)
else: # x
vertices[i] = vertices[i - 4]
# Update top degenerate (first degenerate is never modified)
vertices[-2:] = vertices[-4:-2]
# Update colors
if length > old_length:
| bar.colors[old_length * 3:length * 3] = \
bar.colors[:3] * (length - old_length)
stats_text = font.Text(font.load('', 12, bold=True), '',
x=win.width, y=0,
halign='right')
def update_stats(dt):
np = len(bars)
usage = bars[0].domain.allocator.get_usage()
fragmentation = bars[0].domain.allocator.get_fragmentation()
blocks = len(bars[0].domain.allocator.starts)
stats_text.text = \
'Bars: %d Blocks: %d Usage: %d%% Fragmentation: %d%%' % \
(np, blocks, usage * 100, fragmentation * 100)
clock.schedule_interval(update_stats, 1)
fps_text = clock.ClockDisplay(color=(1, 1, 1, 1))
create_bars()
update_time = 0.
while not win.has_exit:
win.dispatch_events()
dt = clock.tick()
dt = min(dt, 0.05)
update_time += dt
if update_time > UPDATE_PERIOD:
update_bars()
update_time -= UPDATE_PERIOD
win.clear()
batch.draw()
stats_text.draw()
fps_text.draw()
win.flip()
|
landlab/landlab | landlab/graph/structured_quad/__init__.py | Python | mit | 426 | 0 | from .dual_structured_quad import (
DualRectilinearGraph,
DualStructuredQuadGraph,
DualUniformRectilinearGraph | ,
)
from .structured_quad import (
RectilinearGraph,
StructuredQuadGraph,
UniformRectilinearGraph,
)
__all__ = [
"StructuredQuadGraph",
"RectilinearGraph",
"UniformRectilinearGraph",
"DualUniformRectilinearGraph",
"DualRectilinearGraph",
"DualStructuredQuadGra | ph",
]
|
apagac/cfme_tests | cfme/tests/services/test_provision_stack.py | Python | gpl-2.0 | 11,067 | 0.001626 | import fauxfactory
import pytest
from wait_for import wait_for
fr | om cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.openstack import OpenStackProvider |
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.services.myservice import MyService
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.conf import credentials
from cfme.utils.datafile import load_data_file
from cfme.utils.path import orchestration_path
pytestmark = [
pytest.mark.meta(server_roles='+automate'),
pytest.mark.ignore_stream('upstream'),
test_requirements.stack,
pytest.mark.tier(2),
pytest.mark.usefixtures("setup_provider_modscope"),
pytest.mark.provider([CloudProvider], selector=ONE_PER_TYPE,
required_fields=[['provisioning', 'stack_provisioning']],
scope='module'),
]
@pytest.fixture
def stack_data(appliance, provider, provisioning):
random_base = fauxfactory.gen_alphanumeric()
stackname = 'test{}'.format(random_base)
vm_name = 'test-{}'.format(random_base)
stack_timeout = '20'
if provider.one_of(AzureProvider):
try:
template = provider.data.templates.small_template
vm_user = credentials[template.creds].username
vm_password = credentials[template.creds].password
except AttributeError:
pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
stack_data = {
'stack_name': stackname,
'resource_group': provisioning.get('resource_group'),
'deploy_mode': provisioning.get('mode'),
'location': provisioning.get('region_api'),
'vmname': vm_name,
'vmuser': vm_user,
'vmpassword': vm_password,
'vmsize': provisioning.get('vm_size'),
'cloudnetwork': provisioning.get('cloud_network').split()[0],
'cloudsubnet': provisioning.get('cloud_subnet').split()[0]
}
elif provider.one_of(OpenStackProvider):
stack_prov = provisioning['stack_provisioning']
stack_data = {
'stack_name': stackname,
'key': stack_prov['key_name'],
'flavor': stack_prov['instance_type'],
'tenant_name': provisioning['cloud_tenant']
}
else:
stack_prov = provisioning['stack_provisioning']
stack_data = {
'stack_name': stackname,
'stack_timeout': stack_timeout,
'param_virtualMachineName': vm_name,
'param_KeyName': stack_prov['key_name']
}
return stack_data
@pytest.fixture
def dialog_name():
return 'dialog_{}'.format(fauxfactory.gen_alphanumeric())
@pytest.fixture
def template(appliance, provider, provisioning, dialog_name, stack):
template_group = provisioning['stack_provisioning']['template_type']
template_type = provisioning['stack_provisioning']['template_type_dd']
template_name = fauxfactory.gen_alphanumeric()
file = provisioning['stack_provisioning']['data_file']
data_file = load_data_file(str(orchestration_path.join(file)))
content = data_file.read().replace('CFMETemplateName', template_name)
collection = appliance.collections.orchestration_templates
template = collection.create(template_group=template_group, template_name=template_name,
template_type=template_type, description="my template",
content=content)
template.create_service_dialog_from_template(dialog_name)
yield template
if stack.exists:
stack.retire_stack()
if template.exists:
template.delete()
@pytest.fixture
def catalog(appliance):
cat_name = "cat_{}".format(fauxfactory.gen_alphanumeric())
catalog = appliance.collections.catalogs.create(name=cat_name, description="my catalog")
yield catalog
if catalog.exists:
catalog.delete()
@pytest.fixture
def catalog_item(appliance, dialog, catalog, template, provider, dialog_name):
item_name = fauxfactory.gen_alphanumeric()
catalog_item = appliance.collections.catalog_items.create(
appliance.collections.catalog_items.ORCHESTRATION,
name=item_name,
description="my catalog",
display_in=True,
catalog=catalog,
dialog=dialog_name,
orch_template=template,
provider_name=provider.name,
)
yield catalog_item
if catalog_item.exists:
catalog_item.delete()
@pytest.fixture
def service_catalogs(appliance, catalog_item, stack_data):
return ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name, stack_data)
@pytest.fixture
def stack(appliance, provider, stack_data):
return appliance.collections.cloud_stacks.instantiate(stack_data['stack_name'],
provider=provider)
@pytest.fixture
def order_stack(appliance, request, service_catalogs, stack):
"""Fixture which prepares provisioned stack"""
provision_request = service_catalogs.order()
provision_request.wait_for_request(method='ui')
request.addfinalizer(lambda: _cleanup(appliance, provision_request))
assert provision_request.is_succeeded()
stack.wait_for_exists()
return provision_request, stack
def _cleanup(appliance=None, provision_request=None, service=None):
if not service:
last_message = provision_request.get_request_row_from_ui()['Last Message'].text
service_name = last_message.split()[2].strip('[]')
myservice = MyService(appliance, service_name)
else:
myservice = service
if myservice.exists:
myservice.delete()
def test_provision_stack(order_stack):
"""Tests stack provisioning
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
initialEstimate: 1/3h
casecomponent: Provisioning
"""
provision_request, stack = order_stack
assert provision_request.is_succeeded()
def test_reconfigure_service(appliance, service_catalogs, request):
"""Tests service reconfiguring
Metadata:
test_flag: provision
Polarion:
assignee: nansari
initialEstimate: 1/4h
casecomponent: Services
tags: stack
"""
provision_request = service_catalogs.order()
provision_request.wait_for_request(method='ui')
last_message = provision_request.get_request_row_from_ui()['Last Message'].text
service_name = last_message.split()[2].strip('[]')
myservice = MyService(appliance, service_name)
request.addfinalizer(lambda: _cleanup(service=myservice))
assert provision_request.is_succeeded()
myservice.reconfigure_service()
@pytest.mark.uncollectif(lambda provider: provider.one_of(EC2Provider),
reason='EC2 locks template between Stack order and template removal')
def test_remove_non_read_only_orch_template(appliance, provider, template, service_catalogs,
request):
"""
Steps:
1. Order Service which uses Orchestration template
2. Try to remove this Orchestration template
Metadata:
test_flag: provision
Polarion:
assignee: nansari
initialEstimate: 1/4h
casecomponent: Services
tags: stack
"""
provision_request = service_catalogs.order()
request.addfinalizer(lambda: _cleanup(appliance, provision_request))
template.delete()
wait_for(lambda: provision_request.status == 'Error', timeout='5m')
assert not template.exists
@pytest.mark.uncollectif(lambda provider: not provider.one_of(EC2Provider),
reason='Only EC2 locks orchestration template')
def test_remove_read_only_orch_template_neg(appliance, provider, template, service_catalogs,
request):
"""
For RHOS/Azure the original template will remain stand-alone whil |
eevee/cocos2d-mirror | test/test_callfunc.py | Python | bsd-3-clause | 1,086 | 0.02302 | # This code is so you can run the samples without ins | talling the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 1.1, s, q"
tags = "CallFunc, visible"
import cocos
from cocos.director import director
from cocos.actions import CallFunc, Delay
from cocos.sprite import Sprite
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.ge | t_window_size()
self.sprite = Sprite( 'grossini.png', (x/2, y/2) )
self.sprite.visible = False
self.add( self.sprite )
def make_visible( sp ):
sp.visible = True
self.sprite.do( Delay(1) + CallFunc( make_visible, self.sprite ) )
description = """Sprite grossini starts invisible, after 1 second will turn
visible thanks to action CallFunc
"""
def main():
print description
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
|
gongbudaizhe/bilib | demos/hash_it_out/solution.py | Python | mit | 541 | 0.007394 | def compute_single_digest(single_message, last_message):
return (129 * single_message ^ last_message) % 256
def | reverse_single_digest(single_digest, last_message):
for i in xrange(256):
if single_digest == compute_single_digest(i, last_message):
return i
def answer(x):
last_message = 0
message = []
for single_digest in x:
single_message = reverse_single_digest(single_digest, last_message)
last_message = single_message
message.append(single_message)
return message | |
adrianco/cassandgo | aws.py | Python | apache-2.0 | 5,522 | 0.046903 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys,logging,socket,collections,boto,boto.ec2
# Init
conf_HVM = [ # DataStax AMI in all regions
{'region':'us-east','zone':'1','ami':'ami-ada2b6c4','sg':'SG-Cassandra-us-east-1','key':'Key-us-east-1'},
{'region':'us-west','zone':'1','ami':'ami-3cf7c979','sg':'SG-Cassandra-us-west-1','key':'Key-us-west-1'},
{'region':'us-west','zone':'2','ami':'ami-1cff962c','sg':'SG-Cassandra-us-west-2','key':'Key-us-west-2'},
{'region':'eu-west','zone':'1','ami':'ami-7f33cd08','sg':'SG-Cassandra-eu-west-1','key':'Key-eu-west-1'},
{'region':'ap-southeast','zone':'1','ami':'ami-b47828e6','sg':'SG-Cassandra-ap-southeast-1','key':'Key-ap-southeast-1'},
{'region':'ap-southeast','zone':'2','ami':'ami-55d54d6f','sg':'SG-Cassandra-ap-southeast-2','key':'Key-ap-southeast-2'},
{'region':'ap-northeast','zone':'1','ami':'ami-714a3770','sg':'SG-Cassandra-ap-northeast-1','key':'Key-ap-northeast-1'},
{'region':'sa-east','zone':'1','ami':'ami-1dda7800','sg':'SG-Cassandra-sa-east-1','key':'Key-sa-east-1'}
]
keysDir = 'keys' # Directory for saving key pairs
SecurityGroupRule = collections.namedtuple("SecurityGroupRule", ["ip_protocol", "from_port", "to_port", "cidr_ip", "src_group_name"])
CASSANDRA_RULES = [
SecurityGroupRule("tcp", "22", "22", "0.0.0.0/0", None),
SecurityGroupRule("tcp", "8888", "8888", "0.0.0.0/0", None),
SecurityGroupRule("tcp", "9042", "9042", "0.0.0.0/0", None),
SecurityGroupRule("tcp", "9160", "9160", "0.0.0.0/0", None),
SecurityGroupRule("tcp", "7000", "7001", "0.0.0.0/0", None),
SecurityGroupRule("tcp", "1024", "65535", "0.0.0.0/0", "SG"),
SecurityGroupRule("tcp", "7199", "7199", "0.0.0.0/0", "SG"),
SecurityGroupRule("tcp", "61621", "61621", "0.0.0.0/0", "SG"),
SecurityGroupRule("tcp", "61620", "61620", "0.0.0.0/0", None),
SecurityGroupRule("icmp", "-1", "-1", "0.0.0.0/0", None),
]
def logError(msg):
    """Log *msg* at ERROR level and echo it to stdout (Python 2 prints).

    NOTE(review): the second print assumes an exception is currently being
    handled; outside an ``except`` block ``sys.exc_info()[-1]`` is ``None``
    and this raises AttributeError -- confirm callers only use it from
    exception handlers.
    """
    logging.error(msg)
    print "ERROR :",msg
    print 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno)
def checkInstance(instance, timeout=5.0):
    """
    Check if an instance is up and running and responding to ssh request.

    :param instance: boto EC2 instance; only its ``ip_address`` is read.
    :param timeout: seconds to wait for the TCP handshake on port 22.
        The original code used the OS default, which can block for
        minutes on an unreachable host.
    :returns: True when a TCP connection to port 22 succeeds.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((instance.ip_address, 22))
        return True
    except socket.error:
        # Includes socket.timeout: host not (yet) reachable.
        return False
    finally:
        # BUG FIX: always release the socket, even on unexpected errors;
        # the original leaked it if connect() raised something else.
        s.close()
def terminateInstance(region,zone,instance_id):
    """
    Terminate an ec2 instance.

    :param region: region name without the zone suffix (e.g. 'us-east')
    :param zone: zone digit as a string (e.g. '1')
    :param instance_id: EC2 instance id to terminate
    :returns: True on success, False after logging any boto error.
    """
    try:
        ec2 = boto.ec2.connect_to_region(region+'-'+zone)
        ec2.terminate_instances(instance_ids=[instance_id])
        return True
    except Exception as e:
        logError(e)
        return False
def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):
    """
    Create a new EC2 instance with specific parameters.
    SecurityGroup (sg) and KeyPair (key) have to be previously created
    (see cassandgo initSG and cassandgo initKP).

    :param ec2: open boto EC2 connection
    :param ami: AMI id to launch
    :param nb_nodes: number of instances (used as both min and max count)
    :param placement: availability zone
    :param instance_type: EC2 instance type string
    :param key: key pair name
    :param sg: security group name
    :param user_data: optional cloud-init/user-data payload
    :returns: the first instance of the reservation (even when nb_nodes > 1).
    """
    reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)
    instance = reservation.instances[0]
    return instance
def createSG(ec2,name,rules):
    """
    Create a new SecurityGroup named *name* and authorize *rules* on it.
    An existing group with the same name is deleted first.

    :param ec2: open boto EC2 connection
    :param name: security group name
    :param rules: iterable of SecurityGroupRule namedtuples
    :returns: True on success, False (after logError) when creation failed.
    """
    # check if the security group exists
    group = None
    sgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]
    if sgGroups:
        group = sgGroups[0]
        # NOTE(review): group_id is given the SecurityGroup *object*, not its
        # id string, and both name and group_id are passed -- verify against
        # the boto API that this deletes the intended group.
        ec2.delete_security_group(name=name, group_id=group)
    print "Creating %s Security Group" % name
    group = ec2.create_security_group(name, 'group for %s' % name)
    if group:
        # Set the inbound rules
        for rule in rules:
            if rule.src_group_name:
                # Self-referencing rule: allow traffic from members of this group.
                group.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)
            else:
                group.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)
        return True
    else:
        logError('Error during '+name+' Security Group update')
        return False
def getInstancesRegionZone(region,zone):
    """Return every EC2 instance in ``region-zone`` as a flat list,
    or None (after logging) when the boto call fails."""
    try:
        conn = boto.ec2.connect_to_region(region + '-' + zone)
        reservations = conn.get_all_instances()
        # Flatten the reservation -> instances hierarchy into one list.
        return [inst for res in reservations for inst in res.instances]
    except Exception as e:
        logError(e)
        return None
def listInstancesRegionZone(region,zone):
    """
    List all instances for a specific region and zone, printed one per
    line as: [launch-index] public-ip (private-ip) type key=keyname.
    Prints nothing (beyond the banner) when the lookup failed.
    """
    print "-"*80
    print "# Region :",region," Zone", zone
    print "-"*80
    instances = getInstancesRegionZone(region,zone)
    if instances:
        for instance in instances:
            print "[",instance.ami_launch_index,"]",instance.ip_address," (",instance.private_ip_address,") ",instance.instance_type," key=",instance.key_name
def createAllSG():
    """Create the Cassandra security group in every configured region/zone."""
    for cfg in conf_HVM:
        region_zone = cfg['region'] + '-' + cfg['zone']
        conn = boto.ec2.connect_to_region(region_zone)
        createSG(conn, 'SG-Cassandra-' + region_zone, CASSANDRA_RULES)
def createAllKP():
    """
    Create all key pairs in all regions.

    For each configured region/zone: remove any stale local .pem file,
    delete a pre-existing remote key pair of the same name, create a fresh
    one and save its private key under ``keysDir``.
    """
    if not os.path.exists(keysDir):
        os.makedirs(keysDir)
    for info in conf_HVM:
        keyName = 'Key-'+info['region']+'-'+info['zone']
        try:
            # Drop the stale local private key, if any.
            os.remove(keysDir+'/'+keyName+'.pem')
        except OSError:
            pass
        print "Key creation :",keyName
        ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])
        # check if the key pair exists
        kps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]
        if kps:
            ec2.delete_key_pair(keyName)
        key = ec2.create_key_pair(keyName)
        key.save(keysDir)
|
SeanEstey/Bravo | app/notify/sms_announce.py | Python | gpl-2.0 | 2,703 | 0.012579 | '''app.notify.sms_announce'''
import os
import twilio
from flask import g, request
from datetime import datetime, date, time, timedelta
from dateutil.parser import parse
from pymongo.collection import ReturnDocument
from app import get_keys, colors as c
from app.main.etapestry import call, get_prim_phone, EtapError, get_query
from . import events, accounts, triggers, voice, sms
from logging import getLogger
log = getLogger(__name__)
#-------------------------------------------------------------------------------
def add_event():
    """Create a recorded-announcement SMS event from the posted form.

    Pulls the target accounts from an eTapestry query, registers the event
    and its voice/sms trigger, then queues one SMS notification per
    account.

    :returns: the new event id.
    :raises EtapError: when the eTapestry query cannot be retrieved.
    """
    log.debug(request.form.to_dict())

    try:
        accts = get_query(request.form['query_name'], category=request.form['query_category'])
    except Exception as e:
        msg = 'Failed to retrieve query "%s". Details: %s' % (request.form['query_name'], str(e))
        log.exception(msg)
        raise EtapError(msg)

    evnt_id = events.add(
        g.group,
        request.form['event_name'] or request.form['query_name'],
        parse(request.form['event_date']),
        'recorded_announcement')

    trig_id = triggers.add(
        evnt_id,
        'voice_sms',
        parse(request.form['notific_date']).date(),
        parse(request.form['notific_time']).time())

    event_date = parse(request.form['event_date']).date()

    for acct in accts:
        acct_id = accounts.add(
            g.group,
            evnt_id,
            acct['name'],
            phone=get_prim_phone(acct))

        sms.add(
            evnt_id,
            event_date,
            trig_id,
            acct_id,
            get_prim_phone(acct),
            # BUG FIX: this dict was split by a stray '}' before 'url',
            # which made the whole call a syntax error; 'url' belongs to
            # the same content descriptor.
            {'source': 'template',
             'template': 'sms/%s/announce.html' % g.group,
             'url': request.form['audio_url']},
            {'module': 'app.notify.voice_announce',
             'func': 'on_interact'})

    log.info('SMS announce event created')
    return evnt_id
#-------------------------------------------------------------------------------
def on_interact():
    """Handle a Twilio keypress callback for an announcement call.

    When the caller presses "1", record the digit on the notification
    document and build TwiML that replays the announcement audio and
    re-arms a one-digit gather.
    """
    if request.form.get('Digits') == '1':
        notific = g.db['notifics'].find_one_and_update({
            'tracking.sid': request.form['CallSid'],
          }, {
            '$set': {
                'tracking.digit': request.form['Digits']
            }},
            return_document=ReturnDocument.AFTER)

        # BUG FIX: this handler emits voice TwiML (<Play>/<Gather>), but
        # MessagingResponse was imported, which has neither verb and would
        # raise AttributeError on the first keypress.
        from twilio.twiml.voice_response import VoiceResponse

        response = VoiceResponse()
        response.play(notific['on_answer']['audio_url'], voice='alice')
        response.gather(
            num_digits=1,
            action="%s/notify/voice/play/interact.xml" % os.environ.get('BRV_HTTP_HOST'),
            method='POST')
        return response
|
airbnb/superset | superset/migrations/versions/67a6ac9b727b_update_spatial_params.py | Python | apache-2.0 | 1,978 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""update_spatial_params
Revision ID: 67a6ac9b727b
Revises: 4736ec66ce19
Create Date: 2017-12-08 08:19:21.148775
"""
import json
from alembic import op
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from superset import db
# revision identifiers, used by Alembic.
revision = "67a6ac9b727b"
down_revision = "4736ec66ce19"
Base = declarative_base()
class Slice(Base):
    """Minimal mapping of the ``slices`` table with only the columns this
    migration touches (migrations must not import application models)."""
    __tablename__ = "slices"
    id = Column(Integer, primary_key=True)  # chart primary key
    viz_type = Column(String(250))          # e.g. "deck_scatter"
    params = Column(Text)                   # JSON-encoded form data
def upgrade():
    """Fold the legacy flat ``latitude``/``longitude`` params of deck.gl
    charts into the newer combined ``spatial`` parameter."""
    bind = op.get_bind()
    session = db.Session(bind=bind)
    for slc in session.query(Slice).filter(Slice.viz_type.like("deck_%")):
        params = json.loads(slc.params)
        if params.get("latitude"):
            # Rewrite the two flat keys as one lat/long spatial descriptor.
            params["spatial"] = {
                "lonCol": params.get("longitude"),
                "latCol": params.get("latitude"),
                "type": "latlong",
            }
            del params["latitude"]
            del params["longitude"]
        # Re-serialize (a no-op round-trip for slices without latitude).
        slc.params = json.dumps(params)
        session.merge(slc)
    session.commit()
    session.close()
def downgrade():
    """Irreversible data migration: the legacy lat/long keys are not restored."""
    pass
|
ckaus/EpiPy | epipy/ui/view/plotwidget.py | Python | mit | 1,934 | 0 | # -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4 | \
import NavigationToolbar2QT as | NavigationToolbar
from matplotlib.backends.backend_qt4agg import \
FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class PlotWidget(QtGui.QWidget):
    """This class represents the plot view for plotting graphs.

    :returns: an instance of *PlotWidget*
    """

    def __init__(self):
        """Build the matplotlib figure/canvas pair plus a vertical
        navigation toolbar, laid out side by side."""
        QtGui.QWidget.__init__(self)
        self.figure = Figure(facecolor=(1, 1, 1), tight_layout=True)
        self.ax = self.figure.add_subplot(111)
        # Canvas
        self.canvas = FigureCanvas(self.figure)
        self.canvas.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                  QtGui.QSizePolicy.Expanding)
        self.canvas.updateGeometry()
        # Navigation
        self.navi_toolbar = NavigationToolbar(self.canvas, self)
        self.navi_toolbar.setOrientation(QtCore.Qt.Vertical)
        # Fixed width, otherwise the navigation bar resizes arbitrarily
        self.navi_toolbar.setFixedWidth(40)
        self.vbl = QtGui.QHBoxLayout()
        self.vbl.addWidget(self.navi_toolbar)
        self.vbl.addWidget(self.canvas)
        self.setLayout(self.vbl)

    def plot(self, x_data_1, y_data_1, x_data_2, y_data_2):
        """Plot given data on *PlotWidget*.

        Series 1 is drawn as blue triangles ("Input Data"), series 2 as a
        blue line ("Fitted Data"); the axes are cleared first.

        :param x_data_1: X-axis data for plot 1
        :type x_data_1: array_like
        :param y_data_1: Y-axis data for plot 1
        :type y_data_1: array_like
        :param x_data_2: X-axis data for plot 2
        :type x_data_2: array_like
        :param y_data_2: Y-axis data for plot 2
        :type y_data_2: array_like
        """
        self.ax.clear()
        self.ax.grid(True)
        self.ax.plot(x_data_1, y_data_1, 'b^', label='Input Data')
        self.ax.plot(x_data_2, y_data_2, 'b-', label='Fitted Data')
        self.ax.legend()
        self.canvas.draw()
|
John-Colvin/clFFT | src/scripts/perf/fftPerformanceTesting.py | Python | apache-2.0 | 11,827 | 0.006426 | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
import itertools
import re#gex
import subprocess
import os
import sys
from datetime import datetime
# Common data and functions for the performance suite
tableHeader = 'lengthx,lengthy,lengthz,batch,device,inlay,outlay,place,precision,label,GFLOPS'
class TestCombination:
    """One point in the FFT benchmark parameter space.

    All fields are kept as strings, matching the command-line oriented
    callers that build and print these combinations.
    """

    def __init__(self,
                 lengthx, lengthy, lengthz, batchsize,
                 device, inlayout, outlayout, placeness, precision,
                 label):
        self.x = lengthx
        self.y = lengthy
        self.z = lengthz
        self.batchsize = batchsize
        self.device = device
        self.inlayout = inlayout
        self.outlayout = outlayout
        self.placeness = placeness
        self.precision = precision
        self.label = label

    def __str__(self):
        dims = '{0}x{1}x{2}:{3}'.format(self.x, self.y, self.z, self.batchsize)
        layouts = '{0}/{1}'.format(self.inlayout, self.outlayout)
        fields = [dims, self.device, layouts, self.placeness, self.precision]
        return ', '.join(fields) + ' -- ' + self.label
class GraphPoint:
    """A single measured benchmark datapoint destined for plotting."""

    def __init__(self,
                 lengthx, lengthy, lengthz, batchsize,
                 precision, device, label,
                 gflops):
        self.x = lengthx
        self.y = lengthy
        self.z = lengthz
        self.batchsize = batchsize
        self.device = device
        self.label = label
        self.precision = precision
        self.gflops = gflops
        # Total points processed: product of the dims times the batch count.
        self.problemsize = str(int(self.x) * int(self.y) * int(self.z) * int(self.batchsize))

    def __str__(self):
        # ALL members must be represented here (x, y, z, batch, device, label, etc)
        head = '{0}x{1}x{2}:{3}'.format(self.x, self.y, self.z, self.batchsize)
        return '{0}, {1} precision, {2}, -- {3}; {4}'.format(
            head, self.precision, self.device, self.label, self.gflops)
class TableRow:
    """Pairs a TestCombination-like parameter object with its GFLOPS figure."""

    # parameters = class TestCombination instantiation
    def __init__(self, parameters, gflops):
        self.parameters = parameters
        self.gflops = gflops

    def __str__(self):
        return str(self.parameters) + '; ' + self.gflops
def transformDimension(x,y,z):
    """Return the dimensionality (1, 2 or 3) of an x*y*z FFT.

    A length of 1 means that dimension is unused.  BUG FIX: the original
    fell off the end and returned None for a degenerate 1x1x1 problem;
    treat that as 1-D.
    """
    if int(z) != 1:
        return 3
    elif int(y) != 1:
        return 2
    else:
        return 1
def executable(library):
    """Return the platform-specific client binary name for *library*
    ('clFFT', 'null' or 'cuFFT').  Exits the program on an unknown
    library name or operating system.

    NOTE(review): sys.platform is 'linux2' only on Python 2; on Python 3
    it is 'linux', which this code would reject.
    """
    if type(library) != str:
        print 'ERROR: expected library name to be a string'
        quit()
    if sys.platform != 'win32' and sys.platform != 'linux2':
        print 'ERROR: unknown operating system'
        quit()
    if library == 'clFFT' or library == 'null':
        if sys.platform == 'win32':
            exe = 'clFFT-client-2.5.0.exe'
        elif sys.platform == 'linux2':
            exe = 'clFFT-client-2.5.0'
    elif library == 'cuFFT':
        if sys.platform == 'win32':
            exe = 'cuFFT-Client.exe'
        elif sys.platform == 'linux2':
            exe = 'cuFFT-Client'
    else:
        print 'ERROR: unknown library -- cannot determine executable name'
        quit()
    return exe
def max_mem_available_in_bytes(exe, device):
    """Run ``exe -i device`` and return the device's MAX_MEM_ALLOC_SIZE
    in bytes, parsed from the first matching output line.

    Raises CalledProcessError when the client exits non-zero and
    IndexError when no MAX_MEM_ALLOC_SIZE line is present.
    """
    arguments = [exe, '-i', device]
    output = subprocess.check_output(arguments, stderr=subprocess.STDOUT)
    # BUG FIX (portability): itertools.ifilter exists only on Python 2;
    # the ifilter + islice pair was just a filtered copy, so a single
    # comprehension is equivalent and runs everywhere.
    matches = [line for line in output.split(os.linesep)
               if 'MAX_MEM_ALLOC_SIZE' in line]
    found = re.search(r'\d+', matches[0])
    return int(found.group(0))
def max_problem_size(exe, layout, precision, device):
    """Return the largest problem size (in points) to benchmark.

    NOTE(review): ``layout == 1 or layout == 1`` repeats the same test --
    almost certainly a typo for two *different* layouts; also layout is
    compared against int 1 here but against string '5' below.  Confirm the
    callers' argument type and intended layouts.
    """
    if layout == 1 or layout == 1:
        numbers_in_one_datapoint = 2
    if precision == 'single':
        bytes_in_one_number = 4
    elif precision == 'double':
        bytes_in_one_number = 8
    else:
        print 'max_problem_size(): unknown precision'
        quit()
    # The memory-based limit below is disabled; a fixed 2**25 points
    # (2**24 for layout '5') is used instead.
    max_problem_size = pow(2,25) #max_mem_available_in_bytes(exe, device)
    if layout == '5':
        max_problem_size = pow(2,24) #max_mem_available_in_bytes(exe, device)
    #max_problem_size=max_problem_size/ (numbers_in_one_datapoint * bytes_in_one_number)
    #max_problem_size = max_problem_size / 16
    return max_problem_size
def maxBatchSize(lengthx, lengthy, lengthz, layout, precision, exe, device):
    """Return (as a string) the largest batch count whose total size fits
    within max_problem_size() for this layout/precision."""
    problemSize = int(lengthx) * int(lengthy) * int(lengthz)
    # BUG FIX (portability): '/' is integer division only on Python 2;
    # use '//' so the batch count stays an integer on Python 3 too
    # (identical result for the Python 2 int operands used here).
    maxBatchSize = max_problem_size(exe, layout, precision, device) // problemSize
    return str(maxBatchSize)
def create_ini_file_if_requested(args):
    """When --createIniFilename was given, dump every scalar argument to
    that file as a '--name' line followed by its value line, then exit.

    The output format is what load_ini_file_if_requested() reads back.
    NOTE(review): ``file`` is the Python 2 builtin type; the check skips
    open file handles and any option whose name contains 'File'.
    """
    if args.createIniFilename:
        for x in vars(args):
            if (type(getattr(args,x)) != file) and x.count('File') == 0:
                args.createIniFilename.write('--' + x + os.linesep)
                args.createIniFilename.write(str(getattr(args,x)) + os.linesep)
        quit()
def load_ini_file_if_requested(args, parser):
    """If --useIniFilename was given, re-parse the arguments stored in
    that file (one token per line) and return the resulting namespace;
    otherwise return *args* unchanged."""
    if not args.useIniFilename:
        return args
    tokens = [line.strip() for line in args.useIniFilename.readlines()]
    return parser.parse_args(tokens)
def is_numeric_type(x):
    """True when x is exactly an int, long or float.

    NOTE(review): relies on the Python 2 ``long`` type and on exact type
    comparison (so bool and numeric subclasses are rejected) -- presumably
    intentional for parsed CLI values; verify before porting to Python 3.
    """
    return type(x) == int or type(x) == long or type(x) == float
def split_up_comma_delimited_lists(args):
    """Normalize every attribute of *args* into a list, in place.

    None -> [None]; a plain number -> [number]; a string -> split on
    commas.  Attributes of any other type are left untouched.

    BUG FIX: the numeric branch was corrupted by a stray ' | ' splice
    artifact (``[att | r]``), which made the module unparseable.
    """
    for x in vars(args):
        attr = getattr(args, x)
        if attr == None:
            setattr(args, x, [None])
        elif is_numeric_type(attr):
            setattr(args, x, [attr])
        elif type(attr) == str:
            setattr(args, x, attr.split(','))
    return args
class Range:
def __init__(self, ranges, defaultStep='+1'):
# we might be passed in a single value or a list of strings
# if we receive a single value, we want to feed it right back
if type(ranges) != list:
self.expanded = ranges
elif ranges[0] == None:
self.expanded = [None]
else:
| self.expanded = []
for thisRange in ranges:
thisRange = str(thisRange)
if re.search('^\+\d+$', thisRange):
self.expanded = self.expanded + [thisRange]
elif thisRange == 'max':
self.expanded = self.expanded + ['max']
else:
#elif thisRange != 'max':
if thisRange.count(':'):
self._stepAmount = thisRange.split(':')[1]
else:
self._stepAmount = defaultStep
thisRange = thisRange.split(':')[0]
if self._stepAmount.count('x'):
self._stepper = '_mult'
else:
self._stepper = '_add'
self._stepAmount = self._stepAmount.lstrip('+x')
self._stepAmount = int(self._stepAmount)
if thisRange.count('-'):
self.begin = int(thisRange.split('-')[0])
self.end = int(thisRange.split('-')[1])
else:
self.begin = int(thisRange.split('-')[0])
self.end = int(thisRange.split('-')[0])
self.current = self.begin
if self.begin == 0 and self._stepper == '_mult':
self.expanded = self.expanded + [0]
else:
while self.current <= self.e |
mfeit-internet2/pscheduler-dev | pscheduler-server/pscheduler-server/api-server/pschedulerapiserver/tests.py | Python | apache-2.0 | 4,144 | 0.003861 | #
# Test-Related Pages
#
import pscheduler
from pschedulerapiserver import application
from flask import request
from .dbcursor import dbcursor_query
from .json import *
from .response import *
#
# Tests
#
# All tests
@application.route("/tests", methods=['GET'])
def tests():
    """Return the JSON descriptions of all available tests, sorted by name."""
    return json_query("SELECT json FROM test"
                      " WHERE available ORDER BY name", [])
# Test <name>
@application.route("/tests/<name>", methods=['GET'])
def tests_name(name):
    """Return the JSON description of the single available test *name*."""
    return json_query("SELECT json FROM test"
                      " WHERE available AND name = %s",
                      [name], single=True)
# Derive a spec from command line arguments in 'arg'
@application.route("/tests/<name>/spec", methods=['GET'])
def tests_name_spec(name):
    """Derive a test spec from CLI-style arguments in the 'args' query
    parameter (JSON array) by invoking the test's cli-to-spec method.
    404 when the test is unknown, 400 on bad input."""
    cursor = dbcursor_query("SELECT EXISTS (SELECT * FROM test"
                            " WHERE available AND name = %s)",
                            [ name ])
    exists = cursor.fetchone()[0]
    cursor.close()
    if not exists:
        return not_found()

    try:
        args = arg_json('args')
    except ValueError as ex:
        return bad_request("JSON passed to 'args': %s " % (str(ex)))

    # Delegate the conversion to the test plugin itself.
    status, stdout, stderr = pscheduler.run_program(
        [ 'pscheduler', 'internal', 'invoke', 'test', name, 'cli-to-spec' ],
        stdin = pscheduler.json_dump(args),
        timeout=5
        )

    if status != 0:
        return bad_request(stderr)

    # The extra parse here makes 'pretty' work.
    returned_json = pscheduler.json_load(stdout)
    return ok_json(returned_json, sanitize=False)
# Test spec validation
@application.route("/tests/<name>/spec/is-valid", methods=['GET'])
def tests_name_spec_is_valid(name):
    """Validate the 'spec' query parameter against test *name* by invoking
    its spec-is-valid method.  404 for an unknown test, 400 when no spec
    was supplied, 500 when validation itself fails."""
    cursor = dbcursor_query(
        "SELECT EXISTS"
        " (SELECT * FROM test WHERE available AND name = %s)",
        [name])
    exists = cursor.fetchone()[0]
    cursor.close()
    if not exists:
        return not_found()

    spec = request.args.get('spec')
    if spec is None:
        return bad_request("No test spec provided")

    try:
        returncode, stdout, stderr = pscheduler.run_program(
            ["pscheduler", "internal", "invoke", "test",
             name, "spec-is-valid"],
            stdin=spec)
        if returncode != 0:
            return error("Unable to validate test spec: %s" % (stderr))
        validate_json = pscheduler.json_load(stdout, max_schema=1)
        return ok_json(validate_json)
    except Exception as ex:
        # Boundary handler: anything unexpected becomes a 500 response.
        return error("Unable to validate test spec: %s" % (str(ex)))
# Tools that can carry out test <name>
@application.route("/tests/<name>/tools", methods=['GET'])
def tests_name_tools(name):
    """List the tools able to run test *name* -- URLs by default, or each
    tool's JSON (with an added 'href') when the expanded form is asked for.

    BUG FIX: the comment above and this def line were corrupted by stray
    ' | ' splice artifacts, which made the module unparseable.

    TODO: Should probably 404 if the test doesn't exist.
    TODO: Is this used anywhere?
    """
    expanded = is_expanded()
    cursor = dbcursor_query("""
        SELECT
            tool.name,
            tool.json
        FROM
            tool
            JOIN tool_test ON tool_test.tool = tool.id
            JOIN test ON test.id = tool_test.test
        WHERE
            tool.available
            AND test.name = %s
        """, [name])
    result = []
    for row in cursor:
        url = root_url('tools/' + row[0])
        if not expanded:
            result.append(url)
            continue
        row[1]['href'] = url
        result.append(row[1])
    cursor.close()
    return json_response(result)
# Participants in a test spec
@application.route("/tests/<name>/participants", methods=['GET'])
def tests_name_participants(name):
    """Return the participant list for the test spec in the 'spec' query
    parameter, as computed by the test plugin's participants method."""
    spec = request.args.get('spec')
    if spec is None:
        return bad_request("No test spec provided")

    try:
        returncode, stdout, stderr = pscheduler.run_program(
            [ "pscheduler", "internal", "invoke", "test", name,
              "participants"],
            stdin = spec,
            )
    except KeyError:
        return bad_request("Invalid spec")
    except Exception as ex:
        # NOTE(review): the exception *object* is passed to bad_request();
        # confirm bad_request stringifies its argument.
        return bad_request(ex)

    if returncode != 0:
        return bad_request(stderr)

    # If this fails because of bad JSON, an exception will be thrown,
    # caught and logged.
    return json_response(pscheduler.json_load(stdout, max_schema=1))
|
matachi/subdownloader | languages/Languages.py | Python | gpl-3.0 | 6,237 | 0.018919 | #!/usr/bin/env python
# Copyright (c) 2010 SubDownloader Developers - See COPYING - GPLv3
import languages.autodetect_lang as autodetect_lang
import re
import os.path
import logging
log = logging.getLogger("subdownloader.languages.Languages")
import __builtin__
__builtin__._ = lambda x : x
# Table of supported languages: glibc locale, two-letter ISO639 code,
# OpenSubtitles three-letter SubLanguageID and a translatable display name.
# BUG FIX: the German and Indonesian entries were corrupted by stray ' | '
# splice artifacts; restored here.
LANGUAGES = [{'locale':'sq', 'ISO639': 'sq', 'SubLanguageID': 'alb', 'LanguageName': _('Albanian')},
            {'locale':'ar', 'ISO639': 'ar', 'SubLanguageID': 'ara', 'LanguageName': _('Arabic')},
            {'locale':'hy', 'ISO639': 'hy', 'SubLanguageID': 'arm', 'LanguageName': _('Armenian')},
            {'locale':'ms', 'ISO639': 'ms', 'SubLanguageID': 'may', 'LanguageName': _('Malay')},
            {'locale':'bs', 'ISO639': 'bs', 'SubLanguageID': 'bos', 'LanguageName': _('Bosnian')},
            {'locale':'bg', 'ISO639': 'bg', 'SubLanguageID': 'bul', 'LanguageName': _('Bulgarian')},
            {'locale':'ca', 'ISO639': 'ca', 'SubLanguageID': 'cat', 'LanguageName': _('Catalan')},
            {'locale':'eu', 'ISO639': 'eu', 'SubLanguageID': 'eus', 'LanguageName': _('Basque')},
            {'locale':'zh_CN', 'ISO639': 'zh', 'SubLanguageID': 'chi', 'LanguageName': _('Chinese (China)')},
            {'locale':'hr', 'ISO639': 'hr', 'SubLanguageID': 'hrv', 'LanguageName': _('Croatian')},
            {'locale':'cs', 'ISO639': 'cs', 'SubLanguageID': 'cze', 'LanguageName': _('Czech')},
            {'locale':'da', 'ISO639': 'da', 'SubLanguageID': 'dan', 'LanguageName': _('Danish')},
            {'locale':'nl', 'ISO639': 'nl', 'SubLanguageID': 'dut', 'LanguageName': _('Dutch')},
            {'locale':'en', 'ISO639': 'en', 'SubLanguageID': 'eng', 'LanguageName': _('English (US)')},
            {'locale':'en_GB', 'ISO639': 'en', 'SubLanguageID': 'bre', 'LanguageName': _('English (UK)')},
            {'locale':'eo', 'ISO639': 'eo', 'SubLanguageID': 'epo', 'LanguageName': _('Esperanto')},
            {'locale':'et', 'ISO639': 'et', 'SubLanguageID': 'est', 'LanguageName': _('Estonian')},
            {'locale':'fi', 'ISO639': 'fi', 'SubLanguageID': 'fin', 'LanguageName': _('Finnish')},
            {'locale':'fr', 'ISO639': 'fr', 'SubLanguageID': 'fre', 'LanguageName': _('French')},
            {'locale':'gl', 'ISO639': 'gl', 'SubLanguageID': 'glg', 'LanguageName': _('Galician')},
            {'locale':'ka', 'ISO639': 'ka', 'SubLanguageID': 'geo', 'LanguageName': _('Georgian')},
            {'locale':'de', 'ISO639': 'de', 'SubLanguageID': 'ger', 'LanguageName': _('German')},
            {'locale':'el', 'ISO639': 'el', 'SubLanguageID': 'ell', 'LanguageName': _('Greek')},
            {'locale':'he', 'ISO639': 'he', 'SubLanguageID': 'heb', 'LanguageName': _('Hebrew')},
            {'locale':'hu', 'ISO639': 'hu', 'SubLanguageID': 'hun', 'LanguageName': _('Hungarian')},
            {'locale':'id', 'ISO639': 'id', 'SubLanguageID': 'ind', 'LanguageName': _('Indonesian')},
            {'locale':'it', 'ISO639': 'it', 'SubLanguageID': 'ita', 'LanguageName': _('Italian')},
            {'locale':'ja', 'ISO639': 'ja', 'SubLanguageID': 'jpn', 'LanguageName': _('Japanese')},
            {'locale':'kk', 'ISO639': 'kk', 'SubLanguageID': 'kaz', 'LanguageName': _('Kazakh')},
            {'locale':'ko', 'ISO639': 'ko', 'SubLanguageID': 'kor', 'LanguageName': _('Korean')},
            {'locale':'lv', 'ISO639': 'lv', 'SubLanguageID': 'lav', 'LanguageName': _('Latvian')},
            {'locale':'lt', 'ISO639': 'lt', 'SubLanguageID': 'lit', 'LanguageName': _('Lithuanian')},
            {'locale':'lb', 'ISO639': 'lb', 'SubLanguageID': 'ltz', 'LanguageName': _('Luxembourgish')},
            {'locale':'mk', 'ISO639': 'mk', 'SubLanguageID': 'mac', 'LanguageName': _('Macedonian')},
            {'locale':'no', 'ISO639': 'no', 'SubLanguageID': 'nor', 'LanguageName': _('Norwegian')},
            {'locale':'fa', 'ISO639': 'fa', 'SubLanguageID': 'per', 'LanguageName': _('Persian')},
            {'locale':'pl', 'ISO639': 'pl', 'SubLanguageID': 'pol', 'LanguageName': _('Polish')},
            {'locale':'pt_PT', 'ISO639': 'pt', 'SubLanguageID': 'por', 'LanguageName': _('Portuguese (Portugal)')},
            {'locale':'pt_BR', 'ISO639': 'pb', 'SubLanguageID': 'pob', 'LanguageName': _('Portuguese (Brazil)')},
            {'locale':'ro', 'ISO639': 'ro', 'SubLanguageID': 'rum', 'LanguageName': _('Romanian')},
            {'locale':'ru', 'ISO639': 'ru', 'SubLanguageID': 'rus', 'LanguageName': _('Russian')},
            {'locale':'sr', 'ISO639': 'sr', 'SubLanguageID': 'scc', 'LanguageName': _('Serbian')},
            {'locale':'sk', 'ISO639': 'sk', 'SubLanguageID': 'slo', 'LanguageName': _('Slovak')},
            {'locale':'sl', 'ISO639': 'sl', 'SubLanguageID': 'slv', 'LanguageName': _('Slovenian')},
            {'locale':'es_ES', 'ISO639': 'es', 'SubLanguageID': 'spa', 'LanguageName': _('Spanish (Spain)')},
            {'locale':'sv', 'ISO639': 'sv', 'SubLanguageID': 'swe', 'LanguageName': _('Swedish')},
            {'locale':'th', 'ISO639': 'th', 'SubLanguageID': 'tha', 'LanguageName': _('Thai')},
            {'locale':'tr', 'ISO639': 'tr', 'SubLanguageID': 'tur', 'LanguageName': _('Turkish')},
            {'locale':'uk', 'ISO639': 'uk', 'SubLanguageID': 'ukr', 'LanguageName': _('Ukrainian')},
            {'locale':'vi', 'ISO639': 'vi', 'SubLanguageID': 'vie', 'LanguageName': _('Vietnamese')}]
def ListAll_xx():
    """Return the ISO639 two-letter codes of all supported languages."""
    return [lang['ISO639'] for lang in LANGUAGES]
def ListAll_xxx():
    """Return the SubLanguageID three-letter codes of all supported languages."""
    return [lang['SubLanguageID'] for lang in LANGUAGES]
def ListAll_locale():
    """Return the locale identifiers of all supported languages."""
    return [lang['locale'] for lang in LANGUAGES]
def ListAll_names():
    """Return the display names of all supported languages."""
    return [lang['LanguageName'] for lang in LANGUAGES]
def xx2xxx(xx):
    """Map an ISO639 code to its SubLanguageID (None when unknown)."""
    return next((lang['SubLanguageID'] for lang in LANGUAGES
                 if lang['ISO639'] == xx), None)
def xxx2xx(xxx):
    """Map a SubLanguageID to its ISO639 code (None when unknown)."""
    return next((lang['ISO639'] for lang in LANGUAGES
                 if lang['SubLanguageID'] == xxx), None)
def xxx2name(xxx):
    """Map a SubLanguageID to its display name (None when unknown)."""
    return next((lang['LanguageName'] for lang in LANGUAGES
                 if lang['SubLanguageID'] == xxx), None)
def locale2name(locale):
    """Map a locale identifier to its display name (None when unknown)."""
    return next((lang['LanguageName'] for lang in LANGUAGES
                 if lang['locale'] == locale), None)
def xx2name(xx):
    """Map an ISO639 code to its display name (None when unknown)."""
    return next((lang['LanguageName'] for lang in LANGUAGES
                 if lang['ISO639'] == xx), None)
def name2xx(name):
    """Map a display name (case-insensitive) to its ISO639 code, or None."""
    return next((lang['ISO639'] for lang in LANGUAGES
                 if lang['LanguageName'].lower() == name.lower()), None)
def name2xxx(name):
    """Map a display name (case-insensitive) to its SubLanguageID, or None."""
    return next((lang['SubLanguageID'] for lang in LANGUAGES
                 if lang['LanguageName'].lower() == name.lower()), None)
def CleanTagsFile(text):
    """Strip anything that looks like an HTML/XML tag from *text*."""
    return re.sub(r'<.*?>', '', text)
|
luxnovalabs/enjigo_door | web_interface/keyedcache/utils.py | Python | unlicense | 281 | 0.003559 | import types
def is_string_like(maybe):
    """Test value to see if it acts like a string.

    Returns 1/0 (legacy truth values) depending on whether *maybe*
    supports concatenation with an empty string.
    """
    try:
        maybe + ""
    except TypeError:
        return 0
    return 1
def is_list_or_tuple(maybe):
    """Return True when *maybe* is a list or a tuple (or a subclass).

    BUG FIX (portability): ``types.TupleType``/``types.ListType`` were
    removed in Python 3; on Python 2 they were mere aliases of
    ``tuple``/``list``, so using the builtins is behavior-identical.
    """
    return isinstance(maybe, (tuple, list))
|
erikrose/parsimonious | parsimonious/__init__.py | Python | mit | 421 | 0 | """Parsimonious's public API. | Import from here.
Things may move around in modules deeper than this one.
"""
from parsimonious.exceptions import (ParseError, IncompleteParseError,
VisitationError, UndefinedLabel,
| BadGrammar)
from parsimonious.grammar import Grammar, TokenGrammar
from parsimonious.nodes import NodeVisitor, VisitationError, rule
|
locationtech/geowave | python/src/main/python/pygw/store/cassandra/__init__.py | Python | apache-2.0 | 739 | 0.00406 | #
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copy | right
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
"""
This module contains classes specific to Cassandra data stores.
It contains the following import shortcuts:
```python
from pygw.store.c | assandra import CassandraOptions
```
"""
from .options import CassandraOptions
|
xkollar/spacewalk | client/rhel/rhn-client-tools/src/up2date_client/hardware.py | Python | gpl-2.0 | 29,568 | 0.005344 | #
# Copyright (c) 1999--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# This thing gets the hardware configuraion out of a system
"""Used to read hardware info from kudzu, /proc, etc"""
from socket import gethostname, getaddrinfo, AF_INET, AF_INET6
import socket
import re
import os
import sys
from up2date_client import config
from up2date_client import rhnserver
from rhn.i18n import ustr
try:
long
except NameError: # long is not defined in python3
long = int
try:
import ethtool
ethtool_present = True
except ImportError:
sys.stderr.write("Warning: information about network interfaces could not be retrieved on this platform.\n")
ethtool_present = False
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
# Python 3 translations don't have a ugettext method
if not hasattr(t, 'ugettext'):
t.ugettext = t.gettext
_ = t.ugettext
import dbus
import dmidecode
from up2date_client import up2dateLog
try: # F13 and EL6
from up2date_client.hardware_gudev import get_devices, get_computer_info
using_gudev = 1
except ImportError:
from up2date_client.hardware_hal import check_hal_dbus_status, get_hal_computer, read_hal
using_gudev = 0
# Some systems don't have the _locale module installed
try:
import locale
except ImportError:
locale = None
sys.path.append("/usr/share/rhsm")
try:
from subscription_manager.hwprobe import Hardware as SubManHardware
subscription_manager_available = True
except ImportError:
subscription_manager_available = False
# this does not change, we can cache it
_dmi_data = None
_dmi_not_available = 0
def dmi_warnings():
    """Return dmidecode's accumulated warnings, or None when this
    dmidecode build does not expose get_warnings()."""
    getter = getattr(dmidecode, 'get_warnings', None)
    if getter is None:
        return None
    return getter()
dmi_warn = dmi_warnings()
if dmi_warn:
dmidecode.clear_warnings()
log = up2dateLog.initLog()
log.log_debug("Warnings collected during dmidecode import: %s" % dmi_warn)
def _initialize_dmi_data():
    """Lazily create and cache the module-wide DMI XPath context.

    Returns the cached context, or None when DMI decoding is unavailable.
    _dmi_not_available is sticky so the hardware (which legitimately lacks
    DMI e.g. in a PV guest) is probed only once.
    """
    global _dmi_data, _dmi_not_available
    if _dmi_data is not None:
        return _dmi_data
    if _dmi_not_available:
        # A previous attempt failed; do not try again and again.
        return None
    dmixml = dmidecode.dmidecodeXML()
    dmixml.SetResultType(dmidecode.DMIXML_DOC)
    # Get all the DMI data and prepare a XPath context
    try:
        data = dmixml.QuerySection('all')
        dmi_warn = dmi_warnings()
        if dmi_warn:
            dmidecode.clear_warnings()
            log = up2dateLog.initLog()
            # BUG FIX: the format string had no %s placeholder, which
            # raises TypeError instead of logging the warnings.
            log.log_debug("dmidecode warnings: %s" % dmi_warn)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt.
        _dmi_not_available = 1
        dmi_warn = dmi_warnings()
        if dmi_warn:
            dmidecode.clear_warnings()
        return None
    _dmi_data = data.xpathNewContext()
    return _dmi_data
def get_dmi_data(path):
    """Fetch DMI data using the given XPath *path*.

    Returns the first match's text, or '' when DMI is unavailable or the
    path does not exist.  General helper; not intended for use outside of
    this module.
    """
    ctx = _initialize_dmi_data()
    if ctx is None:
        return ''
    nodes = ctx.xpathEval(path)
    return nodes[0].content if nodes else ''
def dmi_vendor():
    """ Return Vendor from dmidecode bios information.
        If this value could not be fetched, returns an empty string.
    """
    return get_dmi_data('/dmidecode/BIOSinfo/Vendor')
def dmi_system_uuid():
    """Return the system UUID from DMI, or '' when it cannot be fetched.

    Manually created guests may carry an empty UUID; dmidecode then marks
    the node with unavailable='1', which the XPath predicate filters out.
    """
    path = "/dmidecode/SystemInfo/SystemUUID[not(@unavailable='1')]"
    return get_dmi_data(path) or ''
def read_installinfo():
    """Parse /etc/sysconfig/installinfo into a dict.

    Lines are KEY=VALUE; values containing '=' are re-joined.  A 'class'
    key set to "INSTALLINFO" is always present in a non-empty result.
    Returns {} when the file is missing or unreadable.
    """
    if not os.access("/etc/sysconfig/installinfo", os.R_OK):
        return {}
    installdict = {'class': "INSTALLINFO"}
    # BUG FIX: the original leaked the file handle; close it deterministically.
    with open("/etc/sysconfig/installinfo", "r") as installinfo:
        for info in installinfo:
            if not len(info):
                continue
            vals = info.split('=')
            if len(vals) <= 1:
                continue
            key = vals[0].strip()
            installdict[key] = ''.join(vals[1:]).strip()
    return installdict
def cpu_count():
    """Return the number of CPUs present in the system.

    Beware that this can differ from the number of active CPUs
    (e.g. on the s390x architecture).
    """
    try:
        entries = os.listdir('/sys/devices/system/cpu/')
    except OSError:
        return 0
    cpu_pattern = re.compile(r"^cpu[0-9]+$")
    return sum(1 for name in entries if cpu_pattern.match(name))
# get the number of sockets available on this machine
def __get_number_sockets():
    """Return the number of physical CPU sockets, or None if undeterminable.

    Tries, in order: subscription-manager, lscpu, /proc/cpuinfo, dmidecode.
    Each probe is best-effort; failures fall through to the next one.
    """
    try:
        if subscription_manager_available:
            return SubManHardware().getCpuInfo()['cpu.cpu_socket(s)']
    # 'except Exception' (not bare 'except:') so SystemExit/KeyboardInterrupt
    # are not swallowed by the best-effort probes.
    except Exception:
        pass  # something went wrong, let's figure it out ourselves
    # Try lscpu command if available
    if os.access("/usr/bin/lscpu", os.X_OK):
        try:
            lines = os.popen("/usr/bin/lscpu -p").readlines()
            max_socket_index = -1
            for line in lines:
                if line.startswith('#'):
                    continue
                # 'lscpu -p' emits CSV rows: CPU,Core,Socket,Node,...
                socket_index = int(line.split(',')[2])
                if socket_index > max_socket_index:
                    max_socket_index = socket_index
            if max_socket_index > -1:
                # Socket indices are 0-based, hence +1.
                return 1 + max_socket_index
        except Exception:
            pass
    # Next try parsing /proc/cpuinfo
    if os.access("/proc/cpuinfo", os.R_OK):
        try:
            # Close the file deterministically (the original leaked the fd).
            with open("/proc/cpuinfo", 'r') as cpuinfo:
                lines = cpuinfo.readlines()
            socket_ids = set()
            for line in lines:
                if 'physical id' in line:
                    socket_ids.add(int(line.split(':')[1].strip()))
            if len(socket_ids) > 0:
                return len(socket_ids)
        except Exception:
            pass
    # Next try dmidecode
    if os.access("/usr/sbin/dmidecode", os.X_OK):
        try:
            lines = os.popen("/usr/sbin/dmidecode -t processor").readlines()
            count = 0
            for line in lines:
                # One 'Processor Information' section per socket.
                if 'Processor Information' in line:
                    count += 1
            if count > 0:
                return count
        except Exception:
            pass
    return None
# This has got to be one of the ugliest functions alive
def read_cpuinfo():
def get_entry(a, entry):
e = entry.lower()
if not e in a:
return ""
return a[e]
# read cpu list and return number of cpus and list as dictionary
def get_cpulist_as_dict(cpulist):
count = 0
tmpdict = {}
for cpu in cpulist.split("\n\n"):
if not len(cpu):
continue
count = count + 1
if count > 1:
break # no need to parse rest
for cpu_attr in cpu.split("\n"):
if not len(cpu_attr):
continue
vals = cpu_attr.split(":")
if len(vals) != 2:
# XXX: make at least some effort to recover t |
phiros/nepi | src/nepi/resources/linux/gretunnel.py | Python | gpl-3.0 | 3,088 | 0.006801 | #
# NEPI, a framework to manage network experiments
# Copyright (C) 2013 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.tunnel import LinuxTunnel
from nepi.util.sshfuncs import ProcStatus
from nepi.util.timefuncs import tnow, tdiffsec
import re
import socket
import time
import os
@clsinit_copy
class LinuxGRETunnel(LinuxTunnel):
    _rtype = "linux::GRETunnel"
    _help = "Constructs a tunnel between two Linux endpoints using a UDP connection "
    _backend = "linux"

    def log_message(self, msg):
        # Identify the tunnel by both endpoint hostnames in every log line.
        return " guid %d - GRE tunnel %s - %s - %s " % (self.guid,
                self.endpoint1.node.get("hostname"),
                self.endpoint2.node.get("hostname"),
                msg)

    def get_endpoints(self):
        """ Returns the list of RM that are endpoints to the tunnel
        """
        # Duck-typed test: any connected RM exposing 'gre_connect' is an endpoint.
        connected = []
        for guid in self.connections:
            rm = self.ec.get_resource(guid)
            if hasattr(rm, "gre_connect"):
                connected.append(rm)
        return connected

    def initiate_connection(self, endpoint, remote_endpoint):
        # Ask the endpoint to configure its side of the GRE tunnel and
        # return the resulting connection data.
        connection_run_home = self.run_home(endpoint)
        connection_app_home = self.app_home(endpoint)
        data = endpoint.gre_connect(remote_endpoint,
                connection_app_home,
                connection_run_home)
        return data

    def establish_connection(self, endpoint, remote_endpoint, data):
        # Nothing further to do once both sides are configured.
        pass

    def verify_connection(self, endpoint, remote_endpoint):
        """Ping the remote endpoint; raise RuntimeError on 100% packet loss."""
        remote_ip = socket.gethostbyname(remote_endpoint.node.get("hostname"))
        command = "ping -c 4 %s" % remote_ip
        (out, err), proc = endpoint.node.execute(command,
                blocking = True)
        # Raw string so '\d' is a regex escape, not a (deprecated) string escape.
        m = re.search(r"(\d+)% packet loss", str(out))
        if not m or int(m.groups()[0]) == 100:
            msg = " Error establishing GRE Tunnel"
            self.error(msg, out, err)
            # Call form instead of 'raise RuntimeError, msg', which is
            # Python-2-only syntax; this form works on both 2 and 3.
            raise RuntimeError(msg)

    def terminate_connection(self, endpoint, remote_endpoint):
        pass

    def check_state_connection(self):
        pass

    def valid_connection(self, guid):
        # TODO: Validate!
        return True
|
hebaishi/pybullet | pybullet.py | Python | mit | 3,446 | 0.017702 | #!/usr/bin/python
import os
import json
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
import urllib
import mimetypes
def push_note(title = "Untitled", body = "Sample body", token = ""):
    """Send a 'note' push through the Pushbullet v2 API.

    Returns the raw response body on success, or None on any failure.
    """
    note = {"type": "note", "title": title, "body": body}
    try:
        request = urllib2.Request("https://api.pushbullet.com/v2/pushes",
                                  data=json.dumps(note))
        request.add_header('Content-Type', 'application/json')
        request.add_header('Access-Token', token)
        return urllib2.urlopen(request).read()
    except:
        return None
def push_link(title = "Untitled", body = "Sample body", url = "http://www.google.co.uk", token = ""):
    """Send a 'link' push through the Pushbullet v2 API.

    Returns the raw response body on success, or None on any failure.
    """
    link = {"type": "link", "title": title, "body": body, "url": url}
    try:
        request = urllib2.Request("https://api.pushbullet.com/v2/pushes",
                                  data=json.dumps(link))
        request.add_header('Content-Type', 'application/json')
        request.add_header('Access-Token', token)
        return urllib2.urlopen(request).read()
    except:
        return None
def push_file(file_path = "", file_name = "", file_desc="", token = ""):
    """
    Push file using pushbullet.

    Three sequential API calls: request an upload URL, upload the file
    (multipart, via the third-party 'poster' package), then create the
    push referencing the uploaded file. Returns the final response body,
    or None if any step fails.
    """
    json_object = dict()
    json_object["file_name"] = file_name
    # Guess the MIME type from the path (pathname2url is Python-2 urllib).
    url = urllib.pathname2url(file_path)
    mime_type = mimetypes.guess_type(url)[0]
    json_object["mime_type"] = mime_type
    # Step 1: ask the API for an upload URL for this file.
    try:
        request = urllib2.Request("https://api.pushbullet.com/v2/upload-request", data = json.dumps(json_object) )
        request.add_header('Content-Type', 'application/json')
        request.add_header('Access-Token', token)
        response = urllib2.urlopen(request)
        upload_response = response.read()
    except:
        return None
    upload_data = json.loads(upload_response)
    # Step 2: multipart-upload the file contents to the returned URL.
    register_openers()
    try:
        datagen, headers = multipart_encode({"file": open(file_path)})
        upload_request = urllib2.Request(upload_data["upload_url"], datagen, headers)
        urllib2.urlopen(upload_request).read()
    except:
        return None
    # Step 3: create the actual push referencing the uploaded file.
    file_object = dict()
    file_object["type"] = "file"
    file_object["body"] = file_desc
    file_object["file_name"] = file_name
    file_object["file_type"] = mime_type
    file_object["file_url"] = upload_data["file_url"]
    try:
        push_request = urllib2.Request("https://api.pushbullet.com/v2/pushes", data = json.dumps(file_object) )
        push_request.add_header('Content-Type', 'application/json')
        push_request.add_header('Access-Token', token)
        push_response = urllib2.urlopen(push_request)
        return push_response.read()
    except:
        return None
def get_pushes(modified_after = 0, active = True, token = ""):
    """List pushbullet 'pushes'.

    Returns the raw response body, or None on any failure.
    """
    url = ("https://api.pushbullet.com/v2/pushes?"
           "modified_after=%s&active=%s" % (modified_after, active))
    try:
        request = urllib2.Request(url)
        request.add_header('Authorization', 'Bearer ' + token)
        return urllib2.urlopen(request).read()
    except:
        return None
|
leanrobot/contestsite | team/models.py | Python | gpl-3.0 | 1,210 | 0.019835 | from __future__ import absolute_import
import os
from datetime import timedelta,datetime
import logging
import sys
from django.db import models as DjangoModels
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.query import EmptyQuerySet
from pytz import timezone
from celery.contrib import rdb
from problems.models import Problem, ProblemResult
logging.basicConfig(filename="django.log", level=logging.CRITICAL)
class UserSettings(DjangoModels.Model):
    """Per-user contest settings plus scoring helpers."""
    user = DjangoModels.OneToOneField(User, primary_key=True)
    teamName = DjangoModels.CharField(max_length=30)

    def score(self):
        """Sum the possible scores of every problem this user solved correctly."""
        total = 0
        for problem in Problem.objects.all():
            if problem.getCorrectSolution(self.user):
                total += problem.possibleScore(self.user)
        return total

    def getCorrect(self):
        """Problems for which this user has a graded, successful result."""
        results = ProblemResult.objects.filter(user=self.user, graded=True, successful=True)
        return [result.problem for result in results]

    def getFailed(self):
        """Problems this user has failed."""
        return [problem for problem in Problem.objects.all() if problem.failed(self.user)]

    def __unicode__(self):
        return "%s's Settings" % (self.user,)
|
thusoy/blag | blag/migrations/versions/f7888bd46c75_add_hikes.py | Python | mit | 1,664 | 0.015625 | """add-hikes
Revision ID: f7888bd46c75
Revises: 820bb005f2c5
Create Date: 2017-02-16 07:36:06.108806
"""
# revision identifiers, used by Alembic.
revision = 'f7888bd46c75'
down_revision = 'fc92ba2ffd7f'
from alembic import op
import sqlalchemy as sa
import geoal | chemy2
def upgrade():
    """Create the hike_destination and hike tables."""
    ### commands auto generated by Alembic - please adjust! ###
    # Destination table first: 'hike' references it via foreign key.
    op.create_table('hike_destination',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('altitude', sa.Integer(), nullable=True),
    sa.Column('high_point_coord', geoalchemy2.types.Geometry(geometry_type='POINT'), nullable=False),
    sa.Column('is_summit', sa.Boolean(), server_default='t', nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text(u'now()'), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('hike',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('destination_id', sa.Integer(), nullable=False),
    sa.Column('datetime', sa.DateTime(), server_default=sa.text(u'now()'), nullable=False),
    sa.Column('method', sa.String(length=30), nullable=False),
    sa.Column('notes', sa.Text(), server_default='', nullable=False),
    # DB-level enum emulation for the travel method.
    sa.CheckConstraint(u"method in ('ski', 'foot', 'crampons', 'climb', 'via ferrata')"),
    sa.ForeignKeyConstraint(['destination_id'], [u'hike_destination.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the hike tables, reversing upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    # Drop 'hike' first: it holds the foreign key to 'hike_destination'.
    for table_name in ('hike', 'hike_destination'):
        op.drop_table(table_name)
    ### end Alembic commands ###
|
viktorTarasov/PyKMIP | kmip/tests/unit/core/misc/test_server_information.py | Python | apache-2.0 | 7,934 | 0 | # Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
# |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distribut | ed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import string_types
from testtools import TestCase
from kmip.core.misc import ServerInformation
from kmip.core.utils import BytearrayStream
class TestServerInformation(TestCase):
    """
    A test suite for the ServerInformation class.

    Exercises TTLV read/write round-trips, equality/inequality operators,
    repr() round-tripping through eval(), and str() formatting.
    """
    def setUp(self):
        super(TestServerInformation, self).setUp()
        # Raw payload bytes used as ServerInformation data throughout.
        self.data = BytearrayStream(b'\x00\x01\x02\x03')
        # Encoding of a ServerInformation structure with an empty payload.
        self.encoding_a = BytearrayStream(
            b'\x42\x00\x88\x01\x00\x00\x00\x00')
        # Encoding of a ServerInformation structure carrying self.data.
        self.encoding_b = BytearrayStream(
            b'\x42\x00\x88\x01\x00\x00\x00\x04\x00\x01\x02\x03')
    def tearDown(self):
        super(TestServerInformation, self).tearDown()
    def test_init(self):
        # Construction with no arguments must not raise.
        ServerInformation()
    def _test_read(self, stream, data):
        # Helper: decode `stream` and compare the decoded data to `data`.
        server_information = ServerInformation()
        server_information.read(stream)
        expected = data
        observed = server_information.data
        msg = "data decoding mismatch"
        msg += "; expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)
    def test_read_with_none(self):
        """
        Test that a ServerInformation object with no data can be read from a
        data stream.
        """
        self._test_read(self.encoding_a, BytearrayStream())
    def test_read_with_data(self):
        """
        Test that a ServerInformation object with data can be read from a
        data stream.
        """
        self._test_read(self.encoding_b, self.data)
    def _test_write(self, stream_expected, data):
        # Helper: encode a ServerInformation (with optional `data`) and
        # compare both the length and the bytes to `stream_expected`.
        stream_observed = BytearrayStream()
        server_information = ServerInformation()
        if data is not None:
            server_information.data = data
        server_information.write(stream_observed)
        length_expected = len(stream_expected)
        length_observed = len(stream_observed)
        msg = "encoding lengths not equal"
        msg += "; expected {0}, observed {1}".format(
            length_expected, length_observed)
        self.assertEqual(length_expected, length_observed, msg)
        msg = "encoding mismatch"
        msg += ";\nexpected:\n{0}\nobserved:\n{1}".format(
            stream_expected, stream_observed)
        self.assertEqual(stream_expected, stream_observed, msg)
    def test_write_with_none(self):
        """
        Test that a ServerInformation object with no data can be written to a
        data stream.
        """
        self._test_write(self.encoding_a, None)
    def test_write_with_data(self):
        """
        Test that a ServerInformation object with data can be written to a
        data stream.
        """
        self._test_write(self.encoding_b, self.data)
    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        ServerInformation objects with the same internal data.
        """
        a = ServerInformation()
        b = ServerInformation()
        a.data = self.data
        b.data = self.data
        self.assertTrue(a == b)
        self.assertTrue(b == a)
    def test_equal_on_equal_and_empty(self):
        """
        Test that the equality operator returns True when comparing two
        ServerInformation objects with no internal data.
        """
        a = ServerInformation()
        b = ServerInformation()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
    def test_equal_on_not_equal(self):
        """
        Test that the equality operator returns False when comparing two
        ServerInformation objects with different sets of internal data.
        """
        a = ServerInformation()
        b = ServerInformation()
        a.data = self.data
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing a
        ServerInformation object to a non-ServerInformation object.
        """
        a = ServerInformation()
        b = "invalid"
        self.assertFalse(a == b)
    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing
        two ServerInformation objects with the same internal data.
        """
        a = ServerInformation()
        b = ServerInformation()
        a.data = self.data
        b.data = self.data
        self.assertFalse(a != b)
        self.assertFalse(b != a)
    def test_not_equal_on_equal_and_empty(self):
        """
        Test that the inequality operator returns False when comparing
        two ServerInformation objects with no internal data.
        """
        a = ServerInformation()
        b = ServerInformation()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
    def test_not_equal_on_not_equal(self):
        """
        Test that the inequality operator returns True when comparing two
        ServerInformation objects with different sets of internal data.
        """
        a = ServerInformation()
        b = ServerInformation()
        a.data = self.data
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing a
        ServerInformation object to a non-ServerInformation object.
        """
        a = ServerInformation()
        b = "invalid"
        self.assertTrue(a != b)
    def test_repr(self):
        """
        Test that the representation of a ServerInformation object is
        formatted properly and can be used by eval to create a new
        ServerInformation object.
        """
        server_information = ServerInformation()
        expected = "ServerInformation()"
        observed = repr(server_information)
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)
        # repr() must round-trip: eval'ing it yields an equal object.
        expected = server_information
        observed = eval(observed)
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)
    def _test_str(self, data):
        # Helper: check str() length matches str(data) and the type is textual.
        server_information = ServerInformation()
        server_information.data = data
        str_repr = str(server_information)
        expected = len(str(data))
        observed = len(str_repr)
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)
        # TODO (peter-hamilton) This should be binary_type. Fix involves
        # TODO (peter-hamilton) refining BytearrayStream implementation.
        expected = string_types
        observed = str_repr
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertIsInstance(observed, expected, msg)
    def test_str_with_no_data(self):
        """
        Test that the string representation of a ServerInformation object
        is formatted properly when there is no internal data.
        """
        self._test_str(BytearrayStream())
    def test_str_with_data(self):
        """
        Test that the string representation of a ServerInformation object
        is formatted properly when there is internal data.
        """
        self._test_str(self.data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.