repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
CSCI-462-01-2017/bedrock | tests/pages/internet_health.py | Python | mpl-2.0 | 732 | 0 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
class InternetHealthPage(BasePage):
URL_TEMPLATE = '/{locale}/internet-health'
_blog_feed_locator = (By.ID, ' | blogs')
_blog_feed_articles_locator = (By.CSS_SELECTOR, '#blogs article')
@property
def is_blog_feed_displayed(self):
return self.is_element_displayed(*self._blog_feed_locator)
@property
def number_of_blog_articles_present(self):
| return len(self.find_elements(*self._blog_feed_articles_locator))
|
simontakite/sysadmin | pythonscripts/learningPython/exiter2.py | Python | gpl-2.0 | 208 | 0.009615 | impo | rt sys
def bye():
    """Abort the interpreter immediately with exit status 40."""
    raise SystemExit(40)  # Crucial error: abort now!
try:
bye()
except Exception:
print('got it') # Oops--we ignored the exit
print('continuing...' | )
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/pyqtgraph/opengl/shaders.py | Python | gpl-3.0 | 16,009 | 0.011618 | try:
from OpenGL import NullFunctionError
except ImportError:
from OpenGL.error import NullFunctionError
from OpenGL.GL import *
from OpenGL.GL import shaders
import re
## For centralizing and managing vertex/fragment shader programs.
def initShaders():
global Shaders
Shaders = [
ShaderProgram(None, []),
## increases fragment alpha as the normal turns orthogonal to the view
## this is useful for viewing shells that enclose a volume (such as isosurfaces)
ShaderProgram('balloon', [
VertexShader("""
varying vec3 normal;
void main() {
// compute here for use in fragment shader
normal = normalize(gl_NormalMatrix * gl_Normal);
gl_FrontColor = gl_Color;
gl_BackColor = gl_Color;
gl_Position = ftransform();
}
"""),
FragmentShader("""
varying vec3 normal;
void main() {
vec4 color = gl_Color;
color.w = min(color.w + 2.0 * color.w * pow(normal.x*normal.x + normal.y*normal.y, 5.0), 1.0);
gl_FragColor = color;
}
""")
]),
## colors fragments based on face normals relative to view
## This means that the colors will change depending on how the view is rotated
ShaderProgram('viewNormalColor', [
VertexShader("""
varying vec3 normal;
void main() {
// compute here for use in fragment shader
normal = normalize(gl_NormalMatrix * gl_Normal);
gl_FrontColor = gl_Color;
gl_BackColor = gl_Color;
gl_Position = ftransform();
}
"""),
FragmentShader("""
varying vec3 normal;
void main() {
vec4 color = gl_Color;
color.x = (normal.x + 1.0) * 0.5;
color.y = (normal.y + 1.0) * 0.5;
color.z = (normal.z + 1.0) * 0.5;
gl_FragColor = color;
}
""")
]),
## colors fragments based on absolute face normals.
ShaderProgram('normalColor', [
VertexShader("""
varying vec3 normal;
void main() {
// compute here for use in fragment shader
normal = normalize(gl_Normal);
gl_FrontColor = gl_Color;
gl_BackColor = gl_Color;
gl_Position = ftransform();
}
"""),
FragmentShader("""
varying vec3 normal;
void main() {
vec4 color = gl_Color;
color.x = (normal.x + 1.0) * 0.5;
color.y = (normal.y + 1.0) * 0.5;
color.z = (normal.z + 1.0) * 0.5;
gl_FragColor = color;
}
""")
]),
## very simple simulation of lighting.
## The light source position is always relative to the camera.
ShaderProgram('shaded', [
VertexShader("""
varying vec3 normal;
void main() {
// compute here for use in fragment shader
normal = normalize(gl_NormalMatrix * gl_Normal);
gl_FrontColor = gl_Color;
gl_BackColor = gl_Color;
gl_Position = ftransform();
}
"""),
FragmentShader("""
varying vec3 normal;
void main() {
float p = dot(normal, normalize(vec3(1.0, -1.0, -1.0)));
p = p < 0. ? 0. : p * 0.8;
vec4 color = gl_Color;
color.x = color.x * (0.2 + p);
color.y = color.y * (0.2 + p);
color.z = color.z * (0.2 + p);
gl_FragColor = color;
}
""")
]),
## colors get brighter near edges of object
ShaderProgram('edgeHilight', [
VertexShader("""
varying vec3 normal;
void main() {
// compute here for use in fragment shader
normal = normalize(gl_NormalMatrix * gl_Normal);
gl_FrontColor = gl_Color;
gl_BackColor = gl_Color;
gl_Position = ftransform();
}
"""),
FragmentShader("""
varying vec3 normal;
void main() {
vec4 color = gl_Color;
float s = pow(normal.x*normal.x + normal.y*normal.y, 2.0);
color.x = color.x + s * (1.0-color.x);
color.y = color.y + s * (1.0-color.y);
color.z = color.z + s * (1.0-color.z);
gl_FragColor = color;
}
""")
]),
## colors fragments by z-value.
## This is useful for coloring surface plots by height.
## This shader uses a uniform called "colorMap" to determine how to map the colors:
## red = pow(colorMap[0]*(z + colorMap[1]), colorMap[2])
## green = pow(colorMap[3]*(z + colorMap[4]), colorMap[5])
## blue = pow(colorMap[6]*(z + colorMap[7]), colorMap[8])
## (set the values like this: shader['uniformMap'] = array([...])
ShaderProgram('heightColor', [
VertexShader("""
varying vec4 pos;
void main() {
gl_FrontColor = gl_Color;
gl_BackColor = gl_Color;
pos = gl_Vertex;
gl_Position = ftransform();
}
"""),
FragmentShader("""
uniform float colorMap[9];
varying vec4 pos;
//out vec4 gl_FragColor; // only needed for later glsl versions
//in vec4 gl_Color;
void main() {
vec4 color = gl_Color;
color.x = colorMap[0] * (pos.z + colorMap[1]);
if (colorMap[2] != 1.0)
color.x = pow(color.x, colorMap[2]);
color.x = color.x < 0. ? 0. : (color.x > 1. ? 1. : color.x);
color.y = colorMap[3] * (pos.z + colorMap[4]);
if (colorMap[5] != 1.0)
color.y = pow(color.y, colorMap[5]);
color.y = color.y < 0. ? 0. : (color.y > 1. ? 1. : color.y);
color.z = colorMap[6] * (pos.z + colorMap[7]);
if (colorMap[8] != 1.0)
color.z = pow(color.z, colorMap[8]);
color.z = color.z < 0. ? 0. : (color.z > 1. ? 1. : color.z);
color.w = 1.0;
gl_FragColor = color;
}
"""),
], uniforms={'colorMap': [1, 1, 1, 1, 0.5, 1, 1, 0, 1]}),
ShaderProgram('pointSprite', [ ## allows specifying point size using normal.x
| ## See:
##
## http://stackoverflow.com/questions/9609423/applying-part-of-a-texture-sprite-sheet-texture-map-to-a-point-sprite-in-ios
## http://stackoverflow.com/questions/3497068/textured-points-in-opengl-es-2-0
##
##
VertexShader("""
void main() {
gl_FrontColor=gl_Color;
gl_PointSize = gl_Normal.x;
gl_Po | sition = ftransform();
}
"""),
#FragmentShader("""
##version 120
#uniform sampler2D texture;
#void main ( )
#{
#gl |
lebauce/artub | builder/Installer/hooks/hook-DateTime.py | Python | gpl-2.0 | 79 | 0 | hid | denimports = ['ISO', 'ARPA', 'ODMG', 'Locale', 'Feasts', 'Parser', 'NIST']
| |
tkralphs/GrUMPy | src/grumpy/examples/LP6.py | Python | epl-1.0 | 420 | 0.05 | numVars = 2
#points = [[2.5, 4.5], | [6.5, 0.5],
# [7, 5.7], [7.7, 5], [-2, -2]]
#rays = []
A = [[ -1, 3.75 ],
[ 1, -3.4 ],
[ -1.625, 1.125],
[ 3.75, -1 ],
[ 1, 1 ],
]
b = [ 14.375,
4.8 ,
1.0 | ,
23.875,
12.7 ,
]
c = [2, 1]
opt = (7.7, 5)
obj_val = 20.4
sense = ('<=', 'Max')
|
alex/sqlalchemy | test/base/test_dependency.py | Python | mit | 8,781 | 0.001025 | from sqlalchemy.util import topological
from sqlalchemy.testing import assert_raises, eq_
from sqlalchemy.testing.util import conforms_partial_ordering
from sqlalchemy import exc
from sqlalchemy.testing import fixtures
class DependencySortTest(fixtures.TestBase):
def assert_sort(self, tuples, allitems=None):
    """Topologically sort *tuples* and verify the result respects the
    partial ordering implied by the edges."""
    nodes = self._nodes_from_tuples(tuples)
    if allitems is not None:
        nodes = nodes.union(allitems)
    ordering = list(topological.sort(tuples, nodes))
    assert conforms_partial_ordering(tuples, ordering)
def _nodes_from_tuples(self, tups):
    """Return the set of every node appearing in any edge of *tups*."""
    return {node for tup in tups for node in tup}
def test_sort_one(self):
    """A tree-shaped dependency graph sorts into a valid ordering."""
    tuples = [
        ('subnode3', 'subsubnode1'),
        ('node2', 'subnode1'),
        ('node2', 'subnode2'),
        ('root', 'node2'),
        ('root', 'node3'),
        ('root', 'node4'),
        ('node4', 'subnode3'),
        ('node4', 'subnode4'),
    ]
    self.assert_sort(tuples)
def test_sort_two(self):
    """A chain plus one isolated node sorts into a valid ordering."""
    tuples = [
        ('node1', 'node2'),
        ('node3', 'node4'),
        ('node4', 'node5'),
        ('node5', 'node6'),
        ('node6', 'node2'),
    ]
    # 'node7' has no edges but must still appear in the result.
    self.assert_sort(tuples, ['node7'])
def test_sort_three(self):
    """A small diamond-shaped graph sorts into a valid ordering."""
    # NOTE: 'itemkeyowrds' is the historical (misspelled) node label;
    # preserved verbatim.
    tuples = [
        ('keywords', 'itemkeyowrds'),
        ('hoho', 'keywords'),
        ('keywords', 'items'),
        ('items', 'itemkeyowrds'),
    ]
    self.assert_sort(tuples)
def test_raise_on_cycle_one(self):
    """Two interlocking cycles raise CircularDependencyError with the
    full cycle and edge sets reported."""
    tuples = [
        ('node4', 'node5'),
        ('node5', 'node4'),
        ('node1', 'node2'),
        ('node2', 'node3'),
        ('node3', 'node1'),
        ('node4', 'node1'),
    ]
    allitems = self._nodes_from_tuples(tuples)
    try:
        list(topological.sort(tuples, allitems))
        assert False
    except exc.CircularDependencyError as err:
        eq_(err.cycles,
            {'node1', 'node2', 'node3', 'node4', 'node5'})
        eq_(err.edges,
            {('node3', 'node1'), ('node4', 'node1'),
             ('node2', 'node3'), ('node1', 'node2'),
             ('node4', 'node5'), ('node5', 'node4')})
def test_raise_on_cycle_two(self):
    # this condition was arising from ticket:362 and was not treated
    # properly by topological sort
    tuples = [
        ('node1', 'node2'),
        ('node3', 'node1'),
        ('node2', 'node4'),
        ('node3', 'node2'),
        ('node2', 'node3'),
    ]
    allitems = self._nodes_from_tuples(tuples)
    try:
        list(topological.sort(tuples, allitems))
        assert False
    except exc.CircularDependencyError as err:
        eq_(err.cycles, {'node1', 'node3', 'node2'})
        eq_(err.edges,
            {('node3', 'node1'), ('node2', 'node3'),
             ('node3', 'node2'), ('node1', 'node2'),
             ('node2', 'node4')})
def test_raise_on_cycle_three(self):
    """A larger cyclic graph simply raises CircularDependencyError."""
    tuples = [
        ('Question', 'Issue'),
        ('ProviderService', 'Issue'),
        ('Provider', 'Question'),
        ('Question', 'Provider'),
        ('ProviderService', 'Question'),
        ('Provider', 'ProviderService'),
        ('Question', 'Answer'),
        ('Issue', 'Question'),
    ]
    assert_raises(
        exc.CircularDependencyError, list,
        topological.sort(tuples, self._nodes_from_tuples(tuples)))
# TODO: test find_cycles
def test_large_sort(self):
    """A long chain of disjoint even->odd edges sorts without error."""
    self.assert_sort([(i, i + 1) for i in range(0, 1500, 2)])
def test_ticket_1380(self):
    # ticket:1380 regression: would raise a KeyError
    self.assert_sort([(id(i), i) for i in range(3)])
def test_find_cycle_one(self):
    """find_cycles() reports exactly the nodes participating in a cycle."""
    tuples = [
        ('node1', 'node2'),
        ('node3', 'node1'),
        ('node2', 'node4'),
        ('node3', 'node2'),
        ('node2', 'node3'),
    ]
    eq_(
        topological.find_cycles(tuples, self._nodes_from_tuples(tuples)),
        {'node1', 'node2', 'node3'},
    )
def test_find_multiple_cycles_one(self):
    """Several disjoint and overlapping cycles are all reported."""
    tuples = [
        # cycle 1 cycle 2 cycle 3 cycle 4, but only if cycle
        # 1 nodes are present
        ('node1', 'node2'),
        ('node2', 'node4'),
        ('node4', 'node1'),
        ('node9', 'node9'),
        ('node7', 'node5'),
        ('node5', 'node7'),
        ('node1', 'node6'),
        ('node6', 'node8'),
        ('node8', 'node4'),
        ('node3', 'node1'),
        ('node3', 'node2'),
    ]
    allnodes = {'node%d' % i for i in range(1, 10)}
    # every node except node3 (which only feeds into cycles) is cyclic
    eq_(
        topological.find_cycles(tuples, allnodes),
        {'node1', 'node2', 'node4', 'node5',
         'node6', 'node7', 'node8', 'node9'},
    )
def test_find_multiple_cycles_two(self):
    """Overlapping cycles through node1/node2/node4/node6 are detected."""
    tuples = [
        ('node1', 'node2'),
        ('node2', 'node4'),
        ('node4', 'node1'),
        ('node1', 'node6'),
        ('node6', 'node2'),
        ('node2', 'node4'),
        ('node4', 'node1'),
    ]
    allnodes = {'node%d' % i for i in range(1, 7)}
    # node6 only became present here once [ticket:2282] was addressed.
    eq_(
        topological.find_cycles(tuples, allnodes),
        {'node1', 'node2', 'node4', 'node6'},
    )
def test_find_multiple_cycles_three(self):
    """When every node takes part in some cycle through node2,
    find_cycles() returns the full node set."""
    tuples = [
        ('node1', 'node2'),
        ('node2', 'node1'),
        ('node2', 'node3'),
        ('node3', 'node2'),
        ('node2', 'node4'),
        ('node4', 'node2'),
        ('node2', 'node5'),
        ('node5', 'node6'),
        ('node6', 'node2'),
    ]
    allnodes = {'node%d' % i for i in range(1, 7)}
    eq_(topological.find_cycles(tuples, allnodes), allnodes)
def test_find_multiple_cycles_four(self):
tuples = [
('node6', 'node2'),
('node15', 'node19'),
('node19', 'node2'), ('node4', 'node10'),
('node | 15', 'node13'),
| ('node17', 'node11'), ('node1', 'node19'), ('node15', 'node8'),
('node6', 'node20'), ('node14', 'node11'), ('node6', 'node14'),
('node11', 'node2'), ('node10', 'node20'), ('node1', 'node11'),
('node20', 'node19'), ('node4', 'node20'), ('node15', 'node20'),
('node9', 'no |
respawner/peering-manager | devices/tests/test_views.py | Python | apache-2.0 | 772 | 0 | from devices.models import Platform
from utils.testing import ViewTestCases
class PlatformTestCase(
ViewTestCases.CreateObjectViewTestCase,
ViewTestCases.EditObjectViewTestCase,
ViewTestCases.DeleteObjectViewTestCase,
ViewTestCases.ListObjectsViewTestCase,
):
model = Platform
@classmethod
def setUpTestData(cls):
Platform.object | s.bulk_create(
[
Platform(name="Some OS", slug="someos"),
Platform(name="Test OS", slug="testos"),
]
)
cls.form_data = {
"name": "Bugs OS",
"slug": "bugsos",
"napalm_driver": "bugsos" | ,
"napalm_args": {},
"password_algorithm": "",
"description": "",
}
|
sciunto/rss2email | rss2email/feeds.py | Python | gpl-2.0 | 14,342 | 0.000279 | # Copyright (C) 2004-2014 Aaron Swartz
# Brian Lalor
# Dean Jackson
# Erik Hetzner
# Etienne Millon <me@emillon.org>
# Joey Hess
# Lindsey Smith <lindsey.smith@gmail.com>
# Marcel Ackermann
# Martin 'Joey' Schulze
# Matej Cepl
# W. Trevor King <wking@tremily.us>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"""Define the ``Feed`` class for handling a list of feeds
"""
import codecs as _codecs
import collections as _collections
import os as _os
import json as _json
import pickle as _pickle
import sys as _sys
from . import LOG as _LOG
from . import config as _config
from . import error as _error
from . import feed as _feed
UNIX = False
try:
import fcntl as _fcntl
# A pox on SunOS file locking methods
if 'sunos' not in _sys.platform:
UNIX = True
except:
pass
# Path to the filesystem root, '/' on POSIX.1 (IEEE Std 1003.1-2008).
ROOT_PATH = _os.path.splitdrive(_sys.executable)[0] or _os.sep
class Feeds (list):
"""Utility class for rss2email activity.
>>> import codecs
>>> import os.path
>>> import json
>>> import tempfile
>>> from .feed import Feed
Setup a temporary directory to load.
>>> tmpdir = tempfile.TemporaryDirectory(prefix='rss2email-test-')
>>> configfile = os.path.join(tmpdir.name, 'rss2email.cfg')
>>> with open(configfile, 'w') as f:
... count = f.write('[DEFAULT]\\n')
... count = f.write('to = a@b.com\\n')
... count = f.write('[feed.f1]\\n')
... count = f.write('url = http://a.net/feed.atom\\n')
... count = f.write('to = x@y.net\\n')
... count = f.write('[feed.f2]\\n')
... count = f.write('url = http://b.com/rss.atom\\n')
>>> datafile = os.path.join(tmpdir.name, 'rss2email.json')
>>> with codecs.open(datafile, 'w', Feeds.datafile_encoding) as f:
... json.dump({
... 'version': 1,
... 'feeds': [
... Feed(name='f1').get_state(),
... Feed(name='f2').get_state(),
... ],
... }, f)
>>> feeds = Feeds(configfiles=[configfile,], datafile=datafile)
>>> feeds.load()
>>> for feed in feeds:
... print(feed)
f1 (http://a.net/feed.atom -> x@y.net)
f2 (http://b.com/rss.atom -> a@b.com)
You can index feeds by array index or by feed name.
>>> feeds[0]
<Feed f1 (http://a.net/feed.atom -> x@y.net)>
>>> feeds[-1]
<Feed f2 (http://b.com/rss.atom -> a@b.com)>
>>> feeds['f1']
<Feed f1 (http://a.net/feed.atom -> x@y.net)>
>>> feeds['missing']
Traceback (most recent call last):
...
IndexError: missing
Tweak the feed configuration and save.
>>> feeds[0].to = None
>>> feeds.save()
>>> print(open(configfile, 'r').read().rstrip('\\n'))
... # doctest: +REPORT_UDIFF, +ELLIPSIS
[DEFAULT]
from = user@rss2email.invalid
...
verbose = warning
<BLANKLINE>
[feed.f1]
url = http://a.net/feed.atom
<BLANKLINE>
[feed.f2]
url = http://b.com/rss.atom
Cleanup the temporary directory.
>>> tmpdir.cleanup()
"""
datafile_version = 2
dat | afile_encoding = 'utf-8'
def __init__(self, configfiles=None, datafile=None, config=None):
super(Feeds, self).__init__()
if configfiles is None:
configfiles = self._get_configfiles()
self.configfiles = configfiles
if datafile is None:
datafile = self._get_datafile()
self.datafile = _os.path.realpath(datafile)
if config is None:
config = | _config.CONFIG
self.config = config
self._datafile_lock = None
def __getitem__(self, key):
    """Index feeds by name or by (possibly stringified) list position.

    A string key is first matched against feed names; otherwise it is
    converted to an integer index.  Raises IndexError for unknown keys.
    """
    match = next((feed for feed in self if feed.name == key), None)
    if match is not None:
        return match
    try:
        position = int(key)
    except ValueError as e:
        raise IndexError(key) from e
    return super(Feeds, self).__getitem__(position)
def __append__(self, feed):
    # Append *feed* after initializing it from the shared configuration.
    # NOTE(review): '__append__' is not a Python special method, so plain
    # feeds.append(...) calls list.append and bypasses this hook; it only
    # runs when invoked explicitly -- confirm this is intended.
    feed.load_from_config(self.config)
    feed = super(Feeds, self).append(feed)  # list.append returns None
def __pop__(self, index=-1):
    """Remove and return the feed at *index* (default: last), also
    dropping its section from the configuration.

    NOTE(review): '__pop__' is not a Python special method; plain
    ``.pop()`` calls ``list.pop`` and bypasses this hook, so this only
    runs when invoked explicitly.
    """
    # Bug fix: list.pop() accepts only a positional argument, so the
    # previous ``pop(index=index)`` raised
    # "TypeError: pop() takes no keyword arguments" whenever called.
    feed = super(Feeds, self).pop(index)
    if feed.section in self.config:
        self.config.pop(feed.section)
    return feed
def index(self, index):
    """Return the feed matching *index*.

    *index* may be an integer list position, a numeric string (retried
    as an integer position), or a feed name.  Raises
    ``_error.FeedIndexError`` when nothing matches.
    """
    if isinstance(index, int):
        try:
            return self[index]
        except IndexError as e:
            raise _error.FeedIndexError(index=index, feeds=self) from e
    elif isinstance(index, str):
        try:
            index = int(index)
        except ValueError:
            pass
        else:
            # Numeric string: retry lookup as an integer position.
            return self.index(index)
        # Non-numeric string: match against feed names.
        for feed in self:
            if feed.name == index:
                return feed
        try:
            super(Feeds, self).index(index)
        except (IndexError, ValueError) as e:
            raise _error.FeedIndexError(index=index, feeds=self) from e
        # NOTE(review): if list.index() succeeds above, execution falls
        # through and the method returns None -- a 'return' may be
        # missing here; confirm against callers.
def remove(self, feed):
    """Remove *feed* from the list and drop its config section."""
    super(Feeds, self).remove(feed)
    if feed.section in self.config:
        del self.config[feed.section]
def clear(self):
    """Empty the feed list, popping from the front until none remain."""
    while len(self) > 0:
        self.pop(0)
def _get_configfiles(self):
    """Get configuration file paths

    Following the XDG Base Directory Specification.
    """
    config_home = _os.environ.get(
        'XDG_CONFIG_HOME',
        _os.path.expanduser(_os.path.join('~', '.config')))
    search_dirs = [config_home] + _os.environ.get(
        'XDG_CONFIG_DIRS',
        _os.path.join(ROOT_PATH, 'etc', 'xdg'),
    ).split(':')
    paths = [_os.path.join(d, 'rss2email.cfg') for d in search_dirs]
    # reverse because ConfigParser wants most significant last
    paths.reverse()
    return paths
def _get_datafile(self):
    """Get the data file path

    Following the XDG Base Directory Specification.
    """
    data_home = _os.environ.get(
        'XDG_DATA_HOME',
        _os.path.expanduser(_os.path.join('~', '.local', 'share')))
    data_dirs = [data_home] + _os.environ.get(
        'XDG_DATA_DIRS',
        ':'.join([
            _os.path.join(ROOT_PATH, 'usr', 'local', 'share'),
            _os.path.join(ROOT_PATH, 'usr', 'share'),
        ]),
    ).split(':')
    candidates = [_os.path.join(d, 'rss2email.json') for d in data_dirs]
    # Prefer the first existing data file; otherwise fall back to the
    # most significant location (under XDG_DATA_HOME).
    for candidate in candidates:
        if _os.path.isfile(candidate):
            return candidate
    return candidates[0]
def load(self, lock=True, require=False):
    """Read the configuration files, then load the stored feed data."""
    _LOG.debug('load feed configuration from {}'.format(self.configfiles))
    if self.configfiles:
        self.read_configfiles = self.config.read(self.configfiles)
    else:
        self.read_configfiles = []
    _LOG.debug(
        'loaded configuration from {}'.format(self.read_configfiles))
    self._load_feeds(lock=lock, require=require)
def _load_feeds(self, lock, require):
_LOG.debug('load feed data from {}'.format(self.datafile))
if not _os.path.exists(se |
Ophiuchus1312/enigma2-master | lib/python/Plugins/Extensions/Infopanel/SwapManager.py | Python | gpl-2.0 | 12,563 | 0.026825 | # for localized messages
from . import _
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Components.config import config, configfile, ConfigYesNo, ConfigSubsection
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Harddisk import harddiskmanager, getProcMounts
from Components.Console import Console
from Components.Sources.StaticText import StaticText
from os import system, stat as mystat, path, remove, rename
from enigma import eTimer
from glob import glob
import stat
config.plugins.infopanel = ConfigSubsection()
config.plugins.infopanel.swapautostart = ConfigYesNo(default = False)
startswap = None
def SwapAutostart(reason, session=None, **kwargs):
    # enigma2 plugin autostart hook; reason == 0 is plugin start-up.
    # If the user enabled swap autostart, kick off the asynchronous
    # swap search/activation.
    global startswap
    if reason == 0:
        if config.plugins.infopanel.swapautostart.getValue():
            print "[SwapManager] autostart"
            # Keep a module-level reference so the StartSwap instance
            # (and its pending Console callback) stays alive.
            startswap = StartSwap()
            startswap.start()
class StartSwap:
    """Locate a swap partition or swap file and activate it with swapon."""

    def __init__(self):
        # Console runs shell commands asynchronously and invokes a
        # callback with their output.
        self.Console = Console()

    def start(self):
        # Look for swap-type partitions on any /dev/sd? disk; the result
        # is handled by startSwap2.
        self.Console.ePopen("sfdisk -l /dev/sd? | grep swap", self.startSwap2)

    def startSwap2(self, result = None, retval = None, extra_args = None):
        """Callback for the sfdisk scan: prefer a swap partition; if none
        was found, search mounted local media for a file named swap*;
        finally enable the chosen swap area if not already active."""
        swap_place = ""
        if result and result.find('sd') != -1:
            # A swap partition exists: take its device node (last match
            # wins) and strip any stale reference to it from /etc/fstab.
            for line in result.split('\n'):
                if line.find('sd') != -1:
                    parts = line.strip().split()
                    swap_place = parts[0]
            file('/etc/fstab.tmp', 'w').writelines([l for l in file('/etc/fstab').readlines() if swap_place not in l])
            rename('/etc/fstab.tmp','/etc/fstab')
            print "[SwapManager] Found a swap partition:", swap_place
        else:
            # No partition found: collect mounted local partitions
            # (skipping root and network mounts) and look for swap files.
            devicelist = []
            for p in harddiskmanager.getMountedPartitions():
                d = path.normpath(p.mountpoint)
                if path.exists(p.mountpoint) and p.mountpoint != "/" and not p.mountpoint.startswith('/media/net'):
                    devicelist.append((p.description, d))
            if len(devicelist):
                for device in devicelist:
                    for filename in glob(device[1] + '/swap*'):
                        if path.exists(filename):
                            swap_place = filename
                            print "[SwapManager] Found a swapfile on ", swap_place
        # Only activate if /proc/swaps does not already list this area.
        f = file('/proc/swaps').read()
        if f.find(swap_place) == -1:
            print "[SwapManager] Starting swapfile on ", swap_place
            system('swapon ' + swap_place)
        else:
            print "[SwapManager] Swapfile is already active on ", swap_place
#######################################################################
class Swap(Screen):
skin = """
<screen name="Swap" position="center,center" size="420,250" title="Swap File Manager" flags="wfBorder" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget name="key_green" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="key_yellow" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="autostart_off" position="10,50" zPosition="1" pixmap="skin_default/icons/lock_off.png" size="32,32" alphatest="on" />
<widget name="autostart_on" position="10,50" zPosition="2" pixmap="skin_default/icons/lock_on.png" size="32,32" alphatest="on" />
<widget name="lab1" position="50,50" size="360,30" font="Regular;20" valign="center" transparent="1"/>
<widget name="lab2" position="10,100" size="150,30" font="Regular;20" valign="center" transparent="1"/>
<widget name="lab3" position="10,150" size="150,30" font="Regular;20" valign="center" transparent="1"/>
<widget name="lab4" position="10,200" size="150,30" font="Regular;20" valign="center" transparent="1" />
<widget name="labplace" position="160,100" size="220,30" font="Regular;20" valign="center" backgroundColor="#4D5375"/>
<widget name="labsize" position="160,150" size="220,30" font="Regular;20" valign="center" backgroundColor="#4D5375"/>
<widget name="inactive" position="160,200" size="100,30" font="Regular;20" valign="center" halign="center" backgroundColor="red"/>
<widget name="active" position="160,200" size="100,30" font="Regular;20" valign="center" halign="center" backgroundColor="green"/>
</screen>"""
def __init__(self, session):
    """Build the Swap Manager screen: labels, status icons, color-button
    actions, and a timer that triggers the initial device scan."""
    Screen.__init__(self, session)
    Screen.setTitle(self, _("Swap Manager"))
    # Status labels and autostart lock icons referenced by the skin.
    self['lab1'] = Label()
    self['autostart_on'] = Pixmap()
    self['autostart_off'] = Pixmap()
    self['lab2'] = Label(_("Swap Place:"))
    self['labplace'] = Label()
    self['lab3'] = Label(_("Swap Size:"))
    self['labsize'] = Label()
    self['lab4'] = Label(_("Status:"))
    self['inactive'] = Label(_("Inactive"))
    self['active'] = Label(_("Active"))
    # Color-button captions (red/green/yellow).
    self['key_red'] = Label(_("Activate"))
    self['key_green'] = Label(_("Create"))
    self['key_yellow'] = Label(_("Autostart"))
    self['swapname_summary'] = StaticText()
    self['swapactive_summary'] = StaticText()
    self.Console = Console()
    # State describing the currently detected swap area.
    self.swap_place = ''
    self.new_place = ''
    self.creatingswap = False
    self['actions'] = ActionMap(['WizardActions', 'ColorActions', "MenuActions"], {'back': self.close, 'red': self.actDeact, 'green': self.createDel, 'yellow': self.autoSsWap, "menu": self.close})
    # The timer fires getSwapDevice shortly after updateSwap() starts it,
    # deferring the scan until after the UI is shown.
    self.activityTimer = eTimer()
    self.activityTimer.timeout.get().append(self.getSwapDevice)
    self.updateSwap()
def updateSwap(self, result = None, retval = None, extra_args = None):
    """Reset the UI to its 'scanning' state and start the scan timer."""
    # Disable input and hide all status widgets while scanning.
    self["actions"].setEnabled(False)
    self.swap_active = False
    self['autostart_on'].hide()
    self['autostart_off'].show()
    self['active'].hide()
    self['inactive'].show()
    self['labplace'].hide()
    self['labsize'].hide()
    self['swapactive_summary'].setText(_("Current Status:"))
    scanning = _("Wait please while scanning...")
    self['lab1'].setText(scanning)
    # getSwapDevice() runs once this 10 ms timer fires.
    self.activityTimer.start(10)
def getSwapDevice(self):
    """Timer callback: migrate legacy autostart state, then scan disks."""
    self.activityTimer.stop()
    # Presence of the old init script implies autostart was enabled;
    # migrate that to the config flag and remove the script.
    if path.exists('/etc/rcS.d/S98SwapManager'):
        remove('/etc/rcS.d/S98SwapManager')
        config.plugins.infopanel.swapautostart.setValue(True)
        config.plugins.infopanel.swapautostart.save()
    # Clear any stale scan cache from a previous run.
    if path.exists('/tmp/swapdevices.tmp'):
        remove('/tmp/swapdevices.tmp')
    # Ask sfdisk for swap partitions; updateSwap2 handles the output.
    self.Console.ePopen("sfdisk -l /dev/sd? | grep swap", self.updateSwap2)
def updateSwap2(self, result = None, retval = None, extra_args = None):
self.swapsize = 0
self.swap_place = ''
self.swap_active = False
self.device = False
if result.find('sd') > 0:
self['key_green'].setText("")
for line in result.split('\n'):
if line.find('sd') > 0:
parts = line.strip() | .split()
self.swap_place = parts[0]
if self.swap_place == 'sfdisk:':
self.swap_place = ''
self.device = True
f = open('/proc/swaps', 'r')
for line in f.readlines():
parts = line.strip().split()
if line.find('partition') != -1:
self.swap_active = True
self.swapsize = parts[2]
continue
f.close()
else:
self['key_green'].setText(_("Create"))
devicelist = []
for p in harddiskmanager.getMountedPar | titions():
d = path.normpath(p.mountpoint)
if path.exists(p.mountpoint) and p.mountpoint != "/" and not p.mountpoint.startswith('/media/net'):
devicelist.append((p.description, d))
if len(devicelist):
for device in devicelist:
for filename in glob(device[1] + '/swap*'):
self.swap_place = filename
self['key_green'].setText(_("Delete"))
info = mystat(self.swap_place)
self.swapsize = info[stat.ST_SIZE]
continue
if config.plugins.infopanel.swapautostart.getValue() and self.swap_place:
self['autostart_off'].hide()
self['autostart_on'].show()
else:
config.plugins.infopanel.swapautostart.setValue(False)
config.plugins.infopanel.swapautostart.save()
configfile.save()
self['autostart_on'].hide()
self['autostart_off'].show()
self['labplace'].setText(self.swap_place)
self['labplace'].show()
f = open('/pr |
macknowak/simtools | simtools/simrun.py | Python | gpl-3.0 | 3,606 | 0.000832 | # -*- coding: utf-8 -*-
"""Simulation launch services.
Simulation launch services provide the following functionality:
- generating simulation id based on local date and time;
- generating simulation directory name;
- loading names of simulation directories from a text file;
- creating directory structure for simulation;
- normalizing the format of executable;
- launching simulation.
"""
import os
import shlex
import subprocess
import time
from simtools.argparse import all_optio | ns as options
from simtools.base import is_iterable, is_string
TMP_DIR_PREFIX = "_"
def generate_sim_id():
    """Generate simulation id based on local date and time.

    Returns a string of the form ``YYYYMMDD_HHMMSS``.
    """
    # Fix: the docstring terminator was garbled in the source; the code
    # itself is unchanged.
    t = time.localtime()
    sim_id = "{0:04}{1:02}{2:02}_{3:02}{4:02}{5:02}".format(
        t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
    return sim_id
def generate_sim_dirname(tmp=False, sim_id=None):
    """Generate simulation directory name.

    Falls back to a freshly generated simulation id when *sim_id* is
    falsy; temporary directories get the module's temp prefix.
    """
    sim_id = sim_id or generate_sim_id()
    if tmp:
        return TMP_DIR_PREFIX + sim_id
    return sim_id
def make_dirs(sim_dirname, sim_master_dirname=None, data_dirname=None):
"""Create directory structure for simulation."""
if sim_master_dirname is not None:
sim_path = os.path.join(sim_master_dirname, sim_dirname)
else:
sim_path = sim_dirname
os.makedirs(sim_path) # raises an error if simulation directory already
# exists
if data_dirname is not None:
os.makedirs(os.path.join(sim_path, data_dirname))
return sim_path
def run_sim(model_filename, params_filename=None, sim_id=None,
            data_dirname=None, executable=None, model_args=None):
    """Launch simulation.

    Builds a command line from an optional interpreter/executable, the
    model file, and option flags taken from the shared ``options`` table,
    then runs it synchronously, returning the process exit code.
    """
    cmd = []
    if executable:
        # *executable* may be a command string or an argv-style sequence.
        if is_string(executable):
            cmd.append(executable)
        else:
            if not is_iterable(executable):
                raise TypeError(
                    "'executable' is neither a string nor iterable.")
            cmd += executable
    cmd.append(model_filename)
    # options[...]['arg'][1] is the long command-line flag for each option.
    if params_filename:
        cmd += [options['params_filename']['arg'][1], params_filename]
    if sim_id:
        cmd += [options['sim_id']['arg'][1], sim_id]
    if data_dirname:
        # NOTE(review): indentation was lost in extraction -- assumed the
        # save_data flag is only added together with a data directory;
        # confirm against the original file.
        cmd += [options['data_dirname']['arg'][1], data_dirname]
        cmd.append(options['save_data']['arg'][1])
    if model_args:
        cmd += model_args
    # Blocks until the simulation process finishes.
    return subprocess.call(cmd)
def norm_executable(executable):
    """Normalize the format of executable.

    Splits the command string into an argv list and, when the program
    name is a relative path to an existing file, makes it absolute.
    """
    parts = shlex.split(executable)
    prog = parts[0]
    if not _os.path.isabs(prog) and _os.path.isfile(prog):
        parts[0] = _os.path.abspath(prog)
    return parts
def load_sim_dirnames(filename):
"""Load names of simulation directories from a file."""
COMMENT_START_TOKEN = "#"
sim_dirnames = []
with open(filename) as sim_dirnames_file:
for line in sim_dirnames_file:
# Strip leading and trailing whitespace from the line
stripped_line = line.strip()
# If the stripped line is empty or contains only a comment, skip it
if (not stripped_line
or stripped_line.startswith(COMMENT_START_TOKEN)):
continue
# Assume that the stripped line contains a directory path and
# normalize it according to the platform
sim_dirname = os.path.normpath(stripped_line.replace("\\", os.sep))
sim_dirnames.append(sim_dirname)
return sim_dirnames
|
evature/flightstats | flightstats/flightstats.py | Python | mit | 7,585 | 0.007779 | '''
Created on Jun 29, 2016
@author: evature
https://developer.flightstats.com/api-docs/scheduledFlights/v1
'''
from __future__ import absolute_import
import os
import requests
import datetime
from pprint import pprint
from pytz import timezone
from flightstats.flightaware_airports import AIRPORTS as FA_AIRPORTS
APPLICATION_ID = os.environ['FLIGHTSTATS_APP_ID']
APPLICATION_KEY = os.environ['FLIGHTSTATS_APP_KEY']
def send_request(search_url):
    """GET *search_url* from the FlightStats flex schedules API.

    Returns the decoded JSON body on HTTP 200; returns None (implicitly)
    on any other status code.
    """
    # Credentials come from the FLIGHTSTATS_APP_ID/_APP_KEY environment
    # variables read at module import.
    req_url = "https://api.flightstats.com/flex/schedules/rest/v1/json/{}?appId={}&appKey={}&codeType=IATA".format(search_url,
                                                                                                                  APPLICATION_ID,
                                                                                                                  APPLICATION_KEY)
    resp = requests.get(req_url)
    if resp.status_code == requests.codes.ok:  # @UndefinedVariable pylint:disable=no-member
        return resp.json()
def arrivals(from_airport, to_airport, arrival_date):
    """Look up scheduled flights arriving on *arrival_date*.

    *from_airport* and *to_airport* are IATA airport codes;
    *arrival_date* only needs ``year``/``month``/``day`` attributes
    (a date or datetime works).
    """
    path = "from/{}/to/{}/arriving/{}/{}/{}".format(
        from_airport, to_airport,
        arrival_date.year, arrival_date.month, arrival_date.day)
    return send_request(path)
def departures(from_airport, to_airport, departure_date):
    """Look up scheduled flights departing on *departure_date*.

    *from_airport* and *to_airport* are IATA airport codes;
    *departure_date* only needs ``year``/``month``/``day`` attributes
    (a date or datetime works).
    """
    path = "from/{}/to/{}/departing/{}/{}/{}".format(
        from_airport, to_airport,
        departure_date.year, departure_date.month, departure_date.day)
    return send_request(path)
def _helper_results_from_flightstats(response, airline, sort_by_key):
""" adds flights, airlines and airports """
response['flights'] = []
if response and response.get('scheduledFlights'):
response['flights'] = sorted(response['scheduledFlights'], key=lambda x: x[sort_by_key])
# removes all code shared flights
for flight in response['flights'][:]:
if flight['isCodeshare']:
response['flights'].remove(flight)
# filter down by airline
if airline is not None:
response['flights'] = [flight for flight in response['flights'] if flight['carrierFsCode'] == airline]
response['airports'] = {airport['fs']: airport for airport in response['appendix']['airports']}
response['airlines'] = {airline['fs']: airline for airline in response['appendix']['airlines']}
return response
def _helper_build_arrival_departure_text(flight_info, airline, airports, airlines, is_arrival):
if is_arrival:
date_key = 'arrivalTime'
location_key = 'departureAirportFsCode'
else:
date_key = 'departureTime'
location_key = 'arrivalAirportFsCode'
related_datetime = datetime.datetime.strptime(flight_info[date_key], "%Y-%m-%dT%H:%M:%S.%f")
related_city = airports[flight_info[location_key]]['city']
resp_text = ''
main_flight_number = "{}{}".format(flight_info["carrierFsCode"], flight_info["flightNumber"])
if not flight_info['codeshares'] and airline is None:
resp_text += airlines[flight_info['carrierFsCode']]['name'] + ' '
if flight_info['codeshares']:
code_shared_flights = ["{}{}".format(codeshare["carrierFsCode"], codeshare["flightNumber"]) for codeshare in flight_info['codeshares']]
resp_text += 'flights {} ({})'.format(main_flight_number, ", ".join(code_shared_flights))
else:
resp_text+= "flight {}".format(main_flight_number)
if is_arrival:
resp_text += " from {} will arrive at {}".format(related_city, datetime.datetime.strftime(related_datetime, '%H:%M'))
else:
resp_text += " to {} will depart at {}".format(related_city, datetime.datetime.strftime(related_datetime, '%H:%M'))
return resp_text
def arrivals_to_texts(from_airport, to_airport, airline=None, max_results=5):
    """Describe today's arrivals from *from_airport* to *to_airport*.

    Returns a list of up to *max_results* sentences, optionally limited
    to a single carrier.  When nothing matches, the string
    "did not find any results" is returned instead of a list.
    """
    # "Today" is evaluated in the origin airport's local time zone.
    local_now = timezone(FA_AIRPORTS[from_airport]['timezone']).localize(
        datetime.datetime.now())
    payload = _helper_results_from_flightstats(
        response=arrivals(from_airport, to_airport, local_now),
        airline=airline,
        sort_by_key='arrivalTime')
    flights = payload['flights']
    airport_map = payload['airports']
    airline_map = payload['airlines']
    if not flights:
        return "did not find any results"
    return [_helper_build_arrival_departure_text(
                info, airline, airport_map, airline_map, is_arrival=True)
            for info in flights[:max_results]]
def departures_to_texts(from_airport, to_airport, airline=None, max_results=5):
    """Describe today's departures from *from_airport* to *to_airport*.

    Returns a list of up to *max_results* sentences, optionally limited
    to a single carrier.  When nothing matches, the string
    "did not find any results" is returned instead of a list.
    """
    # "Today" is evaluated in the origin airport's local time zone.
    local_now = timezone(FA_AIRPORTS[from_airport]['timezone']).localize(
        datetime.datetime.now())
    payload = _helper_results_from_flightstats(
        response=departures(from_airport, to_airport, local_now),
        airline=airline,
        sort_by_key='departureTime')
    flights = payload['flights']
    airport_map = payload['airports']
    airline_map = payload['airlines']
    if not flights:
        return "did not find any results"
    return [_helper_build_arrival_departure_text(
                info, airline, airport_map, airline_map, is_arrival=False)
            for info in flights[:max_results]]
def demo_departures():
    """Manual smoke test: print today's ORD -> JFK departures."""
    pprint(departures_to_texts("ORD", "JFK"))
def demo_arrivals():
    """Manual smoke test: print today's ORD -> JFK arrivals on JetBlue."""
    pprint(arrivals_to_texts("ORD", "JFK", airline="B6"))
if __name__ == '__main__':
    # Manual smoke test; performs live FlightStats API calls.
    demo_departures()
|
radiosilence/pip | pip/__init__.py | Python | mit | 8,056 | 0.000993 | #!/usr/bin/env python
import os
import optparse
import sys
import re
import difflib
from pip.exceptions import InstallationError, CommandError, PipError
from pip.log import logger
from pip.util import get_installed_distributions, get_prog
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import create_main_parser
from pip.commands import commands, get_similar_commands, get_summaries
# The version as used in the setup.py and the docs conf.py
__version__ = "1.2.1.post1"
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).
    Enable by sourcing one of the completion shell scripts (bash or zsh).

    Prints the completion candidates to stdout and exits the process;
    returns silently when not invoked from a completion script.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # COMP_WORDS/COMP_CWORD follow the bash completion protocol: the
    # words on the command line and the index of the word being completed.
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands[subcommand_name](parser)
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        if current.startswith('-') or current.startswith('--'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)
            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    # Exit non-zero so the completion shell function knows we handled it.
    sys.exit(1)
def parseopts(args):
    """Parse *args* and resolve the requested subcommand.

    Returns ``(command_name, options, args, parser)``.  Prints help and
    exits for bare ``pip``/``pip help``; raises CommandError for an
    unknown command (with a "did you mean" suggestion when available).
    """
    parser = create_main_parser()
    parser.main = True  # so the help formatter knows
    # create command listing
    command_summaries = get_summaries()
    description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
    parser.description = '\n'.join(description)
    options, args = parser.parse_args(args)
    if options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()
    # pip || pip help || pip --help -> print_help()
    if not args or (args[0] == 'help' and len(args) == 1):
        parser.print_help()
        sys.exit()
    # NOTE: a second "if not args: raise CommandError(...)" branch used to
    # live here; it was unreachable because the branch above already exits
    # when args is empty, so it has been removed.
    command = args[0].lower()
    if command not in commands:
        guess = get_similar_commands(command)
        msg = ['unknown command "%s"' % command]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))
    return command, options, args, parser
def main(initial_args=None):
    """pip's console entry point: parse argv and run the chosen command.

    Returns the command's exit status (propagated to the shell by the
    ``__main__`` block).
    """
    if initial_args is None:
        initial_args = sys.argv[1:]
    autocomplete()
    try:
        cmd_name, options, args, parser = parseopts(initial_args)
    except PipError:
        # sys.exc_info() instead of "except ... as e" keeps this file
        # importable on very old Python 2 versions.
        e = sys.exc_info()[1]
        sys.stderr.write("ERROR: %s" % e)
        sys.stderr.write(os.linesep)
        sys.exit(1)
    command = commands[cmd_name](parser)  # see baseparser.Command
    return command.main(args[1:], options)
def bootstrap():
    """
    Bootstrapping function to be called from install-pip.py script.
    """
    # Self-upgrade: equivalent to running "pip install --upgrade pip".
    return main(['install', '--upgrade', 'pip'])
############################################################
## Writing freeze files
class FrozenRequirement(object):
    """One line of "pip freeze" output: a requirement pinned to the
    installed version, rendered with "-e" for editable VCS checkouts,
    optionally preceded by explanatory comment lines.
    """
    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments
    # Matches svn-revision style version suffixes, e.g. "-r1234".
    _rev_re = re.compile(r'-r(\d+)$')
    # Matches date-stamped version suffixes, e.g. "-20130101".
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
    @classmethod
    def from_dist(cls, dist, dependency_links, find_tags=False):
        """Build a FrozenRequirement from an installed distribution.

        VCS checkouts become editable requirements; plain installs whose
        version looks like an svn revision/date stamp are resolved back
        to an editable svn URL when possible.
        """
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        # Imported here (not at module level) to avoid a circular import.
        from pip.vcs import vcs, get_src_requirement
        if vcs.get_backend_name(location):
            editable = True
            try:
                req = get_src_requirement(dist, location, find_tags)
            except InstallationError:
                # Python 2.5-compatible spelling of "except ... as ex".
                ex = sys.exc_info()[1]
                logger.warn("Error when trying to get requirement for VCS system %s, falling back to uneditable format" % ex)
                req = None
            if req is None:
                # Fall back to a plain pinned requirement.
                logger.warn('Could not determine repository location of %s' % location)
                comments.append('## !! Could not determine repository location')
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            assert len(specs) == 1 and specs[0][0] == '=='
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            if ver_match or date_match:
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend(
                        ).get_location(dist, dependency_links)
                # NOTE(review): if svn_backend is falsy, svn_location is
                # never bound and the next line raises NameError — confirm
                # whether an svn backend is always registered here.
                if not svn_location:
                    logger.warn(
                        'Warning: cannot find svn location for %s' % req)
                    comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
                else:
                    comments.append('# Installing as editable to satisfy requirement %s:' % req)
                    if ver_match:
                        rev = ver_match.group(1)
                    else:
                        rev = '{%s}' % date_match.group(1)
                    editable = True
                    req = '%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist))
        return cls(dist.project_name, req, editable, comments)
    @staticmethod
    def egg_name(dist):
        """Return the egg name with any trailing "-pyX.Y" tag stripped."""
        name = dist.egg_name()
        match = re.search(r'-py\d\.\d$', name)
        if match:
            name = name[:match.start()]
        return name
    def __str__(self):
        """Render comments plus the (possibly editable) requirement line."""
        req = self.req
        if self.editable:
            req = '-e %s' % req
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
    # Propagate the command's exit status to the shell.
    exit = main()
    if exit:
        sys.exit(exit)
|
ranisalt/moita-migrant | migrant.py | Python | mit | 1,377 | 0.000726 | #!/usr/bin/env python
import argparse
import binascii
import datetime
import gzip
import json
import magic
import os
import pymongo
import sys
def read_gzip(filename):
    """Return the decompressed contents of a gzip file (bytes)."""
    with gzip.open(filename) as handle:
        return handle.read()
def read_plain(filename):
    """Return the contents of a plain-text file (text mode)."""
    with open(filename) as handle:
        return handle.read()
# Dispatch table: MIME type (as reported by libmagic) -> reader function.
# NOTE(review): byte-string keys assume this python-magic version returns
# bytes from magic.from_file — confirm against the installed version.
readers = {
    b'application/x-gzip': read_gzip,
    b'text/plain': read_plain,
}
def read(filename):
    """Detect *filename*'s MIME type, read it accordingly, decode to text."""
    mime = magic.from_file(filename, mime=True)
    return readers[mime](filename).decode()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', help='path to moita configuration file',
                        dest='moita', metavar='MOITA', required=True)
    parser.add_argument('filename', nargs='+')
    args = parser.parse_args()
    # Make the moita config module importable from its own directory.
    sys.path.append(os.path.dirname(args.moita))
    import config
    connection = pymongo.MongoClient()
    collection = connection[config.DATABASE].timetables
    for file in args.filename:
        content = json.loads(read(file))
        # The document id is hex-encoded in the file's base name
        # (everything before the first dot).
        identifier = binascii.unhexlify(
            os.path.basename(file).split('.', 1)[0]).decode()
        content['_id'] = identifier
        # Preserve the file's modification time as the update timestamp.
        mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file))
        content['updated_at'] = mtime
        # NOTE(review): Collection.save() was removed in pymongo 3/4 —
        # confirm the pinned pymongo version or migrate to replace_one.
        collection.save(content)
|
mrgloom/kepler-mapper | examples/digits/digits.py | Python | mit | 1,593 | 0.021971 | import km
# Load digits data
from sklearn import datasets
data, labels = datasets.load_digits().data, datasets.load_digits().target
# Create images for a custom tooltip array
# NOTE(review): StringIO module and scipy.misc.toimage are Python-2-era
# APIs (toimage was removed in SciPy >= 1.2) — this example only runs on
# the old stack it was written for.
import StringIO
from scipy.misc import imsave, toimage
import base64
tooltip_s = []
for image_data in data:
    output = StringIO.StringIO()
    img = toimage(image_data.reshape((8,8)))  # Data was a flat row of 64 "pixels".
    img.save(output, format="PNG")
    contents = output.getvalue()
    # Inline each digit image into the tooltip as a base64 data URI.
    tooltip_s.append( """ <img src="data:image/png;base64,%s"> """%base64.b64encode(contents).replace("\n","") )
    output.close()
tooltip_s = km.np.array(tooltip_s)  # need to make sure to feed it as a NumPy array, not a list
# Initialize to use t-SNE with 2 components (reduces data to 2 dimensions). Also note high overlap_percentage.
mapper = km.KeplerMapper(cluster_algorithm=km.cluster.DBSCAN(eps=0.3, min_samples=15),
                         reducer = km.manifold.TSNE(), nr_cubes=35, overlap_perc=0.9,
                         link_local=False, verbose=2)
# Fit and transform data
data = mapper.fit_transform(data)
# Create the graph
complex = mapper.map(data, dimension_index=[0,1], dimension_name="t-SNE(2) 2D")
# Create the visualizations (increased the graph_gravity for a tighter graph-look.)
# Tooltips with image data for every cluster member
mapper.visualize(complex, "keplermapper_digits_custom_tooltips.html", "Digits", graph_gravity=0.25, custom_tooltips=tooltip_s)
# Tooltips with the target y-labels for every cluster member
mapper.visualize(complex, "keplermapper_digits_ylabel_tooltips.html", "Digits", graph_gravity=0.25, custom_tooltips=labels)
TeamEOS/external_chromium_org | chrome/common/extensions/docs/server2/cron_servlet.py | Python | bsd-3-clause | 12,019 | 0.007904 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
  '''RenderServlet delegate that always hands back one pre-built
  ServerInstance, so every page rendered during a cron run shares it.
  '''
  def __init__(self, server_instance):
    self._server_instance = server_instance
  def CreateServerInstance(self):
    return self._server_instance
class _CronLogger(object):
  '''Wraps the logging.* methods to prefix them with 'cron' and flush
  immediately. The flushing is important because often these cron runs time
  out and we lose the logs.
  '''
  def info(self, msg, *args): self._log(logging.info, msg, args)
  def warning(self, msg, *args): self._log(logging.warning, msg, args)
  def error(self, msg, *args): self._log(logging.error, msg, args)
  def _log(self, logfn, msg, args):
    try:
      logfn('cron: %s' % msg, *args)
    finally:
      # Flush even when logging itself raises, so nothing is lost.
      logservice.flush()
# Shared module-level logger used by the cron helpers in this file.
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
  '''Runs a task |request_callback| named |title| for each item in |items|.
  |request_callback| must take an item and return a servlet response.
  Returns true if every item was successfully run, false if any return a
  non-200 response or raise an exception.
  '''
  _cronlog.info('%s: starting', title)
  success_count, failure_count = 0, 0
  timer = Timer()
  try:
    for i, item in enumerate(items):
      def error_message(detail):
        return '%s: error rendering %s (%s of %s): %s' % (
            title, item, i + 1, len(items), detail)
      try:
        response = request_callback(item)
        if response.status == 200:
          success_count += 1
        else:
          _cronlog.error(error_message('response status %s' % response.status))
          failure_count += 1
      except Exception as e:
        _cronlog.error(error_message(traceback.format_exc()))
        failure_count += 1
        # Deadline exceeded means the whole cron is out of time; abort.
        if IsDeadlineExceededError(e): raise
  finally:
    # Always log the summary, even when aborting mid-run.
    _cronlog.info('%s: rendered %s of %s with %s failures in %s',
        title, success_count, len(items), failure_count,
        timer.Stop().FormatElapsed())
  return success_count == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
  def __init__(self, request, delegate_for_test=None):
    Servlet.__init__(self, request)
    # Tests inject a stub delegate; production uses the real one.
    self._delegate = delegate_for_test or CronServlet.Delegate()
  class Delegate(object):
    '''CronServlet's runtime dependencies. Override for testing.
    '''
    def CreateBranchUtility(self, object_store_creator):
      return BranchUtility.Create(object_store_creator)
    def CreateHostFileSystemProvider(self,
                                     object_store_creator,
                                     max_trunk_revision=None):
      # max_trunk_revision pins the file system to a specific revision.
      return HostFileSystemProvider(object_store_creator,
                                    max_trunk_revision=max_trunk_revision)
    def CreateGithubFileSystemProvider(self, object_store_creator):
      return GithubFileSystemProvider(object_store_creator)
    def CreateGCSFileSystemProvider(self, object_store_creator):
      return CloudStorageFileSystemProvider(object_store_creator)
    def GetAppVersion(self):
      return GetAppVersion()
  def Get(self):
    '''Runs the cron, guaranteeing the logs are flushed before the process
    is killed on deadline.  Any top-level exception is logged, not raised.
    '''
    # Crons often time out, and if they do we need to make sure to flush the
    # logs before the process gets killed (Python gives us a couple of
    # seconds).
    #
    # So, manually flush logs at the end of the cron run. However, sometimes
    # even that isn't enough, which is why in this file we use _cronlog and
    # make it flush the log every time its used.
    logservice.AUTOFLUSH_ENABLED = False
    try:
      return self._GetImpl()
    except BaseException:
      # Deliberately broad: the flush in the finally clause must run.
      _cronlog.error('Caught top-level exception! %s', traceback.format_exc())
    finally:
      logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# t | he time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cro | nlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached f |
alok1974/compage | src/compage/logger.py | Python | mit | 5,536 | 0 | import os
import inspect
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
# Factory returning the current frame; callers pass the result to
# debugCaller() so log lines can name their caller.
TRACEBACK_INSPECTOR = inspect.currentframe
# Re-exported logging levels so users of this module need not import logging.
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
WARNING = logging.WARN
CRITICAL = logging.CRITICAL
def debugCaller(traceback=None):
    """Return ``[module_name, function_name]`` for the caller of *traceback*.

    *traceback* is a frame object (e.g. ``inspect.currentframe()``); the
    entry one level up the stack identifies who invoked the logging call.
    """
    caller = inspect.getouterframes(traceback)[1]
    filename, function_name = caller[1], caller[3]
    module_name = os.path.basename(filename).replace(".py", "")
    module_name = module_name.replace('<Script Block >', 'stdin')
    return [module_name, function_name]
class Logger(object):
    """Class-level logging facade.

    Messages are routed to the handler callables registered through the
    ``on*`` hooks or, when no hook is registered, to the stdlib
    ``logging`` module.  All state lives on the class; no instance is
    ever needed.

    Fixes over the previous revision:
    * Hooks are stored wrapped in ``staticmethod`` and invoked through the
      class.  The old ``cls().xxxFunc(msg)`` pattern created a throwaway
      Logger instance and let Python bind the registered plain function as
      a method, so the callback received that instance as a spurious first
      argument.
    * ``traceback`` is now a real ``@classmethod`` (it always used ``cls``).
    * ``getLogger`` now returns the logger it looks up.
    """
    level = INFO
    criticalFunc = None
    infoFunc = None
    warningFunc = None
    debugFunc = None
    errorFunc = None
    tracebackFunc = None
    separatorFunc = None
    spaceFunc = None

    # -- hook registration -------------------------------------------------
    @classmethod
    def onDebug(cls, func):
        """Register *func* (one str argument) to receive debug messages."""
        cls.debugFunc = staticmethod(func)

    @classmethod
    def onWarning(cls, func):
        """Register *func* (one str argument) to receive warning messages."""
        cls.warningFunc = staticmethod(func)

    @classmethod
    def onCritical(cls, func):
        """Register *func* (one str argument) to receive critical messages."""
        cls.criticalFunc = staticmethod(func)

    @classmethod
    def onError(cls, func):
        """Register *func* (one str argument) to receive error messages."""
        cls.errorFunc = staticmethod(func)

    @classmethod
    def onInfo(cls, func):
        """Register *func* (one str argument) to receive info messages."""
        cls.infoFunc = staticmethod(func)

    @classmethod
    def onTraceback(cls, func):
        """Register *func* (one str argument) to receive tracebacks."""
        cls.tracebackFunc = staticmethod(func)

    @classmethod
    def onSeparator(cls, func):
        """Register *func* (one str argument) to receive separator lines."""
        cls.separatorFunc = staticmethod(func)

    @classmethod
    def onSpace(cls, func):
        """Register *func* (no arguments) to receive blank-line requests."""
        cls.spaceFunc = staticmethod(func)

    # -- emission ----------------------------------------------------------
    @classmethod
    def warning(cls, msg):
        """Log *msg* at WARNING level."""
        if cls.level <= WARNING:
            msgComplete = cls._buildString(inspect.currentframe(), msg, WARNING)
            if cls.warningFunc:
                cls.warningFunc(msgComplete)
            else:
                logging.warning(msgComplete)

    @classmethod
    def info(cls, msg):
        """Log *msg* at INFO level."""
        if cls.level <= INFO:
            msgComplete = cls._buildString(inspect.currentframe(), msg, INFO)
            if cls.infoFunc:
                cls.infoFunc(msgComplete)
            else:
                logging.info(msgComplete)

    @classmethod
    def debug(cls, msg):
        """Log *msg* at DEBUG level."""
        if cls.level <= DEBUG:
            msgComplete = cls._buildString(inspect.currentframe(), msg, DEBUG)
            if cls.debugFunc:
                cls.debugFunc(msgComplete)
            else:
                logging.debug(msgComplete)

    @classmethod
    def error(cls, msg):
        """Log *msg* at ERROR level."""
        if cls.level <= ERROR:
            msgComplete = cls._buildString(inspect.currentframe(), msg, ERROR)
            if cls.errorFunc:
                cls.errorFunc(msgComplete)
            else:
                logging.error(msgComplete)

    @classmethod
    def critical(cls, msg):
        """Log *msg* at CRITICAL level."""
        if cls.level <= CRITICAL:
            msgComplete = cls._buildString(inspect.currentframe(), msg, CRITICAL)
            if cls.criticalFunc:
                cls.criticalFunc(msgComplete)
            else:
                logging.critical(msgComplete)

    @classmethod
    def traceback(cls, msg):
        """Log a full traceback, via the registered hook when present."""
        if cls.tracebackFunc:
            cls.tracebackFunc(msg)
        else:
            TracebackError(msg)

    # -- helpers -----------------------------------------------------------
    @classmethod
    def _buildString(cls, frame, msg, typeErr):
        """Build the display string ("[module] msg" or "[module::method] msg")
        depending on the severity of the message."""
        debugAsString = debugCaller(frame)
        if typeErr in [INFO, WARNING]:
            return "[%s] %s" % (debugAsString[0], msg)
        return "[%s::%s] %s" % (debugAsString[0], debugAsString[1], msg)

    @classmethod
    def getLogger(cls, loggerName):
        """Return the stdlib logger registered under *loggerName*."""
        return logging.getLogger(loggerName)

    @classmethod
    def setLevel(cls, level):
        """Set the minimum severity that will be emitted."""
        cls.level = level

    @classmethod
    def getLevel(cls):
        """Return the current minimum severity."""
        return cls.level

    @classmethod
    def addSeparator(cls, separator="-", length=75):
        """Emit a horizontal rule of *separator* repeated *length* times
        to help visually group related log output."""
        line = separator * length
        if cls.separatorFunc:
            cls.separatorFunc(line)
        else:
            logging.info(line)

    @classmethod
    def addSpace(cls):
        """Emit an empty line."""
        if cls.spaceFunc:
            cls.spaceFunc()
        else:
            logging.info("")
class TracebackError(object):
    """
    Output the whole traceback instead of only the last message and log
    it as Critical.

    Fix: the old unconditional ``import StringIO`` only exists on
    Python 2; fall back to ``io.StringIO`` on Python 3.
    """
    def __init__(self, e):
        """Capture the currently handled exception's traceback and log it.

        *e* is accepted for API compatibility but the text comes from
        ``traceback.print_exc`` (the active exception).
        """
        super(TracebackError, self).__init__()
        try:
            from StringIO import StringIO  # Python 2
        except ImportError:
            from io import StringIO  # Python 3
        import traceback
        fileHandler = StringIO()
        traceback.print_exc(file=fileHandler)
        self.trace = fileHandler.getvalue()
        Logger.critical(self.trace)

    def asString(self):
        """Return the captured traceback text."""
        return self.trace
if __name__ == "__main__":
    # Minimal manual smoke test: exercise every log level once.
    class Test(object):
        def __init__(self):
            pass
        def runTest(self):
            Logger.setLevel(DEBUG)
            Logger.info("info")
            Logger.critical("critical")
            Logger.debug("debug")
            Logger.warning("warning")
            Logger.error("error")
    aTest = Test()
    aTest.runTest()
fbentz/vedasboardgame | vedasboardgame/boardgame/admin.py | Python | mit | 95 | 0 | from django.co | ntrib import admin
from .models import BoardGame
admin | .site.register(BoardGame)
|
isard-vdi/isard | engine/engine/engine/services/balancers/balancer_interface.py | Python | agpl-3.0 | 262 | 0 | # Copyright 2018 the Isard-vd | i project authors:
# Alberto Larraz Dalmases
# Josep Maria Viñolas Auquer
# Daniel Criado Casas
# License: AGPLv3
class BalancerInterface(object):
def get_next(sel | f, **kwargs):
raise NotImplementedError
|
jscn/django | tests/auth_tests/test_hashers.py | Python | bsd-3-clause | 23,079 | 0.000955 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH,
BasePasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher,
check_password, get_hasher, identify_hasher, is_password_usable,
make_password,
)
from django.test import SimpleTestCase, mock
from django.test.utils import override_settings
from django.utils import six
from django.utils.encoding import force_bytes
# Optional hashing backends: set the module name to None when the backend
# is unavailable so the tests below can skip cleanly.
try:
    import crypt
except ImportError:
    crypt = None
else:
    # On some platforms (e.g. OpenBSD), crypt.crypt() always return None.
    if crypt.crypt('', '') is None:
        crypt = None
try:
    import bcrypt
except ImportError:
    bcrypt = None
try:
    import argon2
except ImportError:
    argon2 = None
class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
    # Single-iteration variant keeps iteration-sensitive tests fast.
    iterations = 1
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):
    def test_simple(self):
        """make_password defaults to PBKDF2-SHA256 and round-trips."""
        encoded = make_password('lètmein')
        self.assertTrue(encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        # Blank passwords
        blank_encoded = make_password('')
        self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
    def test_pbkdf2(self):
        """Explicit pbkdf2_sha256 with a fixed salt produces a known hash."""
        encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256')
        self.assertEqual(encoded, 'pbkdf2_sha256$30000$seasalt$VrX+V8drCGo68wlvy6rfu8i1d1pfkdeXA4LJkRGJodY=')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256')
        self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
    def test_sha1(self):
        """Salted SHA1 hashing round-trips and is identified correctly."""
        encoded = make_password('lètmein', 'seasalt', 'sha1')
        self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.MD5PasswordHasher'])
    def test_md5(self):
        """Salted MD5 hashing round-trips and is identified correctly."""
        encoded = make_password('lètmein', 'seasalt', 'md5')
        self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "md5")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'md5')
        self.assertTrue(blank_encoded.startswith('md5$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedMD5PasswordHasher'])
    def test_unsalted_md5(self):
        """Unsalted MD5, including the legacy "md5$$<hash>" spelling."""
        encoded = make_password('lètmein', '', 'unsalted_md5')
        self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
        # Alternate unsalted syntax
        alt_encoded = "md5$$%s" % encoded
        self.assertTrue(is_password_usable(alt_encoded))
        self.assertTrue(check_password('lètmein', alt_encoded))
        self.assertFalse(check_password('lètmeinz', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_md5')
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher'])
    def test_unsalted_sha1(self):
        """Unsalted SHA1; a raw (unprefixed) SHA1 digest must be rejected."""
        encoded = make_password('lètmein', '', 'unsalted_sha1')
        self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
        # Raw SHA1 isn't acceptable
        alt_encoded = encoded[6:]
        self.assertFalse(check_password('lètmein', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(crypt, "no crypt module to generate password.")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.CryptPasswordHasher'])
def test_crypt(self):
encoded = make_password('lètmei', 'ab', 'crypt')
self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
self.assertTrue(is_password_usab | le(encoded))
self.assertTrue(check_password('lètmei', encoded))
self.assertFalse(check_password('lètmeiz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
# Blank passwords
blank_encoded = make_password('', 'ab', 'crypt')
se | lf.assertTrue(blank_encoded.startswith('crypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_sha256(self):
encoded = make_password('lètmein', hasher='bcrypt_sha256')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt_sha256$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
# Verify that password truncation no longer works
password = (
'VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN'
)
encoded = make_password(password, hasher='bcrypt_sha256')
self.assertTrue(check_password(password, encoded))
self.assertFalse(check_password(password[:72], encoded))
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt_sha256')
self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
self.assertTrue(is_password_usable(blank_encod |
emmuchira/kps_erp | erpnext/selling/page/point_of_sale/point_of_sale.py | Python | gpl-3.0 | 2,765 | 0.027848 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.utils.nestedset import get_root_of
@frappe.whitelist()
def get_items(start, page_length, price_list, item_group, search_value=""):
	"""Return a page of sellable items for the POS item selector.

	Tries to interpret `search_value` first as a serial no, then a batch
	no, then a barcode; whichever matches fixes the item code and forces
	an exact-match query. Otherwise a fuzzy name/code search is done.

	Returns a dict with 'items' plus optional 'serial_no' / 'batch_no'.
	"""
	serial_no = ""
	batch_no = ""
	barcode = ""
	item_code = search_value
	# Fall back to the root group if the given group does not exist.
	if not frappe.db.exists('Item Group', item_group):
		item_group = get_root_of('Item Group')
	if search_value:
		# search serial no
		serial_no_data = frappe.db.get_value('Serial No', search_value, ['name', 'item_code'])
		if serial_no_data:
			serial_no, item_code = serial_no_data
		if not serial_no:
			batch_no_data = frappe.db.get_value('Batch', search_value, ['name', 'item'])
			if batch_no_data:
				batch_no, item_code = batch_no_data
		if not serial_no and not batch_no:
			barcode_data = frappe.db.get_value('Item', {'barcode': search_value}, ['name', 'barcode'])
			if barcode_data:
				item_code, barcode = barcode_data
	item_code, condition = get_conditions(item_code, serial_no, batch_no, barcode)
	lft, rgt = frappe.db.get_value('Item Group', item_group, ['lft', 'rgt'])
	# NOTE(review): the comment below looks historical -- the query no
	# longer uses SQL locate(); confirm before relying on result order.
	# locate function is used to sort by closest match from the beginning of the value
	# NOTE(review): start/page_length/lft/rgt are interpolated with
	# str.format instead of bound parameters; ensure callers coerce
	# start and page_length to int to avoid SQL injection.
	res = frappe.db.sql("""select i.name as item_code, i.item_name, i.image as item_image,
		item_det.price_list_rate, item_det.currency
		from `tabItem` i LEFT JOIN
			(select item_code, price_list_rate, currency from
				`tabItem Price` where price_list=%(price_list)s) item_det
		ON
			(item_det.item_code=i.name or item_det.item_code=i.variant_of)
		where
			i.disabled = 0 and i.has_variants = 0 and i.is_sales_item = 1
			and i.item_group in (select name from `tabItem Group` where lft >= {lft} and rgt <= {rgt})
			and {condition}
		limit {start}, {page_length}""".format(start=start,
			page_length=page_length, lft=lft, rgt=rgt, condition=condition),
		{
			'item_code': item_code,
			'price_list': price_list
		} , as_dict=1)
	res = {
		'items': res
	}
	# Pass the matched identifiers back so the POS UI can preselect them.
	if serial_no:
		res.update({
			'serial_no': serial_no
		})
	if batch_no:
		res.update({
			'batch_no': batch_no
		})
	return res
def get_conditions(item_code, serial_no, batch_no, barcode):
	"""Return the (bind value, WHERE fragment) pair for the item search."""
	escaped_code = frappe.db.escape(item_code)
	# A concrete identifier (serial no / batch / barcode) means exact match.
	if serial_no or batch_no or barcode:
		return escaped_code, "i.item_code = %(item_code)s"
	# Otherwise fall back to a fuzzy LIKE search on code and name.
	like_condition = """(i.item_code like %(item_code)s
		or i.item_name like %(item_code)s)"""
	return "%" + escaped_code + "%", like_condition
@frappe.whitelist()
def submit_invoice(doc):
	"""Create and submit a Sales Invoice from `doc`.

	Args:
		doc: the invoice payload, either as a JSON string (the usual
			whitelisted-HTTP form) or an already-deserialized dict.

	Returns:
		The submitted Sales Invoice document.
	"""
	if isinstance(doc, basestring):
		args = json.loads(doc)
	else:
		# Fix: previously `args` was only bound inside the isinstance
		# branch, so passing a dict raised NameError at doc.update(args).
		args = doc
	doc = frappe.new_doc('Sales Invoice')
	doc.update(args)
	doc.run_method("set_missing_values")
	doc.run_method("calculate_taxes_and_totals")
	doc.submit()
	return doc
|
savoirfairelinux/ring-daemon | tools/jamictrl/sippwrap.py | Python | gpl-3.0 | 5,229 | 0.001912 | #!/usr/bin/env python
#
# Copyright (C) 2012 by the Free Software Foundation, Inc.
#
# Author: Alexandre Savard <alexandre.savard@savoirfairelinux.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
class SippWrapper:
  """Wrapper that builds and launches a sipp command line.

  Usage: set the public attributes, call buildCommandLine(port), then
  launch(). All attribute names are kept for backward compatibility
  (including the misspelled 'enableTraceShormsg').
  """

  def __init__(self):
    self.commandLine = "./sipp"
    self.remoteServer = ""
    self.remotePort = ""
    self.localInterface = ""
    self.localPort = ""
    self.customScenarioFile = ""
    # Fix: this was initialized as "isUserAgenClient" (typo) while
    # buildCommandLine() reads "isUserAgentClient", causing an
    # AttributeError whenever a remote server was configured without a
    # custom scenario file.
    self.isUserAgentClient = True
    self.launchInBackground = False
    self.numberOfCall = 0
    self.numberOfSimultaneousCall = 0
    self.enableTraceMsg = False
    self.enableTraceShormsg = False
    self.enableTraceScreen = False
    self.enableTraceError = False
    self.enableTraceStat = False
    self.enableTraceCounts = False
    self.enableTraceRtt = False
    self.enableTraceLogs = False

  def buildCommandLine(self, port):
    """ Fill the command line arguments based on specified parameters """
    self.localPort = str(port)

    if not self.remotePort and not self.remoteServer:
      # Nothing to call: act as a user agent server.
      self.isUserAgentClient = False
    elif self.remotePort and not self.remoteServer:
      # A remote port without a server is meaningless; leave the command
      # line untouched.
      print("Error cannot have remote port specified with no server")
      return

    if self.remoteServer:
      self.commandLine += " " + self.remoteServer
      if self.remotePort:
        self.commandLine += ":" + self.remotePort

    if self.localInterface:
      self.commandLine += " -i " + self.localInterface

    if self.localPort:
      self.commandLine += " -p " + self.localPort

    # A custom scenario overrides the built-in uac/uas scenarios.
    if self.customScenarioFile:
      self.commandLine += " -sf " + self.customScenarioFile
    elif self.isUserAgentClient is True:
      self.commandLine += " -sn uac"
    elif self.isUserAgentClient is False:
      self.commandLine += " -sn uas"

    if self.launchInBackground:
      self.commandLine += " -bg"

    if self.numberOfCall:
      self.commandLine += " -m " + str(self.numberOfCall)

    if self.numberOfSimultaneousCall:
      self.commandLine += " -l " + str(self.numberOfSimultaneousCall)

    if self.enableTraceMsg:
      self.commandLine += " -trace_msg"

    if self.enableTraceShormsg:
      self.commandLine += " -trace_shortmsg"

    if self.enableTraceScreen:
      self.commandLine += " -trace_screen"

    if self.enableTraceError:
      self.commandLine += " -trace_err"

    if self.enableTraceStat:
      self.commandLine += " -trace_stat"

    if self.enableTraceCounts:
      self.commandLine += " -trace_counts"

    if self.enableTraceRtt:
      self.commandLine += " -trace_rtt"

    if self.enableTraceLogs:
      self.commandLine += " -trace_logs"

  def launch(self):
    """ Launch the sipp instance using the specified arguments """
    print(self.commandLine)
    return os.system(self.commandLine + " 2>&1 > /dev/null")
class SippScreenStatParser:
  """Parse the statistics reported by a sipp screen log.

  Exposes the most important values: whether any call succeeded and
  whether any call failed. Lines 39/40 are where sipp prints the
  'Successful call' / 'Failed call' counters in its screen dump.
  """

  def __init__(self, filename):
    print("Opening " + filename)
    # Fix: read via a context manager; the original left the file
    # handle open (open(...).readlines() with no close()).
    with open(filename, "r") as logfile:
      self.logfile = logfile.readlines()
    print(self.logfile[39])
    print(self.logfile[40])

  def isAnyFailedCall(self):
    """Return True if there is any failed call (or stats are missing)."""
    # TODO: Find a better way to determine which line to consider
    if "Failed call" not in self.logfile[40]:
      print("Error: Could not find 'Failed call' statistics")
      # We consider this as a failure
      return True
    return "1" in self.logfile[40]

  def isAnySuccessfulCall(self):
    """Return True if there is any successful call."""
    # TODO: Find a better way to determine which line to consider
    if "Successful call" not in self.logfile[39]:
      print("Error: Could not find 'Successful call' statistics")
      return False
    return "1" in self.logfile[39]
def test_result_parsing():
  """Smoke-test the parser against the first screen.log in the cwd."""
  screen_logs = [name for name in os.listdir("./") if "screen.log" in name]
  stats = SippScreenStatParser(screen_logs[0])
  assert(not stats.isAnyFailedCall())
  assert(stats.isAnySuccessfulCall())
|
sinpantuflas/aubio | waflib/extras/compat15.py | Python | gpl-3.0 | 7,478 | 0.050682 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
from waflib import ConfigSet,Logs,Options,Scripting,Task,Build,Configure,Node,Runner,TaskGen,Utils,Errors,Context
# Map the old waf 1.5 module names onto their waf 1.6 equivalents so that
# legacy wscripts doing e.g. "import Options" keep working unchanged.
sys.modules['Environment']=ConfigSet
ConfigSet.Environment=ConfigSet.ConfigSet
sys.modules['Logs']=Logs
sys.modules['Options']=Options
sys.modules['Scripting']=Scripting
sys.modules['Task']=Task
sys.modules['Build']=Build
sys.modules['Configure']=Configure
sys.modules['Node']=Node
sys.modules['Runner']=Runner
sys.modules['TaskGen']=TaskGen
sys.modules['Utils']=Utils
from waflib.Tools import c_preproc
sys.modules['preproc']=c_preproc
from waflib.Tools import c_config
sys.modules['config_c']=c_config
# Re-attach renamed/removed 1.5 attributes as aliases of their 1.6 forms.
ConfigSet.ConfigSet.copy=ConfigSet.ConfigSet.derive
ConfigSet.ConfigSet.set_variant=Utils.nada
Build.BuildContext.add_subdirs=Build.BuildContext.recurse
Build.BuildContext.new_task_gen=Build.BuildContext.__call__
Build.BuildContext.is_install=0
Node.Node.relpath_gen=Node.Node.path_from
# --- waf 1.5 -> 1.6 method shims ----------------------------------------
# Each helper below reproduces a removed 1.5 API and is re-attached to the
# corresponding 1.6 class; most emit a 'compat:' warning naming the
# modern replacement.
def name_to_obj(self,s,env=None):
	# 1.5 name; forwards to get_tgen_by_name (the env argument is ignored).
	Logs.warn('compat: change "name_to_obj(name, env)" by "get_tgen_by_name(name)"')
	return self.get_tgen_by_name(s)
Build.BuildContext.name_to_obj=name_to_obj
def env_of_name(self,name):
	# 1.5-style environment lookup; logs and returns None instead of raising.
	try:
		return self.all_envs[name]
	except KeyError:
		Logs.error('no such environment: '+name)
		return None
Build.BuildContext.env_of_name=env_of_name
def set_env_name(self,name,env):
	# Register a named environment on the configuration context.
	self.all_envs[name]=env
	return env
Configure.ConfigurationContext.set_env_name=set_env_name
def retrieve(self,name,fromenv=None):
	# Fetch a named environment, creating and preparing it on first use.
	try:
		env=self.all_envs[name]
	except KeyError:
		env=ConfigSet.ConfigSet()
		self.prepare_env(env)
		self.all_envs[name]=env
	else:
		if fromenv:Logs.warn("The environment %s may have been configured already"%name)
	return env
Configure.ConfigurationContext.retrieve=retrieve
# Plain method/attribute aliases (renames between 1.5 and 1.6).
Configure.ConfigurationContext.sub_config=Configure.ConfigurationContext.recurse
Configure.ConfigurationContext.check_tool=Configure.ConfigurationContext.load
Configure.conftest=Configure.conf
Configure.ConfigurationError=Errors.ConfigurationError
Options.OptionsContext.sub_options=Options.OptionsContext.recurse
Options.OptionsContext.tool_options=Context.Context.load
Options.Handler=Options.OptionsContext
Task.simple_task_type=Task.task_type_from_func=Task.task_factory
Task.TaskBase.classes=Task.classes
def setitem(self,key,value):
	# Transparently rename 1.5 'CCFLAGS*' keys to the 1.6 'CFLAGS*' form.
	if key.startswith('CCFLAGS'):
		key=key[1:]
	self.table[key]=value
ConfigSet.ConfigSet.__setitem__=setitem
@TaskGen.feature('d')
@TaskGen.before('apply_incpaths')
def old_importpaths(self):
	# D language: map the old 'importpaths' attribute onto 'includes'.
	if getattr(self,'importpaths',[]):
		self.includes=self.importpaths
from waflib import Context
eld=Context.load_tool
def load_tool(*k,**kw):
	# Wrap tool loading to alias 1.5 entry points (set_options/detect).
	ret=eld(*k,**kw)
	if'set_options'in ret.__dict__:
		Logs.warn('compat: rename "set_options" to options')
		ret.options=ret.set_options
	if'detect'in ret.__dict__:
		Logs.warn('compat: rename "detect" to "configure"')
		ret.configure=ret.detect
	return ret
Context.load_tool=load_tool
rev=Context.load_module
def load_module(path):
	# Wrap wscript loading to alias 1.5 names (set_options/srcdir/blddir).
	ret=rev(path)
	if'set_options'in ret.__dict__:
		Logs.warn('compat: rename "set_options" to "options" (%r)'%path)
		ret.options=ret.set_options
	if'srcdir'in ret.__dict__:
		Logs.warn('compat: rename "srcdir" to "top" (%r)'%path)
		ret.top=ret.srcdir
	if'blddir'in ret.__dict__:
		Logs.warn('compat: rename "blddir" to "out" (%r)'%path)
		ret.out=ret.blddir
	return ret
Context.load_module=load_module
# Wrap task_gen.post to translate 1.5 feature names/attributes before the
# real post() runs.
old_post=TaskGen.task_gen.post
def post(self):
	self.features=self.to_list(self.features)
	# 'cc' became 'c' in waf 1.6.
	if'cc'in self.features:
		Logs.warn('compat: the feature cc does not exist anymore (use "c")')
		self.features.remove('cc')
		self.features.append('c')
	# 'cstaticlib' split into 'cstlib' / 'cxxstlib'.
	if'cstaticlib'in self.features:
		Logs.warn('compat: the feature cstaticlib does not exist anymore (use "cstlib" or "cxxstlib")')
		self.features.remove('cstaticlib')
		self.features.append(('cxx'in self.features)and'cxxstlib'or'cstlib')
	# 'ccflags' was renamed to 'cflags'.
	if getattr(self,'ccflags',None):
		Logs.warn('compat: "ccflags" was renamed to "cflags"')
		self.cflags=self.ccflags
	return old_post(self)
TaskGen.task_gen.post=post
def waf_version(*k,**kw):
	# waf 1.5 API removed in 1.6; kept as a no-op that warns.
	Logs.warn('wrong version (waf_version was removed in waf 1.6)')
Utils.waf_version=waf_version
import os
@TaskGen.feature('c','cxx','d')
@TaskGen.before('apply_incpaths','propagate_uselib_vars')
@TaskGen.after('apply_link','process_source')
def apply_uselib_local(self):
	# Emulate the deprecated 'uselib_local' attribute: walk the local
	# library dependency graph breadth-first, add link flags/paths, order
	# link tasks, and propagate uselib variables and exported includes.
	env=self.env
	from waflib.Tools.ccroot import stlink_task
	self.uselib=self.to_list(getattr(self,'uselib',[]))
	self.includes=self.to_list(getattr(self,'includes',[]))
	names=self.to_list(getattr(self,'uselib_local',[]))
	get=self.bld.get_tgen_by_name
	seen=set([])
	tmp=Utils.deque(names)
	if tmp:
		Logs.warn('compat: "uselib_local" is deprecated, replace by "use"')
	while tmp:
		lib_name=tmp.popleft()
		if lib_name in seen:
			continue
		y=get(lib_name)
		y.post()
		seen.add(lib_name)
		# Only follow transitive deps through non-static link tasks.
		if getattr(y,'uselib_local',None):
			for x in self.to_list(getattr(y,'uselib_local',[])):
				obj=get(x)
				obj.post()
				if getattr(obj,'link_task',None):
					if not isinstance(obj.link_task,stlink_task):
						tmp.append(x)
		if getattr(y,'link_task',None):
			# Derive the -l name from the target path's basename.
			link_name=y.target[y.target.rfind(os.sep)+1:]
			if isinstance(y.link_task,stlink_task):
				env.append_value('STLIB',[link_name])
			else:
				env.append_value('LIB',[link_name])
			# Link after the dependency is built, and rebuild when it changes.
			self.link_task.set_run_after(y.link_task)
			self.link_task.dep_nodes+=y.link_task.outputs
			tmp_path=y.link_task.outputs[0].parent.bldpath()
			if not tmp_path in env['LIBPATH']:
				env.prepend_value('LIBPATH',[tmp_path])
			# Inherit uselib variables that carry no static-lib payload.
			for v in self.to_list(getattr(y,'uselib',[])):
				if not env['STLIB_'+v]:
					if not v in self.uselib:
						self.uselib.insert(0,v)
		if getattr(y,'export_includes',None):
			self.includes.extend(y.to_incnodes(y.export_includes))
@TaskGen.feature('cprogram','cxxprogram','cstlib','cxxstlib','cshlib','cxxshlib','dprogram','dstlib','dshlib')
@TaskGen.after('apply_link')
def apply_objdeps(self):
	# Emulate 1.5 'add_objects': depth-first collect object outputs of the
	# named task generators and feed them into this link task.
	names=getattr(self,'add_objects',[])
	if not names:
		return
	names=self.to_list(names)
	get=self.bld.get_tgen_by_name
	seen=[]
	while names:
		x=names[0]
		if x in seen:
			names=names[1:]
			continue
		y=get(x)
		# Push unseen transitive deps to the front so they post first.
		if getattr(y,'add_objects',None):
			added=0
			lst=y.to_list(y.add_objects)
			lst.reverse()
			for u in lst:
				if u in seen:continue
				added=1
				names=[u]+names
			if added:continue
		y.post()
		seen.append(x)
		for t in getattr(y,'compiled_tasks',[]):
			self.link_task.inputs.extend(t.outputs)
@TaskGen.after('apply_link')
def process_obj_files(self):
	# Append pre-built object files registered via add_obj_file().
	if not hasattr(self,'obj_files'):
		return
	for x in self.obj_files:
		node=self.path.find_resource(x)
		self.link_task.inputs.append(node)
@TaskGen.taskgen_method
def add_obj_file(self,file):
	# 1.5 helper: queue an object file and ensure process_obj_files runs.
	if not hasattr(self,'obj_files'):self.obj_files=[]
	if not'process_obj_files'in self.meths:self.meths.append('process_obj_files')
	self.obj_files.append(file)
# define()/undefine() wrappers: mirror HAVE_* defines into the env so old
# wscripts can keep testing conf.env['HAVE_X'].
old_define=Configure.ConfigurationContext.__dict__['define']
@Configure.conf
def define(self,key,val,quote=True):
	old_define(self,key,val,quote)
	if key.startswith('HAVE_'):
		self.env[key]=1
old_undefine=Configure.ConfigurationContext.__dict__['undefine']
@Configure.conf
def undefine(self,key):
	old_undefine(self,key)
	if key.startswith('HAVE_'):
		self.env[key]=0
def set_incdirs(self,val):
	# Write-only property shim: assigning 'export_incdirs' (1.5 name)
	# stores into 'export_includes' (1.6 name).
	Logs.warn('compat: change "export_incdirs" by "export_includes"')
	self.export_includes=val
TaskGen.task_gen.export_incdirs=property(None,set_incdirs)
|
patricklaw/pants | src/python/pants/backend/python/lint/docformatter/skip_field.py | Python | apache-2.0 | 840 | 0.002381 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.pytho | n.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
)
from pants.engine.target import BoolField
class SkipDocformatterField(BoolField):
    # Opt-out flag registered on every Python target type (see rules()),
    # letting users exclude individual targets from Docformatter runs.
    alias = "skip_docformatter"
    default = False
    help = "If true, don't run Docformatter on this target's code."
def rules():
    """Register the skip_docformatter field on all Python target types."""
    python_target_types = (
        PythonSourceTarget,
        PythonSourcesGeneratorTarget,
        PythonTestTarget,
        PythonTestsGeneratorTarget,
    )
    return [
        target_type.register_plugin_field(SkipDocformatterField)
        for target_type in python_target_types
    ]
|
arjunsatyapal/lantern | demo1/quiz/models.py | Python | apache-2.0 | 6,723 | 0.010858 | #!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Quiz."""
# Python imports
import base64
import logging
import md5
import operator
import os
import random
import re
import time
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import memcache
class QuizBaseModel(db.Model):
  """Base class for quiz models."""
class QuizTrunkModel(QuizBaseModel):
  """Maintains trunk for quiz model.
  Attributes:
    head: Maintains the head (current revision key) of a quiz.
  """
  head = db.StringProperty()
class QuizRevisionModel(QuizBaseModel):
  """Maintains list of revisions for a quiz.
  Quiz trunk associated with the revision is made parent of the model.
  Attributes:
    quiz_id: Id (key) for particular version of the quiz.
    time_stamp: Time stamp for a new revision.
    commit_message: Commit message associated with new version.
  """
  quiz_id = db.StringProperty()
  time_stamp = db.DateTimeProperty(auto_now=True)
  commit_message = db.StringProperty(default='Commiting a new version')
class QuizPropertyModel(QuizBaseModel):
  """Defines various properties for a quiz.
  Attributes:
    shuffle_questions: If set questions are presented in random order.
    min_options: minimum number of options to be presented.
    max_options: maximum number of options to be presented.
    min_questions: minimum number of questions required to complete the quiz.
      Used to track the progress.
    repeat_questions: If set questions are repeated.
    repeat_wrongly_answered_questions: If set wrongly answered questions are
      repeated.
  """
  shuffle_questions = db.BooleanProperty(default=True)
  min_options = db.IntegerProperty(default=2)
  max_options = db.IntegerProperty(default=10) # 0 implies all
  min_questions = db.IntegerProperty(default=0) # 0 implies all
  repeat_questions = db.BooleanProperty(default=False)
  repeat_wrongly_answered_questions = db.BooleanProperty(default=False)
class QuizModel(QuizBaseModel):
  """Represents a quiz.
  Attributes:
    difficulty_level: Difficulty level for the quiz (range 0-10).
    quiz_property: Reference to property associated with quiz.
    title: Title of the quiz.
    tags: Associated tags with quiz.
    trunk: Reference to associated trunk with the quiz.
    introduction: Introduction text to be shown on the start page for quiz.
  """
  # implicit id
  difficulty_level = db.RatingProperty(default=5)
  quiz_property = db.ReferenceProperty(QuizPropertyModel)
  title = db.StringProperty()
  tags = db.ListProperty(db.Category)
  trunk = db.ReferenceProperty(QuizTrunkModel)
  introduction = db.StringProperty()
class ChoiceModel(QuizBaseModel):
  """Represents a choice/option provided to user for a question model.
  Attributes:
    body: Body of the choice.
    message: Message to be displayed when choice is selected.
      May act like a hint.
    is_correct: If the choice selected is correct.
  """
  # implicit id
  body = db.TextProperty()
  message = db.StringProperty()
  is_correct = db.BooleanProperty(default=False)
  def dump_to_dict(self):
    """Dumps choice to a dictionary for passing around as JSON object."""
    # Only the body and datastore key are exposed; is_correct and the
    # hint message are deliberately kept server-side.
    data_dict = {'body': self.body,
                 'id': str(self.key())}
    return data_dict
class QuestionModel(QuizBaseModel):
  """Represents a question.
  Attributes:
    body: Text associated with the question.
    choices: List of keys of the possible choices (ChoiceModel).
    shuffle_choices: If set choices are randomly shuffled.
    hints: Ordered list of progressive hints.
  """
  # implicit id
  body = db.TextProperty()
  choices = db.ListProperty(db.Key)
  shuffle_choices = db.BooleanProperty(default=True)
  hints = db.StringListProperty()

  def dump_to_dict(self):
    """Dumps the question model to a dictionary for passing
    around as JSON object.

    Returns:
      Dict with 'id', 'body', 'hints' and 'choices'; choices are
      shuffled in place when shuffle_choices is set.
    """
    data_dict = {'id': str(self.key()),
                 'body': self.body,
                 'hints': self.hints,
                 'choices': [db.get(el).dump_to_dict() for el in self.choices]
                 }
    if self.shuffle_choices and data_dict['choices']:
      # Fix: random.shuffle shuffles in place and returns None; the old
      # code assigned its return value, wiping out the choices list
      # (and 'random' was never imported, raising NameError).
      random.shuffle(data_dict['choices'])
    return data_dict
class QuizQuestionListModel(QuizBaseModel):
  """Maintains a list of question with its quiz id.
  This is necessary because questions may be shared between different quizes.
  Attributes:
    quiz: Reference to quiz object.
    question: Reference to question object associated with quiz.
    time_stamp: Time stamp.
  """
  quiz = db.ReferenceProperty(QuizModel)
  question = db.ReferenceProperty(QuestionModel)
  time_stamp = db.DateTimeProperty(auto_now_add=True)
class ResponseModel(QuizBaseModel):
  """Stores response data required for producing next question.
  Attributes:
    session_id: Session Identifier.
    answered_correctly: Set if the response resulted in correct answer.
    question: Reference to question being answered.
    quiz: Reference to associated quiz.
    quiz_trunk: Reference to associated quiz trunk.
    time_stamp: Time stamp of the response
    attempts: Number of attempts so far, useful for scoring.
  """
  session_id = db.StringProperty(required=True)
  # NOTE(review): db.Key lands in BooleanProperty's verbose_name slot
  # here -- looks unintentional; confirm before changing.
  answered_correctly = db.BooleanProperty(db.Key)
  question = db.ReferenceProperty(QuestionModel)
  quiz = db.ReferenceProperty(QuizModel)
  quiz_trunk = db.ReferenceProperty(QuizTrunkModel)
  time_stamp = db.DateTimeProperty(auto_now=True)
  attempts = db.IntegerProperty(default=0)
class QuizScoreModel(QuizBaseModel):
  """Stores progress status associated with a quiz and session.
  Both score and progress are out of 100.
  Attributes:
    session_id: Session Identifier.
    quiz: Reference to associated quiz.
    quiz_trunk: Reference to associated quiz trunk.
    score: Current score.
    progress: Current progress status
    questions_attempted: Number of questions attempted so far.
  """
  quiz_trunk = db.ReferenceProperty(QuizTrunkModel)
  session_id = db.StringProperty(required=True)
  quiz = db.ReferenceProperty(QuizModel)
  score = db.FloatProperty(default=0.0)
  progress = db.FloatProperty(default=0.0)
  questions_attempted = db.IntegerProperty(default=0)
|
econti/quora-clone | quora/questions/migrations/0001_initial.py | Python | mit | 3,169 | 0.003471 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-20 03:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.1 makemigrations; do not hand-edit --
    # schema changes belong in a new migration.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(blank=True, max_length=255, null=True)),
                ('content', models.TextField(blank=True, max_length=4000, null=True)),
                ('status', models.CharField(choices=[(b'D', b'Draft'), (b'P', b'Published')], default=b'D', max_length=1)),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                ('create_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('update_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-create_date',),
                'verbose_name': 'Question',
                'verbose_name_plural': 'Questions',
            },
        ),
        migrations.CreateModel(
            name='QuestionComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=500)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questions.Question')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('date',),
                'verbose_name': 'Question Comment',
                'verbose_name_plural': 'Question Comments',
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.CharField(max_length=50)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questions.Question')),
            ],
            options={
                'verbose_name': 'Tag',
                'verbose_name_plural': 'Tags',
            },
        ),
        migrations.AlterUniqueTogether(
            name='tag',
            unique_together=set([('tag', 'question')]),
        ),
        migrations.AlterIndexTogether(
            name='tag',
            index_together=set([('tag', 'question')]),
        ),
    ]
|
huyphan/pyyawhois | test/record/parser/test_response_whois_centralnic_com_hu_com_status_registered.py | Python | mit | 6,533 | 0.00199 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.centralnic.com/hu.com/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisCentralnicComHuComStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.centralnic.com/hu.com/status_registered.txt"
host = "whois.centralnic.com"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, ["pendi | ngDelete", "pendingDelete"])
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
eq_(self.record.domain, "porn.hu.com")
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 2)
eq_(self.record.nameservers[0].__class__.__nam | e__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.sedoparking.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.sedoparking.com")
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(len(self.record.admin_contacts), 1)
eq_(self.record.admin_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.admin_contacts[0].type, yawhois.record.Contact.TYPE_ADMINISTRATIVE)
eq_(self.record.admin_contacts[0].id, "RWG000000004273D")
eq_(self.record.admin_contacts[0].name, "Gintautas Liaskus")
eq_(self.record.admin_contacts[0].organization, "G.Liaskaus firma INFOMEGA")
eq_(self.record.admin_contacts[0].address, "Kapsu 32-53")
eq_(self.record.admin_contacts[0].city, "Vilnius")
eq_(self.record.admin_contacts[0].zip, "02167")
eq_(self.record.admin_contacts[0].state, None)
eq_(self.record.admin_contacts[0].country, None)
eq_(self.record.admin_contacts[0].country_code, "LT")
eq_(self.record.admin_contacts[0].phone, "+370.52711457")
eq_(self.record.admin_contacts[0].fax, "+370.52784278")
eq_(self.record.admin_contacts[0].email, "infotau@infotau.lt")
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
eq_(self.record.created_on.__class__.__name__, 'datetime')
eq_(self.record.created_on, time_parse('2012-11-28 17:46:03 UTC'))
def test_registrar(self):
eq_(self.record.registrar.__class__.__name__, 'Registrar')
eq_(self.record.registrar.id, "H65658")
eq_(self.record.registrar.name, None)
eq_(self.record.registrar.organization, "101Domain, Inc.")
eq_(self.record.registrar.url, "http://www.101domain.com")
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(len(self.record.registrant_contacts), 1)
eq_(self.record.registrant_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.registrant_contacts[0].type, yawhois.record.Contact.TYPE_REGISTRANT)
eq_(self.record.registrant_contacts[0].id, "RWG000000004273D")
eq_(self.record.registrant_contacts[0].name, "Gintautas Liaskus")
eq_(self.record.registrant_contacts[0].organization, "G.Liaskaus firma INFOMEGA")
eq_(self.record.registrant_contacts[0].address, "Kapsu 32-53")
eq_(self.record.registrant_contacts[0].city, "Vilnius")
eq_(self.record.registrant_contacts[0].zip, "02167")
eq_(self.record.registrant_contacts[0].state, None)
eq_(self.record.registrant_contacts[0].country, None)
eq_(self.record.registrant_contacts[0].country_code, "LT")
eq_(self.record.registrant_contacts[0].phone, "+370.52711457")
eq_(self.record.registrant_contacts[0].fax, "+370.52784278")
eq_(self.record.registrant_contacts[0].email, "infotau@infotau.lt")
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(len(self.record.technical_contacts), 1)
eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.technical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL)
eq_(self.record.technical_contacts[0].id, "RWG000000004273D")
eq_(self.record.technical_contacts[0].name, "Gintautas Liaskus")
eq_(self.record.technical_contacts[0].organization, "G.Liaskaus firma INFOMEGA")
eq_(self.record.technical_contacts[0].address, "Kapsu 32-53")
eq_(self.record.technical_contacts[0].city, "Vilnius")
eq_(self.record.technical_contacts[0].zip, "02167")
eq_(self.record.technical_contacts[0].state, None)
eq_(self.record.technical_contacts[0].country, None)
eq_(self.record.technical_contacts[0].country_code, "LT")
eq_(self.record.technical_contacts[0].phone, "+370.52711457")
eq_(self.record.technical_contacts[0].fax, "+370.52784278")
eq_(self.record.technical_contacts[0].email, "infotau@infotau.lt")
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2014-02-11 00:16:13 UTC'))
def test_domain_id(self):
eq_(self.record.domain_id, "CNIC-DO970405")
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2013-11-28 23:59:59 UTC'))
    def test_disclaimer(self):
        # The registry's boilerplate disclaimer must be returned verbatim.
        eq_(self.record.disclaimer, "This whois service is provided by CentralNic Ltd and only contains information pertaining to Internet domain names we have registered for our customers. By using this service you are agreeing (1) not to use any information presented here for any purpose other than determining ownership of domain names, (2) not to store or reproduce this data in any way, (3) not to use any high-volume, automated, electronic processes to obtain data from this service. Abuse of this service is monitored and actions in contravention of these terms will result in being permanently blacklisted. All data is (c) CentralNic Ltd https://www.centralnic.com/")
|
grnet/synnefo | snf-admin-app/synnefo_admin/admin/tests/projects.py | Python | gpl-3.0 | 1,582 | 0.001274 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#import logging
from astakos.im.models import Resource
from synnefo_admin.admin.resources.projects.utils import get_project_quota_category
from .common import AdminTestCase
class TestAdminProjects(AdminTestCase):

    """Test suite for project-related tests."""

    def test_quota(self):
        """Test if project quota are measured properly."""
        # Get the reported description of the resource.
        # (Fixed: stray dataset artifacts had corrupted two lines below.)
        resource = Resource.objects.get(name=u"σέρβις1.ρίσορς11")
        desc = resource.report_desc
        # Get the member and project quota.
        member_quota = get_project_quota_category(self.project, "member")
        project_quota = get_project_quota_category(self.project, "limit")
        # Compare them to the ones in the application.
        self.assertEqual(member_quota, [(desc, '512')])
        self.assertEqual(project_quota, [(desc, '1024')])
|
pombredanne/msgpack-numpy | setup.py | Python | bsd-3-clause | 1,407 | 0.022743 | #!/usr/bin/env python
import sys, os
from glob import glob
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
NAME = 'msgpack-numpy'
VERSION = '0.3.6'
AUTHOR = 'Lev Givon'
AUTHOR_EMAIL = 'lev@columbia.edu'
URL = 'https://github.com/lebedov/msgpack-numpy'
DESCRIPTION = 'Numpy data serialization using msgpack'
LONG_DESCRIPTION = DESCRIPTION
DOWNLOAD_URL = URL
LICENSE = 'BSD'
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development']
if __name__ == "__main__":
if os.path.exists( | 'MANIFEST'):
os.remove('MANIFEST')
setup(
name = NAME,
version = VERSION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
license = LICENSE,
classifiers = CLASSIFIERS,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
url = URL,
py_modules = ['msgpack_numpy'],
install_requires | = ['numpy',
'msgpack-python>=0.3.0']
)
|
s20121035/rk3288_android5.1_repo | external/deqp/android/scripts/GenAndroidCTSXML.py | Python | gpl-3.0 | 5,485 | 0.025889 | import argparse
import string
class TestGroup:
    """
    A node in the test hierarchy. A child group registers itself with its
    parent on construction; leaf tests live in ``testCases``.
    """

    def __init__(self, name, parent=None):
        self.parent = parent
        self.name = name
        self.testGroups = {}
        self.testCases = {}
        if parent:
            assert name not in parent.testGroups
            parent.testGroups[name] = self

    def getName(self):
        return self.name

    def getPath(self):
        # Dotted path from the root group down to this group.
        if not self.parent:
            return self.name
        return self.parent.getPath() + "." + self.name

    def hasGroup(self, groupName):
        return groupName in self.testGroups

    def getGroup(self, groupName):
        return self.testGroups[groupName]

    def hasTest(self, testName):
        return testName in self.testCases

    def getTest(self, testName):
        return self.testCases[testName]

    def hasTestCases(self):
        return bool(self.testCases)

    def hasTestGroups(self):
        return bool(self.testGroups)

    def getTestCases(self):
        return self.testCases.values()

    def getTestGroups(self):
        return self.testGroups.values()
class TestCase:
    """
    A single leaf test; registers itself with its parent group's
    ``testCases`` on construction.
    """

    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
        assert name not in parent.testCases
        parent.testCases[name] = self

    def getPath(self):
        return "%s.%s" % (self.parent.getPath(), self.name)

    def getName(self):
        return self.name
def addGroupToHierarchy(rootGroup, path):
    """Create (if needed) every group along dotted *path* under *rootGroup*."""
    components = string.split(path, ".")
    assert components[0] == rootGroup.getName()
    current = rootGroup
    for component in components[1:]:
        if current.hasGroup(component):
            current = current.getGroup(component)
        else:
            current = TestGroup(component, parent=current)
def addTestToHierarchy(rootGroup, path):
    """Insert a TestCase at dotted *path*, creating intermediate groups.

    (Fixed: stray dataset artifacts had corrupted the string.split call.)
    """
    pathComponents = string.split(path, ".")
    currentGroup = rootGroup
    assert pathComponents[0] == rootGroup.getName()
    for i in range(1, len(pathComponents)):
        component = pathComponents[i]
        if i == len(pathComponents) - 1:
            # Last component is the test case itself.
            TestCase(component, currentGroup)
        else:
            if currentGroup.hasGroup(component):
                currentGroup = currentGroup.getGroup(component)
            else:
                currentGroup = TestGroup(component, parent=currentGroup)
def loadTestHierarchy(input):
    """
    Parse a caselist stream of "GROUP: ..." / "TEST: ..." lines into a
    TestGroup/TestCase tree and return the root group.
    """
    header = input.readline()
    # First line must declare the root group.
    assert header.startswith("GROUP: ")
    rootGroup = TestGroup(header[len("GROUP: "):-1])
    for line in input:
        if line.startswith("GROUP: "):
            addGroupToHierarchy(rootGroup, line[len("GROUP: "):-1])
        elif line.startswith("TEST: "):
            addTestToHierarchy(rootGroup, line[len("TEST: "):-1])
        else:
            assert False
    return rootGroup
def hasFilteredCases(group, includeTests):
    """Return True if *group* (recursively) contains a test whose path is in *includeTests*."""
    if any(case.getPath() in includeTests for case in group.getTestCases()):
        return True
    return any(hasFilteredCases(sub, includeTests)
               for sub in group.getTestGroups())
def addFilteredTest(parent, group, includeTests):
    """Copy into *parent* the children of *group* that survive the filter."""
    for subGroup in group.getTestGroups():
        if hasFilteredCases(subGroup, includeTests):
            newGroup = TestGroup(subGroup.getName(), parent)
            addFilteredTest(newGroup, subGroup, includeTests)
    for case in group.getTestCases():
        if case.getPath() in includeTests:
            TestCase(case.getName(), parent)
def filterTests(includeTests, group):
    """Return a new hierarchy containing only the tests listed in *includeTests*."""
    filteredRoot = TestGroup(group.getName())
    addFilteredTest(filteredRoot, group, includeTests)
    return filteredRoot
def writeAndroidCTSTest(test, output):
    """Emit a single <Test/> element for *test* to *output*."""
    output.write('<Test name="{}" />\n'.format(test.getName()))
def writeAndroidCTSTestCase(group, output):
    """Emit a <TestCase> element for a leaf group containing only tests."""
    assert group.hasTestCases()
    assert not group.hasTestGroups()
    output.write('<TestCase name="%s">\n' % group.getName())
    for case in group.getTestCases():
        writeAndroidCTSTest(case, output)
    output.write('</TestCase>\n')
def writeAndroidCTSTestSuite(group, output):
    """
    Emit a <TestSuite> element: leaf child groups become <TestCase>,
    non-leaf children recurse. Empty child groups are skipped.
    """
    output.write('<TestSuite name="%s">\n' % group.getName())
    for child in group.getTestGroups():
        if child.hasTestCases():
            # A group may hold either tests or groups, never both.
            assert not child.hasTestGroups()
            writeAndroidCTSTestCase(child, output)
        elif child.hasTestGroups():
            writeAndroidCTSTestSuite(child, output)
    output.write('</TestSuite>\n')
def writeAndroidCTSFile(rootGroup, output, mustpass, name="dEQP-GLES3", appPackageName="com.drawelements.deqp.gles3"):
    """Write a complete Android CTS <TestPackage> XML for the tests in *mustpass*."""
    output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    header = '<TestPackage name="%s" appPackageName="%s" testType="deqpTest">\n' % (name, appPackageName)
    output.write(header)
    writeAndroidCTSTestSuite(filterTests(mustpass, rootGroup), output)
    output.write('</TestPackage>\n')
if __name__ == "__main__":
    # Command-line driver: convert a txt caselist into an Android CTS XML file.
    argParser = argparse.ArgumentParser()
    argParser.add_argument('input', type=argparse.FileType('r'), help="Input dEQP test hierarchy in txt format.")
    argParser.add_argument('output', type=argparse.FileType('w'), help="Output file for Android CTS test file.")
    argParser.add_argument('--name', dest="name", type=str, required=True, help="Name of the test package")
    argParser.add_argument('--package', dest="package", type=str, required=True, help="Name of the app package")
    argParser.add_argument('--must-pass', dest="mustpass", type=argparse.FileType('r'), required=True, help="Must pass file")
    args = argParser.parse_args()
    hierarchy = loadTestHierarchy(args.input)
    mustPassSet = set(line.rstrip() for line in args.mustpass.readlines())
    writeAndroidCTSFile(hierarchy, args.output, name=args.name, appPackageName=args.package, mustpass=mustPassSet)
|
spixi/wesnoth | data/tools/wesnoth/wmlparser3.py | Python | gpl-2.0 | 27,879 | 0.002152 | #!/usr/bin/env python3
# encoding: utf-8
"""
This parser uses the --preprocess option of wesnoth so a working
wesnoth executable must be available at runtime if the WML to parse
contains preprocessing directives.
Pure WML can be parsed as is.
For example:
wml = ""
[unit]
id=elve
name=Elve
[abilities]
[damage]
id=Ensnare
[/dama ge]
[/abilities]
[/unit]
""
p = Parser()
cfg = p.parse_text(wml)
for unit in cfg.get_all(tag = "unit"):
print(unit.get_text_val("id"))
print(unit.get_text_val("name"))
for abilities in unit.get_all(tag = "abilitities"):
for ability in abilities.get_all(tag = ""):
print(ability.get_name())
print(ability.get_text_v | al("id"))
Because no preprocessing is required, we did not have to pass the
location | of the wesnoth executable to Parser.
The get_all method always returns a list over matching tags or
attributes.
The get_name method can be used to get the name and the get_text_val
method can be used to query the value of an attribute.
"""
import os, glob, sys, re, subprocess, argparse, tempfile, shutil
import atexit
tempdirs_to_clean = []
tmpfiles_to_clean = []

@atexit.register
def cleaner():
    """Remove every temp dir/file registered during this run (runs at exit)."""
    for path in tempdirs_to_clean:
        shutil.rmtree(path, ignore_errors=True)
    for path in tmpfiles_to_clean:
        os.remove(path)
class WMLError(Exception):
    """
    Catch this exception to retrieve the first error message from
    the parser.
    """
    def __init__(self, parser=None, message=None):
        # Bug fix: previously all attributes (including message) were only
        # set when a parser was given, so WMLError(message=...) lost its
        # message and __str__ raised AttributeError. Default them to None.
        self.line = parser.parser_line if parser else None
        self.wml_line = parser.last_wml_line if parser else None
        self.preprocessed = parser.preprocessed if parser else None
        self.message = message

    def __str__(self):
        return """WMLError:
        %s %s
        %s
        %s
""" % (str(self.line), self.preprocessed, self.wml_line, self.message)
class StringNode:
    """
    One fragment of an attribute's value. A single WML string may be
    assembled from several translatable pieces, so a value is modelled as
    a list of StringNode instances, each with its own textdomain.
    """

    def __init__(self, data: bytes):
        # None marks the fragment as non-translatable.
        self.textdomain = None
        self.data = data

    def wml(self) -> bytes:
        return self.data if self.data else b""

    def debug(self):
        text = self.data.decode("utf8", "ignore")
        if self.textdomain:
            return "_<%s>'%s'" % (self.textdomain, text)
        return "'%s'" % text

    def __str__(self):
        return "StringNode({})".format(self.debug())

    def __repr__(self):
        return str(self)
class AttributeNode:
    """
    A WML attribute. For example the "id=Elfish Archer" in:
        [unit]
            id=Elfish Archer
        [/unit]
    """

    def __init__(self, name, location=None):
        self.name = name
        self.location = location
        self.value = []  # list of StringNode fragments

    def wml(self) -> bytes:
        # Quote the value and escape embedded quotes as doubled quotes.
        escaped = [v.wml().replace(b"\"", b"\"\"") for v in self.value]
        return self.name + b"=\"" + b"".join(escaped) + b"\""

    def debug(self):
        joined = " .. ".join(v.debug() for v in self.value)
        return self.name.decode("utf8") + "=" + joined

    def get_text(self, translation=None) -> str:
        """
        Return the value as text. If *translation* is given it is called
        with (string, textdomain) for each fragment and its result used.
        """
        pieces = []
        for fragment in self.value:
            text = fragment.data.decode("utf8", "ignore")
            if translation:
                text = translation(text, fragment.textdomain)
            pieces.append(text)
        return "".join(pieces)

    def get_binary(self):
        """Return the unmodified binary representation of the value."""
        return b"".join(fragment.data for fragment in self.value)

    def get_name(self):
        return self.name.decode("utf8")

    def __str__(self):
        return "AttributeNode({})".format(self.debug())

    def __repr__(self):
        return str(self)
class TagNode:
"""
A WML tag. For example the "unit" in this example:
[unit]
id=Elfish Archer
[/unit]
"""
def __init__(self, name, location=None):
self.name = name
self.location = location
# List of child elements, which are either of type TagNode or
# AttributeNode.
self.data = []
self.speedy_tags = {}
def wml(self) -> bytes:
"""
Returns a (binary) WML representation of the entire node.
All attribute values are enclosed in quotes and quotes are
escaped (as double quotes). Note that no other escaping is
performed (see the BinaryWML specification for additional
escaping you may require).
"""
s = b"[" + self.name + b"]\n"
for sub in self.data:
s += sub.wml() + b"\n"
s += b"[/" + self.name.lstrip(b'+') + b"]\n"
return s
def debug(self):
s = "[%s]\n" % self.name.decode("utf8")
for sub in self.data:
for subline in sub.debug().splitlines():
s += " %s\n" % subline
s += "[/%s]\n" % self.name.decode("utf8").lstrip('+')
return s
def get_all(self, **kw):
"""
This gets all child tags or child attributes of the tag.
For example:
[unit]
name=A
name=B
[attack]
[/attack]
[attack]
[/attack]
[/unit]
unit.get_all(att = "name")
will return two nodes for "name=A" and "name=B"
unit.get_all(tag = "attack")
will return two nodes for the two [attack] tags.
unit.get_all()
will return 4 nodes for all 4 sub-elements.
unit.get_all(att = "")
Will return the two attribute nodes.
unit.get_all(tag = "")
Will return the two tag nodes.
If no elements are found an empty list is returned.
"""
if len(kw) == 1 and "tag" in kw and kw["tag"]:
return self.speedy_tags.get(kw["tag"].encode("utf8"), [])
r = []
for sub in self.data:
ok = True
for k, v in list(kw.items()):
v = v.encode("utf8")
if k == "tag":
if not isinstance(sub, TagNode):
ok = False
elif v != b"" and sub.name != v:
ok = False
elif k == "att":
if not isinstance(sub, AttributeNode):
ok = False
elif v != b"" and sub.name != v:
ok = False
if ok:
r.append(sub)
return r
def get_text_val(self, name, default=None, translation=None, val=-1):
"""
Returns the value of the specified attribute. If the attribute
is given multiple times, the value number val is returned (default
behaviour being to return the last value). If the
attribute is not found, the default parameter is returned.
If a translation is specified, it should be a function which
when passed a unicode string and text-domain returns a
translation of the unicode string. The easiest way is to pass
it to gettext.translation if you have the binary message
catalogues loaded.
"""
x = self.get_all(att=name)
if not x: return default
return x[val].get_text(translation)
def get_binary(self, name, default=None):
"""
Returns the unmodified binary data for the first attribute
of the given name or the passed default value if it is not
found.
"""
x = self.get_all(att=name)
if not x: re |
pkrusche/bsponmpi_template | site_scons/toolsets/clang.py | Python | mit | 1,021 | 0.039177 | import os
###############################################################################
# Add options to an environment to use Clang
###############################################################################
#
# Required options in root env:
#   mode      : 'debug' or 'release'
#   debuginfo : true or false to include debug info also in release version
#   profile   : true or false to enable/disable gprof support
#
def generate(root):
    """
    Configure SCons environment *root* to compile and link with Clang.

    Required options in root: 'mode' ('debug' or 'release'), 'debuginfo'
    (also include debug info in release builds), 'profile' (gprof support).
    """
    root['CC'] = 'clang'
    root['CXX'] = 'clang++'
    root['LINK'] = 'clang++'
    # Propagate TERM so clang keeps colorized diagnostics.
    # ('in' replaces the Python-2-only dict.has_key().)
    if 'TERM' in os.environ:
        root['ENV']['TERM'] = os.environ['TERM']
    mode = root['mode']
    debuginfo = root['debuginfo']
    profile = root['profile']
    if mode == 'debug':
        root.Append(
            CCFLAGS=' -g -O0',
        )
    elif mode == 'release':
        fast = '-O3'
        if debuginfo:
            root.Append(
                CCFLAGS=' -g ',
                LINKFLAGS=' -g ',
            )
        if profile:
            root.Append(
                CCFLAGS=' -pg -O3',
                LINKFLAGS=' -pg',
            )
        else:
            root.Append(
                CCFLAGS=fast,
            )
    # Bug fix: the original ended with root.Replace(LINK='g++'), a leftover
    # from the GCC toolset that silently linked with g++ instead of clang++.
|
CarlosMart626/umusicfy | umusicfy/umusicfy/urls.py | Python | apache-2.0 | 1,887 | 0.00212 | """umusicfy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import t | he include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
# Configur | ation API Router
from rest_framework import routers
from songs.views import ArtistViewSet, AlbumViewSet, SongViewSet
router = routers.DefaultRouter()
router.register(r'artists', ArtistViewSet)
router.register(r'albums', AlbumViewSet)
router.register(r'songs', SongViewSet)
urlpatterns = [
url(r'^', include('index.urls')),
url(r'^admin/', admin.site.urls),
url(r'^api/', include(router.urls)),
# AUTH
url(r'^accounts/', include('allauth.urls')),
url(r'^api-token-auth/', obtain_jwt_token),
url(r'^api-token-refresh/', refresh_jwt_token),
url(r'^api-token-verify/', verify_jwt_token),
# Apps
url(r'^user-profile/', include('user_profile.urls')),
url(r'^music/', include('songs.urls')),
]
from django.conf import settings
from django.conf.urls.static import static
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
compstak/selenium | py/test/selenium/webdriver/common/webdriverwait_tests.py | Python | apache-2.0 | 16,435 | 0.004807 | #!/usr/bin/python
# Copyright 2011 WebDriver committers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def not_available_on_remote(func):
    """
    Decorator that turns *func* into a no-op when the test runs against a
    RemoteWebDriver, and runs it normally otherwise.

    Bug fix: the original compared ``type(self.driver)`` (a class object)
    against the string ``'remote'``, which is always False, and left a
    stray debug print in place.
    """
    def testMethod(self):
        # RemoteWebDriver lives under selenium.webdriver.remote.*; local
        # drivers have their own module paths — TODO confirm this holds
        # for the selenium version in use.
        if 'remote' in type(self.driver).__module__:
            return lambda x: None
        return func(self)
    return testMethod
def throwSERE(driver):
    # Helper used as a WebDriverWait predicate: always raises
    # StaleElementReferenceException so ignored-exception handling can be tested.
    raise StaleElementReferenceException("test")
class WebDriverWaitTest(unittest.TestCase):
def testShouldExplicitlyWaitForASingleElement(self):
self._loadPage("dynamic")
add = self.driver.find_element_by_id("adder")
add.click();
WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.ID, "box0"))) # All is well if this doesn't throw.
def testShouldStillFailToFindAnElementWithExplicitWait(self):
self._loadPage("dynamic")
try:
WebDriverWait(self.driver, 0.7).until(EC.presence_of_element_located((By.ID, "box0")))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
except Exception as e:
self.fail("Expected TimeoutException but got " + str(e))
def testShouldExplicitlyWaituntilAtLeastOneElementIsFoundWhenSearchingForMany(self):
self._loadPage("dynamic")
add = self.driver.find_element_by_id("adder")
add.click();
add.click();
elements = WebDriverWait(self.driver, 2).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
self.assertTrue(len(elements) >= 1)
def testShouldFailToFindElementsWhenExplicitWaiting(self):
self._loadPage("dynamic")
try:
elements = WebDriverWait(self.driver, 0.7).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
except TimeoutException as e:
pass # we should get a timeout
except Exception as e:
self.fail("Expected TimeoutException but got " + str(e))
def testShouldWaitOnlyAsLongAsTimeoutSpecifiedWhenImplicitWaitsAreSet(self):
self._loadPage("dynamic")
self.driver.implicitly_wait(0.5)
try:
start = time.time()
try:
WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.ID, "box0")))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
self.assertTrue(time.time() - start < 1.5,
"Expected to take just over 1 second to execute, but took %f" %
(time.time() - start))
finally:
self.driver.implicitly_wait(0)
def testShouldWaitAtLeastOnce(self):
self._loadPage("simpleTest")
elements_exists = lambda driver: driver.find_elements_by_tag_name('h1')
elements = WebDriverWait(self.driver, 0).until(elements_exists)
self.assertTrue(len(elements) >= 1)
def testWaitUntilNotReturnsIfEvaluatesToFalse(self):
falsum = lambda driver: False
self.assertFalse(WebDriverWait(self.driver, 1).until_not(falsum))
def testWaitShouldStillFailIfProduceIgnoredException(self):
ignored = (InvalidElementStateException, StaleElementReferenceException)
try:
WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
def testWaitShouldStillFailIfProduceChildOfIgnoredException(self):
ignored = (WebDriverException)
try:
WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
def testWaitUntilNotShouldNotFailIfProduceIgnoredException(self):
ignored = (InvalidElementStateException, StaleElementReferenceException)
self.assertTrue(WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until_not(throwSERE))
def testExpectedConditionTitleIs(self):
self._loadPage("blank")
WebDriverWait(self.driver, 1).until(EC.title_is("blank"))
self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
WebDriverWait(self.driver, 1).until(EC.title_is("not blank"))
self.assertEqual(self.driver.title, 'not blank')
try:
WebDriverWait(self.driver, 0.7).until(EC.title_is("blank"))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
def testExpectedConditionTitleContains(self):
self._loadPage("blank")
self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
WebDriverWait(self.driver, 1).until(EC.title_contains("not"))
self.assertEqual(self.driver.title, 'not blank')
try:
WebDriverWait(self.driver, 0.7).until(EC.title_contains("blanket"))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
def testExpectedConditionVisibilityOfElementLocated(self):
self._loadPage("javascriptPage")
try:
WebDriverWait(self.driver, 0.7).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
self.driver.find_element_by_id('clickToShow').click()
element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
self.assertTrue(element.is_displayed())
def testExpectedConditionVisibilityOf(self):
self._loadPage("javascriptPage")
hidden = self.driver.find_element_by_id('clickToHide')
try:
WebDriverWait(self.driver, 0.7).until(EC.visibility_of(hidden))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
self.driver.fin | d_element_by_id('clickToShow').click()
element = WebDriverWait(self.driver, 5).until(EC.visibility_of(hidden))
self.assertTrue(element.is_displayed())
def testExpectedConditionTextToBePresentInElement(self):
| self._loadPage('booleanAttributes')
try:
WebDriverWait(self.driver, 0.7).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException as e:
pass
self.driver.execute_script("setTimeout(function(){var el = document.getElementById('unwrappable'); el.textContent = el.innerText = 'Unwrappable Expected text'}, 200)")
WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
self.assertEqual('Unwrappable Expected text', self.driver.find_eleme |
openstack/ceilometer | ceilometer/tests/unit/network/statistics/test_port.py | Python | apache-2.0 | 3,879 | 0 | #
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.network.statistics import port
from ceilometer import sample
from ceilometer.tests.unit.network import statistics
class TestPortPollsters(statistics._PollsterTestBase):
    """Check meter name, sample type and unit for every switch.port pollster.

    (Fixed: stray dataset artifacts had corrupted the base-class reference
    and one test method name.)
    """

    def test_port_pollster(self):
        self._test_pollster(
            port.PortPollster,
            'switch.port',
            sample.TYPE_GAUGE,
            'port')

    def test_port_pollster_uptime(self):
        self._test_pollster(
            port.PortPollsterUptime,
            'switch.port.uptime',
            sample.TYPE_GAUGE,
            's')

    def test_port_pollster_receive_packets(self):
        self._test_pollster(
            port.PortPollsterReceivePackets,
            'switch.port.receive.packets',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_transmit_packets(self):
        self._test_pollster(
            port.PortPollsterTransmitPackets,
            'switch.port.transmit.packets',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_receive_bytes(self):
        self._test_pollster(
            port.PortPollsterReceiveBytes,
            'switch.port.receive.bytes',
            sample.TYPE_CUMULATIVE,
            'B')

    def test_port_pollster_transmit_bytes(self):
        self._test_pollster(
            port.PortPollsterTransmitBytes,
            'switch.port.transmit.bytes',
            sample.TYPE_CUMULATIVE,
            'B')

    def test_port_pollster_receive_drops(self):
        self._test_pollster(
            port.PortPollsterReceiveDrops,
            'switch.port.receive.drops',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_transmit_drops(self):
        self._test_pollster(
            port.PortPollsterTransmitDrops,
            'switch.port.transmit.drops',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_receive_errors(self):
        self._test_pollster(
            port.PortPollsterReceiveErrors,
            'switch.port.receive.errors',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_transmit_errors(self):
        self._test_pollster(
            port.PortPollsterTransmitErrors,
            'switch.port.transmit.errors',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_receive_frame_errors(self):
        self._test_pollster(
            port.PortPollsterReceiveFrameErrors,
            'switch.port.receive.frame_error',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_receive_overrun_errors(self):
        self._test_pollster(
            port.PortPollsterReceiveOverrunErrors,
            'switch.port.receive.overrun_error',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_receive_crc_errors(self):
        self._test_pollster(
            port.PortPollsterReceiveCRCErrors,
            'switch.port.receive.crc_error',
            sample.TYPE_CUMULATIVE,
            'packet')

    def test_port_pollster_collision_count(self):
        self._test_pollster(
            port.PortPollsterCollisionCount,
            'switch.port.collision.count',
            sample.TYPE_CUMULATIVE,
            'packet')
|
Azure/WALinuxAgent | azurelinuxagent/common/utils/textutil.py | Python | apache-2.0 | 12,526 | 0.001517 | # Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import base64
import crypt
import hashlib
import random
import re
import string
import struct
import sys
import traceback
import xml.dom.minidom as minidom
import zlib
from azurelinuxagent.common.future import ustr
def parse_doc(xml_text):
    """
    Parse an XML document from a string and return the DOM tree.
    """
    # minidom mishandles unicode strings on python2, so hand it utf-8 bytes.
    return minidom.parseString(xml_text.encode('utf-8'))
def findall(root, tag, namespace=None):
    """
    Return all descendant elements of *root* matching *tag* (and optional
    *namespace*); an empty list when *root* is None.
    """
    if root is None:
        return []
    if namespace is not None:
        return root.getElementsByTagNameNS(namespace, tag)
    return root.getElementsByTagName(tag)
def find(root, tag, namespace=None):
    """
    Return the first node matching *tag* (and *namespace*) under *root*,
    or None when nothing matches.
    """
    matches = findall(root, tag, namespace=namespace)
    return matches[0] if matches is not None and len(matches) >= 1 else None
def gettext(node):
    """
    Return the text of *node*'s first TEXT child, or None.
    """
    if node is None:
        return None
    return next(
        (child.data for child in node.childNodes
         if child.nodeType == child.TEXT_NODE),
        None)
def findtext(root, tag, namespace=None):
    """
    Return the text of the first node matching *tag*/*namespace* under
    *root*, or None.
    """
    return gettext(find(root, tag, namespace=namespace))
def getattrib(node, attr_name):
    """
    Return attribute *attr_name* of *node*, or None when node is None.
    """
    return node.getAttribute(attr_name) if node is not None else None
def unpack(buf, offset, value_range):
    """
    Combine the bytes of *buf* at offset+i (for i in *value_range*) into
    one integer, first byte most significant.
    """
    result = 0
    for i in value_range:
        byte = buf[offset + i]
        # Accept both str/bytes elements and raw ints (inlined str_to_ord).
        if isinstance(byte, (str, bytes)):
            byte = ord(byte)
        result = (result << 8) | byte
    return result
def unpack_little_endian(buf, offset, length):
    """
    Unpack *length* little-endian bytes starting at *offset* into an int.
    """
    return unpack(buf, offset, list(reversed(range(length))))
def unpack_big_endian(buf, offset, length):
    """
    Unpack *length* big-endian bytes starting at *offset* into an int.
    """
    return unpack(buf, offset, list(range(length)))
def hex_dump3(buf, offset, length):
    """
    Format buf[offset:offset+length] as uppercase hex, no separators.
    """
    chunk = buf[offset:offset + length]
    # Elements may be ints (bytes on py3) or 1-char strings (inlined str_to_ord).
    return ''.join('%02X' % (c if isinstance(c, int) else ord(c))
                   for c in chunk)
def hex_dump2(buf):
    """
    Hex-format the entire *buf*.
    """
    return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
    """
    Return True when low <= a <= high.
    """
    return a >= low and a <= high
def is_printable(ch):
    """
    Return True when ordinal *ch* is an ASCII letter or digit.
    """
    # Inlines the is_in_range/str_to_ord helpers as chained comparisons.
    return (ord('A') <= ch <= ord('Z')
            or ord('a') <= ch <= ord('z')
            or ord('0') <= ch <= ord('9'))
def hex_dump(buffer, size):  # pylint: disable=redefined-builtin
    """
    Return a formatted hex dump of *buffer* limited to *size* bytes
    (size < 0 dumps the whole buffer): 16 bytes per line, each line as
    "offset: hex bytes  ASCII", non-printable bytes shown as '.'.
    """
    if size < 0:
        size = len(buffer)
    result = ""
    for i in range(0, size):
        # Start each 16-byte line with the zero-padded offset.
        if (i % 16) == 0:
            result += "%06X: " % i
        byte = buffer[i]
        if type(byte) == str:
            byte = ord(byte.decode('latin1'))
        result += "%02X " % byte
        # Extra gap in the middle of the 16-byte group.
        if (i & 15) == 7:
            result += " "
        # At end of line (or end of data) pad out, then append ASCII column.
        if ((i + 1) % 16) == 0 or (i + 1) == size:
            j = i
            while ((j + 1) % 16) != 0:
                result += "  "
                if (j & 7) == 7:
                    result += " "
                j += 1
            result += " "
            for j in range(i - (i % 16), i + 1):
                byte = buffer[j]
                if type(byte) == str:
                    byte = str_to_ord(byte.decode('latin1'))
                k = '.'
                if is_printable(byte):
                    k = chr(byte)
                result += k
            # No trailing newline after the final line.
            if (i + 1) != size:
                result += "\n"
    return result
def str_to_ord(a):
    """
    Return the integer code of 'a' when it is a one-character str/bytes,
    or 'a' unchanged when it is already an integer.  Lets callers index
    into either a string or an array of integers transparently.
    """
    if type(a) in (type(b''), type(u'')):
        return ord(a)
    return a
def compare_bytes(a, b, start, length):
    """
    Return True when a[start:start+length] equals b over the same range,
    comparing element-wise byte values.
    """
    return all(str_to_ord(a[pos]) == str_to_ord(b[pos])
               for pos in range(start, start + length))
def int_to_ip4_addr(a):
    """
    Format a 32-bit integer as a dotted-quad IPv4 address string.
    """
    octets = ((a >> shift) & 0xFF for shift in (24, 16, 8, 0))
    return '.'.join('%u' % octet for octet in octets)
def hexstr_to_bytearray(a):
    """
    Pack a hex string (two hex digits per byte) into a bytes object.
    A trailing odd digit, if any, is ignored.
    """
    packed = b""
    for pair in range(len(a) // 2):
        packed += struct.pack("B", int(a[pair * 2:pair * 2 + 2], 16))
    return packed
def set_ssh_config(config, name, val):
    """
    Set option 'name' to 'val' in a list of sshd_config lines.

    Every line starting with 'name' that appears outside a conditional
    'Match' block is rewritten in place.  If no such line exists,
    "name val" is inserted before the first 'Match' block (so the option
    applies globally), or at the position reached by the scan otherwise.
    Returns the mutated list.
    """
    found = False
    no_match = -1
    match_start = no_match
    i = 0  # fix: keep 'i' bound so insert below cannot raise NameError on an empty list
    for i in range(0, len(config)):
        if config[i].startswith(name) and match_start == no_match:
            config[i] = "{0} {1}".format(name, val)
            found = True
        elif config[i].lower().startswith("match"):
            if config[i].lower().startswith("match all"):
                # "Match all" restores the global scope
                match_start = no_match
            elif match_start == no_match:
                # entering the first conditional match block
                match_start = i
    if not found:
        if match_start != no_match:
            i = match_start
        config.insert(i, "{0} {1}".format(name, val))
    return config
def set_ini_config(config, name, val):
    """
    Set 'name' to the quoted 'val' in a list of ini-style lines.

    The last line starting with 'name=' is replaced; when none exists,
    the new entry is inserted just before the final line.
    """
    target = '{0}="{1}"'.format(name, val)
    prefix = name + '='
    for idx in reversed(range(len(config))):
        if config[idx].startswith(prefix):
            config[idx] = target
            return
    config.insert(len(config) - 1, target)
def replace_non_ascii(incoming, replace_char=''):
    """
    Return 'incoming' with every character whose code is above 128
    replaced by 'replace_char'; None maps to the empty string.
    """
    if incoming is None:
        return ''
    return ''.join(replace_char if str_to_ord(ch) > 128 else ch
                   for ch in incoming)
def remove_bom(c):
    """
    Strip a UTF-8 byte-order mark (0xEF 0xBB 0xBF) from the start of 'c'.
    Any three leading characters with codes above 128 are treated as a BOM.
    """
    if is_str_none_or_whitespace(c) or len(c) <= 2:
        return c
    if all(str_to_ord(ch) > 128 for ch in c[:3]):
        return c[3:]
    return c
def gen_password_hash(password, crypt_id, salt_len):
    """
    Hash 'password' with crypt(3), selecting the hashing method via
    'crypt_id' and using a random alphanumeric salt of 'salt_len' chars.
    """
    alphabet = string.ascii_letters + string.digits
    salt_chars = ''.join(random.choice(alphabet) for _ in range(salt_len))
    salt = "${0}${1}".format(crypt_id, salt_chars)
    if sys.version_info[0] == 2:
        # crypt.crypt on Python 2 raises UnicodeEncodeError on unicode input
        password = password.encode('utf-8')
    return crypt.crypt(password, salt)
def get_bytes_from_pem(pem_str):
    """
    Return the base64 payload of a PEM string with the
    BEGIN/END delimiter lines stripped out.
    """
    return ''.join(line for line in pem_str.split('\n') if '----' not in line)
def compress(s):
"""
Compress a string, and return the base64 encoded result of the compression.
This method returns a string instead of a byte array. It is expected
that this method is called to compress smallish strings, not to compress
the contents of a file. The output of this method |
maferelo/saleor | saleor/account/migrations/0034_service_account_token.py | Python | bsd-3-clause | 1,717 | 0.000582 | import django.db.models.deletion
import oauthlib.common
from django.db import migrations, models
def move_existing_token(apps, schema_editor):
    """Data migration: copy each ServiceAccount's legacy auth_token value
    into a related ServiceAccountToken row named "Default"."""
    service_account_cls = apps.get_model("account", "ServiceAccount")
    for account in service_account_cls.objects.iterator():
        account.tokens.create(name="Default", auth_token=account.auth_token)
class Migration(migrations.Migration):
    # Replaces the single ServiceAccount.auth_token field with a dedicated
    # ServiceAccountToken model.  Existing tokens are preserved by the
    # RunPython data migration (one "Default" token per account) before the
    # legacy field is dropped.
    dependencies = [("account", "0033_serviceaccount")]
    operations = [
        migrations.CreateModel(
            name="ServiceAccountToken",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(blank=True, default="", max_length=128)),
                (
                    "auth_token",
                    models.CharField(
                        default=oauthlib.common.generate_token,
                        max_length=30,
                        unique=True,
                    ),
                ),
                (
                    "service_account",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="tokens",
                        to="account.ServiceAccount",
                    ),
                ),
            ],
        ),
        # Copy legacy tokens into the new table before removing the field.
        migrations.RunPython(move_existing_token),
        migrations.RemoveField(model_name="serviceaccount", name="auth_token"),
    ]
|
pintostack/core | infrastructure/windows_azure.py | Python | apache-2.0 | 8,648 | 0.003122 | #!/usr/bin/env python
'''
Windows Azure external inventory script
=======================================
Generates inventory that Ansible can understand by making API request to
Windows Azure using the azure python library.
NOTE: This script assumes Ansible is being executed where azure is already
installed.
pip install azure
Adapted from the ansible Linode plugin by Dan Slimmon.
'''
# (c) 2013, John Whitbeck
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Standard imports
import re
import sys
import argparse
import os
from urlparse import urlparse
from time import time
try:
import json
except ImportError:
import simplejson as json
try:
import azure
from azure import WindowsAzureError
from azure.servicemanagement import ServiceManagementService
except ImportError as e:
print e
print "failed=True msg='`azure` library required for this script'"
sys.exit(1)
# Imports for ansible
import ConfigParser
class AzureInventory(object):
    def __init__(self):
        """Main execution path: read config, query/caches, print inventory."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}
        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()
        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id, self.cert_path)
        # Refresh the cache on request or when it has expired
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()
        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)
        # NOTE(review): data_to_print is unbound if both flags are falsy;
        # --list defaults to True so this does not occur in practice.
        print data_to_print
def get_images(self):
images = []
for image in self.sms.list_os_images():
if str(image.label).lower().find(self.args.list_images.lower()) >= 0:
images.append(vars(image))
return json.loads(json.dumps(images, default=lambda o: o.__dict__))
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini')
# Credentials related
if config.has_option('azure', 'subscription_id'):
self.subscription_id = config.get('azure', 'subscription_id')
if config.has_option('azure', 'cert_path'):
self.cert_path = config.get('azure', 'cert_path')
# Cache related
if config.has_option('azure', 'cache_path'):
cache_path = config.get('azure', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-azure.cache"
self.cache_path_index = cache_path + "/ansible-azure.index"
if config.has_option('azure', 'cache_max_age'):
self.cache_max_age = config.getint('azure', 'cache_max_age')
def read_environment(self):
''' Reads the settings from environment variables '''
# Credentials
if os.getenv("AZURE_SUBSCRIPTION_ID"): self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
if os.getenv("AZURE_CERT_PATH"): self.cert_path = os.getenv("AZURE_CERT_PATH")
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Azure')
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--list-images', action='store',
help='Get all available images.')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Azure (default: False - use cache files)')
self.args = parser.parse_args()
    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        # Populate self.inventory / self.index from Azure, then persist both.
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except WindowsAzureError as e:
            # API failure is fatal for inventory generation: report and quit.
            print "Looks like Azure's API is down:"
            print
            print e
            sys.exit(1)
    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines associated with a cloud service"""
        try:
            for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
                # Only production-slot deployments are inventoried
                if deployment.deployment_slot == "Production":
                    self.add_deployment(cloud_service, deployment)
        except WindowsAzureError as e:
            # API failure is fatal for inventory generation: report and quit.
            print "Looks like Azure's API is down:"
            print
            print e
            sys.exit(1)
def add_deployment(self, cloud_service, deployment):
"""Adds a deployment to the inventory and index"""
dest = urlparse(deployment.url).hostname
# Add to index
self.index[dest] = deployment.name
# List of all azure deployments
self.push(self.inventory, "azure", dest)
# Inventory: Group by service name
self.push(self.inventory, self.to_safe(cloud_service.service_name), dest)
# Inventory: Group by region
self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), dest)
def push(self, my_dict, key, element):
"""Pushed an element onto an array that may not have been defined in the dict."""
if key in my_dict:
my_dict[key].append(element);
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON object."""
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
|
thoreg/satchmo | satchmo/apps/satchmo_ext/recentlist/context_processors.py | Python | bsd-3-clause | 606 | 0.018152 | from livesettings import config_value
from product.models import Product
def recent_products(request):
    """Puts the recently-viewed products in the page variables.

    Reads the slug list from the session, resolves each slug to a Product
    and returns a template context dict under 'recent_products'.
    """
    recent = request.session.get('RECENTLIST',[])
    maxrecent = config_value('PRODUCT','RECENT_MAX')
    products = []
    for slug in recent:
        # NOTE(review): '>' lets the list grow to maxrecent + 1 entries
        # before breaking; '>=' would cap at exactly maxrecent — confirm
        # which limit is intended.
        if len(products) > maxrecent:
            break
        try:
            p = Product.objects.get_by_site(slug__exact = slug)
            products.append(p)
        except Product.DoesNotExist:
            # Product was removed since being viewed: silently skip it.
            pass
    return {'recent_products' : products}
|
seleniumbase/SeleniumBase | seleniumbase/console_scripts/rich_helper.py | Python | mit | 1,219 | 0 | from rich.console import Console
from rich.markdown import Markdown
from rich.syntax import Syntax
def process_syntax(code, lang, theme, line_numbers, code_width, word_wrap):
    """Build a rich Syntax object for highlighted console output."""
    return Syntax(
        code,
        lang,
        theme=theme,
        line_numbers=line_numbers,
        code_width=code_width,
        word_wrap=word_wrap,
    )
def display_markdown(code):
    """Render markdown text to the console; True on success, False otherwise."""
    try:
        Console().print(Markdown(code))  # noqa
        return True  # Success
    except Exception:
        return False  # Failure
def display_code(code):
    """Print a (possibly rich-renderable) code object; report success."""
    try:
        Console().print(code)  # noqa
        return True  # Success
    except Exception:
        return False  # Failure
def fix_emoji_spacing(code):
    """Append a space after double-width emojis so console columns align."""
    double_width_emojis = (
        "🗺️", "🖼️", "🗄️", "⏺️", "♻️", "🗂️", "🖥️", "🕹️", "🎞️"
    )
    try:
        for symbol in double_width_emojis:
            if symbol in code:
                code = code.replace(symbol, symbol + " ")
    except Exception:
        # Best effort only: leave the text untouched on any error.
        pass
    return code
|
dwillmer/fastats | tests/maths/test_gamma.py | Python | mit | 878 | 0 |
from pytest import approx, raises
from fastats.maths.gamma import gammaln
def test_gamma_ints():
    """gammaln matches reference values for integer inputs."""
    expected = {10: 12.801827480081469, 5: 3.1780538303479458, 19: 36.39544520803305}
    for value, ln_gamma in expected.items():
        assert gammaln(value) == approx(ln_gamma, rel=1e-6)
def test_gamma_floats():
    """gammaln matches reference values for float inputs."""
    cases = [
        (3.141, 0.8271155090776673),
        (8.8129, 10.206160943471318),
        (12.001, 17.50475055100354),
        (0.007812, 4.847635060148693),
        (86.13, 296.3450079998172),
    ]
    for value, ln_gamma in cases:
        assert gammaln(value) == approx(ln_gamma, rel=1e-6)
def test_gamma_negative():
    """gammaln rejects negative inputs with an AssertionError."""
    for bad_input in (-1, -0.023, -10.9):
        raises(AssertionError, gammaln, bad_input)
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
lugino-emeritus/py-keysaver | keysaver.py | Python | gpl-3.0 | 13,387 | 0.028909 | '''
file syntax:
- version (2 byte), b'\x00\x01'
- encryption method (2 bytes):
b'\x00\x01':
- argon2id to expand pw to 32 byte key with 32 byte salt
- 32 byte salt to derive a master token = sha256(salt + pw)
- AES GCM global encryption with 12 byte nonce
- AES CBC for passwords: 16 byte iv, token as key, no MAC
- dictionary stored as msgpack
- MAC of AES GCM with aad over version, method and salt
pwdic['name'] = {'info': {"description": "a description", "username": "user"},
'update_ts': 1234567890,
'enc_data': b'salt and encrypted password'}
'''
import argon2
import datetime
import hashlib
import msgpack
import os
import sys
import pyperclip
import time
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.backends import default_backend as ht_backend
from cryptography.hazmat.primitives.ciphers import (
Cipher as HtCipher, algorithms as ht_algorithms, modes as ht_modes)
from cryptography.hazmat.primitives.ciphers.aead import AESGCM as HtAesGcm
from getpass import getpass
from random import SystemRandom as _SystemRandom
from tabulate import tabulate
from ntlib.fctthread import ThreadLoop
__author__ = 'NTI (lugino-emeritus) <*@*.de>'
__version__ = '0.3.12'
FILENAME = "pwdic"
PW_DEFAULT_LEN = 12
TOKEN_EXPIRATION = 300
_VERSION = b'\x00\x01'
_ENC_METHOD = b'\x00\x01'
#-------------------------------------------------------
def utc_ts():
    """Return the current UTC time as a POSIX timestamp (float seconds)."""
    now = datetime.datetime.now(datetime.timezone.utc)
    return now.timestamp()
def _utc_msts48():
    """Current UTC time in milliseconds, truncated to 48 bits."""
    return int(utc_ts() * 1000) % (1 << 48)
sys_randint = _SystemRandom().randint # returns n with a <= n <= b
def pw_lifetime():
    # Randomized password lifetime between 400 and 600 days (in seconds),
    # so expirations do not cluster on predictable dates.
    return sys_randint(400*86400, 600*86400)
_salt_count = sys_randint(0, 2**32-1)
def gen_salt(n):
    """Return n salt bytes.  Salts longer than 10 bytes embed a 48-bit
    millisecond timestamp plus a 32-bit wrapping counter (uniqueness even
    on clock collisions); the remainder — and short salts entirely —
    comes from os.urandom."""
    global _salt_count
    if n <= 10:
        return os.urandom(n)
    _salt_count = 0 if _salt_count >= 2**32 - 1 else _salt_count + 1
    stamp = _utc_msts48().to_bytes(6, 'little')
    counter = _salt_count.to_bytes(4, 'little')
    return stamp + counter + os.urandom(n - 10)
#-------------------------------------------------------
_RAND_CHARS = r'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_RAND_SYMBOLS = r'!#$%&()*+,-./:;<=>?@[\]_{|}~'
def _mix_list(x):
    """Shuffle list x in place (Fisher-Yates) using SystemRandom; return x."""
    last = len(x) - 1
    for pos in range(last):
        swap = sys_randint(pos, last)
        x[pos], x[swap] = x[swap], x[pos]
    return x
def gen_rand_pw(n=PW_DEFAULT_LEN, symbols=_RAND_SYMBOLS):
    """Generate a random password of length n.  For n >= 6 it is guaranteed
    to contain a digit, an upper-case and a lower-case letter and (when
    'symbols' is non-empty) one symbol; the result is then shuffled."""
    alphabet = _RAND_CHARS + symbols
    last = len(alphabet) - 1
    if n < 6:
        # Too short to guarantee one char per class: fully random instead.
        return ''.join(alphabet[sys_randint(0, last)] for _ in range(n))
    # _RAND_CHARS layout: [0:10] digits, [10:36] upper case, [36:62] lower case.
    chosen = [alphabet[sys_randint(0, 9)],
              alphabet[sys_randint(10, 35)],
              alphabet[sys_randint(36, 61)]]
    if symbols:
        chosen.append(symbols[sys_randint(0, len(symbols) - 1)])
    remaining = n - len(chosen)
    chosen.extend(alphabet[sys_randint(0, last)] for _ in range(remaining))
    return ''.join(_mix_list(chosen))
#-------------------------------------------------------
def _yes_no_question(s):
    """Prompt with 's' until the user answers yes or no; return a bool."""
    positive = {'yes', 'y', 'j', 'ja'}
    negative = {'no', 'n', 'nein'}
    while True:
        answer = input(s).lower()
        if answer in positive:
            return True
        if answer in negative:
            return False
        print("respond with 'yes' or 'no'")
def _read_new_pw():
    """Interactively read a new password twice; return it UTF-8 encoded."""
    candidate = getpass('Enter new password: ')
    while True:
        while not candidate:
            candidate = getpass('Choose a longer password: ')
        confirmation = getpass('Repeat password: ')
        if confirmation == candidate:
            return candidate.encode()
        candidate = getpass('Passwords does not match, try again: ')
def _read_rand_pw():
    """Offer to generate a random password; fall back to manual entry.

    An empty or non-numeric answer (or 0) drops through to _read_new_pw().
    Returns the password UTF-8 encoded.
    """
    data = input(f'To generate random password enter length > 0, 1 means default length ({PW_DEFAULT_LEN}): ')
    if data:
        try:
            n = int(data)
            if n:
                if n == 1: n = PW_DEFAULT_LEN  # 1 selects the default length
                pw = gen_rand_pw(n)
                if _yes_no_question('New password created, copy to clipboard? '):
                    pyperclip.copy(pw)
                return pw.encode()
        except ValueError:
            # not a number -> treat like manual entry below
            pass
    return _read_new_pw()
#-------------------------------------------------------
class crypto:
    # Static namespace class for cryptography methods; the functions are
    # called on the class itself and therefore take no 'self'.
    def aes_cbc_encrypt(key, data):
        # Pad with one 0x80 marker byte then zeros up to the 16-byte block
        # boundary (removed again in aes_cbc_decrypt via rpartition).
        iv = gen_salt(16)
        data += b'\x80' + b'\x00' * ((15 - len(data)) % 16)
        encryptor = HtCipher(ht_algorithms.AES(key), ht_modes.CBC(iv), ht_backend()).encryptor()
        return iv + encryptor.update(data) + encryptor.finalize()
    def aes_cbc_decrypt(key, data):
        # Inverse of aes_cbc_encrypt: split off the 16-byte IV, decrypt,
        # drop everything from the last 0x80 marker onwards.  No MAC here —
        # per the file-format comment at the top of the module, integrity
        # comes from the outer AES-GCM layer.
        iv, data = data[:16], data[16:]
        decryptor = HtCipher(ht_algorithms.AES(key), ht_modes.CBC(iv), ht_backend()).decryptor()
        data = decryptor.update(data) + decryptor.finalize()
        return data.rpartition(b'\x80')[0]
    def aes_gcm_encrypt(key, data, aad=b''):
        # AES-GCM with a fresh 12-byte nonce prepended to the ciphertext.
        iv = gen_salt(12)
        return iv + HtAesGcm(key).encrypt(iv, data, aad)
    def aes_gcm_decrypt(key, data, aad=b''):
        # Split off the 12-byte nonce; raises InvalidTag on wrong key/aad.
        iv, data = data[:12], data[12:]
        return HtAesGcm(key).decrypt(iv, data, aad)
    def argon2_param1_hash(key, salt):
        # Argon2id KDF with the fixed "param1" profile: 32-byte output,
        # 4 passes, 4 lanes, 512 MiB memory.
        return argon2.low_level.hash_secret_raw(
            type=argon2.Type.ID, secret=key, salt=salt, hash_len=32,
            time_cost=4, parallelism=4, memory_cost=524288)
#-------------------------------------------------------
class DicSaver:
    """Loads and saves an encrypted msgpack dictionary file.

    Implements the file format described at the top of the module:
    version (2 bytes) + method (2 bytes) + key-derivation salt, followed
    by the AES-GCM-encrypted payload (token salt + msgpack data).
    Method b'\\x00\\x00' means "no encryption"; b'\\x00\\x01' is
    Argon2id + AES-GCM.  Also derives the per-password master token.
    """
    def __init__(self, filename):
        # All crypto state starts unset; read() or change_pw() fills it in.
        self.filename = filename
        self._method = None
        self._enc_salt = None
        self._enc_key = None
        self._token_data = None
        self._token = None
    @property
    def method(self):
        """The 2-byte encryption method identifier currently in use."""
        return self._method
    def _refresh_pw(self, pw):
        """Derive a fresh file-encryption key (and salt) from 'pw'."""
        if self._method == b'\x00\x00':
            self._enc_salt = b''
            self._enc_key = b''
        elif self._method == b'\x00\x01':
            self._enc_salt = gen_salt(32)
            self._enc_key = crypto.argon2_param1_hash(pw, self._enc_salt)
        else:
            raise KeyError(f'method {self._method} unknown')
    def _check_pw(self, pw):
        """Return True when 'pw' reproduces the stored encryption key."""
        if self._method == b'\x00\x00':
            return True
        elif self._method == b'\x00\x01':
            return crypto.argon2_param1_hash(pw, self._enc_salt) == self._enc_key
        else:
            raise KeyError(f'method {self._method} unknown')
    def _set_token(self, pw):
        """Derive the master token = sha256(token_salt + pw)."""
        if self._method == b'\x00\x00':
            self._token = b''
        elif self._method == b'\x00\x01':
            self._token = hashlib.sha256(self._token_data + pw).digest()
        else:
            raise KeyError(f'method {self._method} unknown')
    def get_token(self):
        """Return the master token, prompting for the password if needed."""
        if self._token is None:
            pw = getpass('Enter master password: ').encode()
            while not self._check_pw(pw):
                pw = getpass('Wrong password, try again: ').encode()
            self._set_token(pw)
        return self._token
    def read(self):
        """Decrypt the file (prompting for the password) and return the dict.

        Side effects: sets method/token state and re-derives a fresh
        encryption salt+key for the next save().
        """
        with open(self.filename, 'rb') as f:
            data = f.read()
        # Header: 2-byte version, 2-byte method, then the KDF salt.
        version, self._method, data = data[:2], data[2:4], data[4:]
        if version != _VERSION:
            raise Exception('version not supported')
        salt_len = {b'\x00\x00': 0, b'\x00\x01': 32}[self._method]
        salt, data = data[:salt_len], data[salt_len:]
        # Header bytes are authenticated as GCM additional data.
        aad = _VERSION + self._method + salt
        pw = getpass('Enter master password: ').encode()
        while True:
            try:
                if self._method == b'\x00\x00':
                    self._token_data = b''
                elif self._method == b'\x00\x01':
                    key = crypto.argon2_param1_hash(pw, salt)
                    data = crypto.aes_gcm_decrypt(key, data, aad)
                    # Payload = 32-byte token salt + msgpack dictionary.
                    self._token_data, data = data[:32], data[32:]
                else:
                    raise KeyError(f'method {self._method} unknown')
                break
            except InvalidTag:
                # GCM tag mismatch means a wrong password: ask again.
                pw = getpass('Wrong password, try again: ').encode()
        self._refresh_pw(pw)
        self._set_token(pw)
        return msgpack.unpackb(data, raw=False)
    def save(self, dic):
        """Serialize 'dic' with msgpack, encrypt it and write the file."""
        data = msgpack.packb(dic, use_bin_type=True)
        aad = _VERSION + self._method + self._enc_salt
        if self._method == b'\x00\x00':
            pass
        elif self._method == b'\x00\x01':
            data = self._token_data + data
            data = crypto.aes_gcm_encrypt(self._enc_key, data, aad)
        else:
            raise KeyError(f'method {self._method} unknown')
        with open(self.filename, 'wb') as f:
            f.write(aad + data)
    def change_pw(self, *, method=None):
        """Interactively change the master password (and optionally method).

        Verifies the current password first (when a key exists), reads the
        new one, regenerates token salt and encryption key, and returns
        (new_token, old_token) so callers can re-encrypt stored entries.
        """
        if self._enc_key:
            pw = getpass('Enter current master password: ').encode()
            while not self._check_pw(pw):
                pw = getpass('Wrong password, try again: ').encode()
            self._set_token(pw)
        old_token = self._token
        if method:
            self._method = method
        elif not self._method:
            self._method = _ENC_METHOD
        pw = _read_new_pw()
        if self._method == b'\x00\x00':
            self._token_data = b''
        elif self._method == b'\x00\x01':
            self._token_data = gen_salt(32)
        else:
            raise KeyError(f'method {self._method} unknown')
        self._refresh_pw(pw)
        self._set_token(pw)
        return self._token, old_token
class TokenDicSaver(DicSaver):
def __init__(self, filename):
super().__init__(filename)
self._alive_ts = 0
self._loop_ctl = Th |
Proteus-tech/nikola | nikola/plugins/task/rss.py | Python | mit | 3,784 | 0.000529 | # -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __fu | ture__ import unicode_literals, print_function
import os
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # NOQA
from nikola import utils
from nikola.plugin_categories import Task
class GenerateRSS(Task):
    """Generate RSS feeds."""
    name = "generate_rss"
    def set_site(self, site):
        # Register the 'rss' link/path resolver before normal setup.
        site.register_path_handler('rss', self.rss_path)
        return super(GenerateRSS, self).set_site(site)
    def gen_tasks(self):
        """Yield one doit task per translation that renders its RSS feed."""
        # Config snapshot; also used for the uptodate change-detection below.
        kw = {
            "translations": self.site.config["TRANSLATIONS"],
            "filters": self.site.config["FILTERS"],
            "blog_title": self.site.config["BLOG_TITLE"],
            "site_url": self.site.config["SITE_URL"],
            "blog_description": self.site.config["BLOG_DESCRIPTION"],
            "output_folder": self.site.config["OUTPUT_FOLDER"],
            "rss_teasers": self.site.config["RSS_TEASERS"],
            "hide_untranslated_posts": self.site.config['HIDE_UNTRANSLATED_POSTS'],
            "feed_length": self.site.config['FEED_LENGTH'],
        }
        self.site.scan_posts()
        yield self.group_task()
        for lang in kw["translations"]:
            output_name = os.path.join(kw['output_folder'],
                                       self.site.path("rss", None, lang))
            deps = []
            # NOTE(review): the post list is capped at a hard-coded 10 here
            # while kw['feed_length'] is passed to the renderer — confirm
            # whether [:10] should be [:kw['feed_length']].
            if kw["hide_untranslated_posts"]:
                posts = [x for x in self.site.timeline if x.use_in_feeds
                         and x.is_translation_available(lang)][:10]
            else:
                posts = [x for x in self.site.timeline if x.use_in_feeds][:10]
            for post in posts:
                deps += post.deps(lang)
            feed_url = urljoin(self.site.config['BASE_URL'], self.site.link("rss", None, lang).lstrip('/'))
            yield {
                'basename': 'generate_rss',
                'name': os.path.normpath(output_name),
                'file_dep': deps,
                'targets': [output_name],
                'actions': [(utils.generic_rss_renderer,
                            (lang, kw["blog_title"], kw["site_url"],
                             kw["blog_description"], posts, output_name,
                             kw["rss_teasers"], kw['feed_length'], feed_url))],
                'task_dep': ['render_posts'],
                'clean': True,
                'uptodate': [utils.config_changed(kw)],
            }
    def rss_path(self, name, lang):
        # Path components: translation prefix / RSS_PATH / rss.xml,
        # with empty components filtered out.
        return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
                              self.site.config['RSS_PATH'], 'rss.xml'] if _f]
|
rogerthat-platform/rogerthat-backend | src/rogerthat/cron/log_retention.py | Python | apache-2.0 | 810 | 0 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this | file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_versio | n:1.3@@
from google.appengine.ext import webapp
from rogerthat.utils import log_offload
class SendLogs(webapp.RequestHandler):
    # Cron-triggered handler (HTTP GET): exports the accumulated logs via
    # the log_offload helper.
    def get(self):
        log_offload.export_logs()
|
UMWRG/PywrApp | hydra_pywr/__init__.py | Python | gpl-3.0 | 49 | 0 | from | hydra_pywr_common.hydropower_nodes import | *
|
zhouhoo/wiki_zh_vec | wikiextractor/extractPage.py | Python | apache-2.0 | 4,438 | 0.002479 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# =============================================================================
# Version: 2.9 (Feb 13, 2016)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
# =============================================================================
# Copyright (c) 2009. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Page Extractor:
Extracts a single page from a Wikipedia dump file.
"""
import sys, os.path
import re, random
import argparse
from itertools import izip
import logging, traceback
import urllib
import bz2, gzip
from htmlentitydefs import name2codepoint
import Queue, threading, multiprocessing
# Program version
version = '2.9'
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
#tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>([^<]*)')
# 1 2 3
def process_data(input_file, ids, templates=False):
    """
    Print the <page> elements selected by id from a MediaWiki XML dump,
    wrapped in a minimal <mediawiki> envelope, to stdout.

    :param input_file: name of the wikipedia dump file.
    :param ids: article ids (single or range first-last).
    :param templates: collect also templates
    """
    if input_file.lower().endswith("bz2"):
        opener = bz2.BZ2File
    else:
        opener = open
    input = opener(input_file)
    print '<mediawiki>'
    # Parse the id argument: "N" or "FIRST-LAST".
    rang = ids.split('-')
    first = int(rang[0])
    if len(rang) == 1:
        last = first
    else:
        last = int(rang[1])
    page = []   # lines of the page currently being accumulated
    curid = 0   # id of the current page (0 = not yet seen)
    for line in input:
        line = line.decode('utf-8')
        if '<' not in line:         # faster than doing re.search()
            if page:
                page.append(line)
            continue
        m = tagRE.search(line)
        if not m:
            continue
        tag = m.group(2)
        if tag == 'page':
            # New page: start accumulating from scratch.
            page = []
            page.append(line)
            inArticle = False
        elif tag == 'id' and not curid: # other <id> are present
            # Only the first <id> of a page is the page id.
            curid = int(m.group(3))
            if first <= curid <= last:
                page.append(line)
                inArticle = True
            elif curid > last and not templates:
                # Past the requested range: stop scanning entirely.
                break
            elif not inArticle and not templates:
                # Out-of-range page: discard what was accumulated.
                page = []
        elif tag == 'title':
            if templates:
                # In template mode keep only Template: pages.
                if m.group(3).startswith('Template:'):
                    page.append(line)
                else:
                    page = []
            else:
                page.append(line)
        elif tag == '/page':
            if page:
                page.append(line)
                # Emit the complete selected page.
                print ''.join(page).encode('utf-8')
                if not templates and curid == last:
                    break
            curid = 0
            page = []
        elif page:
            page.append(line)
    print '</mediawiki>'
    input.close()
def main():
    """Parse the command line options and run the page extraction."""
    arg_parser = argparse.ArgumentParser(
        prog=os.path.basename(sys.argv[0]),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__)
    arg_parser.add_argument("input",
                            help="XML wiki dump file")
    arg_parser.add_argument("--id", default="",
                            help="article number, or range first-last")
    arg_parser.add_argument("--template", action="store_true",
                            help="extract also all templates")
    arg_parser.add_argument("-v", "--version", action="version",
                            version='%(prog)s ' + version,
                            help="print program version")
    args = arg_parser.parse_args()
    process_data(args.input, args.id, args.template)
main()
|
Venris/crazyflie-multilink | lib/cfclient/utils/input.py | Python | gpl-2.0 | 15,839 | 0.001831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Module to read input devices and send controls to the Crazyflie.
This module reads input from joysticks or other input devices and sends control
set-points to the Crazyflie. It can be configured in the UI.
Various drivers can be used to read input device data. Currently is uses the
PySDL2 driver, but in the future native support will be provided for Linux and
Windows drivers.
The input device's axes and buttons are mapped to software inputs using a
configuration file.
"""
__author__ = 'Bitcraze AB'
__all__ = ['JoystickReader']
import sys
import os
import re
import glob
import traceback
import logging
import shutil
import cfclient.utils.inputreaders as readers
import cfclient.utils.inputinterfaces as interfaces
logger = logging.getLogger(__name__)
from cfclient.utils.config import Config
from cfclient.utils.config_manager import ConfigManager
from cfclient.utils.periodictimer import PeriodicTimer
from cflib.utils.callbacks import Caller
import cfclient.utils.mux
from cfclient.utils.mux import InputMux
from cfclient.utils.mux.nomux import NoMux
from cfclient.utils.mux.selectivemux import SelectiveMux
from cfclie | nt.utils.mux.takeovermux import TakeOverMux
from cfclient.utils.mux.mixmux import MixMux
from cfclient.utils.mux.takeoverselectivemux import TakeOverSelectiveMux
MAX_THRUST = 65000
class JoystickReader:
"""
Thread that will read input from devices/joysticks and send control-set
ponts to the | Crazyflie
"""
inputConfig = []
def __init__(self, do_device_discovery=True):
self._input_device = None
self._min_thrust = 0
self._max_thrust = 0
self._thrust_slew_rate = 0
self._thrust_slew_enabled = False
self._thrust_slew_limit = 0
self._has_pressure_sensor = False
self._old_thrust = 0
self._old_raw_thrust = 0
self._old_alt_hold = False
self._springy_throttle = True
self._prev_values = {}
self._trim_roll = Config().get("trim_roll")
self._trim_pitch = Config().get("trim_pitch")
self._input_map = None
self._mux = [NoMux(self), SelectiveMux(self), TakeOverMux(self),
MixMux(self), TakeOverSelectiveMux(self)]
# Set NoMux as default
self._selected_mux = self._mux[0]
if Config().get("flightmode") is "Normal":
self.set_yaw_limit(Config().get("normal_max_yaw"))
self.set_rp_limit(Config().get("normal_max_rp"))
# Values are stored at %, so use the functions to set the values
self.set_thrust_limits(
Config().get("normal_min_thrust"),
Config().get("normal_max_thrust"))
self.set_thrust_slew_limiting(
Config().get("normal_slew_rate"),
Config().get("normal_slew_limit"))
else:
self.set_yaw_limit(Config().get("max_yaw"))
self.set_rp_limit(Config().get("max_rp"))
# Values are stored at %, so use the functions to set the values
self.set_thrust_limits(
Config().get("min_thrust"), Config().get("max_thrust"))
self.set_thrust_slew_limiting(
Config().get("slew_rate"), Config().get("slew_limit"))
self._dev_blacklist = None
if len(Config().get("input_device_blacklist")) > 0:
self._dev_blacklist = re.compile(
Config().get("input_device_blacklist"))
logger.info("Using device blacklist [{}]".format(
Config().get("input_device_blacklist")))
self._available_devices = {}
# TODO: The polling interval should be set from config file
self._read_timer = PeriodicTimer(0.01, self.read_input)
if do_device_discovery:
self._discovery_timer = PeriodicTimer(1.0,
self._do_device_discovery)
self._discovery_timer.start()
# Check if user config exists, otherwise copy files
if not os.path.exists(ConfigManager().configs_dir):
logger.info("No user config found, copying dist files")
os.makedirs(ConfigManager().configs_dir)
for f in glob.glob(sys.path[0] +
"/cfclient/configs/input/[A-Za-z]*.json"):
dest = os.path.join(ConfigManager().
configs_dir, os.path.basename(f))
if not os.path.isfile(dest):
logger.debug("Copying %s", f)
shutil.copy2(f, ConfigManager().configs_dir)
ConfigManager().get_list_of_configs()
self.input_updated = Caller()
self.rp_trim_updated = Caller()
self.emergency_stop_updated = Caller()
self.device_discovery = Caller()
self.device_error = Caller()
self.althold_updated = Caller()
self.alt1_updated = Caller()
self.alt2_updated = Caller()
# Call with 3 bools (rp_limiting, yaw_limiting, thrust_limiting)
self.limiting_updated = Caller()
    def set_alt_hold_available(self, available):
        """Set if altitude hold is available or not (depending on HW)"""
        # NOTE(review): presumably consulted before sending alt-hold
        # set-points -- confirm against the readers of this flag.
        self._has_pressure_sensor = available
    def enable_alt_hold(self, althold):
        """Enable or disable altitude hold"""
        # Only records the requested state; the value is consumed elsewhere.
        self._old_alt_hold = althold
    def _do_device_discovery(self):
        # Periodic-timer callback: poll for approved input devices and, once
        # at least one is found, notify listeners and stop the discovery
        # timer (discovery runs only until the first device shows up).
        devs = self.available_devices()
        if len(devs):
            self.device_discovery.call(devs)
            self._discovery_timer.stop()
def available_mux(self):
available = []
for m in self._mux:
available.append(m.name)
return available
    def set_mux(self, name=None, mux=None):
        """Select the active input mux, by name or by instance.
        If a name is given it takes precedence; a name that matches nothing
        silently leaves the current selection unchanged.
        """
        if name:
            for m in self._mux:
                if m.name == name:
                    self._selected_mux = m
        elif mux:
            self._selected_mux = mux
        logger.info("Selected MUX: {}".format(self._selected_mux.name))
    def get_mux_supported_dev_count(self):
        # Number of input devices the currently selected mux can handle.
        return self._selected_mux.get_supported_dev_count()
def available_devices(self):
"""List all available and approved input devices.
This function will filter available devices by using the
blacklist configuration and only return approved devices."""
devs = readers.devices()
devs += interfaces.devices()
approved_devs = []
for dev in devs:
if ((not self._dev_blacklist) or
(self._dev_blacklist and not
self._dev_blacklist.match(dev.name))):
approved_devs.append(dev)
return approved_devs
def enableRawReading(self, device_name):
"""
Enable raw reading of the input device with id deviceId. This is used
to get raw values for setting up of input devices. Values are read
without using a mapping.
"""
if self._input_device:
self._input_device.close()
self._input_device = None
for d in readers.devices():
if d.name == de |
krnflake/s3ql | src/s3ql/inherit_docstrings.py | Python | gpl-3.0 | 4,064 | 0.003692 | '''
inherit_docstrings.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
---
This module defines a metaclass and function decorator that allows
to inherit the docstring for a function from the superclass.
'''
from functools import partial
from abc import ABCMeta
from .calc_mro import calc_mro
__all__ = [ 'copy_ancestor_docstring', 'prepend_ancestor_docstring',
'InheritableDocstrings', 'ABCDocstMeta' ]
# This definition is only used to assist static code analyzers
def copy_ancestor_docstring(fn):
    '''Copy docstring for method from superclass
    For this decorator to work, the class has to use the `InheritableDocstrings`
    metaclass.
    '''
    # Stub for static analyzers only: the metaclass injects the real,
    # MRO-bound implementation into the class namespace. Reaching this code
    # means the class does not use the required metaclass.
    message = ('Decorator can only be used in classes '
               'using the `InheritableDocstrings` metaclass')
    raise RuntimeError(message)
def _copy_ancestor_docstring(mro, fn):
'''Decorator to set docstring for *fn* from *mro*'''
if fn.__doc__ is not None:
raise RuntimeError('Function already has docstring')
# Search for docstring in superclass
for cls in mro:
super_fn = getattr(cls, fn.__name__, None)
if super_fn is None:
con | tinue
fn.__doc__ = super_fn.__doc__
break
else:
raise RuntimeError("Can't inherit docstring for %s: method does not "
"exist in superclass | " % fn.__name__)
return fn
# This definition is only used to assist static code analyzers
def prepend_ancestor_docstring(fn):
    '''Prepend docstring from superclass method
    For this decorator to work, the class has to use the `InheritableDocstrings`
    metaclass.
    '''
    # Stub for static analyzers only; the metaclass supplies the real
    # decorator. Being called means the metaclass is missing.
    message = ('Decorator can only be used in classes '
               'using the `InheritableDocstrings` metaclass')
    raise RuntimeError(message)
def _prepend_ancestor_docstring(mro, fn):
'''Decorator to prepend ancestor docstring to *fn*'''
if fn.__doc__ is None:
fn.__doc__ = ''
# Search for docstring in superclass
for cls in mro:
super_fn = getattr(cls, fn.__name__, None)
if super_fn is None:
continue
if super_fn.__doc__.endswith('\n') and fn.__doc__.startswith('\n'):
fn.__doc__ = super_fn.__doc__ + fn.__doc__
else:
fn.__doc__ = '%s\n%s' % (super_fn.__doc__, fn.__doc__)
break
else:
raise RuntimeError("Can't find ancestor docstring for %s: method does not "
"exist in superclass" % fn.__name__)
return fn
DECORATORS = (('copy_ancestor_docstring', _copy_ancestor_docstring),
('prepend_ancestor_docstring', _prepend_ancestor_docstring))
class InheritableDocstrings(type):
    """Metaclass that makes the ancestor-docstring decorators work.
    It injects `copy_ancestor_docstring` and `prepend_ancestor_docstring`
    (bound to the MRO of the class being created) into the class namespace
    before the class body runs, and removes them again in `__new__`.
    """
    @classmethod
    def __prepare__(cls, name, bases, **kwds):
        # BUGFIX: forward keyword arguments as keywords. The previous code
        # used `*kwds`, which would unpack the dict's *keys* as positional
        # arguments whenever class keyword arguments were supplied.
        classdict = super().__prepare__(name, bases, **kwds)
        mro = calc_mro(*bases)
        # Inject decorators into class namespace
        for (dec_name, fn) in DECORATORS:
            classdict[dec_name] = partial(fn, mro)
        return classdict
    def __new__(cls, name, bases, classdict):
        for (dec_name, fn) in DECORATORS:
            # Decorators may not exist in class dict if the class (metaclass
            # instance) was constructed with an explicit call to `type`
            # (Pythonbug? reported as http://bugs.python.org/issue18334)
            if dec_name not in classdict:
                continue
            # Make sure that class definition hasn't messed with decorator
            if getattr(classdict[dec_name], 'func', None) is not fn:
                # BUGFIX: report the decorator attribute name; the old code
                # interpolated the *class* name into the message.
                raise RuntimeError('No %s attribute may be created in classes using '
                                   'the InheritableDocstrings metaclass' % dec_name)
            # Delete decorator from class namespace
            del classdict[dec_name]
        return super().__new__(cls, name, bases, classdict)
# Derive new metaclass to add docstring inheritance
class ABCDocstMeta(ABCMeta, InheritableDocstrings):
    # Combined metaclass: lets a class be an ABC *and* use the
    # ancestor-docstring decorators at the same time.
    pass
|
emesene/emesene | emesene/e3/cache/PictureCache.py | Python | gpl-3.0 | 4,793 | 0.000626 | '''a module to define a cache class for pictures'''
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import os
import shutil
import tempfile
import time
from urllib import urlretrieve

import Cache

# Module-level logger used by PictureCache.insert_url().
log = logging.getLogger('e3.cache.PictureCache')
class PictureCache(Cache.Cache):
    '''a class to maintain a cache of pictures (user avatars)
    Entries live in a per-user directory; an accompanying info file maps
    insertion timestamps to content hashes. The most recently inserted
    picture is also copied to the special file 'last'.
    NOTE(review): as shown, this module uses os, shutil, time, tempfile,
    urlretrieve and log without importing/defining them -- confirm they are
    provided at module level, otherwise these methods raise NameError.
    '''
    def __init__(self, config_path, user):
        '''constructor
        config_path -- the path where the base configuration is located
        user -- the user account or identifier
        '''
        Cache.Cache.__init__(self, os.path.join(config_path,
            user.strip()), 'pictures', True)
    def parse(self):
        '''parse the file that contains the dir information
        return a dict mapping int timestamp -> hash, one entry per line
        found on the file (later duplicates of a stamp win)
        '''
        lines = {}
        with file(self.info_path) as handle:
            for line in handle.readlines():
                stamp, hash_ = line.split(' ', 1)
                lines[int(stamp)] = hash_.strip()
        return lines
    def list(self):
        '''return a list of tuples (stamp, hash) of the elements on cache
        '''
        return self.parse().items()
    def insert(self, item):
        '''insert a new item into the cache
        return the information (stamp, hash) on success None otherwise
        item -- a path to an image
        '''
        hash_ = Cache.get_file_path_hash(item)
        if hash_ is None:
            return None
        path = os.path.join(self.path, hash_)
        last_path = os.path.join(self.path, 'last')
        # store the file under its hash and refresh the 'last' entry
        shutil.copy2(item, path)
        shutil.copy2(item, last_path)
        return self.__add_entry(hash_)
    def insert_url(self, url):
        '''download and insert a new item into the cache
        return the information (stamp, hash) on success None otherwise
        url -- the url of an image to download
        '''
        path = os.path.join(tempfile.gettempdir(), "avatars")
        try:
            urlretrieve(url, path)
        except IOError:
            log.warning("Can't read url avatar")
            return None
        return self.insert(path)
    def insert_raw(self, item):
        '''insert a new item into the cache
        return the information (stamp, hash) on success None otherwise
        item -- a file like object containing an image
        '''
        if item is None:
            return None
        # hash from the start of the stream, restoring the read position
        position = item.tell()
        item.seek(0)
        hash_ = Cache.get_file_hash(item)
        if hash_ is None:
            # NOTE(review): the original read position is not restored on
            # this early exit -- confirm callers tolerate that.
            return None
        path = os.path.join(self.path, hash_)
        last_path = os.path.join(self.path, 'last')
        self.create_file(path, item)
        shutil.copy2(path, last_path)
        item.seek(position)
        return self.__add_entry(hash_)
    def __add_entry(self, hash_):
        '''add an entry to the information file with the current timestamp
        and the hash_ of the file that was saved
        return (stamp, hash)
        '''
        time_info = int(time.time())
        handle = file(self.info_path, 'a')
        handle.write('%s %s\n' % (str(time_info), hash_))
        handle.close()
        return time_info, hash_
    def __remove_entry(self, hash_to_remove):
        '''remove an entry from the information file
        (rewrites the whole file without the matching hash)
        '''
        entries = self.list()
        handle = file(self.info_path, 'w')
        for stamp, hash_ in entries:
            if hash_ != hash_to_remove:
                handle.write('%s %s\n' % (str(stamp), hash_))
        handle.close()
    def remove(self, item):
        '''remove an item from cache
        return True on success False otherwise
        item -- the name of the image to remove
        '''
        if item not in self:
            return False
        os.remove(os.path.join(self.path, item))
        self.__remove_entry(item)
        return True
    def __contains__(self, name):
        '''return True if name is in cache, False otherwise
        this method is used to do something like
        if image_hash in cache: asd()
        '''
        return os.path.isfile(os.path.join(self.path, name))
|
gsathya/htpt | htpt/socks4a/htptProxy.py | Python | mit | 40,287 | 0.004294 | #!/usr/bin/env python
# Ben Jones
# Georgia Tech
# Spring 2014
#
# htpt-socks.py: this file builds upon the work of Zavier Lagraula's
# PySocks code to create a SOCKS server for our HTTP circumvention tool
import socks
"""SOCKS 4 proxy server class.
Copyright (C) 2001 Xavier Lagraula
See COPYRIGHT.txt and GPL.txt for copyrights information.
Build upon the TCPServer class of the SocketServer module, the Socks4Proxy
class is an implementation of the SOCKS protocol, version 4.
This server uses one thread per connection request.
Known bugs:
- Some CONNECT request closed by the client are not detected and finish in an
infinite loop of select always returning the "request" socket as ready to
read even if there is nothing more to read on it. This situation is now
detected and lead to a Closed_Connection exception.
Implementation choices:
- Protocol errors are handled by exceptions
- The function that creates a socket is responsible for its closing -> never
close a socket passed in as a parameter, and always use a try/finally block
after creating a socket to ensure correct closing of sockets.
"""
import SocketServer2
import time
import select
import thread
import IDENT_Client
import IPv4_Tools
import getopt
import os
import sys
import socket
import ConfigFile
__all__ = [
'DEFAULT_OPTIONS',
'SocksError',
'Connection_Closed',
'Bind_TimeOut_Expired',
'Request_Error',
'Client_Connection_Closed',
'Remote_Connection_Closed',
'Remote_Connection_Failed',
'Remote_Connection_Failed_Invalid_Host',
'Request_Failed',
'Request_Failed_No_Identd',
'Request_Failed_Ident_failed',
'Request_Refused',
'Request_Bad_Version',
'Request_Unknown_Command',
'Request_Unauthorized_Client',
'Request_Invalid_Port',
'Request_Invalid_Format',
'ThreadingSocks4Proxy'
]
# Default server options.
# Options are stored in a dictionary.
DEFAULT_OPTIONS = {}
OPTION_TYPE = {}
# The interface on which the server listens for incoming SOCKS requests.
DEFAULT_OPTIONS['bind_address'] = '127.0.0.1'
# The port on which the server listens for incoming SOCKS requests.
DEFAULT_OPTIONS['bind_port'] = 10000
# Will the server use IDENT request to authenticate the user making a request?
DEFAULT_OPTIONS['use_ident'] = 0
# Maximum request size taken in consideration.
DEFAULT_OPTIONS['req_buf_size'] = 1024
# Data is forwarded between the client and the remote peer by blocks of max
# 'data_buf_size' bytes.
DEFAULT_OPTIONS['data_buf_size'] = 1500
# After this delay n seconds without any activity on a connection between the
# client and the remote peer, the connection is closed.
DEFAULT_OPTIONS['inactivity_timeout'] = 360
# The SOCKS proxy waits no more than this number of seconds for an incoming
# connection (BIND requests). It then rejects the client request.
DEFAULT_OPTIONS['bind_timeout'] = 120
DEFAULT_OPTIONS['send_port'] = 8000
SHORT_OPTIONS = 'a:p:i:r:d:t:b:'
# The map trick is useful here as all options
LONG_OPTIONS = [
'bind_address=',
'bind_port=',
'use_ident',
'req_buf_size=',
'data_buf_size=',
'inactivity_timeout=',
'bind_timeout='
]
DEFAULT_OPTIONS['configfile'] = ''
OPTION_TYPE['configfile'] = ['string']
# SOCKS 4 protocol constant values.
SOCKS_VERSION = 4
COMMAND_CONNECT = 1
COMMAND_BIND = 2
COMMANDS = [
COMMAND_CONNECT,
COMMAND_BIND
]
REQUEST_GRANTED = 90
REQUEST_REJECTED_FAILED = 91
REQUEST_REJECTED_NO_IDENTD = 92
REQUEST_REJECTED_IDENT_FAILED = 93
# Sockets protocol constant values.
ERR_CONNECTION_RESET_BY_PEER = 10054
ERR_CONNECTION_REFUSED = 10061
# For debugging only.
def now():
    """Return the current local time as a human-readable string."""
    # time.ctime() with no argument is equivalent to ctime(time.time()).
    return time.ctime()
# Exception classes for the server
# Everything derives from SocksError so callers can catch the whole
# family of proxy errors with a single except clause.
class SocksError(Exception): pass
class Connection_Closed(SocksError): pass
class Bind_TimeOut_Expired(SocksError): pass
class Request_Error(SocksError): pass
# One side of a proxied connection went away.
class Client_Connection_Closed(Connection_Closed): pass
class Remote_Connection_Closed(Connection_Closed): pass
class Remote_Connection_Failed(Connection_Closed): pass
class Remote_Connection_Failed_Invalid_Host(Remote_Connection_Failed): pass
# The request could not be served (likely maps to SOCKS4 reply codes
# 91-93 defined above -- confirm against the reply-sending code).
class Request_Failed(Request_Error): pass
class Request_Failed_No_Identd(Request_Failed): pass
class Request_Failed_Ident_failed(Request_Failed): pass
# The request was malformed or not permitted by configuration.
class Request_Refused(Request_Error): pass
class Request_Bad_Version(Request_Refused): pass
class Request_Unknown_Command(Request_Refused): pass
class Request_Unauthorized_Client(Request_Refused): pass
class Request_Invalid_Port(Request_Refused): pass
class Request_Invalid_Format(Request_Refused): pass
# Server class
class ThreadingSocks4Proxy(SocketServer2.ThreadingTCPServer):
"""Threading SOCKS4 proxy class.
Note: this server maintains two lists of all CONNECTION and BIND requests being
handled. This is not really useful for now but may become in the future.
Moreover, it has been a good way to learn about the semaphores of the threading
module :)"""
def __Decode_Command_Line(self, argv = [], definitions = {}, defaults = {}):
result = {}
line_opts, rest = getopt.getopt(argv, SHORT_OPTIONS, LONG_OPTIONS)
for item in line_opts:
opt, value = item
# First trying "opt" value against options that use an argument.
if opt in ['-a', '--bind_adress']:
opt = 'bind_adress'
elif opt in ['-p', '--bind_port']:
opt = 'bind_port'
elif opt in ['-i', '--use_ident']:
opt = 'use_ident'
value = 1
elif opt in ['-r', '--req_buf_size']:
opt = 'req_buf_size'
elif opt in ['-d', '--data_buf_size']:
opt = 'data_buf_size'
elif opt in ['-d', '--inactivity_timeout']:
opt = 'inactivity_timeout'
elif opt in ['-b', '--bind_timeout']:
opt = 'bind | _timeout'
result[opt] = value
return ConfigFile.evaluate(definitions, result, defaults)
def __init__(self, RequestHandlerClass, | *args):
"""Constructor of the server."""
self.Options = DEFAULT_OPTIONS
listenPort = args[0]
if len(args) > 1:
sendPort = args[1]
self.Options['send_port'] = sendPort
self.Options['bind_port'] = listenPort
print "Server starting with following options:"
for i in self.Options.keys(): print i, ':', self.Options[i]
print 'The choosen ip adress is', DEFAULT_OPTIONS['bind_address']
SocketServer2.ThreadingTCPServer.__init__(
self,
(self.Options['bind_address'], self.Options['bind_port']),
RequestHandlerClass)
class ForwardSocksReq(SocketServer2.BaseRequestHandler):
"""This request handler class handles sOCKS 4 requests."""
def handle(self):
"""This function is the main request handler function.
It delegates each step of the request processing to a different function and
handles raised exceptions in order to warn the client that its request has
been rejected (if needed).
The steps are:
- decode_request: reads the request and splits it into a dictionary. it checks
if the request is well-formed (correct socks version, correct command number,
well-formed port number.
- validate_request: checks if the current configuration accepts to handle the
request (client identification, authorization rules)
- handle_connect: handles CONNECT requests
- handle_bind: handles BIND requests
"""
print thread.get_ident(), '-'*40
print thread.get_ident(), 'Request from ', self.client_address
try:
# Read and decode the request from the client and verify that it
# is well-formed.
req = se |
fouzelddin/py4j | py4j-python/src/py4j/compat.py | Python | bsd-3-clause | 2,249 | 0 | # coding: utf-8
"""
Compatibility functions for unified behavior between Python 2.x and 3.x.
:author: Alex Grönholm
"""
from __future__ import unicode_literals, absolute_import
import inspect
import sys
from threading import Thread
# Provide one uniform set of names for both major Python versions.
# The Python 2 branch aliases builtins that were removed/renamed in
# Python 3; the Python 3 branch supplies 2.x-style str/bytes helpers.
if sys.version_info[0] < 3:
    def items(d):
        # d.items() already returns a list on Python 2
        return d.items()
    def iteritems(d):
        return d.iteritems()
    def next(x):
        return x.next()
    range = xrange  # noqa
    long = long  # noqa
    basestring = basestring  # noqa
    unicode = unicode  # noqa
    bytearray2 = bytearray
    unichr = unichr  # noqa
    bytestr = str
    tobytestr = str
    def isbytestr(s):
        return isinstance(s, str)
    def ispython3bytestr(s):
        # Python 2's bytes IS str, so never a "Python 3 byte string"
        return False
    def isbytearray(s):
        return isinstance(s, bytearray)
    def bytetoint(b):
        return ord(b)
    def bytetostr(b):
        return b
    def strtobyte(b):
        return b
    import Queue
    Queue = Queue.Queue
else:
    def items(d):
        # materialize the view for Python 2 parity
        return list(d.items())
    def iteritems(d):
        return d.items()
    next = next
    range = range
    long = int
    basestring = str
    unicode = str
    bytearray2 = bytes
    unichr = chr
    bytestr = bytes
    def tobytestr(s):
        return bytes(s, "ascii")
    def isbytestr(s):
        return isinstance(s, bytes)
    def ispython3bytestr(s):
        return isinstance(s, bytes)
    def isbytearray(s):
        return isinstance(s, bytearray)
    def bytetoint(b):
        # indexing bytes yields int on Python 3
        return b
    def bytetostr(b):
        return str(b, encoding="ascii")
    def strtobyte(s):
        return bytes(s, encoding="ascii")
    import queue
    Queue = queue.Queue
# inspect.getattr_static (Python >= 3.2) checks for an attribute without
# triggering descriptors or __getattr__; fall back to plain hasattr where
# it is unavailable.
if hasattr(inspect, "getattr_static"):
    def hasattr2(obj, attr):
        return bool(inspect.getattr_static(obj, attr, False))
else:
    hasattr2 = hasattr
class CompatThread(Thread):
    """Compatibility Thread class.
    Allows Python 2 Thread class to accept daemon kwarg in init.
    """
    def __init__(self, *args, **kwargs):
        # Pop 'daemon' before delegating: Python 2's Thread.__init__ does
        # not accept it as a keyword argument.
        daemon = kwargs.pop("daemon", None)
        super(CompatThread, self).__init__(*args, **kwargs)
        # BUGFIX: honor an explicit daemon=False as well. The old
        # `if daemon:` check silently ignored False, leaving the daemon
        # flag inherited from the creating thread.
        if daemon is not None:
            self.daemon = daemon
|
tonygalmiche/is_coheliance | report/__init__.py | Python | agpl-3.0 | 147 | 0 | # -*- coding: utf-8 -*-
import is_suivi_facture
import is_suivi_refacturation_ass | ocie
import is_suivi_intervention
import is_account_invoice_line | |
wchan/tensorflow | tensorflow/python/ops/op_def_library.py | Python | apache-2.0 | 27,846 | 0.008403 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you ma | y not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# Se | e the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import constant_op
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def):
  """Raise TypeError if `dtype` is outside `attr_def`'s allowed_values.
  No-op when the attr definition carries no allowed_values constraint.
  """
  if attr_def.HasField("allowed_values"):
    allowed_list = attr_def.allowed_values.list.type
    if dtype not in allowed_list:
      raise TypeError(
          "DataType %s for attr '%s' not in list of allowed values: %s" %
          (dtypes.as_dtype(dtype).name, attr_def.name,
           ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
  # Count how many of the mutually exclusive type fields are set on the
  # arg proto; a well-formed OpDef arg has exactly one (checked in _OpInfo).
  num = 0
  if arg.type != types_pb2.DT_INVALID: num += 1
  if arg.type_attr: num += 1
  if arg.type_list_attr: num += 1
  return num
def _IsListValue(v):
  # Lists and tuples both count as "list values" for op inputs.
  return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
  """Returns the elements of list l structured according to the given structure.
  A structure is represented by a list whose elements are either
  `None` or a non-negative integer. `None` corresponds to a single
  element in the output, and an integer N corresponds to a nested
  list of length N.
  The function returns a data structure whose shape is given by
  `structure`, and whose elements are taken from `l`. If `structure`
  is a singleton, the function returns the single data structure
  implied by the 0th element of `structure`. For example:
  _Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
  -> ("foo", ["bar", "baz"], "qux")   # note: a tuple when len > 1
  _Restructure(["foo"], [None]) -> "foo"
  _Restructure(["foo"], [1]) -> ["foo"]
  _Restructure([], [0]) -> []
  Args:
    l: A list.
    structure: A list whose elements are either `None` or a non-negative
      integer.
  Returns:
    The elements of `l`, restructured according to `structure`. If
    `structure` is a list of length 1, this function returns the
    single data structure implied by `structure[0]`; otherwise a tuple.
  """
  result = []
  current_index = 0
  for element in structure:
    if element is None:
      # single element, passed through unchanged
      result.append(l[current_index])
      current_index += 1
    else:
      # nested group: take the next `element` items as a sub-list
      result.append(l[current_index:current_index+element])
      current_index += element
  if len(result) == 1:
    return result[0]
  else:
    return tuple(result)
def _MakeFloat(v, arg_name):
  """Coerce `v` to float for attr `arg_name`; TypeError for non-reals."""
  if not isinstance(v, compat.real_types):
    raise TypeError("Expected float for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return float(v)
def _MakeInt(v, arg_name):
  """Coerce `v` to int for attr `arg_name`.
  Strings are rejected up front so int("3") does not silently succeed.
  """
  if isinstance(v, six.string_types):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  try:
    return int(v)
  except (ValueError, TypeError):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
def _MakeStr(v, arg_name):
  """Return `v` as a byte string for attr `arg_name`; TypeError otherwise."""
  if not isinstance(v, compat.bytes_or_text_types):
    raise TypeError("Expected string for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return compat.as_bytes(v)  # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
  """Convert `v` to a DataType enum and validate it against `attr_def`.
  Raises:
    TypeError: if `v` is not a valid DataType or violates the attr's
      allowed_values constraint.
  """
  try:
    v = dtypes.as_dtype(v)
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (attr_def.name, repr(v)))
  i = v.as_datatype_enum
  _SatisfiesTypeConstraint(i, attr_def)
  return i
def _MakeShape(v, arg_name):
  """Convert v into a TensorShapeProto."""
  # Args:
  #   v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
  #   arg_name: String, for error messages.
  # Returns:
  #   A TensorShapeProto.
  if isinstance(v, tensor_shape_pb2.TensorShapeProto):
    for d in v.dim:
      if d.name:
        # Named dimensions are not expected here; warn once and accept.
        logging.warning("Warning: TensorShapeProto with a named dimension: %s",
                        str(v))
        break
    return v
  return tensor_shape.as_shape(v).as_proto()
def _MakeTensor(v, arg_name):
  """Ensure v is a TensorProto."""
  # Only pre-built TensorProtos are accepted; other values must be
  # converted by the caller before reaching this point.
  if isinstance(v, tensor_pb2.TensorProto):
    return v
  raise TypeError(
      "Don't know how to convert %s to a TensorProto for argument '%s'" %
      (repr(v), arg_name))
class _OpInfo(object):
  """All per-Op state we would like to precompute/validate."""
  def __init__(self, op_def):
    """Validate `op_def` and store it.
    Raises:
      TypeError: if any input/output arg does not have exactly one type
        field, or if an attr referenced by an arg has the wrong attr type.
    """
    self.op_def = op_def
    # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
    # here, instead of these checks.
    for arg in list(op_def.input_arg) + list(op_def.output_arg):
      num_type_fields = _NumTypeFields(arg)
      if num_type_fields != 1:
        raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
                        (arg.name, op_def.name, num_type_fields))
      if arg.type_attr:
        attr_type = _Attr(op_def, arg.type_attr).type
        if attr_type != "type":
          raise TypeError("Attr '%s' of '%s' used as a type_attr "
                          "but has type %s" %
                          (arg.type_attr, op_def.name, attr_type))
      if arg.type_list_attr:
        attr_type = _Attr(op_def, arg.type_list_attr).type
        if attr_type != "list(type)":
          # BUGFIX: the error message used to interpolate arg.type_attr,
          # naming the wrong attribute in the diagnostic.
          raise TypeError(
              "Attr '%s' of '%s' used as a type_list_attr but has type %s" %
              (arg.type_list_attr, op_def.name, attr_type))
      if arg.number_attr:
        attr_type = _Attr(op_def, arg.number_attr).type
        if attr_type != "int":
          raise TypeError(
              "Attr '%s' of '%s' used as a number_attr but has type %s" %
              (arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager pe |
rsc-dev/loophole | loophole/__main__.py | Python | mit | 9,572 | 0.002507 | #!/usr/bin/env python
__author__ = 'Radoslaw Matusiak'
__copyright__ = 'Copyright (c) 2016 Radoslaw Matusiak'
__license__ = 'MIT'
__version__ = '0.5'
import cmd
import functools
import os
import sys
from polar import Device
from polar.pb import device_pb2 as pb_device
__INTRO = """
_| _| _|
_| _|_| _|_| _|_|_| _|_|_| _|_| _| _|_|
_| _| _| _| _| _| _| _| _| _| _| _| _|_|_|_|
_| _| _| _| _| _| _| _| _| _| _| _| _|
_| _|_| _|_| _|_|_| _| _| _|_| _| _|_|_|
_|
_|
ver. {}
"""
def check_if_device_is_connected(f):
    """
    Decorator. Checks if device is connected before invoking function.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # args[0] is the LoopholeCli instance ('self' of the wrapped method)
        if args[0].device is not None:
            return f(*args, **kwargs)
        else:
            print '[!] Device disconnected.'
            print
    return wrapper
class LoopholeCli(cmd.Cmd):
""" Loophole command line interface class. """
__PROMPT = 'loophole({})>'
    def __init__(self):
        """Constructor.
        Starts with no device connected; the prompt reflects that until
        do_connect() succeeds.
        """
        cmd.Cmd.__init__(self)
        self.prompt = LoopholeCli.__PROMPT.format('no device')
        self.device = None
    # end-of-method __init__
    def do_exit(self, _):
        """Quit.
        Usage: exit
        """
        # Close the USB handle (if any) before terminating the process.
        if self.device is not None:
            self.device.close()
        sys.exit(0)
    # end-of-method do_exit
    def do_EOF(self, _):
        """Quit. handles EOF"""
        self.do_exit(_)
    # end-of-method do_EOF
    def do_list(self, _):
        """List available Polar devices.
        Usage: list
        """
        devs = Device.list()
        if len(devs) > 0:
            for i, dev in enumerate(devs):
                try:
                    info = Device.get_info(dev)
                except ValueError as err:
                    print "Device no: %i" % i
                    print "Device info:"
                    print dev
                    print "-"*79
                    # NOTE: err.message is Python 2 only
                    if 'langid' in err.message:
                        # Reading the USB string descriptors failed; on
                        # Linux this is usually a udev permission problem.
                        raise ValueError(
                            (
                                "Can't get device info. Origin Error: %s\n"
                                "Maybe this is a permission issue.\n"
                                "Please read section 'permission' in README ;)"
                            ) % err
                        )
                    raise # raise origin error
                print '{} - {} ({})'.format(i, info['product_name'], info['serial_number'])
        else:
            print '[!] No Polar devices found!'
            print
    # end-of-method do_list
    def do_connect(self, dev_no):
        """Connect Polar device. Run 'list' to see available devices.
        Usage: connect <device_no>
        """
        try:
            dev_no = int(dev_no)
        except ValueError:
            print '[!] You need to specify the device number. Run \'list\' to see available devices.'
            print
            return
        try:
            devs = Device.list()
            dev = devs[dev_no]
            # Show the device's serial number in the prompt so the user
            # can see which device the shell is talking to.
            serial = Device.get_info(dev)['serial_number']
            self.prompt = LoopholeCli.__PROMPT.format(serial)
            self.device = Device(dev)
            self.device.open()
            print '[+] Device connected.'
            print
        except IndexError:
            print '[!] Device not found or failed to open it. Run \'list\' to see available devices.'
            print
    # end-of-method do_connect
    @check_if_device_is_connected
    def do_disconnect(self, _):
        """Disconnect Polar device.
        """
        self.device.close()
        self.device = None
        # Reset the prompt back to the disconnected state.
        self.prompt = LoopholeCli.__PROMPT.format('no device')
        print '[+] Device disconnected.'
        print
    # end-of-method do_disconnect
    @check_if_device_is_connected
    def do_get(self, line):
        """Read file from device and store in under local_path.
        Usage: get <device_path> <local_path>
        """
        try:
            src, dest = line.strip().split()
            # read_file returns a sequence of byte values; write them raw
            data = self.device.read_file(src)
            with open(dest, 'wb') as outfile:
                outfile.write(bytearray(data))
            print '[+] File \'{}\' saved to \'{}\''.format(src, dest)
            print
        except ValueError:
            # Raised by the tuple unpacking when the argument count is wrong
            print '[!] Invalid command usage.'
            print '[!] Usage: get <source> <destination>'
            print
    # end-of-method do_get
    @check_if_device_is_connected
    def do_delete(self, line):
        """Delete file from device.
        Usage: delete <device_path>
        """
        path = line.strip()
        _ = self.device.delete(path)
    # end-of-method do_delete
    @check_if_device_is_connected
    def do_dump(self, path):
        """Dump device memory. Path is local folder to store dump.
        Usage: dump <local_path>
        """
        print '[+] Reading files tree...'
        dev_map = self.device.walk(self.device.SEP)
        for directory in dev_map.keys():
            # Translate device path separators into local OS separators;
            # strip the leading separator so os.path.join works.
            fixed_directory = directory.replace(self.device.SEP, os.sep)
            full_path = os.path.abspath(os.path.join(path, fixed_directory[1:]))
            if not os.path.exists(full_path):
                os.makedirs(full_path)
            d = dev_map[directory]
            # Entries whose name ends with '/' are directories; skip them
            files = [e for e in d.entries if not e.name.endswith('/')]
            for file in files:
                with open(os.path.join(full_path, file.name), 'wb') as fh:
                    print '[+] Dumping {}{}'.format(directory, file.name)
                    data = self.device.read_file('{}{}'.format(directory, file.name))
                    fh.write(bytearray(data))
        print '[+] Device memory dumped.'
        print
    # end-of-method do_dump
@check_if_device_is_connected
def do_info(self, _):
"""Print connected device info.
Usage: info
"""
info = Device.get_info(self.device.usb_device)
print '{:>20s} - {}'.format('Manufacturer', info['manufacturer'])
print '{:>20s} - {}'.format('Product name', info['product_name'])
print '{:>20s} - {}'.format('Vendor ID', info['vendor_id'])
print '{:>20s} - {}'.format('Product ID', info['product_id'])
print '{:>20s} - {}'.format('Serial number', info['serial_number'])
try:
data = self.device.read_file('/DEVICE.BPB')
resp = ''.join(chr(c) for c in data)
d = pb_device.PbDeviceInfo()
d.ParseFromString(resp)
bootloader_version = '{}.{}.{}'.format(d.bootloader_version.major, d.bootloader_version.minor, d.bootloader_version.patch)
print '{:>20s} - {}'.format('Bootloader version', bootloader_version)
platform_version = '{}.{}.{}'.format(d.platform_version.major, d.platform_version.minor, d.platform_version.patch)
print '{:>20s} - {}'.format('Platform version', platform_version)
device_version = '{}.{}.{}'.format(d.device_version.major, d.device_version.minor, d.device_version.patch)
print '{:>20s} - {}'.format('Device version', device_version)
print '{:>20s} - {}'.format('SVN revision', d.svn_rev)
print '{:>20s} - {}'.format('Hardware code', d.hardware_code)
print '{:>20s} - {}'.format('Color', d.product_color)
print '{:>20s} - {}'.format('Product design', d.product_design)
except:
print '[!] Failed to get extended info.'
print ' '
# end-of-method do_info
@check_if_device_is_connected
def do_fuzz(self, _):
import polar
num = _.strip()
if len(num) > 0:
num = int(num)
resp = self.device.send_raw([0x01, num] + [0x00] * 62)
print 'req: {} '.format(num),
if resp:
print 'err code: {}'.format(polar.PFTP_ERROR[resp[0]])
return
for i in xrange(256):
#raw_input('Sending [{}]...<press enter>'.format(i))
|
mick-d/nipype | nipype/interfaces/spm/tests/test_auto_ApplyTransform.py | Python | bsd-3-clause | 1,064 | 0.016917 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import ApplyTransform
def test_ApplyTransform_inputs():
input_map = dict(ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(copyfile=True,
mandatory=True,
),
mat=dict(mandatory=True,
),
matlab_cmd=dict(),
mfile=dict(usedefault=True,
),
out_file=di | ct(genfile=True,
),
paths=dict(),
use_mcr=dict(),
use_v8struct=dict(min_ver | ='8',
usedefault=True,
),
)
inputs = ApplyTransform.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ApplyTransform_outputs():
output_map = dict(out_file=dict(),
)
outputs = ApplyTransform.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
x0mak/test---project---python---Kurbatova | php4dvd/model/user.py | Python | apache-2.0 | 272 | 0.003676 | class User(object):
def __ | init__(self, username=None, password=None, email=None):
self.username = username
self.password = password
self.email = email
@classmethod
def Admin(cls):
return cls(username="admin", password="ad | min") |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/raw/GL/NV/conditional_render.py | Python | bsd-2-clause | 812 | 0.024631 | '''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_conditional_render'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_NV_conditional_render',False)
_p.unpack_constants( """GL_QUERY_WAIT_NV 0x8E13
GL_QUERY_NO_WAIT_NV 0x8E14
GL | _QUERY_BY_REGION_WAIT_NV 0x8E15
GL_QUERY_BY_REGION_NO_WAIT_NV 0x8E16""", globals())
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum)
def glBeginConditionalRenderNV( id,mode ):pass
@_f
@ | _p.types(None,)
def glEndConditionalRenderNV( ):pass
def glInitConditionalRenderNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
|
bjornedstrom/toytls | toytls/hash.py | Python | mit | 890 | 0 | # -*- coding: utf-8 -*-
""" reference implementation of TLS PRF from tlslite
"""
import hashlib
impor | t math
import hmac
def P_hash(hashmod, secret, seed, length):
bytes = ['\x00'] * leng | th
A = seed
index = 0
while 1:
A = hmac.HMAC(secret, A, hashmod).digest()
output = hmac.HMAC(secret, A+seed, hashmod).digest()
for c in output:
if index >= length:
return ''.join(bytes)
bytes[index] = c
index += 1
return ''.join(bytes)
def PRF(secret, label, seed, length):
half = int(math.ceil(len(secret)/2.0))
S1 = secret[:half]
S2 = secret[half:]
p_md5 = P_hash(hashlib.md5, S1, label + seed, length)
p_sha1 = P_hash(hashlib.sha1, S2, label + seed, length)
res = []
for x in range(length):
res.append(chr(ord(p_md5[x]) ^ ord(p_sha1[x])))
return ''.join(res)
|
plotly/python-api | packages/python/plotly/plotly/validators/layout/yaxis/_fixedrange.py | Python | mit | 456 | 0.002193 | import _plotly_utils.basevalidators
class FixedrangeValidator(_plotly_utils.basevalidators.BooleanValidator) | :
def __init__(self, plotly_name="fixedrange", parent_name="layout.yaxis", **kwargs):
super(FixedrangeValidator | , self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
samuelmaudo/yepes | yepes/utils/unidecode/x082.py | Python | bsd-3-clause | 4,649 | 0.055066 | data = (
'Yao ', # 0x00
'Yu ', # 0x01
'Chong ', # 0x02
'Xi ', # 0x03
'Xi ', # 0x04
'Jiu ', # 0x05
'Yu ', # 0x06
'Yu ', # 0x07
'Xing ', # 0x08
'Ju ', # 0x09
'Jiu ', # 0x0a
'Xin ', # 0x0b
'She ', # 0x0c
'She ', # 0x0d
'Yadoru ', # 0x0e
'Jiu ', # 0x0f
'Shi ', # 0x10
'Tan ', # 0x11
'Shu ', # 0x12
'Shi ', # 0x13
'Tian ', # 0x14
'Dan ', # 0x15
'Pu ', # 0x16
'Pu ', # 0x17
'Guan ', # 0x18
'Hua ', # 0x19
'Tan ', # 0x1a
'Chuan ', # 0x1b
'Shun ', # 0x1c
'Xia ', # 0x1d
'Wu ', # 0x1e
'Zhou ', # 0x1f
'Dao ', # 0x20
'Gang ', # 0x21
'Shan ', # 0x22
'Yi ', # 0x23
'[?] ', # 0x24
'Pa ', # 0x25
'Tai ', # 0x26
'Fan ', # 0x27
'Ban ', # 0x28
'Chuan ', # 0x29
'Hang ', # 0x2a
'Fang ', # 0x2b
'Ban ', # 0x2c
'Que ', # 0x2d
'Hesaki ', # 0x2e
'Zhong ', # 0x2f
'Jian ', # 0x30
'Cang ', # 0x31
'Ling ', # 0x32
'Zhu ', # 0x33
'Ze ', # 0x34
'Duo ', # 0x35
'Bo ', # 0x36
'Xian ', # 0x37
'Ge ', # 0x38
'Chuan ', # 0x39
'Jia ', # 0x3a
'Lu ', # 0x3b
'Hong ', # 0x3c
'Pang ', # 0x3d
'Xi ', # 0x3e
'[?] ', # 0x3f
'Fu ', # 0x40
'Zao ', # 0x41
'Feng ', # 0x42
'Li ', # 0x43
'Shao ', # 0x44
'Yu ', # 0x45
'Lang ', # 0x46
'Ting ', # 0x47
'[?] ', # 0x48
'Wei ', # 0x49
'Bo ', # 0x4a
'Meng ', # 0x4b
'Nian ', # 0x4c
'Ju ', # 0x4d
'Huang ', # 0x4e
'Shou ', # 0x4f
'Zong ', # 0x50
'Bian ', # 0x51
'Mao ', # 0x52
'Die ', # 0x53
'[?] ', # 0x54
'Bang ', # 0x55
'Cha ', # 0x56
'Yi ', # 0x57
'Sao ', # 0x58
'Cang ', # 0x59
'Cao ', # 0x5a
'Lou ', # 0x5b
'Dai ', # 0x5c
'Sori ', # 0x5d
'Yao ', # 0x5e
'Tong ', # 0x5f
'Yofune ', # 0x60
'Dang ', # 0x61
'Tan ', # 0x62
'Lu ', # 0x63
'Yi ', # 0x64
'Jie ', # 0x65
'Jian ', # 0x66
'Huo ', # 0x67
'Meng ', # 0x68
'Qi ', # 0x69
'Lu ', # 0x6a
'Lu ', # 0x6b
'Chan ', # 0x6c
'Shuang ', # 0x6d
'Gen ', # 0x6e
'Liang ', # 0x6f
'Jian ', # 0x70
'Jian ', # 0x71
'Se ', # 0x72
'Yan ', # 0x73
'Fu ', # 0x74
'Ping ', # 0x75
'Yan ', # 0x76
'Yan ', # 0x77
'Cao ', # 0x78
'Cao ', # 0x79
'Yi ', # 0x7a
'Le ', # 0x7b
'Ting ', # 0x7c
'Qiu ', # 0x7d
'Ai ', # 0x7e
'Nai ', # 0x7f
'Tiao ', # 0x80
'Jiao ', # 0x81
'Jie ', # 0x82
'Peng ', # 0x83
'Wan ', # 0x84
'Yi ', # 0x85
'Chai ', # 0x86
'Mian ', # 0x87
'Mie ', # 0x88
'Gan ', # 0x89
'Qian ', # 0x8a
'Yu ', # 0x8b
'Yu | ', # 0x8c
'Shuo ', # 0x8d
'Qiong ', # 0x8e
'Tu ', # 0x8f
'Xia ', # 0x90
'Qi ', # 0x91
'Mang ', # 0x92
| 'Zi ', # 0x93
'Hui ', # 0x94
'Sui ', # 0x95
'Zhi ', # 0x96
'Xiang ', # 0x97
'Bi ', # 0x98
'Fu ', # 0x99
'Tun ', # 0x9a
'Wei ', # 0x9b
'Wu ', # 0x9c
'Zhi ', # 0x9d
'Qi ', # 0x9e
'Shan ', # 0x9f
'Wen ', # 0xa0
'Qian ', # 0xa1
'Ren ', # 0xa2
'Fou ', # 0xa3
'Kou ', # 0xa4
'Jie ', # 0xa5
'Lu ', # 0xa6
'Xu ', # 0xa7
'Ji ', # 0xa8
'Qin ', # 0xa9
'Qi ', # 0xaa
'Yuan ', # 0xab
'Fen ', # 0xac
'Ba ', # 0xad
'Rui ', # 0xae
'Xin ', # 0xaf
'Ji ', # 0xb0
'Hua ', # 0xb1
'Hua ', # 0xb2
'Fang ', # 0xb3
'Wu ', # 0xb4
'Jue ', # 0xb5
'Gou ', # 0xb6
'Zhi ', # 0xb7
'Yun ', # 0xb8
'Qin ', # 0xb9
'Ao ', # 0xba
'Chu ', # 0xbb
'Mao ', # 0xbc
'Ya ', # 0xbd
'Fei ', # 0xbe
'Reng ', # 0xbf
'Hang ', # 0xc0
'Cong ', # 0xc1
'Yin ', # 0xc2
'You ', # 0xc3
'Bian ', # 0xc4
'Yi ', # 0xc5
'Susa ', # 0xc6
'Wei ', # 0xc7
'Li ', # 0xc8
'Pi ', # 0xc9
'E ', # 0xca
'Xian ', # 0xcb
'Chang ', # 0xcc
'Cang ', # 0xcd
'Meng ', # 0xce
'Su ', # 0xcf
'Yi ', # 0xd0
'Yuan ', # 0xd1
'Ran ', # 0xd2
'Ling ', # 0xd3
'Tai ', # 0xd4
'Tiao ', # 0xd5
'Di ', # 0xd6
'Miao ', # 0xd7
'Qiong ', # 0xd8
'Li ', # 0xd9
'Yong ', # 0xda
'Ke ', # 0xdb
'Mu ', # 0xdc
'Pei ', # 0xdd
'Bao ', # 0xde
'Gou ', # 0xdf
'Min ', # 0xe0
'Yi ', # 0xe1
'Yi ', # 0xe2
'Ju ', # 0xe3
'Pi ', # 0xe4
'Ruo ', # 0xe5
'Ku ', # 0xe6
'Zhu ', # 0xe7
'Ni ', # 0xe8
'Bo ', # 0xe9
'Bing ', # 0xea
'Shan ', # 0xeb
'Qiu ', # 0xec
'Yao ', # 0xed
'Xian ', # 0xee
'Ben ', # 0xef
'Hong ', # 0xf0
'Ying ', # 0xf1
'Zha ', # 0xf2
'Dong ', # 0xf3
'Ju ', # 0xf4
'Die ', # 0xf5
'Nie ', # 0xf6
'Gan ', # 0xf7
'Hu ', # 0xf8
'Ping ', # 0xf9
'Mei ', # 0xfa
'Fu ', # 0xfb
'Sheng ', # 0xfc
'Gu ', # 0xfd
'Bi ', # 0xfe
'Wei ', # 0xff
)
|
jedijulia/starwarspwa | starwarspwa/migrations/0003_auto_20160901_1650.py | Python | mit | 955 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 16:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('starwarspwa', '0002_subscription'),
]
operations = [
migrations.RemoveField(
model_name='subscription',
name='subscription_id',
),
migrations.AddField(
model_name='subscription',
name='auth',
field=models.Char | Field(max_length=255, blank=True, null=True),
),
migrations.AddField(
model_name='subscription',
name='endpoint',
field=models.CharField(max_length=255, blank=True, null=True),
),
migrations.AddField(
model_name='subscription',
name='p256dh',
| field=models.CharField(max_length=255, blank=True, null=True),
),
]
|
ColinIanKing/autotest | scheduler/monitor_db_unittest.py | Python | gpl-2.0 | 58,290 | 0.002608 | #!/usr/bin/python
import gc, logging, time
try:
import autotest.common as common
except ImportError:
import common
from autotest.frontend import setup_django_environment
from autotest.frontend.afe import frontend_test_utils
from autotest.client.shared.test_utils import mock
from autotest.client.shared.test_utils import unittest
from autotest.database import database_connection
from autotest.frontend.afe import models
from autotest.scheduler import monitor_db, drone_manager, email_manager
from autotest.scheduler import scheduler_config, gc_stats, host_scheduler
from autotest.scheduler import monitor_db_functional_unittest
from autotest.scheduler import scheduler_models
_DEBUG = False
class DummyAgentTask(object):
num_processes = 1
owner_username = 'my_user'
def get_drone_hostnames_allowed(self):
return None
class DummyAgent(object):
started = False
_is_done = False
host_ids = ()
queue_entry_ids = ()
def __init__(self):
self.task = DummyAgentTask()
def tick(self):
self.started = True
def is_done(self):
return self._is_done
def set_done(self, done):
self._is_done = done
class IsRow(mock.argument_comparator):
def __init__(self, row_id):
self.row_id = row_id
def is_satisfied_by(self, parameter):
return list(parameter)[0] == self.row_id
def __str__(self):
return 'row with id %s' % self.row_id
class IsAgentWithTask(mock.argument_comparator):
def __init__(self, task):
self._task = task
def is_satisfied_by(self, parameter):
if not isinstance(parameter, monitor_db.Agent):
return False
tasks = list(parameter.queue.queue)
if len(tasks) != 1:
return False
return tasks[0] == self._task
def _set_host_and_qe_ids(agent_or_task, id_list=None):
if id_list is None:
id_list = []
agent_or_task.host_ids = agent_or_task.queue_entry_ids = id_list
class BaseSchedulerTest(unittest.TestCase,
frontend_test_utils.FrontendTestMixin):
_config_section = 'AUTOTEST_WEB'
def _do_query(self, sql):
self._database.execute(sql)
def _set_monitor_stubs(self):
# Clear the instance cache as this is a brand new database.
scheduler_models.DBObject._clear_instance_cache()
self._database = (
database_connection.TranslatingDatabase.get_test_database(
translators=monitor_db_functional_unittest._DB_TRANSLATORS))
self._database.connect(db_type='django')
self._database.debug = _DEBUG
self.god.stub_with(monitor_db, '_db', self._database)
self.god.stub_with(scheduler_models, '_db', self._database)
self.god.stub_with(drone_manager.instance(), '_results_dir',
'/test/path')
self.god.stub_with(drone_manager.instance(), '_temporary_directory',
'/test/path/tmp')
monitor_db.initialize_globals()
scheduler_models.initialize_globals()
def setUp(self):
self._frontend_common_setup()
self._set_monitor_stubs()
self._dispatcher = monitor_db.Dispatcher()
def tearDown(self):
self._database.disconnect()
self._frontend_common_teardown()
def _update_hqe(self, set, where=''):
query = 'UPDATE afe_host_queue_entries SET ' + set
if where:
query += ' WHERE ' + where
self._do_query(query)
class DispatcherSchedulingTest(BaseSchedulerTest):
_jobs_scheduled = []
def tearDown(self):
super(DispatcherSchedulingTest, self).tearDown()
def _set_monitor_stubs(self):
super(DispatcherSchedulingTest, self)._set_monitor_stubs()
def hqe__do_schedule_pre_job_tasks_stub(queue_entry):
"""Called by HostQueueEntry.run()."""
self._record_job_scheduled(queue_entry.job.id, queue_entry.host.id)
queue_entry.set_status('Starting')
self.god.stub_with(scheduler_models.HostQueueEntry,
'_do_schedule_pre_job_tasks',
hqe__do_schedule_pre_job_tasks_stub)
def hqe_queue_log_record_stub(self, log_line):
"""No-Op to avoid calls down to the _drone_manager during tests."""
self.god.stub_with(scheduler_models.HostQueueEntry, 'queue_log_record',
hqe_queue_log_record_stub)
def _record_job_scheduled(self, job_id, host_id):
record = (job_id, host_id)
self.assert_(record not in self._jobs_scheduled,
'Job %d scheduled on host %d twice' %
(job_id, host_id))
self._jobs_scheduled.append(record)
def _assert_job_scheduled_on(self, job_id, host_id):
record = (job_id, host_id)
self.assert_(record in self._jobs_scheduled,
'Job %d not scheduled on host %d as expected\n'
'Jobs scheduled: %s' %
(job_id, host_id, self._jobs_scheduled))
self._jobs_scheduled.remove(record)
def _assert_job_scheduled_on_number_of(self, job_id, host_ids, number):
"""Assert job was scheduled on exactly number hosts out of a set."""
found = []
for host_id in host_ids:
record = (job_id, host_id)
if record in self._jobs_scheduled:
found.append(record)
self._jobs_scheduled.remove(record)
if len(found) < number:
self.fail('Job %d scheduled on fewer than %d hosts in %s.\n'
'Jobs scheduled: %s' % (job_id, number, host_ids, found))
elif len(found) > number:
self.fail('Job %d scheduled on more than %d hosts in %s.\n'
'Jobs scheduled: %s' % (job_id, number, host_ids, f | ound))
def _check_for_extra_schedulings(self):
if len(self._jobs_scheduled) != 0:
self.fail('Extra jobs scheduled: ' +
str(self._jobs_scheduled))
def _convert_jobs_to_metahosts(self, *job_ids):
sql_tuple = '(' + ','.join(str(i) for i in job_ids) + ')'
self._do_query('UPDATE | afe_host_queue_entries SET '
'meta_host=host_id, host_id=NULL '
'WHERE job_id IN ' + sql_tuple)
def _lock_host(self, host_id):
self._do_query('UPDATE afe_hosts SET locked=1 WHERE id=' +
str(host_id))
def setUp(self):
super(DispatcherSchedulingTest, self).setUp()
self._jobs_scheduled = []
def _run_scheduler(self):
for _ in xrange(2): # metahost scheduling can take two cycles
self._dispatcher._schedule_new_jobs()
def _test_basic_scheduling_helper(self, use_metahosts):
'Basic nonmetahost scheduling'
self._create_job_simple([1], use_metahosts)
self._create_job_simple([2], use_metahosts)
self._run_scheduler()
self._assert_job_scheduled_on(1, 1)
self._assert_job_scheduled_on(2, 2)
self._check_for_extra_schedulings()
def _test_priorities_helper(self, use_metahosts):
'Test prioritization ordering'
self._create_job_simple([1], use_metahosts)
self._create_job_simple([2], use_metahosts)
self._create_job_simple([1,2], use_metahosts)
self._create_job_simple([1], use_metahosts, priority=1)
self._run_scheduler()
self._assert_job_scheduled_on(4, 1) # higher priority
self._assert_job_scheduled_on(2, 2) # earlier job over later
self._check_for_extra_schedulings()
def _test_hosts_ready_helper(self, use_metahosts):
"""
Only hosts that are status=Ready, unlocked and not invalid get
scheduled.
"""
self._create_job_simple([1], use_metahosts)
self._do_query('UPDATE afe_hosts SET status="Running" WHERE id=1')
self._run_scheduler()
self._check_for_extra_schedulings()
self._do_query('UPDATE afe_hosts SET status="Ready", locked=1 '
'WHERE id=1')
self._run_scheduler()
|
d3matt/d3matt.com | src/d3matt/d3matt/wsgi.py | Python | bsd-2-clause | 387 | 0.002584 | """
WSGI | config for d3matt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "d3matt.settings")
from django.core.wsgi import get_wsgi_application
ap | plication = get_wsgi_application()
|
urmyfaith/notes_web_py | basic/Templetor/05templetor-control-structures.py | Python | apache-2.0 | 296 | 0.047297 | i | mport web
render = web.template.render('templates/')
urls = (
'/(.*)', 'index'
)
class index:
def GET(self,name):
i=web.input(name=None)
return render.controlStructures(name)
if __name__ == "__main__":
app = web.application(urls, globals())
a | pp.run()
|
fastflo/emma | emmalib/__init__.py | Python | gpl-2.0 | 2,772 | 0.002886 | # -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt (flo@fastflo.de)
# 2014 Nickolay Karnaukhov (mr.electronick@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
emma_instance = None
emma_registered_providers = []
if __name__ != 'emmalib':
print "Don't run __init__.py - run ../emma instead"
exit()
# package: python-gobject
try:
import gobject
except:
print "No gobject. Emma cannot start."
exit(-1)
# package: python-gtk2
try:
import gtk
import gtk.gdk
from gtk import keysyms
except:
print "No gtk. Emma cannot start.", sys.exc_value
exit(-1)
# package: python-glade2
try:
import gtk.glade
except:
print "No gtk.glade. Emma cannot start.", sys.exc_value
exit(-1)
from OutputHandler import OutputHandler
from Constants import *
from Emma import Emma
def usage():
"""
Show usage in console
"""
print """usage: emma [-h|--help] [-d|--debug] [-l output_log [-f|--flush]]
-h|--help show this help message
-d|--debug output debug information on stdout
-l|--log FILE append all output to a specified log file
-f|--flush flush {stdout,log} after each write
"""
sys.exit(0)
def start(args):
"""
Start Emma
@param args:
@return:
"""
global emma_instance
debug_output = False
log_file = None
log_flush = False
skip = False
for i, arg in enumerate(args):
if skip:
skip = False
continue
if arg == "-h" or arg == "--help":
usage()
elif arg == "-d" or arg == "--debug":
debug_output = True
elif arg == "-f" or arg == "--flush":
log_flush = True
elif arg | == "-l" or arg == "--log":
if i + 1 == len(args):
usage()
log_file = args[i + 1]
skip = True
else:
usage()
# this singleton will be accessible as sys.stdout!
OutputHandler(debug_output, log_file, log_flush)
emma_inst | ance = Emma()
emma_instance.start()
gtk.main()
return 0
|
ndtran/l10n-switzerland | l10n_ch_account_statement_base_import/parser/ubs_file_parser.py | Python | agpl-3.0 | 7,013 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Steve Ferry
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
import csv
import uuid
from openerp import fields
from .base_parser import BaseSwissParser
_logger = logging.getLogger(__name__)
def float_or_zero(val):
""" Conversion function used to manage
empty string into float usecase"""
val = val.replace("'", "")
return float(val) if val else 0.0
def format_date(val):
return datetime.datetime.strptime(val, "%d.%m.%Y")
class UBSCSVParser(BaseSwissParser):
"""
Parser for UBS CSV Statements
"""
_ftype = 'ubs_csvparser'
def __init__(self, data_file):
"""Constructor
Splitting data_file in lines and create a dict from the csv file
"""
super(UBSCSVParser, self).__init__(data_file)
rows = []
self.lines = self.data_file.splitlines()
reader = csv.DictReader(self.lines[:-3], delimiter=';')
for row in reader:
rows.append(
dict([(key.decode('iso-8859-15'), value.decode('iso-8859-15'))
for key, value in row.iteritems()]))
self.datas = rows
def ftype(self):
"""Gives the type of file we want to import
:return: imported file type
:rtype: string
"""
return super(UBSCSVParser, self).ftype()
def get_currency(self):
"""Returns the ISO currency code of the parsed file
:return: The ISO currency code of the parsed file eg: CHF
:rtype: string
"""
return super(UBSCSVParser, self).get_currency()
def get_account_number(self):
"""Return the account_number related to parsed file
:return: The account number of the parsed file
:rtype: string
"""
return super(UBSCSVParser, self).get_account_number()
def get_statements(self):
"""Return the list of bank statement dict.
Bank statements data: list of dict containing
(optional items marked by o) :
- 'name': string (e.g: '000000123')
- 'date': date (e.g: 2013-06-26)
-o 'balance_start': float (e.g: 8368.56)
-o 'balance_end_real': float (e.g: 8888.88)
- 'transactions': list of dict containing :
- 'name': string
(e.g: 'KBC-INVESTERINGSKREDIET 787-5562831-01')
- 'date': date
- 'amount': float
- 'unique_import_id': string
-o 'account_number': string
Will be used to find/create the res.partner.bank in odoo
-o 'note': string
-o 'partner_name': string
-o 'ref': string
:return: a list of statement
:rtype: list
"""
return super(UBSCSVParser, self).get_statements()
def file_is_known(self):
"""Predicate the tells if the parser can parse the data file
:return: True if file is supported
:rtype: bool
"""
return len(self.datas) > 0 and ('IBAN' in self.datas[1])
def _parse_currency_code(self):
"""Parse file currency ISO code
:return: the currency ISO code of the file eg: CHF
:rtype: string
"""
return 'CHF'
def _par | se_statement_balance(self):
"""Parse file start and | end balance
:return: Tuple of the file start and end balance
:rtype: float
"""
solde_line = self.lines[-1].split(";")
balance_end = float_or_zero(solde_line[0])
balance_start = float_or_zero(solde_line[1])
return balance_start, balance_end
def _parse_transactions(self):
"""Parse bank statement lines from file
list of dict containing :
- 'name': string (e.g: 'KBC-INVESTERINGSKREDIET 787-5562831-01')
- 'date': date
- 'amount': float
- 'unique_import_id': string
-o 'account_number': string
Will be used to find/create the res.partner.bank in odoo
-o 'note': string
-o 'partner_name': string
-o 'ref': string
:return: a list of transactions
:rtype: list
"""
transactions = []
for line in self.datas:
descriptions = [
line.get("Description 1", '/'), line.get("Description 2", ''),
line.get("Description 3", '')]
label = ' '.join(filter(None, descriptions))
debit = "Débit".decode('iso-8859-15')
credit = "Crédit".decode('iso-8859-15')
amount = - float_or_zero(line[debit]) or \
float_or_zero(line[credit]) or 0.0
res = {
'name': label,
'date': format_date(line.get("Date de comptabilisation")),
'amount': amount,
'ref': '/',
'note': label,
'unique_import_id': str(uuid.uuid4())
}
transactions.append(res)
return transactions
def _parse_statement_date(self):
"""Parse file statement date
:return: A date usable by Odoo in write or create dict
"""
date = datetime.date.today()
return fields.Date.to_string(date)
def _parse(self):
"""
Launch the parsing through the csv file.
"""
self.currency_code = self._parse_currency_code()
balance_start, balance_end = self._parse_statement_balance()
statement = {}
statement['balance_start'] = balance_start
statement['balance_end_real'] = balance_end
statement['date'] = self._parse_statement_date()
statement['attachments'] = []
statement['transactions'] = self._parse_transactions()
self.statements.append(statement)
return True
|
NBor/SkyPython | src/layers/Layer.py | Python | apache-2.0 | 5,449 | 0.004588 | '''
// Copyright 2009 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original Author: John Taylor, Brent Bryan
//
// Notification of Change: The original java source code has been
// modified in that it has been rewritten in the python programming
// language and additionally, may contain components and ideas that are
// not found in the original source code.
Copyright 2013 Neil Borle and Paul Lu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-05-28
@author: Neil Borle
'''
import threading
from src.source.PointSource import PointSource
from src.source.LineSource import LineSource
from src.source.TextSource import TextSource
from src.source.ImageSource import ImageSource
class Layer(object):
'''
Base class for any layer (is a combination of layer.java
and AbstractLayer.java in the original code).
'''
reentrant_lock = threading.RLock()
def register_with_renderer(self, rend_controller):
self.render_map.clear()
self.render_controller = rend_controller
self.update_layer_for_controller_change()
def set_visible(self, visible_bool):
'''
Makes this layer visible or invisible based on user
selection with the preference buttons.
'''
with self.reentrant_lock:
atomic = self.render_controller.create_atomic()
for render_manager in self.render_map.values():
render_manager.queue_enabled(visible_bool, atomic)
self.render_controller.queue_atomic(atomic)
def add_update_closure(self, closure):
if self.render_controller != None:
self.render_controller.add_update_closure(closure)
def remove_update_callback(self, closure):
if self.render_controller != None:
self.render_controller.remove_update_callback(closure)
def redraw(self, points, lines, texts, images, update_type):
'''
Forces a redraw of the layer by removing all object managers.
Updates the renderer (using the given UpdateType), with then given set
of UI elements. Depending on the value of UpdateType, current sources
will either have their state updated, or will be overwritten by the
given set of UI elements.
'''
if self.render_controller == None:
return
with self.reentrant_lock:
atomic = self.render_controller.create_atomic()
self.set_sources(points, update_type, PointSource, atomic)
self.set_sources(lines, update_type, LineSource, atomic)
self.set_sources(texts, update_type, TextSource, atomic)
self.set_sources(images, update_type, ImageSource, atomic)
self.render_controller.queue_atomic(atomic)
def set_sources(self, sources, update_type, clazz, atomic):
'''
Given an input source (point/line/text/image) a corresponding
object manager is created and stored in the render_map dictionary.
'''
if sources == None:
return
manager = None
if clazz not in self.render_map.keys():
manager = self.create_render_manager(clazz, atomic)
self.render_map[clazz] = manager
else:
manager = self.render_map[clazz]
manager.queue_objects(sources, update_type, atomic)
def create_render_manager(self, clazz, controller):
if clazz is PointSource:
return controller.create_point_manager(self.get_layer_id())
elif clazz is LineSource:
return controller.create_line_manager(self.get_layer_id())
elif clazz is TextSource:
return controller.create_label_manager(self.get_layer_id())
elif clazz is ImageSource:
return controller.create_image_manager(self.get_layer_id())
else:
raise Exception("class is of unknown type")
def get_preference_id(self):
return "source_provider." + self.layerNameId()
|
def get_layer_name(self):
raise NotImplementedError("need strings.xml")
def __init__(self):
''' |
Constructor
'''
self.render_map = {}
self.render_controller = None |
globality-corp/microcosm-flask | microcosm_flask/tests/swagger/parameters/test_enum.py | Python | apache-2.0 | 1,133 | 0.000883 | from enum import Enum, IntEnum, unique
from hamcrest import assert_that, equal_to, is_
from marshmallow import Schema
from microcosm_flask.fields import EnumField
from microcosm_flask | .swagger.api import build_parameter
@unique
class Choices(Enum):
Profit = "profit"
@unique
class ValueType(IntEnum):
Foo = 1
Bar = 2
class TestSchema(Schema):
choice = EnumField(Choices)
value = EnumField(ValueType, by_value=True)
def test_field_enum():
parameter = build_parameter(TestSchema().fields["choice"])
assert_that(parameter, is_(equal_to({
"format": "enum",
"type": "string",
"enum": [
"Profi | t",
],
})))
def test_field_enum_non_strict():
    """With strict_enums disabled the parameter degrades to a plain string."""
    parameter = build_parameter(TestSchema().fields["choice"], strict_enums=False)
    expected = {"type": "string"}
    assert_that(parameter, is_(equal_to(expected)))
def test_field_int_enum():
    """A by-value enum field maps to an integer parameter listing the values."""
    parameter = build_parameter(TestSchema().fields["value"])
    expected = {
        "format": "enum",
        "type": "integer",
        "enum": [1, 2],
    }
    assert_that(parameter, is_(equal_to(expected)))
|
jbrackins/scheduling-research | src/location.py | Python | unlicense | 3,581 | 0.0148 | from rec import CourseRecord
from score import RoomScore
from evaluation import ScheduleEvaluation
# Daily room-utilization thresholds, in scheduled hours (8:00AM - 4:00PM).
FULL_HOURS = 8                     # fully utilized day
PARTIAL_HOURS = FULL_HOURS * 0.75  # 75% utilized
HALF_HOURS = FULL_HOURS * 0.50     # 50% utilized
SPARSE_HOURS = FULL_HOURS * 0.25   # 25% utilized (name rejoined; it was split)
class LocationScore:
    """
    Weighted utilization score for a single room/location.

    Consumes a ScheduleEvaluation, averages the per-course weighted scores
    for each day of the week, scales each day by how many hours the room is
    actually used, and folds the result into one ``final_weighted`` value
    that can later be normalized to a 0-10 rank.
    """

    DAYS = ("M", "T", "W", "R", "F", "S")  # Monday .. Saturday

    def __init__(self, evals=None):
        """evals -- optional ScheduleEvaluation; omit for an empty score."""
        self.evals = evals
        self.courses = None
        self.location = None
        self.daily_weights = {day: {} for day in self.DAYS}
        self.daily_totals = {day: {} for day in self.DAYS}
        self.final_weighted = 0
        self.weight_rank = 0  # normalized rank string once computed; 0 = worst
        if evals is not None:  # was "!= None"; identity test is the idiom
            self.courses = self.evals.get_records()
            self.location = self.find_location()
            self.final_weighted = self.calculate_final_weighted_score()

    def reset_daily_weights(self):
        """Zero the per-day accumulators before a recomputation."""
        for day in self.DAYS:
            self.daily_weights[day] = 0
            self.daily_totals[day] = 0

    def get_daily_weight(self, day_of_week):
        """Return the (utilization-adjusted) average weight for one day."""
        return self.daily_weights[day_of_week]

    def normalize_final_weighted_score(self, minimum, maximum):
        """Rescale final_weighted into a 0-10 rank relative to [minimum, maximum]."""
        value = self.final_weighted - minimum
        if maximum - minimum > 0:
            value /= (maximum - minimum)
        else:
            value = 0  # degenerate range: everything ranks the same
        self.weight_rank = "{0:.2f}".format(value * 10)

    def calculate_final_weighted_score(self):
        """Average the utilization-adjusted daily weights over all courses."""
        score_sum = 0.00
        score_total = 0.00
        self.reset_daily_weights()
        for course, score in self.courses:
            days = course.rec["DAYS_OF_WEEK"]
            score_total += 1.00
            for day in self.DAYS:
                if day in days:
                    self.daily_weights[day] += score.get_weighted_score(course)
                    self.daily_totals[day] += 1
        for day in self.DAYS:
            if self.daily_totals[day] > 0:
                self.daily_weights[day] /= self.daily_totals[day]
                self.daily_weights[day] = self.adjust_utilization(
                    self.daily_weights[day], self.daily_totals[day])
                score_sum += self.daily_weights[day]
            else:
                self.daily_weights[day] = 0
        # NOTE(review): raises ZeroDivisionError when there are no courses;
        # callers only reach here with a populated evaluation.
        return score_sum / score_total

    def adjust_utilization(self, weights, totals):
        """Scale a day's average weight by how heavily the room is used."""
        if totals >= FULL_HOURS:       # 8+ hours: slight boost (15%)
            weights *= 1.15
        elif totals >= PARTIAL_HOURS:  # small penalty
            weights *= (PARTIAL_HOURS / FULL_HOURS)
        elif totals >= HALF_HOURS:     # medium penalty
            weights *= (HALF_HOURS / FULL_HOURS)
        elif totals > SPARSE_HOURS:    # large penalty
            weights *= (SPARSE_HOURS / FULL_HOURS)
        else:                          # very large penalty
            weights *= (1.00 / FULL_HOURS)
        return weights

    def get_location(self):
        """Return the 'BUILDING ROOM' label for this score, or None."""
        return self.location

    def find_location(self):
        """Derive the location label from the first course record."""
        for course, score in self.courses:
            # Every record in one evaluation shares a room; first one suffices.
            return str(course.rec["BUILDING"]) + " " + str(course.rec["ROOM"])
        return None  # no courses: previously raised UnboundLocalError

    def get_final_weighted_score(self):
        return self.final_weighted

    def get_score_rank(self):
        return self.weight_rank

    def get_evals(self):
        return self.evals
|
slint/zenodo | zenodo/modules/records/serializers/schemas/marc21.py | Python | gpl-2.0 | 12,434 | 0.000161 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""MARCXML translation index."""
from __future__ import absolute_import, print_function, unicode_literals
from dateutil.parser import parse
from flask import current_app
from marshmallow import Schema, fields, missing, post_dump
class RecordSchemaMARC21(Schema):
"""Schema for records in MARC."""
control_number = fields.Function(
lambda o: str(o['metadata'].get('recid')))
date_and_time_of_latest_transaction = fields.Function(
lambda obj: parse(obj['updated']).strftime("%Y%m%d%H%M%S.0"))
information_relating_to_copyright_status = fields.Function(
lambda o: dict(copyright_status=o['metadata']['access_right']))
index_term_uncontrolled = fields.Function(
lambda o: [
dict(uncontrolled_term=kw)
for kw in o['metadata'].get('keywords', [])
]
)
subject_added_entry_topical_term = fields.Method(
'get_subject_added_entry_topical_term')
terms_governing_use_and_reproduction_note = fields.Function(
lambda o: dict(
uniform_resource_identifier=o[
'metadata'].get('license', {}).get('url'),
terms_governing_use_and_reproduction=o[
'metadata'].get('license', {}).get('title')
))
title_statement = fields.Function(
lambda o: dict(title=o['metadata'].get('title')))
general_note = fields.Function(
lambda o: dict(general_note=o['metadata'].get('notes')))
information_relating_to_copyright_status = fields.Function(
lambda o: dict(copyright_status=o['metadata'].get('access_right')))
publication_distribution_imprint = fields.Method(
'get_publication_distribution_imprint')
funding_information_note = fields.Function(
lambda o: [dict(
text_of_note=v.get('title'),
grant_number=v.get('code')
) for v in o['metadata'].get('grants', [])])
other_standard_identifier = fields.Method('get_other_standard_identifier')
added_entry_meeting_name = fields.Method('get_added_entry_meeting_name')
main_entry_personal_name = fields.Method('get_main_entry_personal_name')
added_entry_personal_name = fields.Method('get_added_entry_personal_name')
summary = fields.Function(
lambda o: dict(summary=o['metadata'].get('description')))
host_item_entry = fields.Method('get_host_item_entry')
dissertation_note = fields.Function(
lambda o: dict(name_of_granting_institution=o[
'metadata'].get('thesis', {}).get('university')))
language_code = fields.Function(
lambda o: dict(language_code_of_text_sound_track_or_separate_title=\
o['metadata'].get('language'))) |
# Custom
# ======
resource_type = fields.Raw(attribute='metadata.resource_type')
communities = fields.Raw(attribute='metadata.communities')
references = fields.Raw(attribute='metadata.references')
embargo_date = fields.Raw(attribute='metadata.embargo_date')
journal = fields.Raw(attribute='metadata.journal')
_oai = fields.Raw(attribute='metadata._oai')
_files = fields.Meth | od('get_files')
leader = fields.Method('get_leader')
conference_url = fields.Raw(attribute='metadata.meeting.url')
def get_leader(self, o):
    """Build the MARC leader field for the record's resource type."""
    resource_type = o['metadata']['resource_type']['type']
    # Non-textual resource types get a dedicated MARC record type;
    # anything else is treated as language material.
    record_type_overrides = {
        'image': 'two-dimensional_nonprojectable_graphic',
        'video': 'projected_medium',
        'dataset': 'computer_file',
        'software': 'computer_file',
    }
    leader = {
        'record_length': '00000',
        'record_status': 'new',
        'type_of_record': record_type_overrides.get(
            resource_type, 'language_material'),
        'bibliographic_level': 'monograph_item',
        'type_of_control': 'no_specified_type',
        'character_coding_scheme': 'marc-8',
        'indicator_count': 2,
        'subfield_code_count': 2,
        'base_address_of_data': '00000',
        'encoding_level': 'unknown',
        'descriptive_cataloging_form': 'unknown',
        'multipart_resource_record_level':
            'not_specified_or_not_applicable',
        'length_of_the_length_of_field_portion': 4,
        'length_of_the_starting_character_position_portion': 5,
        'length_of_the_implementation_defined_portion': 0,
        'undefined': 0,
    }
    return leader
def get_files(self, o):
    """List file descriptors for open-access records; otherwise ``missing``."""
    metadata = o['metadata']
    if metadata['access_right'] != 'open':
        return missing
    recid = metadata.get('recid', '')
    files = [
        dict(
            uri=u'https://zenodo.org/record/{0}/files/{1}'.format(
                recid, f['key']),
            size=f['size'],
            checksum=f['checksum'],
            type=f['type'],
        )
        for f in metadata.get('_files', [])
    ]
    return files or missing  # omit the field when there are no files
def get_host_item_entry(self, o):
    """Map related identifiers (and any imprint/part-of pair) to MARC 773."""
    res = []
    for v in o['metadata'].get('related_identifiers', []):
        res.append(dict(
            main_entry_heading=v.get('identifier'),
            relationship_information=v.get('relation'),
            note=v.get('scheme'),
        ))
    imprint = o['metadata'].get('imprint', {})
    part_of = o['metadata'].get('part_of', {})
    # A book-chapter style entry needs both the containing work (part_of)
    # and its imprint to be present.
    if part_of and imprint:
        res.append(dict(
            main_entry_heading=imprint.get('place'),
            edition=imprint.get('publisher'),
            title=part_of.get('title'),
            related_parts=part_of.get('pages'),
            international_standard_book_number=imprint.get('isbn'),
        ))
    return res or missing  # omit the field entirely when nothing applies
def get_publication_distribution_imprint(self, o):
    """Map publication date and (stand-alone) imprint data to MARC 260."""
    res = []
    pubdate = o['metadata'].get('publication_date')
    if pubdate:
        res.append(dict(date_of_publication_distribution=pubdate))
    imprint = o['metadata'].get('imprint')
    part_of = o['metadata'].get('part_of')
    # When the record is part of a larger work the imprint is emitted by
    # get_host_item_entry instead, so only use it here for stand-alone items.
    if not part_of and imprint:
        res.append(dict(
            place_of_publication_distribution=imprint.get('place'),
            name_of_publisher_distributor=imprint.get('publisher'),
            date_of_publication_distribution=pubdate,
        ))
    return res or missing  # omit the field entirely when nothing applies
def get_subject_added_entry_topical_term(self, o):
"""Get licenses and subjects."""
res = []
license = o['metadata'].get('license', {}).get('id')
if license:
res.append(dict(
topical_term_or_geographic_name_entry_element='cc-by',
source_of_heading_or_term='opendefinition.org',
level_of_subject='Primary',
thesaurus='Source specified in subfield $2',
))
def _subject(term, id_, scheme):
return dict(
topical_term_or_geographic_name_entry_element=te |
Xosebelge/Pascal_Practicums | 09.06/2.py | Python | mit | 221 | 0.090498 | #coding: utf-8
inpu | t_string=raw_input()
brackets=0
for i in input_string:
if i=="(":
brackets+=1
elif i==")":
brackets-=1
if brackets<0:
print "No"
break
else:
if brackets>0:
print " | No"
else:
print "Yes"
|
AustinHartman/randomPrograms | reciprocal_cycles.py | Python | gpl-3.0 | 309 | 0.009709 | unit_fracs = [] | # Create list of all reciprocal frac 1-1000
for i in range(1, 1000):
unit_fracs.append(str(1/i)[2:])
unit_fracs = [frac for frac in unit_fracs if len(frac) > 4] # Filter all | list elements under 4 characters
print(unit_fracs)
|
hlerebours/lambda-calculus | lambdax/test/test_exposed.py | Python | mit | 3,947 | 0.001778 | """ Check what the `lambdax` module publicly exposes. """
import builtins
from inspect import isbuiltin, ismodule, isclass
from itertools import chain
import operator
from unittest.mock import patch
import lambdax.builtins_as_lambdas
import lambdax.builtins_overridden
from lambdax import x1, x2, x
def _get_exposed(tested_module):
return {name for name, obj in vars(tested_module).items()
if not name.startswith('_') and not ismodule(obj)}
def test_no_builtin_exposed():
    """Neither wrapper module may leak a raw builtin function object."""
    for module in (lambdax, lambdax.builtins_overridden):
        for obj in vars(module).values():
            assert not isbuiltin(obj)
def test_base_exposed():
    """The core module exposes exactly the placeholders + special functions."""
    placeholders = {'x'} | {'x%d' % i for i in range(1, 10)}
    placeholders |= {name.upper() for name in placeholders}
    special_functions = {'λ', 'is_λ', 'comp', 'circle', 'chaining',
                         'and_', 'or_', 'if_'}
    expected = placeholders | special_functions
    assert expected == _get_exposed(lambdax.lambda_calculus)
def test_operators_exposed():
    """lambdax.operators re-exposes the stdlib operator functions."""
    # Public, non-class operator names that do not shadow a builtin...
    operators = {name for name, obj in vars(operator).items()
                 if not name.startswith('_') and not isclass(obj) and
                 not hasattr(builtins, name)}
    # ...except the boolean ones, which lambdax redefines itself.
    to_expose = operators.difference(('and_', 'or_', 'xor'))
    assert to_expose == _get_exposed(lambdax.operators)
def test_overridden_builtins_exposed():
    """Both builtin-wrapping modules must cover the same curated builtin set."""
    # Lower-case builtins only (skips dunders and capitalized types/exceptions).
    builtin_names = {name for name, obj in vars(builtins).items()
                     if name[0].upper() != name[0]}
    # Builtins that make no sense as lambdas: I/O, REPL helpers, the eval
    # family, class machinery and scope introspection.
    irrelevant_builtins = {
        'input', 'help', 'open',
        'copyright', 'license', 'credits',
        'compile', 'eval', 'exec', 'execfile', 'runfile',
        'classmethod', 'staticmethod', 'property',
        'object', 'super',
        'globals', 'locals'
    }
    builtins_to_expose = builtin_names - irrelevant_builtins
    to_expose_as_λ = {name + '_λ' for name in builtins_to_expose}
    # builtins_as_lambdas may expose names with extra words in the middle;
    # normalize each back to "<first>_<last>" (i.e. "<name>_λ") for comparison.
    split_exposed_names = (name.split('_')
                           for name in _get_exposed(lambdax.builtins_as_lambdas))
    exposed_as_λ = {'%s_%s' % (words[0], words[-1])
                    for words in split_exposed_names}
    assert to_expose_as_λ == exposed_as_λ
    assert builtins_to_expose == _get_exposed(lambdax.builtins_overridden)
def test_operators_implementations():
    """Each lambdax operator wraps the stdlib operator and behaves like it."""
    operators = vars(operator)
    for name, abstraction in vars(lambdax.operators).items():
        initial = operators.get(name)
        if initial and isbuiltin(initial):
            # The abstraction must wrap exactly the stdlib function...
            wrapped = getattr(abstraction, '_λ_constant')
            assert wrapped == initial
            # ...and applying it to the sample operands 42 and 51 must give
            # the same result — or raise a TypeError with the same args.
            try:
                ref = initial(42, 51)
            except TypeError as e:
                ref = e.args
            try:
                res = abstraction(x1, x2)(42, 51)
            except TypeError as e:
                res = e.args
            assert res == ref
def _get_effect(implementation):
    """Call *implementation* with "42" under a mocked stdout.

    Returns ``(result, output)`` where *result* is the return value (or the
    ``args`` of whatever exception was raised) and *output* collects what was
    recorded through the mocked ``sys.stdout``.
    """
    output = []
    with patch('sys.stdout') as out:
        # Calls routed through the stdout mock are appended to *output*.
        out.side_effect = output.append
        try:
            res = implementation("42")
        except BaseException as e:  # deliberate: even SystemExit is captured
            res = e.args
    return res, output
def _get_method_or_object(obj, meth=''):
return getattr(obj, meth) if meth else obj
def test_overridden_builtins_implementations():
    """Overridden builtins must behave like the originals, lazily or not."""
    for name in _get_exposed(lambdax.builtins_as_lambdas):
        obj, tail = name.split('_', 1)
        meth = tail[:-2]  # drop the trailing "_λ" suffix
        original = _get_method_or_object(getattr(builtins, obj), meth)
        as_λ = getattr(lambdax.builtins_as_lambdas, name)
        overridden = _get_method_or_object(getattr(lambdax.builtins_overridden, obj), meth)
        # Compare the observable effect of: the plain builtin, the explicit
        # lambda form, the overridden builtin called eagerly, and the
        # overridden builtin applied to a variable (lazy abstraction).
        ref, ref_output = _get_effect(original)
        expl, expl_output = _get_effect(as_λ(x))
        iso, iso_output = _get_effect(overridden)
        lbda, lbda_output = _get_effect(overridden(x))
        assert lbda_output == iso_output == expl_output == ref_output
        try:
            # Iterables must yield the same items...
            assert list(iter(lbda)) == list(iter(iso)) == list(iter(expl)) == list(iter(ref))
        except TypeError:
            # ...everything else must simply compare equal.
            assert lbda == iso == expl == ref
|
olsove/ougn-2017-p78 | locust/test/locustfile.py | Python | apache-2.0 | 4,070 | 0.010565 | from locust import HttpLocust, TaskSet, task, events
# names : random name generator https://github.com/treyhunner/names
import names
import os
def print_error(type, loc, code, reason):
    """Log a failed request; note the format prints the code before the URL."""
    print("Error %s %s %s Reason:%s" % (type, code, loc, reason))
def print_success(type, loc, code):
    """Log a successful request as "Success <type> <code> <loc>"."""
    print("Success %s %s %s" % (type, code, loc))
def getTargetURL():
    """Read the load-test target base URL from the TARGET_URL env variable."""
    target = os.environ['TARGET_URL']  # KeyError when unset: fail fast
    print("Getting target url: %s" % (target))
    return target
class UserBehavior(TaskSet):
root = getTargetURL()
request_header = {"Content-Type" : "application/json"}
@tas | k(20)
def findByFirstName(self):
url = "%s/search/findByFirstName?name=%s"%(UserBehavior.root,names.get_first_name())
r = self.client.get(url=url, name="people/search/findByFirstName?name={name}")
type = "GET"
if r.status_code == 200 :
| print_success(type, url, r.status_code)
else:
print_error(type, url , r.status_code, r.reason)
@task(30)
def findByLastName(self):
url = "%s/search/findByLastName?name=%s"%(UserBehavior.root,names.get_last_name())
r = self.client.get(url=url, name="people/search/findByLastName?name={name}")
type = "GET"
if r.status_code == 200 :
print_success(type, url, r.status_code)
else:
print_error(type, url , r.status_code, r.reason)
@task(10)
def people(self):
url = UserBehavior.root
def payload():
return "{ \"firstName\": \"%s\", \"lastName\":\"%s\"}"%(names.get_first_name(), names.get_last_name())
def post():
r = self.client.post(url=url, name="/people", data=payload(), headers=UserBehavior.request_header )
type = "POST"
if r.status_code == 201 :
loc = r.headers.get('location')
print_success(type, url, r.status_code)
return loc
else:
print_error(type, url, r.status_code, r.reason)
def get(loc):
r = self.client.get(url=loc, name="/people/{id}")
type = "GET"
if r.status_code == 200 :
print_success(type, loc, r.status_code)
else:
print_error(type, loc, r.status_code, r.reason)
def put(loc):
r = self.client.put(url=loc, name="/people/{id}", data=payload(), headers=UserBehavior.request_header)
type = "PUT"
if r.status_code == 200 :
print_success(type, loc, r.status_code)
else:
print_error(type, loc, r.status_code, r.reason)
def patch(loc):
payload = "{ \"lastName\":\"%s\"}"%(names.get_last_name())
r = self.client.patch(url=loc, name="/people/{id}", data=payload, headers=UserBehavior.request_header)
type = "PATCH"
if r.status_code == 200 :
print_success(type, loc, r.status_code)
else:
print_error(type, loc, r.status_code, r.reason)
def delete(loc):
r = self.client.delete(url=loc, name="/people/{id}")
type = "DELETE"
if r.status_code == 204 :
print_success(type, loc, r.status_code)
else:
print_error(type, loc, r.status_code, r.reason)
location = post()
get(location)
patch(location)
put(location)
#delete(location)
class WebsiteUser(HttpLocust):
    """Simulated user: runs UserBehavior with a 3-6 s wait between tasks."""
    task_set = UserBehavior
    min_wait = 3000  # milliseconds
    max_wait = 6000  # milliseconds
def on_request_success(request_type, name, response_time, response_length):
    """Locust success hook: one log line per completed request."""
    print("RequestType:[%s] Name:[%s] ResponseTime:[%s] ResponseLength:[%s]"
          % (request_type, name, response_time, response_length))
def on_request_failure(request_type, name, response_time, exception):
    """Locust failure hook: one log line per failed request."""
    print("RequestType:[%s] Name:[%s] ResponseTime:[%s] Exception:[%s]"
          % (request_type, name, response_time, exception))
#events.request_success += on_request_success
events.request_failure += on_request_failure
|
soft-matter/pims_nd2 | setup.py | Python | bsd-3-clause | 1,353 | 0.000739 | import os
from setuptools import setup

# Long description: the README, converted to reST when pypandoc is available.
try:
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
        descr = readme.read()
except IOError:
    descr = ''
try:
    from pypandoc import convert
    descr = convert(descr, 'rst', format='md')
except ImportError:
    pass  # pypandoc is optional; fall back to raw Markdown

setup_parameters = dict(
    name="pims_nd2",
    version="1.1",
    description="An image reader for nd2 (NIS Elements) multidimensional images",
    author="Casper van der Wel",
    install_requires=['pims>=0.3'],
    author_email="caspervdw@gmail.com",
    url="https://github.com/soft-matter/pims_nd2",
    packages=['pims_nd2'],
    include_package_data=True,
    classifiers=["Development Status :: 5 - Production/Stable",
                 "Intended Audience :: Science/Research",
                 "Programming Language :: C",
                 "Programming Language :: Python",
                 "Programming Language :: Python :: 2",
                 "Programming Language :: Python :: 3",
                 "Topic :: Scientific/Engineering",
                 "Operating System :: Microsoft :: Windows",
                 "Operating System :: POSIX",
                 "Operating System :: Unix",
                 "Operating System :: MacOS"],
    platforms=['MacOS X', 'Windows', 'Linux CentOs 6.5/7', 'Linux Debian 7/8'],
    long_description=descr)
setup(**setup_parameters)
|
s34rching/python_classes | tests/test_contact_data.py | Python | apache-2.0 | 2,053 | 0.006332 | import re
import random
def test_contact_data_from_home_page(app):
    """Home-page contact row must match the edit form of the same contact."""
    # Fetch the list once: the original called get_contact_list() twice,
    # doing an extra page load and risking an inconsistent snapshot.
    contacts = app.contact.get_contact_list()
    r_index = random.randrange(len(contacts))
    data_from_home_page = contacts[r_index]
    data_from_edit_page = app.contact.get_contact_info_from_edit_page(r_index)
    assert data_from_home_page.firstname == data_from_edit_page.firstname
    assert data_from_home_page.lastname == data_from_edit_page.lastname
    assert data_from_home_page.address == data_from_edit_page.address
    assert data_from_home_page.all_phones_from_homepage == merge_phones_like_on_homepage(data_from_edit_page)
    assert data_from_home_page.all_emails_from_homepage == merge_emails_like_on_homepage(data_from_edit_page)
    assert data_from_home_page.id == data_from_edit_page.id
def test_phones_from_v | iew_page(app):
r_index = random.randrange(len(app.contact.get_contact_list()))
data_from_view_page = app.contact.get_contact_info_from_view_page(r_index)
data_from_edit_page = app.contact.get_contact_info_from_edit_page(r_index)
assert data_from_view_page.home_number == data_from_edit_page.home_number
assert data_from_view_page.mobile_number == data_from_edit_page.mobile_number
assert data_from_view_page.work_number == data_from_edit_page.work_number
assert data_from_view_page.secondary_number == data_from_edit_page.secondary_number
def clear(s):
    """Strip formatting characters — parentheses, spaces, dashes — from *s*."""
    return s.translate(str.maketrans('', '', '() -'))
def merge_phones_like_on_homepage(contact):
    """Join non-empty, cleaned phone numbers the way the home page shows them."""
    raw_numbers = [contact.home_number, contact.work_number,
                   contact.mobile_number, contact.secondary_number]
    cleaned = [clear(number) for number in raw_numbers if number is not None]
    return '\n'.join(number for number in cleaned if number != '')
def merge_emails_like_on_homepage(contact):
    """Join non-empty, cleaned e-mails the way the home page shows them."""
    raw_emails = [contact.email, contact.email2, contact.email3]
    cleaned = [clear(email) for email in raw_emails if email is not None]
    return '\n'.join(email for email in cleaned if email != '')
dfdeshom/reds | reds/redis_hash_layer.py | Python | gpl-2.0 | 1,323 | 0.013605 | import redis
from hashlib import sha1
class RedisHashLayer(object):
    """
    A more memory-efficient way to store many small values in redis using
    hashes. See http://antirez.com/post/redis-weekly-update-7.html

    Note: add these config values to redis:
        hash-max-zipmap-entries 512
        hash-max-zipmap-value 512
    """

    def __init__(self, connection, name):
        self.connection = connection  # a redis.Redis (or compatible) client
        self.name = name              # key-space prefix for this layer

    def _get_hashname(self, key):
        """Bucket *key* into a hash: return (hash key, field digest)."""
        # Encode for Python 3 compatibility (sha1 requires bytes); the
        # digest is unchanged for the ASCII strings Python 2 passed directly.
        field = sha1(str(key).encode('utf-8')).hexdigest()
        hashkey = "%s:%s" % (self.name, field[:4])
        return (hashkey, field)

    def __contains__(self, key):
        hashkey, field = self._get_hashname(key)
        return self.connection.hget(hashkey, field) is not None

    def add(self, key):
        """Record *key* as present."""
        hashkey, field = self._get_hashname(key)
        self.connection.hset(hashkey, field, field)
        return

    def delete(self, key):
        """Forget *key*.  (The original issued a spurious hset first.)"""
        hashkey, field = self._get_hashname(key)
        self.connection.hdel(hashkey, field)
        return

    def clear(self):
        """Drop every hash belonging to this layer in one pipeline."""
        pipeline = self.connection.pipeline()
        # NOTE(review): the pattern also matches any key merely *prefixed*
        # with this layer's name; kept as-is to preserve behavior.
        for k in self.connection.keys(self.name + "*"):
            pipeline.delete(k)
        pipeline.execute()
        return
|
sirotin/at-a-glance-of-a-key | simple-python-testing-framework/tests/test_python_version.py | Python | mit | 658 | 0.004559 | # Author: Alexander Sirotin / sirotin@gmail.com
# Copyright (c) 2017 Alexander Sirotin.
# Licensed under MIT license:
# https://opensource.org/licenses/mit-license.php
# Your use is on an "AS-IS" basis, and subject to the terms and conditions of this license.
from tests_common impor | t TestBase, setup_logger
import sys
logger = setup_logger("TestPythonVersion")
class TestPythonVersion(TestBase):
    """Sanity-check that the interpreter running the suite is Python 2."""
    @classmethod
    def run_test(cls):
        # sys.version_info is a named tuple: (major, minor, micro, ...).
        result = sys.version_info
        logger.info("Python version is %d.%d.%d" % (result.major, result.minor, result.micro))
        if result.major != 2:
            raise Exception("Expected major version is 2")
|
kaustubhhiware/hiPy | think_python/lists2.py | Python | mit | 1,424 | 0.019663 | def no_extreme(listed):
"""
Takes a list and chops off extreme ends
"""
del listed[0]
del listed[-1:]
return listed
def better_no_extreme(listed):
    """
    Return a new list with the extreme ends removed.

    Why better?  Unlike no_extreme it leaves the original list untouched.
    """
    interior = listed[1:-1]
    return interior
t = ['a','b','c']
print t
print '\n'
print 'pop any element : by t.pop(1) or t.remove(\'b\') or del t[1]'
del t[1]
print t
st = ['a','b','c','d','e','f']
print st
del st[1:3]
print 'del t[1:3] works as well : ', st
print 'Mid part is : ',no_extreme(st)
str = raw_input("\nEnter a string to be converted to list : ")
listr = list(str)
print listr
str2=raw_input("\nEnter a line to be separated into words : ")
listr2 = str2.split()#separated at spaces
print listr2
print 'You can split a line into words by changing the parameter as str2.split(parameter)'
print | 'this splits at - '
print 'joining statement : '
delimeter=' '
print delimeter.join(listr2)
print '\nNote: 2 identical lists are 2 objects ,so l_a is l_b for identical lists still says False'
print 'This does not happen for strings etc'
print 'l_a is l_b only true if assigned as l_b = l_a'
print '\n t.append(x) returns None , whereas t+[y] is not None'
print '\n Never t = t[1:] as empty , same goes for t.sort | ()'
print '\nDO\'s : t.append(x)\n t = t+[x] \n '
print 'Keep copy of original just in case : orig = t[:] \nt.sort()'
print '\nDONT\'s : t.append([x])\n t = t.append(x) \n t + [x] \n t = t + x' |
tannishk/airmozilla | airmozilla/manage/tests/test_autocompeter.py | Python | bsd-3-clause | 9,999 | 0 | import datetime
import json
from nose.tools import ok_, eq_, assert_raises
import mock
from django.conf import settings
from django.utils import timezone
from django.core.exceptions import ImproperlyConfigured
from funfactory.urlresolvers import reverse
from airmozilla.base.tests.testbase import Response
from airmozilla.main.models import Event, EventHitStats, Approval
from airmozilla.manage import autocompeter
from airmozilla.base.tests.testbase import DjangoTestCase
class TestAutocompeter(DjangoTestCase):
def test_update_without_key(self):
    """update() must be a silent no-op when AUTOCOMPETER_KEY is unset."""
    with self.settings(AUTOCOMPETER_KEY=None):
        # This would simply fail if autocompeter.py wasn't smart
        # enough to notice that the AUTOCOMPETER_KEY was not set.
        autocompeter.update()
@mock.patch('requests.post')
def test_basic_update(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
autocompeter.update()
# nothing should happen because there are no recently modified events
ok_(not posts)
event = Event.objects.get(title='Test event')
event.save()
autocompeter.update()
eq_(len(posts), 1)
# In the posted data should be a thing called 'documents'
# which is a list of every document.
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['url'], reverse('main:event', args=(event.slug,)))
eq_(document['title'], event.title)
eq_(document['group'], '')
eq_(document['popularity'], 0)
@mock.patch('requests.post')
def test_basic_update_with_popularity(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
# also change to a non-public privacy setting
event.privacy = Event.PRIVACY_CONTRIBUTORS
event.save()
EventHitStats.objects.create(
event=event,
total_hits=100
)
autocompeter.update()
document = posts[0]['documents'][0]
eq_(document['popularity'], 100)
eq_(document['group'], Event.PRIVACY_CONTRIBUTORS)
@mock.patch('requests.post')
def test_basic_update_with_repeated_titles(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOM | PETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
# also change to a non-public privacy setting
event.privacy = Event.PRIVACY_CONTRIBUTORS
event.save()
EventHitStats.objects.create(
event=event,
| total_hits=100
)
event2 = Event.objects.create(
slug='something-else',
title=event.title,
status=event.status,
privacy=event.privacy,
start_time=event.start_time + datetime.timedelta(days=40),
description=event.description,
placeholder_img=event.placeholder_img,
archive_time=event.archive_time,
)
EventHitStats.objects.create(
event=event2,
total_hits=100
)
assert Event.objects.approved().count() == 2
autocompeter.update()
documents = posts[0]['documents']
titles = [x['title'] for x in documents]
titles.sort()
title1 = event.start_time.strftime('Test event %d %b %Y')
title2 = event2.start_time.strftime('Test event %d %b %Y')
eq_(titles, [title1, title2])
@mock.patch('requests.post')
def test_basic_update_upcoming_event(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
EventHitStats.objects.create(
event=event,
total_hits=100
)
autocompeter.update()
future = timezone.now() + datetime.timedelta(days=1)
Event.objects.create(
slug='aaa',
title='Other',
start_time=future,
status=event.status,
)
assert Event.objects.approved().count() == 2
autocompeter.update()
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Other')
# picks this up from the median
eq_(document['popularity'], 100)
@mock.patch('requests.post')
def test_basic_update_all(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
autocompeter.update(all=True)
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['popularity'], 0)
@mock.patch('requests.post')
def test_basic_update_all_with_popularity(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
EventHitStats.objects.create(
event=Event.objects.get(title='Test event'),
total_hits=200
)
autocompeter.update(all=True)
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['popularity'], 200)
@mock.patch('requests.post')
def test_basic_update_all_with_unapproved(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
EventHitStats.objects.create(
event=event,
total_hits=200
)
autocompeter.update(all=True)
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['group'], '')
app = Approval.objects.create(event=event)
autocompeter.update(all=True)
document = posts[1]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['group'], 'contributors')
app.approved = True
app.save()
autocompeter.update(all=True)
document = posts[2]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['group'], '')
@mock.patch('requests.delete')
@mock.patch('requests.post')
def test_basic_update_all_with_flush(self, rpost, rdelete):
posts = []
deletes = []
def mocked_post(url, **options):
assert |
Rensselaer-AI-League/GeneralizedGameServer | helpers/glicko2.py | Python | mit | 993 | 0.030211 | class Player:
self.rating = 1500 # Actual rating
self.rd = 350 # Rating deviation
self.sigma = 0.06 # Volatility
self.mu = None
self.phi = None
class | Glicko2:
self.tau = 1.0 # Should be betwee | n 0.3 and 1.2, use higher numbers for more predicatable games
def step2(self, player):
player.mu = (player.rating-1500)/173.7178
player.phi = player.sigma/173.7178
def g(phi):
    """Glicko-2 damping factor g(phi) = 1 / sqrt(1 + 3*phi^2 / pi^2)."""
    import math  # local import: this module has no top-level imports
    return 1/math.sqrt(1 + 3*phi*phi/9.86960440109)  # 9.8696... is pi squared
def E(mu, mu_j, phi_j):
    """Expected score of a player rated *mu* against opponent (mu_j, phi_j)."""
    import math  # local import: this module has no top-level imports
    return 1/(1 + math.exp(-1 * g(phi_j) * (mu - mu_j)))
def step3(self, player, opponents):
v = 0
for opponent in opponents:
e_j = E(player.mu, opponent.mu, opponent.phi)
v += g(opponent.phi)**2 * e_j * (1 - e_j)
return v
# Outcomes is an associative array from opponent id to outcome
def step4(self, player, opponents, outcomes):
d = 0
for (opponent in opponents):
e_j = E(player.mu, opponent.mu, opponent.phi)
d += g(opponent.phi)*(outcomes[opponent.id] - e_j)
d = v*d
|
gonzolino/heat | contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py | Python | apache-2.0 | 20,555 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import mox
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import environment
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests.openstack.nova import fakes
from heat.tests import utils
from ..resources import cloud_server # noqa
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"key_name" : {
"Description" : "key_name",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "OS::Nova::Server",
"Properties": {
"image" : "CentOS 5.2",
"flavor" : "256 MB Server",
"key_name" : "test",
"user_data" : "wordpress"
}
}
}
}
'''
cfg.CONF.import_opt('region_name_for_services', 'heat.common.config')
class CloudServersTest(common.HeatTestCase):
def setUp(self):
    """Wire a fake nova client into the context and register the resource."""
    super(CloudServersTest, self).setUp()
    cfg.CONF.set_override('region_name_for_services', 'RegionOne')
    self.ctx = utils.dummy_context()
    self.fc = fakes.FakeClient()
    # Any nova client created through the client plugin returns the fake.
    mock_nova_create = mock.Mock()
    self.ctx.clients.client_plugin(
        'nova')._create = mock_nova_create
    mock_nova_create.return_value = self.fc
    # Test environment may not have pyrax client library installed and if
    # pyrax is not installed resource class would not be registered.
    # So register resource provider class explicitly for unit testing.
    resource._register_class("OS::Nova::Server",
                             cloud_server.CloudServer)
def _mock_get_image_id_success(self, imageId):
self.mock_get_image = mock.Mock()
self.ctx.clients.client_plugin(
'glance').get_image_id = self.mock_get_image
self.mock_get_image.return_value = imageId
def _stub_server_validate(self, server, imageId_input, image_id):
# stub glance image validate
self._mock_get_image_id_success(image_id)
def _setup_test_stack(self, stack_name):
    """Parse the WordPress template and build a stack around it."""
    parsed = template_format.parse(wp_template)
    env = environment.Environment({'key_name': 'test'})
    tmpl = template.Template(parsed, env=env)
    self.stack = parser.Stack(
        self.ctx, stack_name, tmpl, stack_id=uuidutils.generate_uuid())
    return (tmpl, self.stack)
def _setup_test_server(self, return_server, name, image_id=None,
                       override_name=False, stub_create=True, exit_code=0):
    """Build a CloudServer resource with its nova create() call stubbed.

    `return_server` is what the stubbed create() will hand back.  The
    resource is returned un-created; callers run the create task
    themselves.  `exit_code` is accepted for caller compatibility but is
    not used in this helper.
    """
    stack_name = '%s_s' % name
    (tmpl, stack) = self._setup_test_stack(stack_name)
    tmpl.t['Resources']['WebServer']['Properties'][
        'image'] = image_id or 'CentOS 5.2'
    tmpl.t['Resources']['WebServer']['Properties'][
        'flavor'] = '256 MB Server'
    server_name = '%s' % name
    if override_name:
        tmpl.t['Resources']['WebServer']['Properties'][
            'name'] = server_name
    resource_defns = tmpl.resource_definitions(stack)
    server = cloud_server.CloudServer(server_name,
                                      resource_defns['WebServer'],
                                      stack)
    self.patchobject(server, 'store_external_ports')
    # The image name is expected to resolve to glance image id 1.
    self._stub_server_validate(server, image_id or 'CentOS 5.2', 1)
    if stub_create:
        self.m.StubOutWithMock(self.fc.servers, 'create')
        self.fc.servers.create(
            image=1,
            flavor=1,
            key_name='test',
            name=override_name and server.name or utils.PhysName(
                stack_name, server.name),
            security_groups=[],
            userdata=mox.IgnoreArg(),
            scheduler_hints=None,
            meta=None,
            nics=None,
            availability_zone=None,
            block_device_mapping=None,
            block_device_mapping_v2=None,
            config_drive=True,
            disk_config=None,
            reservation_id=None,
            files=mox.IgnoreArg(),
            admin_pass=None).AndReturn(return_server)
    return server
def _create_test_server(self, return_server, name, override_name=False,
stub_create=True, exit_code=0):
server = self._setup_test_server(return_server, name,
stub_create=stub_create,
exit_code=exit_code)
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(server.id).AndReturn(return_server)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
return server
def _mock_metadata_os_distro(self):
image_data = mock.Mock(metadata={'os_distro': 'centos'})
self.fc.images.get = mock.Mock(return_value=image_data)
def test_rackconnect_deployed(self):
    """A DEPLOYED RackConnect status lets creation complete normally."""
    return_server = self.fc.servers.list()[1]
    return_server.metadata = {
        'rackconnect_automation_status': 'DEPLOYED',
        'rax_service_level_automation': 'Complete',
    }
    server = self._setup_test_server(return_server,
                                     'test_rackconnect_deployed')
    # RackConnect polling only applies to tenants carrying this role.
    server.context.roles = ['rack_connect']
    self.m.StubOutWithMock(self.fc.servers, 'get')
    self.fc.servers.get(return_server.id).MultipleTimes(
    ).AndReturn(return_server)
    self.m.ReplayAll()
    scheduler.TaskRunner(server.create)()
    self.assertEqual('CREATE', server.action)
    self.assertEqual('COMPLETE', server.status)
    self.m.VerifyAll()
def test_rackconnect_failed(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {
'rackconnect_automation_status': 'FAILED',
'rax_service_level_automation': 'Complete',
}
sel | f.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
server = self._setup_test_server(return_server,
'test_rackconnect_failed')
server.context.roles = ['rack_connect']
self.m.ReplayAll()
create = scheduler.TaskRu | nner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_rackconnect_failed: '
'RackConnect automation FAILED',
six.text_type(exc))
def test_rackconnect_unprocessable(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {
'rackconnect_automation_status': 'UNPROCESSABLE',
'rackconnect_unprocessable_reason': 'Fake reason',
'rax_service_level_automation': 'Complete',
}
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
server = self._setup_test_server(return_server,
'test_rackconnect_unprocessable')
server.context.roles = ['rack_connect']
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
self.assertEqual('CREATE', server.action)
self.assertEqual('COMPLETE', server |
divmain/GitSavvy | tests/test_run_for_timeout.py | Python | mit | 689 | 0.002903 | from GitSav | vy.core.runtime import run_or_timeout
from unittesting import DeferrableTestCase
class TestRunForTimeout(DeferrableTestCase):
    """Exercise `run_or_timeout` success, error, and timeout paths."""

    def testReturnsFunctionResult(self):
        # The wrapped function's return value passes through unchanged.
        def main():
            return "Ok"
        # Bug fix: `assertEquals` is a deprecated alias (removed in
        # Python 3.12); use `assertEqual`.
        self.assertEqual("Ok", run_or_timeout(main, timeout=1.0))

    def testReraisesException(self):
        # Exceptions raised by the wrapped function propagate to the caller.
        def main():
            1 / 0
        self.assertRaises(ZeroDivisionError, lambda: run_or_timeout(main, timeout=1.0))

    def testRaisesTimeoutIfFunctionTakesTooLong(self):
        # A function that outlives its budget raises TimeoutError, even if
        # it would itself fail later.
        def main():
            import time
            time.sleep(0.1)
            1 / 0
        self.assertRaises(TimeoutError, lambda: run_or_timeout(main, timeout=0.01))
|
josephsuh/extra-specs | nova/volume/san.py | Python | apache-2.0 | 32,648 | 0.000459 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drivers for san-stored volumes.
The unique thing about a SAN is that we don't expect that we can run the volume
controller on the SAN hardware. We expect to access it over SSH or some API.
"""
import base64
import httplib
import json
import os
import paramiko
import random
import socket
import string
import uuid
from lxml import etree
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
import nova.volume.driver
LOG = logging.getLogger(__name__)
san_opts = [
cfg.BoolOpt('san_thin_provision',
default=True,
help='Use thin provisioning for SAN volumes?'),
cfg.StrOpt('san_ip',
default='',
help='IP address of SAN controller'),
cfg.StrOpt('san_login',
default='admin',
help='Username for SAN controller'),
cfg.StrOpt('san_password',
default='',
help='Password for SAN controller'),
cfg.StrOpt('san_private_key',
default='',
help='Filename of private key to use for SSH authentication'),
cfg.StrOpt('san_clustername',
default='',
help='Cluster name to use for creating volumes'),
cfg.IntOpt('san_ssh_port',
default=22,
help='SSH port to use with SAN'),
cfg.BoolOpt('san_is_local',
default=False,
help='Execute commands locally instead of over SSH; '
'use if the volume service is running on the SAN device'),
cfg.StrOpt('san_zfs_volume_base',
default='rpool/',
help='The ZFS path under which to create zvols for volumes.'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(san_opts)
class SanISCSIDriver(nova.volume.driver.ISCSIDriver):
"""Base class for SAN-style storage volumes
A SAN-style storage value is 'different' because the volume controller
probably won't run on it, so we need to access is over SSH or another
remote protocol.
"""
def __init__(self):
    super(SanISCSIDriver, self).__init__()
    # When san_is_local is set, commands run on this host instead of SSH.
    self.run_local = FLAGS.san_is_local

def _build_iscsi_target_name(self, volume):
    """Return the iSCSI target name: <iscsi_target_prefix><volume name>."""
    return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
def _connect_to_ssh(self):
    """Open and return an authenticated SSH connection to the SAN.

    Uses san_password when configured, otherwise san_private_key;
    configuring neither raises NovaException.
    """
    ssh = paramiko.SSHClient()
    #TODO(justinsb): We need a better SSH key policy
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if FLAGS.san_password:
        ssh.connect(FLAGS.san_ip,
                    port=FLAGS.san_ssh_port,
                    username=FLAGS.san_login,
                    password=FLAGS.san_password)
    elif FLAGS.san_private_key:
        privatekeyfile = os.path.expanduser(FLAGS.san_private_key)
        # It sucks that paramiko doesn't support DSA keys
        privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
        ssh.connect(FLAGS.san_ip,
                    port=FLAGS.san_ssh_port,
                    username=FLAGS.san_login,
                    pkey=privatekey)
    else:
        msg = _("Specify san_password or san_private_key")
        raise exception.NovaException(msg)
    return ssh
def _execute(self, *cmd, **kwargs):
    """Run a command locally or over SSH, depending on san_is_local.

    Remote execution joins the positional arguments into one shell
    command string for `_run_ssh`.
    """
    if self.run_local:
        return utils.execute(*cmd, **kwargs)
    else:
        check_exit_code = kwargs.pop('check_exit_code', None)
        # Bug fix: `cmd` is already the tuple of arguments; unpacking it
        # into join() raised TypeError for multi-token commands such as
        # ('/usr/sbin/stmfadm', 'list-tg').
        command = ' '.join(cmd)
        return self._run_ssh(command, check_exit_code)
def _run_ssh(self, command, check_exit_code=True):
    """Execute `command` on the SAN over a fresh SSH connection."""
    #TODO(justinsb): SSH connection caching (?)
    ssh = self._connect_to_ssh()
    #TODO(justinsb): Reintroduce the retry hack
    try:
        return utils.ssh_execute(ssh, command,
                                 check_exit_code=check_exit_code)
    finally:
        # Bug fix: close the connection even when ssh_execute raises, so
        # failed commands do not leak SSH connections.
        ssh.close()
def ensure_export(self, context, volume):
    """Synchronously recreates an export for a logical volume."""
    # Intentionally a no-op in the base SAN driver.
    pass

def create_export(self, context, volume):
    """Exports the volume."""
    pass

def remove_export(self, context, volume):
    """Removes an export for a logical volume."""
    pass
def check_for_setup_error(self):
    """Returns an error if prerequisites aren't met."""
    if not self.run_local:
        # Remote execution needs some way to authenticate the SSH session.
        if not (FLAGS.san_password or FLAGS.san_private_key):
            raise exception.NovaException(_('Specify san_password or '
                                            'san_private_key'))

    # The san_ip must always be set, because we use it for the target
    if not (FLAGS.san_ip):
        raise exception.NovaException(_("san_ip must be set"))
def _collect_lines(data):
"""Split lines from data into an array, trimming them """
matches = []
for line in data.splitlines():
match = line.strip()
matches.append(match)
return matches
def _get_prefixed_values(data, prefix):
"""Collect lines which start with prefix; with trimming"""
matches = []
for line in data.splitlines():
line = line.strip()
if line.startswith(prefix):
match = line[len(prefix):]
match = match.strip()
matches.append(match)
return matches
class SolarisISCSIDriver(SanISCSIDriver):
"""Executes commands relating to Solaris-hosted ISCSI volumes.
Basic setup for a Solaris iSCSI server:
pkg install storage-server SUNWiscsit
svcadm enable stmf
svcadm enable -r svc:/network/iscsi/target:default
pfexec itadm create-tpg e1000g0 ${MYIP}
pfexec itadm create-target -t e1000g0
Then grant the user that will be logging on lots of permissions.
I'm not sure exactly which though:
zfs allow justinsb create,mount,destroy rpool
usermod -P'File System Management' justinsb
usermod -P'Primary Administrator' justinsb
Also make sure you can login using san_login & san_password/san_private_key
"""
def _execute(self, *cmd, **kwargs):
    """Run a command under pfexec, locally or over SSH."""
    new_cmd = ['pfexec']
    # Bug fix: extend() takes a single iterable; unpacking the args tuple
    # raised TypeError for commands with more than one token.
    new_cmd.extend(cmd)
    # Bug fix: super() already binds self; passing it again shifted the
    # argument list by one (the instance became the first command token).
    return super(SolarisISCSIDriver, self)._execute(*new_cmd,
                                                    **kwargs)
def _view_exists(self, luid):
    """Return True if an stmf view exists for the given LU id.

    Raises NovaException when the stmfadm output is unrecognized.
    """
    (out, _err) = self._execute('/usr/sbin/stmfadm',
                                'list-view', '-l', luid,
                                check_exit_code=False)
    if "no views found" in out:
        return False
    if "View Entry:" in out:
        return True
    # Bug fix: the formatted message was built but never passed to the
    # exception, discarding the diagnostic output.
    msg = _("Cannot parse list-view output: %s") % (out)
    raise exception.NovaException(msg)
def _get_target_groups(self):
    """Gets list of target groups from host."""
    (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
    # stmfadm prints one "Target group: <name>" line per group.
    matches = _get_prefixed_values(out, 'Target group: ')
    LOG.debug("target_groups=%s" % matches)
    return matches
def _target_group_exists(self, target_group_name):
    """Return True if the named stmf target group already exists."""
    # Bug fix: the membership test was inverted ("not in"), making this
    # method report the opposite of what its name promises.
    # NOTE(review): callers outside this chunk should be re-checked in
    # case any depended on the inverted result.
    return target_group_name in self._get_target_groups()
def _get_target_group_members(self, target_group_name):
(out, _err) = self._execute('/usr/sbin/stmfadm',
'list-tg', '-v', target_group_name)
matches = _get_prefixed_values(out, 'Member: ')
LOG.debug("members of %s=%s" % (target_group_name, matches))
|
rohitranjan1991/home-assistant | homeassistant/components/mill/climate.py | Python | mit | 8,828 | 0.00068 | """Support for mill wifi-enabled home heaters."""
import mill
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_ON,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_IP_ADDRESS,
CONF_USERNAME,
PRECISION_WHOLE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTR_AWAY_TEMP,
ATTR_COMFORT_TEMP,
ATTR_ROOM_NAME,
ATTR_SLEEP_TEMP,
CLOUD,
CONNECTION_TYPE,
DOMAIN,
LOCAL,
MANUFACTURER,
MAX_TEMP,
MIN_TEMP,
SERVICE_SET_ROOM_TEMP,
)
SET_ROOM_TEMP_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ROOM_NAME): cv.string,
vol.Optional(ATTR_AWAY_TEMP): cv.positive_int,
vol.Optional(ATTR_COMFORT_TEMP): cv.positive_int,
vol.Optional(ATTR_SLEEP_TEMP): cv.positive_int,
}
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Mill climate."""
if entry.data.get(CONNECTION_TYPE) == LOCAL:
mill_data_coordinator = hass.data[DOMAIN][LOCAL][entry.data[CONF_IP_ADDRESS]]
async_add_entities([LocalMillHeater(mill_data_coordinator)])
return
mill_data_coordinator = hass.data[DOMAIN][CLOUD][entry.data[CONF_USERNAME]]
entities = [
MillHeater(mill_data_coordinator, mill_device)
for mill_device in mill_data_coordinator.data.values()
if isinstance(mill_device, mill.Heater)
]
async_add_entities(entities)
async def set_room_temp(service: ServiceCall) -> None:
"""Set room temp."""
room_name = service.data.get(ATTR_ROOM_NAME)
sleep_temp = service.data.get(ATTR_SLEEP_TEMP)
comfort_temp = service.data.get(ATTR_COMFORT_TEMP)
away_temp = service.data.get(ATTR_AWAY_TEMP)
await mill_data_coordinator.mill_data_connection.set_room_temperatures_by_name(
room_name, sleep_temp, comfort_temp, away_temp
)
hass.services.async_register(
DOMAIN, SERVICE_SET_ROOM_TEMP, set_room_temp, schema=SET_ROOM_TEMP_SCHEMA
)
class MillHeater(CoordinatorEntity, ClimateEntity):
"""Representation of a Mill Thermostat device."""
_attr_fan_modes = [FAN_ON, HVAC_MODE_OFF]
_attr_max_temp = MAX_TEMP
_attr_min_temp = MIN_TEMP
_attr_target_temperature_step = PRECISION_WHOLE
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, coordinator, heater):
"""Initialize the thermostat."""
super().__init__(coordinator)
self._available = False
self._id = heater.device_id
self._attr_unique_id = heater.device_id
self._attr_name = heater.name
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, heater.device_id)},
manufacturer=MANUFACTURER,
model=f"Generation {heater.generation}",
name=self.name,
)
if heater.is_gen1:
self._attr_hvac_modes = [HVAC_MODE_HEAT]
else:
self._attr_hvac_modes = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
if heater.generation < 3:
self._attr_supported_features = (
SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
)
else:
self._attr_supported_features = SUPPORT_TARGET_TEMPERATURE
self._update_attr(heater)
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
await self.coordinator.mill_data_connection.set_heater_temp(
self._id, int(temperature)
)
await self.coordinator.async_request_refresh()
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
fan_status = 1 if fan_mode == FAN_ON else 0
await self.coordinator.mill_data_connection.heater_control(
self._id, fan_status=fan_status
)
await self.coordinator.async_request_refresh()
async def async_set_hvac_mode(self, hvac_mode):
    """Set new target hvac mode."""
    heater = self.coordinator.data[self._id]
    # Map the requested mode to the device power status; gen1 heaters
    # expose only HEAT, so OFF is ignored for them.
    if hvac_mode == HVAC_MODE_HEAT:
        power_status = 1
    elif hvac_mode == HVAC_MODE_OFF and not heater.is_gen1:
        power_status = 0
    else:
        return
    await self.coordinator.mill_data_connection.heater_control(
        self._id, power_status=power_status
    )
    await self.coordinator.async_request_refresh()
@property
def available(self) -> bool:
"""Return True if entity is available."""
return super().available and self._available
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._update_attr(self.coordinator.data[self._id])
self.async_write_ha_state()
@callback
def _update_attr(self, heater):
    """Copy the latest heater state onto this entity's attributes."""
    self._available = heater.available
    self._attr_extra_state_attributes = {
        "open_window": heater.open_window,
        "heating": heater.is_heating,
        "controlled_by_tibber": heater.tibber_control,
        "heater_generation": heater.generation,
    }
    if heater.room:
        self._attr_extra_state_attributes["room"] = heater.room.name
        self._attr_extra_state_attributes["avg_room_temp"] = heater.room.avg_temp
    else:
        self._attr_extra_state_attributes["room"] = "Independent device"
    self._attr_target_temperature = heater.set_temp
    self._attr_current_temperature = heater.current_temp
    self._attr_fan_mode = FAN_ON if heater.fan_status == 1 else HVAC_MODE_OFF
    if heater.is_heating == 1:
        self._attr_hvac_action = CURRENT_HVAC_HEAT
    else:
        self._attr_hvac_action = CURRENT_HVAC_IDLE
    # Gen1 heaters only expose HEAT (see _attr_hvac_modes in __init__),
    # so they always report HEAT regardless of power status.
    if heater.is_gen1 or heater.power_status == 1:
        self._attr_hvac_mode = HVAC_MODE_HEAT
    else:
        self._attr_hvac_mode = HVAC_MODE_OFF
class LocalMillHeater(CoordinatorEntity, ClimateEntity):
"""Representation of a Mill Thermostat device."""
_attr_hvac_mode = HVAC_MODE_HEAT
_attr_hvac_modes = [HVAC_MODE_HEAT]
_attr_max_temp = MAX_TEMP
_attr_min_temp = MIN_TEMP
_attr_supported_features = SUPPORT_TARGET_TEMPERATURE
_attr_target_temperature_step = PRECISION_WHOLE
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, coordinator):
"""Initialize the thermostat."""
super().__init__(coordinator)
self._attr_name = coordinator.mill_data_connection.name
if mac := coordinator.mill_data_connection.mac_address:
self._attr_unique_id = mac
self._attr_device_info = DeviceInfo(
connections={(CONNECTION_NETWORK_MAC, mac)},
configuration_url=self.coordinator.mill_data_connection.url,
manufacturer=MANUFACTURER,
model="Generation 3",
name=coordinator.mill_data_connection.name,
sw_version=coordinator.mill_data_connection.version,
)
self._update_attr()
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
await self.coordinator.mill_data_connection.set_target_temperature(
int |
Team02-TeamGuinness/BIOE421_RoboHand | ablation-GUI/CONFIG/2015-January-31/05-24-55 PM C3-1-CONFIG.py | Python | gpl-2.0 | 582 | 0.06701 | #File
# Output G-code file for this ablation run.
fname = 'C3-1.gcode'
# NOTE(review): the handle is opened here and presumably written/closed by
# the code that consumes this config -- confirm it is closed after use.
f=open(fname,'w')
#Laser Parameters
laserPower = 20 #% max power
dwellTime = 25 #ms
# Starting position of the pattern; units not stated here -- presumably mm,
# matching the other distances. TODO confirm.
x_start = 416
y_start = 343
z_start = 123.10 #mm above home
pauseTime = 500 #ms; time paused after movement before ablation
feedRate = 500 #movement speed
# Rectangle size properties
rectLength = 10 #mm; x-direction
rectWidth = 10 #mm; y-direction
spaceSmall = 3 #mm; space between rectangles
hexLength = 0.700 #mm
#Other
relative = 0 #0 for homing before beginning. 1 if machine has already been homed
Tim-Erwin/marshmallow-jsonapi | docs/conf.py | Python | mit | 1,125 | 0.000889 | # -*- coding: ut | f-8 -*-
from __future__ import unicode_literals

import datetime as dt
import os
import sys

# Make the package importable from a source checkout.
sys.path.insert(0, os.path.abspath('..'))

import marshmallow_jsonapi

# Sphinx extensions used by the documentation build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinx_issues',
]

primary_domain = 'py'
default_role = 'py:obj'

# Cross-reference external projects' documentation.
intersphinx_mapping = {
    'python': ('http://python.readthedocs.org/en/latest/', None),
    'marshmallow': ('http://marshmallow.readthedocs.org/en/latest/', None),
}

# Repo slug used by sphinx-issues to resolve :issue:/:pr: roles.
issues_github_path = 'marshmallow-code/marshmallow-jsonapi'

source_suffix = '.rst'
master_doc = 'index'
project = 'marshmallow-jsonapi'
copyright = 'Steven Loria {0:%Y}'.format(
    dt.datetime.utcnow()
)
# Keep the displayed version in sync with the package itself.
version = release = marshmallow_jsonapi.__version__
exclude_patterns = ['_build']

# THEME
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
qevo/py_file_helper | file_helper/__init__.py | Python | mit | 243 | 0 | """File related wrapper functions to st | reamline common use cases"""
# Re-export the submodule APIs at package level.
# NOTE(review): these are Python 2 implicit relative imports; under
# Python 3 they would need to be "from . import manage" etc. -- confirm
# the target runtime before changing.
import manage as manage
from manage import find_file, find_file_re, list_dir
import operation as operation
from operation import hash_file, read_file, slice_file, write_file
|
LLNL/spack | var/spack/repos/builtin/packages/perl-sub-identify/package.py | Python | lgpl-2.1 | 547 | 0.005484 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Proje | ct Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlSubIdentify(PerlPackage):
    """Retrieve names of code references"""

    homepage = "https://metacpan.org/pod/Sub::Identify"
    url = "http://search.cpan.org/CPAN/authors/id/R/RG/RGARCIA/Sub-Identify-0.14.tar.gz"

    # sha256 pins the release tarball for integrity checking.
    version('0.14', sha256='068d272086514dd1e842b6a40b1bedbafee63900e5b08890ef6700039defad6f')
|
netscaler/neutron | neutron/tests/unit/openvswitch/test_agent_scheduler.py | Python | apache-2.0 | 59,767 | 0.00005 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import mock
from oslo.config import cfg
from webob import exc
from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import dhcp_rpc_base
from neutron.db import l3_rpc_base
from neutron.extensions import agent
from neutron.extensions import dhcpagentscheduler
from neutron.extensions import l3agentscheduler
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.tests.unit import test_agent_ext_plugin
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_extensions
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit.testlib_api import create_request
from neutron.wsgi import Serializer
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
class AgentSchedulerTestMixIn(object):
def _request_list(self, path, admin_context=True,
expected_code=exc.HTTPOk.code):
req = self._path_req(path, admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
return self.deserialize(self.fmt, res)
def _path_req(self, path, method='GET', data=None,
              query_string=None,
              admin_context=True):
    """Build a webob request for `path`, serializing `data` when given.

    With admin_context=False the request is issued under a plain tenant
    context instead of the default admin context.
    """
    content_type = 'application/%s' % self.fmt
    body = None
    if data is not None:  # empty dict is valid
        body = Serializer().serialize(data, content_type)
    if admin_context:
        return create_request(
            path, body, content_type, method, query_string=query_string)
    else:
        return create_request(
            path, body, content_type, method, query_string=query_string,
            context=context.Context('', 'tenant_id'))
def _path_create_request(self, path, data, admin_context=True):
return self._path_req(path, method='POST', data=data,
admin_context=admin_context)
def _path_show_request(self, path, admin_context=True):
return self._path_req(path, admin_context=admin_context)
def _path_delete_request(self, path, admin_context=True):
return self._path_req(path, method='DELETE',
admin_context=admin_context)
def _path_update_request(self, path, data, admin_context=True):
return self._path_req(path, method='PUT', data=data,
admin_context=admin_context)
def _list_routers_hosted_by_l3_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
l3agentscheduler.L3_ROUTERS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_networks_hosted_by_dhcp_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
dhcpagentscheduler.DHCP_NETS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_l3_agents_hosting_router(self, router_id,
expected_code=exc.HTTPOk.code,
admin_con | text=True):
path = "/routers/%s/%s.%s" % (router_id,
l3agentscheduler.L3_AGENTS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_contex | t=admin_context)
def _list_dhcp_agents_hosting_network(self, network_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/networks/%s/%s.%s" % (network_id,
dhcpagentscheduler.DHCP_AGENTS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _add_router_to_l3_agent(self, id, router_id,
expected_code=exc.HTTPCreated.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (id,
l3agentscheduler.L3_ROUTERS,
self.fmt)
req = self._path_create_request(path,
{'router_id': router_id},
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _add_network_to_dhcp_agent(self, id, network_id,
expected_code=exc.HTTPCreated.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (id,
dhcpagentscheduler.DHCP_NETS,
self.fmt)
req = self._path_create_request(path,
{'network_id': network_id},
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _remove_network_from_dhcp_agent(self, id, network_id,
expected_code=exc.HTTPNoContent.code,
admin_context=True):
path = "/agents/%s/%s/%s.%s" % (id,
dhcpagentscheduler.DHCP_NETS,
network_id,
self.fmt)
req = self._path_delete_request(path,
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _remove_router_from_l3_agent(self, id, router_id,
expected_code=exc.HTTPNoContent.code,
admin_context=True):
path = "/agents/%s/%s/%s.%s" % (id,
l3agentscheduler.L3_ROUTERS,
router_id,
self.fmt)
req = self._path_delete_request(path, admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _register_one_agent_state(self, agent_state):
    """Report one agent's state through the agents RPC callback."""
    callback = agents_db.AgentExtRpcCallback()
    callback.report_state(self.adminContext,
                          agent_state={'agent_state': agent_state},
                          time=timeutils.strtime())
def _disable_agent(self, agent_id, admin_state_up=False):
new_agent = {}
new_agent['agent |
kralla/django-base | base/urls.py | Python | gpl-2.0 | 180 | 0.005556 | """urlconf for | the base application"""
from django.conf.urls import url, patterns
urlpatterns = pattern | s('base.views',
url(r'^$', 'home', name='home'),
)
|
nirgal/ngw | core/authbackend.py | Python | bsd-2-clause | 1,058 | 0 | from djan | go.contrib.auth.models import update_last_login
from django.contrib.auth.signals import user_logged_in
from ngw.core.models import Contact
# Ugly work around for NOT using update_last_login that is hardcoded in
# crontrib.auth :
user_logged_in.disconnect(update_last_login)
class NgwAuthBackend(object):
    """
    Authenticate a user
    """

    # required by contrib.auth:
    supports_inactive_user = False

    def authenticate(self, request, username=None, password=None):
        """Return the matching Contact when credentials are valid, else None."""
        if not username or not password:
            return None
        try:
            contact = Contact.objects.get_by_natural_key(username)
        except (Contact.DoesNotExist, Contact.MultipleObjectsReturned):
            # Unknown user or ambiguous natural key: authentication fails.
            return None
        if contact.check_password(password):
            # Record the connection time ourselves, since update_last_login
            # is disconnected at module import time.
            contact.update_lastconnection()
            return contact
        return None  # authentification failed

    def get_user(self, user_id):
        """Return the Contact with the given primary key, or None."""
        try:
            return Contact.objects.get(pk=user_id)
        except Contact.DoesNotExist:
            return None
|
jbedorf/tensorflow | tensorflow/compiler/tests/argminmax_test.py | Python | apache-2.0 | 3,905 | 0.002561 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ArgMin and ArgMax Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMinMaxTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, axis, output_type, op_input,
                                   expected):
    """Verifies that 'op' produces 'expected' when fed input 'op_input' .

    Args:
      op: argmin or argmax operator to test.
      axis: integer axis to reduce across.
      output_type: numpy datatype of the output to produce.
      op_input: numpy input array to use as input to 'op'.
      expected: numpy array representing the expected output of 'op'.
    """
    with self.cached_session() as session:
        with self.test_scope():
            # Feed through a placeholder so the op is compiled by XLA
            # rather than constant-folded away.
            pinp = array_ops.placeholder(
                dtypes.as_dtype(op_input.dtype), op_input.shape, name="a")
            output = op(pinp, axis=axis, output_type=output_type)
        result = session.run(output, {pinp: op_input})
        self.assertAllEqual(result, expected)
def testArgMinMax(self):
# Complex numbers do not support argmin/argmax.
minmax_types = self.all_types & {np.int32, np.int64}
for dtype in minmax_types:
# output_type is a numpy data type that is used to specify the desired
# output type of the op as well as to convert the Python number to the
# array scalar of the type.
for output_type in minmax_types:
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=0,
output_type=output_type,
op_input=np.array([1, 10, 27, 3, 3, 4], dtype=dtype),
expected=output_type(2))
self._assertOpOutputMatchesExpected(
| math_ops.argmax,
axis=0,
output_type=output_type,
op_input=np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([0, 1, 0], dtype=output_type))
| self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=1,
output_type=output_type,
op_input=np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([0, 0], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=0,
output_type=output_type,
op_input=np.array([3, 10, 27, 3, 2, 4], dtype=dtype),
expected=output_type(4))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=0,
output_type=output_type,
op_input=np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([1, 0, 1], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=1,
output_type=output_type,
op_input=np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([1, 1], dtype=output_type))
if __name__ == "__main__":
test.main()
|
bretth/django-auth0user | auth0user/urls.py | Python | bsd-3-clause | 147 | 0 | from django. | conf.urls import url
from . import views
ap | p_name = 'auth0user'
urlpatterns = [
url(r'^alogin/', views.alogin, name='alogin'),
]
|
GitYiheng/reinforcement_learning_test | test00_previous_files/gym_test5.py | Python | mit | 47 | 0.021277 | from gym | import envs
print(envs.registry | .all()) |
pdbaines/KepleR | python/wavelets.py | Python | mit | 3,443 | 0.01278 |
import matplotlib.pyplot as plt
from matplotlib import cm
import pywt
import scipy
import numpy as np
import time
import random
import math
import sys
import kplr
import wavelet_funcs
data = np.loadtxt("KIC_7874976_llc.dat")
data.shape # (60020,4)
# From Montet:
# Columns are time, raw-flux, detrended-flux, uncertainty
# Plot detrended light curve
timepts = data[:,0]
y_raw = data[:,1]
y_det = data[:,2]
# 102 NA's:
np.sum(np.isnan(y_det))
plot(y_raw)
plot(timepts,y_det)
# Wavelet specs:
wavelet = 'db2'
mode = 'zpd'
# Wavelet transform:
wvb = mwavelets(y,wavelet=wavelet,mode=mode)
# Look at distributions of wavelets at each scale:
# Look at autocorrelations of wavelets at each scale:
# Look at QQ-plots of wavelets at each scale (check for normality)
assert 0
###################################
# TODO:
# (1) Handle non-power of 2 series
# (2) Handle nan's
###################################
# Set the seed:
rng_seed = 3123194149
random.seed(rng_seed)
######################
# Simulate:
N = pow(2,10)
g_mean = 0.0
h_width = 8
t_depth = 1.0
sigma = 0.2 # 0.2
#######################
# Flat mean...
m = np.empty(N)
m.fill(g_mean)
# Noisy mean...
sin_cycles = 2.0
sin_height = 0.4
m = np.empty(N)
for i in range(0,N):
m[i] = sin_height*math.sin(2*math.pi*i*sin_cycles/N)
# Layer in a transit...
for i in range(0,2*h_width):
ix = (N/2)+i
m[ix] = m[ix]-t_depth
# Simulate:
y = np.empty(N)
for i in range(0,N):
y[i] = np.random.normal(loc=m[i],scale=sigma)
#####################################
# Begin wavelet stuff...
# pywt.families()
# ['haar', 'db', 'sym', 'coif', 'bior', 'rbio', 'dmey']
# pywt.wavelist('haar')
# ['haar'] # other families have lots...
#
# y_dwt = pywt.dwt(y, wavelet='haar', mode='zpd')
# print y_dwt
#
#pywt.wavedec(y, wavelet='haar', level=1)
#
#full_y_dwt = pywt.wavedec(y, wavelet='haar')
#coef_pyramid_plot(full_y_dwt)
#
#fdwt_vec = np.empty(0)
| #for i in range(0,len(fdwt)):
# fdwt_vec = np.append(fdwt_vec,fdwt[i])
#
#plt.plot(fdwt_vec)
#plt.show()
#
# cm.coolwarm
# cm.afmhot
#f_ret = wvanalyze(y,wtype='father',wavelet='haar',mode='zpd',cmap=cm.afmhot)
m_ret = wvanalyze(y,wtype='mother',wavelet='haar',mode='zpd',cmap=cm.afmhot)
assert 0
# Now, REAL Kepler data... :)
import kplr
import pyfits
data = np.loadtxt("transit.t | xt")
len(data)
data = data - np.median(data)
data = data[0:512]
f_ret = wvanalyze(data,wtype='father',wavelet='haar',mode='zpd',cmap=cm.afmhot)
client = kplr.API()
planet = client.planet("62b")
do_fetch = False
if do_fetch:
all_datasets = []
ndatasets = 0
for dataset in planet.data:
print "Fetching Kepler dataset " + str(ndatasets) + "..."
all_datasets.append(dataset.fetch())
ndatasets = ndatasets + 1
else:
print "TODO: Read local file..."
# Filename (unicode):
all_datasets[0].filename
data = kplr.Dataset(all_datasets[0].filename)
# data.saplux is lightcurve...
foo = data.sapflux.copy()
sum(foo)
f_ret = wvanalyze(data.sapflux,wtype='father',wavelet='haar',mode='zpd',cmap=cm.afmhot)
assert 0
plt.plot(fgrid)
plt.show()
fig = plt.figure(1)
# Top plot:
ax1 = plt.subplot(2,1,1)
p1 = ax1.plot(y)
# Bottom plot:
ax2 = plt.subplot(2,1,2)
p2 = ax2.plot(y)
plt.show()
scales = wave.autoscales(N=y.shape[0], dt=1, dj=0.25, wf='dog', p=2)
X = wave.cwt(x=x, dt=1, scales=scales, wf='dog', p=2)
p2 = ax2.imshow(np.abs(X), interpolation='nearest')
plt.show()
|
zielmicha/satori | satori.web/satori/web/__init__.py | Python | mit | 616 | 0.001623 | # vim:ts=4:sts=4:sw=4:expandtab
import os
import satori.web.setup
def manage():
from django.core. | management import execute_manager
import satori.web.settings as settings
# HACK
import django.core.management
old_fmm = django.core.management.find_management_module
def find_management_module(app_name):
if app_name == 'satori.web':
return os.path.join(os.path.dirname(__file__), 'management')
else:
return old_fmm(app_name)
django.core.management.find_management_module = find_management_module
# END OF | HACK
execute_manager(settings)
|
lukegb/ehacktivities | eactivities/models/finances.py | Python | mit | 7,365 | 0.000815 | from . import Model, LazyModelMixin, LazyDictFromArrayModel, ArrayModel, Account, Amount, Images, PdfableModelMixin, NoneableModelMixin
from eactivities.parsers.finances import FinancesParser, BankingRecordsParser, SalesInvoicesParser, ClaimsParser, PurchaseOrdersParser, \
TransactionCorrectionsParser, InternalChargesParser, MembersFundsRedistributionsParser, FundingRedistributionsParser
class TransactionLine(Model):
_submodels = {
'value': Amount,
'unit_value': Amount,
'account': Account,
'activity': Account,
'funding_source': Account,
'consolidation': Account
}
class TransactionLineSet(ArrayModel):
_submodel = TransactionLine
class AuditEntry(Model):
pass
class AuditTrail(ArrayModel):
_submodel = AuditEntry
class Authoriser(Model):
pass
class NextAuthorisers(NoneableModelMixin, Model):
pass
class FinancialDocumentCollection(LazyDictFromArrayModel):
_submodel = None
_lazy_id_attribute = 'item_id'
class FinancialDocument(LazyModelMixin, Model):
pass
class BankingRecord(FinancialDocument):
_lazy_loader_parser = BankingRecordsParser
_attributes = [
'id', 'date', 'transaction_lines', 'gross_amount',
'paying_in_slips'
]
_submodels = {
'transaction_lines': TransactionLineSet,
'paying_in_slips': Images
}
class BankingRecords(FinancialDocumentCollection):
_submodel = BankingRecord
_lazy_loader_parser = BankingRecordsParser
class SalesInvoice(PdfableModelMixin, FinancialDocument):
_lazy_loader_parser = SalesInvoicesParser
_attributes = [
'id', 'date', 'customer', 'customer_purchase_order_number',
'gross_amount', 'status', 'international', 'audit_trail',
'next_authorisers', 'transaction_lines', 'purchase_order_attachments',
]
_submodels = {
'transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers,
'purchase_order_attachments': Images
}
class SalesInvoices(FinancialDocumentCollection):
_submodel = SalesInvoice
_lazy_loader_parser = SalesInvoicesParser
class Claim(FinancialDocument):
_lazy_loader_parser = ClaimsParser
_attributes = [
'id', 'person', 'status', 'payment_date', 'gross_amount',
'notes', 'audit_trail', 'next_authorisers', 'transaction_lines',
'receipts'
]
_submodels = {
'transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers,
'receipts': Images
}
class Claims(FinancialDocumentCollection):
_submodel = Claim
_lazy_loader_parser = ClaimsParser
class PurchaseOrder(PdfableModelMixin, FinancialDocument):
_lazy_loader_parser = PurchaseOrdersParser
_attributes = [
'id', 'supplier', 'status', 'payment_date', 'gross_amount',
'invoice_received', 'finished_goods_receipting', 'pro_forma',
'audit_trail', 'next_authorisers', 'transaction_lines',
'gross_amount', 'invoices'
]
_submodels = {
'transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers,
'invoices': Images
}
class PurchaseOrders(FinancialDocumentCollection):
_submodel = PurchaseOrder
_lazy_loader_parser = PurchaseOrdersParser
class TransactionCorrection(FinancialDocument):
_lazy_loader_parser = TransactionCorrectionsParser
_attributes = [
'id', 'status', 'gross_amount', 'from_transaction_lines',
'to_transaction_lines', 'next_authorisers', 'audit_trail'
]
_submodels = {
'from_transaction_lines': TransactionLineSet,
'to_transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers
}
class TransactionCorrections(FinancialDocumentCollection):
_submodel = TransactionCorrection
_lazy_loader_parser = TransactionCorrectionsParser
class InternalCharge(FinancialDocument):
_lazy_loader_parser = InternalChargesParser
_attributes = [
'id', 'status', 'gross_amount', 'charged_committee', 'receiving_committee',
'notes', 'audit_trail', 'next_authorisers', 'transaction_lines'
]
_submodels = {
'transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers
}
class InternalCharges(FinancialDocumentCollection):
_submodel = InternalCharge
_lazy_loader_parser = InternalChargesParser
class MembersFundsRedistribution(FinancialDocument):
_lazy_loader_parser = MembersFundsRedistributionsParser
_attributes = [
'id', 'status', 'person', 'from_transaction_lines',
'to_transaction_lines', 'next_authorisers', 'audit_trail',
'funding_source', 'gross_amount'
]
_submodels = {
'from_transaction_lines': TransactionLineSet,
'to_transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers
}
class MembersFundsRedistributions(FinancialDocumentCollection):
_submodel = MembersFundsRedistribution
_lazy_loader_parser = MembersFundsRedistributionsParser
class FundingRedistribution(FinancialDocument):
_lazy_loader_parser = FundingRedistributionsParser
_attributes = [
'id', 'status', 'gross_amount', 'funding_source',
'audit_trail', 'next_authorisers', 'from_transaction_lines',
'to_transaction_lines'
]
_submodels = {
'from_transaction_lines': TransactionLineSet,
'to_transaction_lines': TransactionLineSet,
'next_authorisers': NextAuthorisers
}
class FundingRedistributions(FinancialDocumentCollection):
_submodel = FundingRedistribution
_lazy_loader_parser = FundingRedistributionsParser
class Finances(LazyModelMixin, Model):
_lazy_loader_parser = FinancesParser
_attributes = [
'funding_overview'
]
# Income #
def banking_records(self):
return self._spawn(BankingRecords, BankingRecordsParser)
def sales_invoices(self):
return self._spawn(SalesInvoices, SalesInvoicesParser)
# TODO: Credit Notes
# Expenditure #
def claims(self):
return self._spawn(Claims, ClaimsParser)
def purchase_orders(self):
return self._spawn(PurchaseOrders, PurchaseOrdersParser)
# TODO: Imprests, Credit Card Requests, Charitable Donation | s
# Transfers #
def transaction_corrections(self):
return self._spawn(TransactionCorrections, TransactionCorrectionsParser)
def internal_charges(self):
return self._spawn(InternalCharges, InternalChargesParser)
def members_funds_redistributions(self):
return self._spawn(MembersFundsRedistributions, MembersFundsRedistributionsParser)
def funding_redistributions(self):
| return self._spawn(FundingRedistributions, FundingRedistributionsParser)
# TODO: Designated Members Funds Transfers
def _spawn(self, model_cls, parser_cls):
return model_cls(
eactivities=self._eactivities,
parent=self,
data=dict(
club_id=self._data['club_id'],
year=self._data['year']
)
)
|
abram777/torrents-from-xml | examples/geektool script/torrents.geektool.py | Python | mit | 449 | 0 | #!/usr/bin/env python
from sys import argv
from GeekToolKickassTorrentFeed import GeekToolKickassTorrentFeed
if __name__ == "__main__" and len(argv) > 1:
mode = argv[1]
print(str.format("{0}{1}", "\t" * 4, mod | e.capitalize()))
feed = GeekToolKickassTorrentFeed(mode)
if feed:
feed_data = feed.get_top_seeded_torrents().fetch().get_formatted_list()
print | (feed_data)
else:
print("No torrents available")
|
tcoxon/wiitar | midipy_src/test.py | Python | gpl-2.0 | 311 | 0.080386 |
import time
import midipy as midi
midi.open(128, 0, "midipy test", 0)
for (note, t) in [(48,0.5),(48,0.5),(50,1.0),(48,1.0),(53,1.0),(52,1.0),
(48,0.5),(48,0.5),(50,1.0),(48,1.0),(55,1.0),(53,1.0)]:
midi.note_on(note,127)
| time.sleep(t/2)
midi.n | ote_off(note,127)
midi.close()
|
sknepneklab/SAMoS | analysis/batch_nematic/batch_analyze_nematic_R8a.py | Python | gpl-3.0 | 2,379 | 0.045397 | # ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from nematic_analysis import *
#from glob import glob
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder='/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#basefolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
#outfolder= '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
outfolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#v0val=['0.3','0.5','0.7','1.5','2.0','3.0','7.0','10.0']
v0val=['0.3','0.5','0.7','1.5']
sigma=1
rval=['8.0']
nstep=10100000
nsave=5000
nsnap=int(nstep/nsave)
#skip=835
skip=0
for r in rval:
for v0 in v0val:
#param = Param(basefolder)
files = sorted(glob(basefolder+'R_'+ r+ '/v0_' + v0 + '/sphere_*.dat'))[skip:]
defects=np.zeros((len(file | s),12))
ndefect=np.zeros((len(files),1))
u=0
for f in files:
print f
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_data' + str(u)+'.vtk'
defects0,ndefect0=getDefects(f,float(r),sigma,outname,False,False)
defects[u,0:3]=defects0[0,:]
defects[u,3:6]=defects0[1,:]
defects[u,6:9]=defects0[2,:]
defects[u,9:12]=defects0[3,:]
| ndefect[u]=ndefect0
#outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
#outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_defects' + str(u)+'.vtk'
#print outname
#writeDefects(defects0,ndefect0,outname)
u+=1
outfile2=outfolder + 'defects_v0_' + v0 + '_R_'+ r+ '.dat'
np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect defects')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.