repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
adieu/authentic2 | authentic2/migrations/0006_auto__chg_field_user_username__chg_field_user_email.py | Python | agpl-3.0 | 266 | 0 | # -*- codi | ng: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigr | ation):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['authentic2']
|
cristhro/Machine-Learning | ejercicio 5/flaskApp/flaskApp.py | Python | gpl-3.0 | 6,023 | 0.01926 | from flask import Flask, jsonify, request, render_template, make_response
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template(
'index.html'
)
@app.route('/buscar', methods = ["POST"])
def buscar():
buscado = request.form['buscado']
resultado = realizar_busqueda_2(buscado)
return jsonify({
'resultado': resultado
})
def realizar_busqueda_2(buscado):
bodyQuery2 = {
"query": {
"match": {
"Title": {
"query": buscado,
"fuzziness": "AUTO",
"boost" : 2.0,
"prefix_length" : 1,
"max_expansions": 100,
#"minimum_should_match" : 10,
"operator": "and"
}
}
},
"highlight": {
"fields": {
"Title": {},
"Plot": {"fragment_size": 300, "number_of_fragments": 3}
},
# Permite el hightlight sobre campos que no se han hecho query
# como Plot en este ejemplo
"require_field_match": False
}
}
res = es.search(index="prueba-index", body= bodyQuery2)
print("Got %d Hits:" % res['hits']['total'])
# Uso el [0] porque solo hay 1 hit, si hubiese mas, pues habria mas campos
# de la lista, habria que usar el for de arriba para sacar el highlight de
# cada uno de la lista
# print res['hits']['hits'][0]['highlight']
resultado = []
for hit in res['hits']['hits']:
resultado.append(hit['highlight'])
return resultado
def realizar_busqueda(buscado):
bodyQuery = {
"query": {
"match": {
"Director": {
"query": buscado,
"fuzziness": "AUTO",
"operator": "and"
}
}
},
"highlight": {
"fields": {
"Title": {},
"Plot": {}
}
}
}
res = es.search(index="prueba-index", body= bodyQuery)
print("Got %d Hits:" % res['hits']['total'])
resultado = []
for hit in res['hits']['hits']:
resultado.append("%(T | itle)s" % hit["_source"])
return resultado
def realizar_busqueda_3(buscado):
bodyQuery = {
"query": {
"regexp":{
"Title": buscado +".*"
}
},
"highlight": {
"fields": {
"Title": {},
"Plot": {"fragment_size": 300, "number_of_fragments": 3},
"Director": {}
},
# Permite el hightlight sobre campos que no se han hecho query
# como Plot en | este ejemplo
"require_field_match": False
}
}
res = es.search(index="prueba-index", body= bodyQuery)
print("Got %d Hits:" % res['hits']['total'])
resultado = []
for hit in res['hits']['hits']:
resultado.append(hit['highlight'])
return resultado
def realizar_busqueda_4(buscado):
bodyQuery2 = {
"query": {
"bool": {
"should": [
{ "match": {
"Title": {
"query": buscado + ".*",
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Plot": {
"query": buscado,
"fuzziness": 2,
"prefix_length" : 1,
"operator": "and"
}
}
},
{ "match": {
"Genres": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Director": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Writer": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Cast": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Country": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Language": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
{ "match": {
"Rating": {
"query": buscado,
"fuzziness": "AUTO",
"prefix_length" : 1,
"operator": "and"
}
}},
]
}
},
"highlight": {
"fields": {
"Title": {},
"Plot": {},
"Director": {}
},
# Permite el hightlight sobre campos que no se han hecho query
# como Plot en este ejemplo
"require_field_match": False
}
}
res = es.search(index="prueba-index", body= bodyQuery)
print("Got %d Hits:" % res['hits']['total'])
resultado = []
for hit in res['hits']['hits']:
resultado.append(hit['highlight'])
return resultado
if __name__ == '__main__':
app.run(debug=True)
|
endlisnis/weather-records | maxtemp.py | Python | gpl-3.0 | 3,298 | 0.010309 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import time, posix, daily
data = daily.load("ottawa")
class FloatValue():
__slots__ = ()
def __init__(self, field):
self.fieldIndex = field.index
def __call__(self, fields):
r = fields[self.fieldIndex]
if len(r) == 0:
return None
return float(r)
class IntValue():
__slots__ = ()
def __init__(self, field):
self.fieldIndex = field.index
def __call__(self, fields):
r = fields[self.fieldIndex]
if len(r) == 0:
return None
return int(float(r)+.5)
class IntDiff():
__slots__ = ()
def __init__(self, field1, field2):
self.field1Index = field1.index
self.field2Index = field2.index
def __call__(self, fields):
r1 = fields[self.field1Index]
r2 = fields[self.field2Index]
if len(r1) == 0 or len(r2) == 0:
return None
return int(float(r1) - float(r2) +.5)
class Max():
__slots__ = ()
def __init__(self, field):
self.fieldIndex = field.index
#
def __call__(self, fields):
r = fields[self.fieldIndex]
if len(r) == 0:
return -99
return float(r)
#
def better(self, one, two):
return self(one) > self(two)
class MaxDiff():
__slots__ = ()
def __init__(self, field1, field2):
self.field1Index = field1.index
self.field2Index = field2.index
#
def __call__(self, fields):
r1 = fields[self.field1Index]
r2 = fields[self.field2Index]
if len(r1) == 0 or len(r2) == 0:
return 0
return float(r1) - float(r2)
#
def better(self, one, two):
return self(one) > self(two)
class MinDiff():
__slots__ = ()
def __init__(self, field1, field2):
self.field1Index = field1.index
self.field2Index = field2.index
#
def __call__(self, fields):
r1 = fields[self.field1Index]
r2 = fields[self.field2Index]
if len(r1) == 0 or len(r2) == 0:
return 100
return float(r1) - float(r2)
#
def better(self, one, two):
| return self(one) < self(two)
def findBest(proc):
bv = None
date = None
#
for year in data:
yd = data[year]
for month in yd:
md = yd[month]
for day in md:
f = md[day]
if bv == None or proc.better(f, bv):
bv = f
date = (year,month,day)
#
return date, bv
| def histogram(proc):
hist = {}
#
for year in data:
yd = data[year]
for month in yd:
md = yd[month]
for day in md:
f = proc(md[day])
if f != None:
if f not in hist:
hist[f] = 0
hist[f] += 1
return hist
#findBest(Max(daily.MAX_TEMP))
#findBest(Max(daily.MAX_TEMP))
#findBest(Max(daily.TOTAL_RAIN_MM))
#findBest(Max(daily.TOTAL_SNOW_CM))
#findBest(MaxDiff(daily.MAX_TEMP, daily.MIN_TEMP))
#findBest(MinDiff(daily.MAX_TEMP, daily.MIN_TEMP))
mth = histogram(IntValue(daily.TOTAL_SNOW_CM))
for mt in sorted(mth.keys()):
print '%s\t%s' % (mt, mth[mt])
|
awslabs/sockeye | sockeye_contrib/benchmark/benchmark_to_output.py | Python | apache-2.0 | 1,162 | 0 | #!/usr/bin/env python
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http:/ | /aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys
from typing import Dict, Iterator
def read_benchmark_handler_output(stream: str) -> Iterator[Dict[str, str]]:
for line in stream:
fields = line.strip().split('\t')
| entry = dict(field.split('=', 1) for field in fields)
yield entry
def get_output_from_benchmark_output(input_stream) -> Iterator[str]:
for entry in read_benchmark_handler_output(input_stream):
yield entry['output']
def main():
for output in get_output_from_benchmark_output(sys.stdin):
print(output)
if __name__ == '__main__':
main()
|
prman-pixar/RenderManForBlender | rfb_utils/camera_utils.py | Python | mit | 2,235 | 0.004474 | from bpy_extras.view3d_utils import location_3d_to_region_2d
def render_get_resolution_(r):
xres = int(r.resolution_x * r.resolution_percentage * 0.01)
yres = int(r.resolution_y * r.resolution_percentage * 0.01)
return xres, yres
def render_get_aspect_(r, camera=None, x=-1, y=-1):
if x != -1 and y != -1:
xratio = x * r.pixel_aspect_x / 200.0
yratio = y * r.pixel_aspect_y / 200.0
else:
xres, yres = render_get_resolution_(r)
xratio = xres * r.pixel_aspect_x / 200.0
yratio = yres * r.pixel_aspect_y / 200.0
if camera is None or camera.type != 'PERSP':
fit = 'AUTO'
else:
fit = camera.sensor_fit
if fit == 'HORIZONTAL' or fit == 'AUTO' and xratio > yratio:
aspectratio = xratio / yratio
xaspect = aspectratio
yaspect = 1.0
elif fit | == 'VERTICAL' or fit == 'AUTO' and yratio > xratio:
aspectratio = yratio / xratio
xaspect = 1.0
yaspect = aspectratio
else:
aspectratio = xaspect = | yaspect = 1.0
return xaspect, yaspect, aspectratio
def get_viewport_cam_borders(ob, render, region, region_data, scene):
# Code reference:
# https://blender.stackexchange.com/questions/6377/coordinates-of-corners-of-camera-view-border
cam = ob.data
frame = cam.view_frame(scene=scene)
# move from object-space into world-space
frame = [ob.matrix_world @ v for v in frame]
# move into pixelspace
frame_px = [location_3d_to_region_2d(region, region_data, v) for v in frame]
min_x = -1
min_y = -1
max_x = -1
max_y = -1
for v in frame_px:
if min_x == -1:
min_x = v[0]
elif min_x > v[0]:
min_x = v[0]
if max_x < v[0]:
max_x = v[0]
if min_y == -1:
min_y = v[1]
elif min_y > v[1]:
min_y = v[1]
if max_y < v[1]:
max_y = v[1]
cam_width = max_x - min_x
cam_height = max_y - min_y
x0 = min_x + render.border_min_x * cam_width
x1 = min_x + render.border_max_x * cam_width
y0 = min_y + render.border_min_y * cam_height
y1 = min_y + render.border_max_y * cam_height
return (x0, x1, y0, y1) |
Fougere87/unsec | unsec/algorithm/__init__.py | Python | unlicense | 202 | 0.00495 | from unsec.algorithm | .algo import Algo
from unsec.algorithm.hierarchical_algo import HierarchicalAlgo
from unsec.algorithm.skmean_algo import SKMeanAlgo
from unsec.algor | ithm.kmean_algo import KMeanAlgo
|
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/_pydevd_bundle/pydevd_bytecode_utils.py | Python | epl-1.0 | 25,462 | 0.001885 | """
Bytecode analysing utils. Originally added for using in smart step into.
Note: not importable from Python 2.
"""
import sys
if sys.version_info[0] < 3:
raise ImportError('This module is only compatible with Python 3.')
from _pydev_bundle import pydev_log
from types import CodeType
from _pydevd_frame_eval.vendored.bytecode.instr import _Variable
from _pydevd_frame_eval.vendored import bytecode
from _pydevd_frame_eval.vendored.bytecode import cfg as bytecode_cfg
import dis
import opcode as _opcode
from _pydevd_bundle.pydevd_constants import KeyifyList, DebugInfoHolder
from bisect import bisect
from collections import deque
# When True, throws errors on unknown bytecodes, when False, ignore | those as if they didn't change the stack.
STRICT_MODE = False
DEBUG = False
_BINARY_OPS = set([opname for opname in dis.opname if opname.startswith('BINARY_')])
_BINARY_OP_MAP = {
'BINARY_POWER': '__pow__',
'BINARY | _MULTIPLY': '__mul__',
'BINARY_MATRIX_MULTIPLY': '__matmul__',
'BINARY_FLOOR_DIVIDE': '__floordiv__',
'BINARY_TRUE_DIVIDE': '__div__',
'BINARY_MODULO': '__mod__',
'BINARY_ADD': '__add__',
'BINARY_SUBTRACT': '__sub__',
'BINARY_LSHIFT': '__lshift__',
'BINARY_RSHIFT': '__rshift__',
'BINARY_AND': '__and__',
'BINARY_OR': '__or__',
'BINARY_XOR': '__xor__',
'BINARY_SUBSCR': '__getitem__',
'BINARY_DIVIDE': '__div__'
}
_COMP_OP_MAP = {
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>': '__gt__',
'>=': '__ge__',
'in': '__contains__',
'not in': '__contains__',
}
class Target(object):
__slots__ = ['arg', 'lineno', 'offset', 'children_targets']
def __init__(self, arg, lineno, offset, children_targets=()):
self.arg = arg
self.lineno = lineno
self.offset = offset
self.children_targets = children_targets
def __repr__(self):
ret = []
for s in self.__slots__:
ret.append('%s: %s' % (s, getattr(self, s)))
return 'Target(%s)' % ', '.join(ret)
__str__ = __repr__
class _TargetIdHashable(object):
def __init__(self, target):
self.target = target
def __eq__(self, other):
if not hasattr(other, 'target'):
return
return other.target is self.target
def __ne__(self, other):
return not self == other
def __hash__(self):
return id(self.target)
class _StackInterpreter(object):
'''
Good reference: https://github.com/python/cpython/blob/fcb55c0037baab6f98f91ee38ce84b6f874f034a/Python/ceval.c
'''
def __init__(self, bytecode):
self.bytecode = bytecode
self._stack = deque()
self.function_calls = []
self.load_attrs = {}
self.func = set()
self.func_name_id_to_code_object = {}
def __str__(self):
return 'Stack:\nFunction calls:\n%s\nLoad attrs:\n%s\n' % (self.function_calls, list(self.load_attrs.values()))
def _getname(self, instr):
if instr.opcode in _opcode.hascompare:
cmp_op = dis.cmp_op[instr.arg]
if cmp_op not in ('exception match', 'BAD'):
return _COMP_OP_MAP.get(cmp_op, cmp_op)
return instr.arg
def _getcallname(self, instr):
if instr.name == 'BINARY_SUBSCR':
return '__getitem__().__call__'
if instr.name == 'CALL_FUNCTION':
# Note: previously a '__call__().__call__' was returned, but this was a bit weird
# and on Python 3.9 this construct could appear for some internal things where
# it wouldn't be expected.
# Note: it'd be what we had in func()().
return None
if instr.name == 'MAKE_FUNCTION':
return '__func__().__call__'
if instr.name == 'LOAD_ASSERTION_ERROR':
return 'AssertionError'
name = self._getname(instr)
if isinstance(name, _Variable):
name = name.name
if not isinstance(name, str):
return None
if name.endswith('>'): # xxx.<listcomp>, xxx.<lambda>, ...
return name.split('.')[-1]
return name
def _no_stack_change(self, instr):
pass # Can be aliased when the instruction does nothing.
def on_LOAD_GLOBAL(self, instr):
self._stack.append(instr)
def on_POP_TOP(self, instr):
try:
self._stack.pop()
except IndexError:
pass # Ok (in the end of blocks)
def on_LOAD_ATTR(self, instr):
self.on_POP_TOP(instr) # replaces the current top
self._stack.append(instr)
self.load_attrs[_TargetIdHashable(instr)] = Target(self._getname(instr), instr.lineno, instr.offset)
on_LOOKUP_METHOD = on_LOAD_ATTR # Improvement in PyPy
def on_LOAD_CONST(self, instr):
self._stack.append(instr)
on_LOAD_DEREF = on_LOAD_CONST
on_LOAD_NAME = on_LOAD_CONST
on_LOAD_CLOSURE = on_LOAD_CONST
on_LOAD_CLASSDEREF = on_LOAD_CONST
# Although it actually changes the stack, it's inconsequential for us as a function call can't
# really be found there.
on_IMPORT_NAME = _no_stack_change
on_IMPORT_FROM = _no_stack_change
on_IMPORT_STAR = _no_stack_change
on_SETUP_ANNOTATIONS = _no_stack_change
def on_STORE_FAST(self, instr):
try:
self._stack.pop()
except IndexError:
pass # Ok, we may have a block just with the store
# Note: it stores in the locals and doesn't put anything in the stack.
on_STORE_GLOBAL = on_STORE_FAST
on_STORE_DEREF = on_STORE_FAST
on_STORE_ATTR = on_STORE_FAST
on_STORE_NAME = on_STORE_FAST
on_DELETE_NAME = on_POP_TOP
on_DELETE_ATTR = on_POP_TOP
on_DELETE_GLOBAL = on_POP_TOP
on_DELETE_FAST = on_POP_TOP
on_DELETE_DEREF = on_POP_TOP
on_DICT_UPDATE = on_POP_TOP
on_SET_UPDATE = on_POP_TOP
on_GEN_START = on_POP_TOP
def on_NOP(self, instr):
pass
def _handle_call_from_instr(self, func_name_instr, func_call_instr):
self.load_attrs.pop(_TargetIdHashable(func_name_instr), None)
call_name = self._getcallname(func_name_instr)
target = None
if not call_name:
pass # Ignore if we can't identify a name
elif call_name in ('<listcomp>', '<genexpr>', '<setcomp>', '<dictcomp>'):
code_obj = self.func_name_id_to_code_object[_TargetIdHashable(func_name_instr)]
if code_obj is not None:
children_targets = _get_smart_step_into_targets(code_obj)
if children_targets:
# i.e.: we have targets inside of a <listcomp> or <genexpr>.
# Note that to actually match this in the debugger we need to do matches on 2 frames,
# the one with the <listcomp> and then the actual target inside the <listcomp>.
target = Target(call_name, func_name_instr.lineno, func_call_instr.offset, children_targets)
self.function_calls.append(
target)
else:
# Ok, regular call
target = Target(call_name, func_name_instr.lineno, func_call_instr.offset)
self.function_calls.append(target)
if DEBUG and target is not None:
print('Created target', target)
self._stack.append(func_call_instr) # Keep the func call as the result
def on_COMPARE_OP(self, instr):
try:
_right = self._stack.pop()
except IndexError:
return
try:
_left = self._stack.pop()
except IndexError:
return
cmp_op = dis.cmp_op[instr.arg]
if cmp_op not in ('exception match', 'BAD'):
self.function_calls.append(Target(self._getname(instr), instr.lineno, instr.offset))
self._stack.append(instr)
def on_IS_OP(self, instr):
try:
self._stack.pop()
except IndexError:
return
try:
self._stack.pop()
except IndexError:
return
def on_BINARY_SUBSCR(self, instr):
try:
_sub = self._stack |
Shir0kamii/py3status | py3status/modules/mpd_status.py | Python | bsd-3-clause | 8,004 | 0.00025 | # coding: utf-8
"""
Display information from mpd.
Configuration parameters:
cache_timeout = how often we refresh this module in seconds (2s default)
color = enable coloring output (default False)
color_pause = custom pause color (default i3status color degraded)
color_play = custom play color (default i3status color good)
color_stop = custom stop color (default i3status color bad)
format = template string (see below)
hide_when_paused: hide the status if state is paused
hide_when_stopped: hide the status if state is stopped
host: mpd host
max_width: maximum status length
password: mpd password
port: mpd port
state_pause: label to display for "paused" state
state_play: label to display for "playing" state
state_stop: label to display for "stopped" state
Requires:
- python-mpd2 (NOT python2-mpd2)
# pip install python-mpd2
Refer to the mpc(1) manual page for the list of available placeholders to be
used in `format`.
You can also use the %state% placeholder, that will be replaced with the state
label (play, pause or stop).
Every placeholder can also be prefixed with `next_` to retrieve the data for
the song following the one currently playing.
You can also use {} instead of %% for placeholders (backward compatibility).
Examples of `format`:
Show state and (artist -) title, if no title fallback to file:
%state% [[[%artist% - ]%title%]|[%file%]]
Alternative legacy syntax:
{state} [[[{artist} - ]{title}]|[{file}]]
Show state, [duration], title (or file) and next song title (or file):
%state% \[%time%\] [%title%|%file%] → [%next_title%|%next_file%]
@author shadowprince
@author zopieux
@license Eclipse Public License
"""
import ast
import datetime
import itertools
import socket
import time
from mpd import MPDClient, CommandError
def parse_template(instr, value_getter, found=True):
"""
MPC-like parsing of `instr` using `value_getter` callable to retrieve the
text representation of placeholders.
"""
instr = iter(instr)
ret = []
for char in instr:
if char in '%{':
endchar = '%' if char == '%' else '}'
key = ''.join(itertools.takewhile(lambda e: e != endchar, instr))
value = value_getter(key)
if value:
found = True
ret.append(value)
else:
found = False
elif char == '#':
ret.append(next(instr, '#'))
elif char == '\\':
ln = next(instr, '\\')
if ln in 'abtnvfr':
ret.append(ast.literal_eval('"\\{}"'.format(ln)))
else:
ret.append(ln)
elif char == '[':
subret, found = parse_template(instr, value_getter, found)
subret = ''.join(subret)
ret.append(subret)
elif char == ']':
if found:
ret = ''.join(ret)
return ret, True
else:
return '', False
elif char == '|':
s | ubret, subfound = parse_template(instr, value_getter, found)
if found:
pass
elif subfound:
ret.append(''.join(subret))
found = True
else:
return '', False
elif char == '&':
subret, subfound = parse_template(instr, value_getter, found)
if found and subf | ound:
subret = ''.join(subret)
ret.append(subret)
else:
return '', False
else:
ret.append(char)
ret = ''.join(ret)
return ret, found
def song_attr(song, attr):
def parse_mtime(date_str):
return datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
if attr == 'time':
try:
duration = int(song['time'])
if duration > 0:
minutes, seconds = divmod(duration, 60)
return '{:d}:{:02d}'.format(minutes, seconds)
raise ValueError
except (KeyError, ValueError):
return ''
elif attr == 'position':
try:
return '{}'.format(int(song['pos']) + 1)
except (KeyError, ValueError):
return ''
elif attr == 'mtime':
return parse_mtime(song['last-modified']).strftime('%c')
elif attr == 'mdate':
return parse_mtime(song['last-modified']).strftime('%x')
return song.get(attr, '')
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 2
color = False
color_pause = None
color_play = None
color_stop = None
format = '%state% [[[%artist%] - %title%]|[%file%]]'
hide_when_paused = False
hide_when_stopped = True
host = 'localhost'
max_width = 120
password = None
port = '6600'
state_pause = '[pause]'
state_play = '[play]'
state_stop = '[stop]'
def __init__(self):
self.text = ''
def _state_character(self, state):
if state == 'play':
return self.state_play
elif state == 'pause':
return self.state_pause
elif state == 'stop':
return self.state_stop
return '?'
def current_track(self, i3s_output_list, i3s_config):
try:
c = MPDClient()
c.connect(host=self.host, port=self.port)
if self.password:
c.password(self.password)
status = c.status()
song = int(status.get('song', 0))
next_song = int(status.get('nextsong', 0))
state = status.get('state')
if ((state == 'pause' and self.hide_when_paused) or
(state == 'stop' and self.hide_when_stopped)):
text = ''
else:
playlist_info = c.playlistinfo()
try:
song = playlist_info[song]
except IndexError:
song = {}
try:
next_song = playlist_info[next_song]
except IndexError:
next_song = {}
song['state'] = next_song['state'] \
= self._state_character(state)
def attr_getter(attr):
if attr.startswith('next_'):
return song_attr(next_song, attr[5:])
return song_attr(song, attr)
text, _ = parse_template(self.format, attr_getter)
except socket.error:
text = "Failed to connect to mpd!"
state = None
except CommandError:
text = "Failed to authenticate to mpd!"
state = None
c.disconnect()
else:
c.disconnect()
if len(text) > self.max_width:
text = text[:-self.max_width - 3] + '...'
if self.text != text:
transformed = True
self.text = text
else:
transformed = False
response = {
'cached_until': time.time() + self.cache_timeout,
'full_text': self.text,
'transformed': transformed
}
if self.color and state:
if state == 'play':
response['color'] = self.color_play or i3s_config['color_good']
elif state == 'pause':
response['color'] = (self.color_pause or
i3s_config['color_degraded'])
elif state == 'stop':
response['color'] = self.color_stop or i3s_config['color_bad']
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
config = {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00'
}
while True:
print(x.current_track([], config))
sleep(1)
|
Micronaet/micronaet-script | DropboxWebsite/dropbox.py | Python | agpl-3.0 | 4,978 | 0.010848 | ###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import erppeek
import shutil
import parameters # Micronaet: configuration file
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
# -----------------------------------------------------------------------------
# Parameters:
# -----------------------------------------------------------------------------
# ODOO connection:
odoo_server = parameters.odoo_server
odoo_port = parameters.odoo_port
odoo_user = parameters.odoo_user
odoo_password = parameters.odoo_password
odoo_database = parameters.odoo_database
# Dropbox:
demo = parameters.demo
samba_path = parameters.samba_path
dropbox_path = parameters.dropbox_path
print '''
Setup parameters:
ODOO: Connection: %s:%s DB %s utente: %s
Demo: %s
Samba folders: %s
Dropbox path: %s
''' % (
odoo_server,
odoo_port,
odoo_database,
odoo_user,
demo,
samba_path,
dropbox_path,
)
# -----------------------------------------------------------------------------
# UTILITY:
# ------------------------------------- | ----------------------------------------
def get_modify_date(fullname):
''' Return modify da | te for file
'''
modify_date = datetime.fromtimestamp(
os.stat(fullname).st_mtime).strftime('%Y-%m-%d')
return modify_date
# -----------------------------------------------------------------------------
# ODOO operation:
# -----------------------------------------------------------------------------
odoo = erppeek.Client(
'http://%s:%s' % (
odoo_server, odoo_port),
db=odoo_database,
user=odoo_user,
password=odoo_password,
)
# Pool used:
product_pool = odoo.model('product.product.web.server')
product_ids = product_pool.search([
('connector_id.wordpress', '=', True),
])
# Check elements:
#error = [] # Error database
#warning = [] # Warning database
#info = [] # Info database
#log = [] # Log database
#log_sym = [] # Log database for symlinks
#product_odoo = {}
# Only if new file (check how):
dropbox_root_path = os.path.expanduser(dropbox_path)
samba_root_path = os.path.expanduser(samba_path)
# -----------------------------------------------------------------------------
# Save current files (Dropbox folder):
# -----------------------------------------------------------------------------
current_files = []
for root, folders, files in os.walk(dropbox_root_path):
for f in files:
current_files.append(
os.path.join(root, f)
break # only first folder!
# -----------------------------------------------------------------------------
# Logg on all product image selected:
# -----------------------------------------------------------------------------
for product in product_pool.browse(product_ids):
for image in product.image_ids:
image_id = image.id
code = image.album_id.code
samba_relative_path = image.album_id.path # TODO dropbox_path
filename = product.filename
origin = os.path.(samba_relative_path, filename)
destination = os.path.(dropbox_root_path, '%s.%s' % (code, filename))
if destination in current_files:
current_files.remove(destination)
# Create symlink:
try:
os.symlink(origin, destination)
log_sym.append('CREATO: origin: %s destination: %s' % (
origin, destination))
except:
log_sym.append('ERRORE: origin: %s destination: %s' % (
origin, destination))
# Find dropbox link:
# Save dropbox link:
os.system('chmod 777 "%s" -R' % dropbox_path)
for filename in current_files:
os.rm(filename)
# file_modify = get_modify_date(fullname)
# os.system('mkdir -p "%s"' % product_folder)
print 'End operation'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
flingone/frameworks_base_cmds_remoted | libs/boost/tools/build/test/module_actions.py | Python | apache-2.0 | 2,354 | 0.000425 | #!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Demonstration that module variables have the correct effect in actions.
import BoostBuild
import os
import re
t = BoostBuild.Tester(["-d+1"], pass_toolset=0)
t.write("boost-build.jam", "boost-build . ;")
t.write("bootstrap.jam", """\
# Top-level rule causing a target to be built by invoking the specified action.
rule make ( target : sources * : act )
{
DEPENDS all : $(target) ;
DEPENDS $(target) : $(sources) ;
$(act) $(target) : $(sources) ;
}
X1 = X1-global ;
X2 = X2-global ;
X3 = X3-global ;
module A
{
X1 = X1-A ;
rule act ( target )
{
NOTFILE $(target) ;
ALWAYS $(target) ;
}
actions act { echo A.act $(<): $(X1) $(X2) $(X3) }
make t1 : : A.act ;
make t2 : : A.act ;
make t3 : : A.act ;
}
module B
{
X2 = X2-B ;
actions act { echo B.act $(<): $(X1) $(X2) $(X3) }
make t1 : : B.act ;
make t2 : : B.act ;
make t3 : : B.act ;
}
actions act { echo act $(<): $(X1) $(X2) $(X3) }
make t1 : : act ;
make t2 : : act ;
make t3 : : act ;
X1 on t1 = X1-t1 ;
X2 on t2 = X2-t2 ;
X3 on t3 = X3-t3 ;
DEPENDS all : t1 t2 t3 ;
""")
expected_lines = [
"...found 4 targets...",
"...updating 3 targets...",
"A.act | t1",
"A.act t1: X1-t1 ",
"B.act t1",
"B.act t1: X1-t1 X2-B ",
"act t1",
"act t1: X1-t1 X2-global X3-global ",
"A.act t2",
"A.act t2: X1-A X2-t2 ",
"B.act t2",
"B.act t2: X2-t2 ",
"act t2",
"act t2: X1-global X2-t2 X3-global ",
| "A.act t3",
"A.act t3: X1-A X3-t3 ",
"B.act t3",
"B.act t3: X2-B X3-t3 ",
"act t3",
"act t3: X1-global X2-global X3-t3 ",
"...updated 3 targets...",
""]
# Accommodate for the fact that on Unixes, a call to 'echo 1 2 3 '
# produces '1 2 3' (note the spacing).
if os.name != 'nt':
expected_lines = [re.sub(" +", " ", x.rstrip()) for x in expected_lines]
t.run_build_system()
t.expect_output_lines(expected_lines)
t.expect_nothing_more()
t.cleanup()
|
Orav/kbengine | kbe/src/lib/python/Tools/scripts/rgrep.py | Python | lgpl-3.0 | 1,542 | 0 | #! /usr/bin/env python3
"""Reverse grep.
Usage: rgrep [-i] pattern file
"""
import sys
import re
import getopt
def main():
bufsize = 64 * 1024
reflags = 0
opts, args = getopt.getopt(sys.argv[1:], "i")
for o, a in opts:
if o == '-i':
reflags = reflags | re.IGNORECASE
if len(args) < 2:
usage("not enough arguments")
if len(args) > 2:
usage("exactly one file argument required")
pattern, filename = args
try:
prog = re.compile(pattern, reflags)
except re.error as msg:
usage("error in regular expression: %s" % msg)
try:
f = open(filename)
except IOError | as msg:
usage("can't open %r: %s" % (filename, msg), 1)
f.seek(0, 2)
pos = f.tell()
leftover = None
while pos > 0:
size = min(pos, bufsize)
pos = pos - size
f.seek(pos)
buffer = f.read(size)
lines = buffer.split("\n")
del buffer
if leftover is None:
| if not lines[-1]:
del lines[-1]
else:
lines[-1] = lines[-1] + leftover
if pos > 0:
leftover = lines[0]
del lines[0]
else:
leftover = None
for line in reversed(lines):
if prog.search(line):
print(line)
def usage(msg, code=2):
sys.stdout = sys.stderr
print(msg)
print(__doc__)
sys.exit(code)
if __name__ == '__main__':
main()
|
tectronics/pipal | face.py | Python | gpl-3.0 | 34 | 0.029412 | #!/usr/bin/env python3
i | mport | cv2 |
Adarnof/allianceauth | allianceauth/srp/auth_hooks.py | Python | gpl-2.0 | 699 | 0 | from allianceauth.services.hooks import MenuItemHook, UrlHook
from allianceauth import | hooks
from . import urls
class SrpMenu(MenuItemHook):
    """Sidebar menu entry for the Ship Replacement Program (SRP) section."""

    def __init__(self):
        # Label, Font Awesome icon classes, target URL name and the URL
        # prefixes that keep the entry highlighted while navigating SRP pages.
        MenuItemHook.__init__(
            self,
            'Ship Replacement',
            'fa fa-money fa-fw',
            'srp:management',
            navactive=['srp:'],
        )

    def render(self, request):
        # Only users holding the SRP access permission see the menu item.
        if not request.user.has_perm('srp.access_srp'):
            return ''
        return MenuItemHook.render(self, request)
@hooks.register('menu_item_hook')
def register_menu():
    """Expose the SRP menu entry to the auth hook registry."""
    menu = SrpMenu()
    return menu
@hooks.register('url_hook')
def register_url():
    """Mount the SRP URL configuration under the /srp/ prefix."""
    hook = UrlHook(urls, 'srp', r'^srp/')
    return hook
|
neherlab/treetime | treetime/seq_utils.py | Python | mit | 14,020 | 0.013623 | import numpy as np
from Bio import Seq, SeqRecord
# Map user-facing alphabet names and common synonyms onto the four canonical
# keys ('nuc', 'nuc_nogap', 'aa', 'aa_nogap') used by `alphabets` below.
alphabet_synonyms = {'nuc':'nuc', 'nucleotide':'nuc', 'aa':'aa', 'aminoacid':'aa',
                     'nuc_nogap':'nuc_nogap', 'nucleotide_nogap':'nuc_nogap',
                     'aa_nogap':'aa_nogap', 'aminoacid_nogap':'aa_nogap',
                     'DNA':'nuc', 'DNA_nogap':'nuc_nogap'}

# Character sets for each canonical alphabet. The *_nogap variants omit the
# gap character '-'; the amino-acid alphabet additionally includes '*' (stop).
alphabets = {
    "nuc": np.array(['A', 'C', 'G', 'T', '-']),
    "nuc_nogap": np.array(['A', 'C', 'G', 'T']),
    "aa": np.array(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K',
                    'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V',
                    'W', 'Y', '*', '-']),
    "aa_nogap": np.array(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K',
                          'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V',
                          'W', 'Y'])
}
profile_maps = {
'nuc':{
'A': np.array([1, 0, 0, 0, 0], dtype='float'),
'C': np.array([0, 1, 0, 0, 0], dtype='float'),
'G': np.array([0, 0, 1, 0, 0], dtype='float'),
'T': np.array([0, 0, 0, 1, 0], dtype='float'),
'-': np.array([0, 0, 0, 0, 1], dtype='float'),
| 'N': np.array([1, 1, 1, 1, 1], dtype='float'),
'X': np.array([1, 1, 1, 1, 1], dtype='float'),
'R': np.array([1, 0, 1, 0, 0], dtype='float'),
'Y': np.array([0, 1, 0, 1, 0], dtype='float'),
'S': np.array([0, 1, 1, 0, 0], dtype='float'),
'W': np.array([1, 0, 0, 1, 0], dtype='float'),
'K': np.array([0, 0, 1, 1, 0], dtype='float'),
'M': np.array([1, 1, 0, 0, 0], dtype='float'),
'D': np.array([1, 0, 1, | 1, 0], dtype='float'),
'H': np.array([1, 1, 0, 1, 0], dtype='float'),
'B': np.array([0, 1, 1, 1, 0], dtype='float'),
'V': np.array([1, 1, 1, 0, 0], dtype='float')
},
'nuc_nogap':{
'A': np.array([1, 0, 0, 0], dtype='float'),
'C': np.array([0, 1, 0, 0], dtype='float'),
'G': np.array([0, 0, 1, 0], dtype='float'),
'T': np.array([0, 0, 0, 1], dtype='float'),
'-': np.array([1, 1, 1, 1], dtype='float'), # gaps are completely ignored in distance computations
'N': np.array([1, 1, 1, 1], dtype='float'),
'X': np.array([1, 1, 1, 1], dtype='float'),
'R': np.array([1, 0, 1, 0], dtype='float'),
'Y': np.array([0, 1, 0, 1], dtype='float'),
'S': np.array([0, 1, 1, 0], dtype='float'),
'W': np.array([1, 0, 0, 1], dtype='float'),
'K': np.array([0, 0, 1, 1], dtype='float'),
'M': np.array([1, 1, 0, 0], dtype='float'),
'D': np.array([1, 0, 1, 1], dtype='float'),
'H': np.array([1, 1, 0, 1], dtype='float'),
'B': np.array([0, 1, 1, 1], dtype='float'),
'V': np.array([1, 1, 1, 0], dtype='float')
},
'aa':{
'A': np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Alanine Ala
'C': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Cysteine Cys
'D': np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Aspartic AciD Asp
'E': np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Glutamic Acid Glu
'F': np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Phenylalanine Phe
'G': np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Glycine Gly
'H': np.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Histidine His
'I': np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Isoleucine Ile
'K': np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Lysine Lys
'L': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Leucine Leu
'M': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Methionine Met
'N': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #AsparagiNe Asn
'P': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Proline Pro
'Q': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Glutamine Gln
'R': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #ARginine Arg
'S': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], dtype='float'), #Serine Ser
'T': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype='float'), #Threonine Thr
'V': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype='float'), #Valine Val
'W': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], dtype='float'), #Tryptophan Trp
'Y': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype='float'), #Tyrosine Tyr
'*': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype='float'), #stop
'-': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], dtype='float'), #gap
'X': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype='float'), #not specified/any
'B': np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Asparagine/Aspartic Acid Asx
'Z': np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Glutamine/Glutamic Acid Glx
},
'aa_nogap':{
'A': np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Alanine Ala
'C': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Cysteine Cys
'D': np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Aspartic AciD Asp
'E': np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Glutamic Acid Glu
'F': np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Phenylalanine Phe
'G': np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Glycine Gly
'H': np.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Histidine His
'I': np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Isoleucine Ile
'K': np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Lysine Lys
'L': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Leucine Leu
'M': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Methionine Met
'N': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #AsparagiNe Asn
'P': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], dtype='float'), #Proline Pro
'Q': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], dtype='float'), #Glutamine Gln
'R': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype='float'), #ARginine Arg
'S': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype='float'), #Serine Ser
'T': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], dtype='float'), #Threonine Thr
'V': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype='float'), #Valine Val
'W': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype='float'), #Tryptophan Trp
'Y': np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], dtype='float'), #Tyrosine Tyr
'X': np. |
jpf/ftp-to-s3 | server.py | Python | mit | 3,319 | 0.000603 | import hashlib
import os
import threading
from Queue import Queue
import requests
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from konfig import Konfig
# Application configuration (AWS credentials, bucket, FTP account, endpoint).
konf = Konfig()

# Shared S3 connection/bucket used by all upload worker threads.
s3_connection = S3Connection(konf.aws_access_key_id,
                             konf.aws_secret_access_key)
s3_bucket = s3_connection.get_bucket(konf.aws_bucket_name)

# Queue of locally received file paths awaiting upload to S3.
job_queue = Queue()
# From: http://stackoverflow.com/a/3431835
def hashfile(afile, hasher, blocksize=65536):
    """Feed *afile* through *hasher* in fixed-size chunks.

    *afile* must be open for (binary) reading; returns the hex digest.
    """
    while True:
        chunk = afile.read(blocksize)
        if not chunk:
            break
        hasher.update(chunk)
    return hasher.hexdigest()
def process_file(filename):
    """Hash *filename*, upload it to S3 keyed by its SHA-1 digest, delete the
    local copy and POST the resulting (temporary) URL to the configured
    messaging endpoint."""
    # Write to log
    # NOTE(review): 'w+' truncates the log on every call, so only the most
    # recent file is ever recorded -- confirm whether append ('a') was meant.
    f = open('log', 'w+')
    f.write("Got file: {}".format(filename))
    f.close()
    # Calculate SHA-1 sum of file (name 'sum' shadows the builtin)
    sum = hashfile(open(filename, 'rb'), hashlib.sha1())
    print("SHA-1: {}".format(sum))
    # Upload to S3 under the digest (content-addressed key), world-readable
    s3_key = Key(s3_bucket)
    s3_key.key = sum
    s3_key.set_contents_from_filename(filename)
    s3_key.set_acl('public-read')
    url = s3_key.generate_url(expires_in=86400)  # 1 day
    print("File now in S3 at: {}".format(url))
    # Delete file from local disk now that it lives in S3
    os.unlink(filename)
    print("Deleted file: {}".format(filename))
    ## Send URL to specified endpoint
    payload = {'MediaUrls': url}
    r = requests.post(konf.messaging_request_url, data=payload)
    print("Request made: {}".format(r))
class FTPWorker(threading.Thread):
    """Worker thread that drains the job queue, uploading one file at a time."""

    def __init__(self, q):
        threading.Thread.__init__(self)
        # NOTE(review): the queue handed in here is stored but run() reads the
        # module-level job_queue instead -- confirm whether self.q is intended.
        self.q = q

    def run(self):
        print("Worker online")
        while True:
            print("Worker waiting for job ... %s" % str(job_queue.qsize()))
            filename = job_queue.get()
            print("Worker got job: %s, qsize: %s" % (
                filename,
                str(job_queue.qsize())))
            process_file(filename)
            # time.sleep(1)
            job_queue.task_done()
            print("Task done, qsize: %s" % str(job_queue.qsize()))
class FTPHandler(FTPHandler):
    """pyftpdlib handler that queues every completed upload for processing."""

    def on_file_received(self, filename):
        # Hand the received file to the FTPWorker threads via the shared queue.
        job_queue.put(filename)
def main():
    """Configure and run the FTP server; blocks until interrupted."""
    # Instantiate a dummy authorizer for managing 'virtual' users
    authorizer = DummyAuthorizer()
    # Define a new user having full r/w permissions and a read-only
    # anonymous user
    authorizer.add_user(konf.ftp_username,
                        konf.ftp_password,
                        'ftp/',
                        perm='elradfmwM')
    # authorizer.add_anonymous(os.getcwd())
    # Instantiate FTP handler class (the queueing subclass defined above)
    handler = FTPHandler
    handler.authorizer = authorizer
    # Define a customized banner (string returned when client connects)
    handler.banner = "pyftpdlib based ftpd ready."
    # Instantiate FTP server class and listen on 0.0.0.0:2121
    address = ('', 2121)
    server = FTPServer(address, handler)
    # set a limit for connections
    server.max_cons = 256
    server.max_cons_per_ip = 5
    # start ftp server (blocking call)
    server.serve_forever()
if __name__ == '__main__':
    # Start four daemon upload workers before serving FTP; daemon threads
    # terminate automatically when the main (server) thread exits.
    for i in range(0, 4):
        t = FTPWorker(job_queue)
        t.daemon = True
        t.start()
        print "Started worker"
    main()
|
iulian787/spack | var/spack/repos/builtin/packages/alquimia/package.py | Python | lgpl-2.1 | 2,700 | 0.00037 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Alquimia(CMakePackage):
    """Alquimia is an interface that exposes the capabilities
    of mature geochemistry codes such as CrunchFlow and PFLOTRAN"""

    homepage = "https://github.com/LBL-EESA/alquimia-dev"
    git = "https://github.com/LBL-EESA/alquimia-dev.git"

    maintainers = ['smolins', 'balay']

    version('develop')
    version('xsdk-0.6.0', commit='9a0aedd3a927d4d5e837f8fd18b74ad5a78c3821')
    version('xsdk-0.5.0', commit='8397c3b00a09534c5473ff3ab21f0e32bb159380')
    version('xsdk-0.4.0', commit='2edad6733106142d014bb6e6a73c2b21d5e3cf2d')
    version('xsdk-0.3.0', tag='xsdk-0.3.0')
    version('xsdk-0.2.0', tag='xsdk-0.2.0')

    variant('shared', default=True,
            description='Enables the build of shared libraries')

    depends_on('mpi')
    depends_on('hdf5')
    # Pin pflotran (and petsc) to the matching xSDK release.
    depends_on('pflotran@xsdk-0.6.0', when='@xsdk-0.6.0')
    depends_on('pflotran@xsdk-0.5.0', when='@xsdk-0.5.0')
    depends_on('pflotran@xsdk-0.4.0', when='@xsdk-0.4.0')
    depends_on('pflotran@xsdk-0.3.0', when='@xsdk-0.3.0')
    depends_on('pflotran@xsdk-0.2.0', when='@xsdk-0.2.0')
    depends_on('pflotran@develop', when='@develop')
    depends_on('petsc@3.10.0:3.10.99', when='@xsdk-0.4.0')
    depends_on('petsc@3.8.0:3.8.99', when='@xsdk-0.3.0')
    depends_on('petsc@xsdk-0.2.0', when='@xsdk-0.2.0')
    depends_on('petsc@3.10:', when='@develop')

    def cmake_args(self):
        """Assemble the CMake command-line options for this build."""
        spec = self.spec
        shared = 'ON' if '+shared' in spec else 'OFF'
        options = [
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
            '-DUSE_XSDK_DEFAULTS=YES',
            '-DBUILD_SHARED_LIBS:BOOL=%s' % shared,
            '-DTPL_ENABLE_MPI:BOOL=ON',
            '-DMPI_BASE_DIR:PATH=%s' % spec['mpi'].prefix,
            '-DTPL_ENABLE_HDF5:BOOL=ON',
            '-DXSDK_WITH_PFLOTRAN:BOOL=ON',
            # This is not good.
            # It assumes that the .a file exists and is not a .so
            '-DTPL_PFLOTRAN_LIBRARIES=%s' % (
                spec['pflotran'].prefix.lib + "/libpflotranchem.a"),
            '-DTPL_PFLOTRAN_INCLUDE_DIRS=%s' % (
                spec['pflotran'].prefix.include),
            '-DTPL_ENABLE_PETSC:BOOL=ON',
            '-DPETSC_EXECUTABLE_RUNS=ON',
            '-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % self.prefix,
        ]
        return options
|
wangyum/tensorflow | tensorflow/python/tools/saved_model_cli_test.py | Python | apache-2.0 | 15,971 | 0.00288 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModelCLI tool.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import pickle
import shutil
import sys
import numpy as np
from six import StringIO
from tensorflow.python.debug.wrapp | ers import local_cli_wrapper
from tensorflow.python.platform import test
from tensorflow.python.tools import saved_model_cli
SAVED_MODEL_PATH = ('cc/saved_model/testdata/half_plus_two/00000123')
@contextlib.contextmanager
def captured_output():
  """Temporarily replace sys.stdout/sys.stderr with in-memory buffers.

  Yields the pair (stdout_buffer, stderr_buffer); the real streams are
  restored on exit even if the body raises.
  """
  saved_out, saved_err = sys.stdout, sys.stderr
  try:
    sys.stdout, sys.stderr = StringIO(), StringIO()
    yield sys.stdout, sys.stderr
  finally:
    sys.stdout, sys.stderr = saved_out, saved_err
class SavedModelCLITestCase(test.TestCase):
def testShowCommandAll(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.parser = saved_model_cli.create_parser()
args = self.parser.parse_args(['show', '--dir', base_path, '--all'])
with captured_output() as (out, err):
saved_model_cli.show(args)
output = out.getvalue().strip()
# pylint: disable=line-too-long
exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['classify_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/classify
signature_def['classify_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/classify
signature_def['regress_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y2']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y2:0
Method name is: tensorflow/serving/regress
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['y'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/predict"""
# pylint: enable=line-too-long
self.assertMultiLineEqual(output, exp_out)
self.assertEqual(err.getvalue().strip(), '')
  def testShowCommandTags(self):
    """`show --dir` with no further flags lists the available tag-sets."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(['show', '--dir', base_path])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    exp_out = 'The given SavedModel contains the following tag-sets:\nserve'
    self.assertMultiLineEqual(output, exp_out)
    self.assertEqual(err.getvalue().strip(), '')
  def testShowCommandSignature(self):
    """`show --tag_set` lists the SignatureDef keys of that MetaGraph."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(
        ['show', '--dir', base_path, '--tag_set', 'serve'])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs '
                  'with the following keys:')
    exp_start = 'SignatureDef key: '
    exp_keys = [
        '"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"',
        '"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"'
    ]
    # Order of signatures does not matter
    self.assertMultiLineEqual(
        output,
        '\n'.join([exp_header] + [exp_start + exp_key for exp_key in exp_keys]))
    self.assertEqual(err.getvalue().strip(), '')
  def testShowCommandErrorNoTagSet(self):
    """An unknown tag-set makes `show` raise RuntimeError."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(
        ['show', '--dir', base_path, '--tag_set', 'badtagset'])
    with self.assertRaises(RuntimeError):
      saved_model_cli.show(args)
  def testShowCommandInputsOutputs(self):
    """`show --signature_def` prints the full input/output TensorInfo."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args([
        'show', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
        'serving_default'
    ])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    expected_output = (
        'The given SavedModel SignatureDef contains the following input(s):\n'
        'inputs[\'x\'] tensor_info:\n'
        ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: x:0\n'
        'The given SavedModel SignatureDef contains the following output(s):\n'
        'outputs[\'y\'] tensor_info:\n'
        ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: y:0\n'
        'Method name is: tensorflow/serving/predict')
    self.assertEqual(output, expected_output)
    self.assertEqual(err.getvalue().strip(), '')
  def testInputPreProcessFormats(self):
    """'name=file[variable]' specs parse into a {name: (file, var)} dict."""
    input_str = 'input1=/path/file.txt[ab3], input2=file2,,'
    input_dict = saved_model_cli.preprocess_input_arg_string(input_str)
    self.assertTrue(input_dict['input1'] == ('/path/file.txt', 'ab3'))
    self.assertTrue(input_dict['input2'] == ('file2', None))
  def testInputPreProcessQuoteAndWhitespace(self):
    """Surrounding quotes and stray whitespace are stripped while parsing."""
    input_str = '\' input1 = file[v_1]\', input2=file ["sd"] '
    input_dict = saved_model_cli.preprocess_input_arg_string(input_str)
    self.assertTrue(input_dict['input1'] == ('file', 'v_1'))
    self.assertTrue(input_dict['input2'] == ('file', 'sd'))
    self.assertTrue(len(input_dict) == 2)
def testInputPreProcessErrorBadFormat(self):
input_str = 'inputx=file[[v1]v2'
with self.assertRaises(RuntimeError):
saved_model_cli.preprocess_input_arg_string(input_str)
input_str = 'inputx:file'
with self.assertRaises(RuntimeError):
saved_model_cli.preprocess_input_arg_string(input_str)
input_str = 'inputx=file(v_1)'
with self.assertRaises(RuntimeError):
saved_model_cli.preprocess_input_arg_ |
comic/comic-django | app/grandchallenge/retina_api/mixins.py | Python | apache-2.0 | 1,522 | 0 | from django.conf import settings
from django.contrib.auth.mixins import AccessMixin
from rest_framework import permissions
def is_in_retina_graders_group(user):
    """Return True when *user* belongs to the retina graders group.

    :param user: Django User model
    :return: true/false
    """
    graders = user.groups.filter(name=settings.RETINA_GRADERS_GROUP_NAME)
    return graders.exists()
def is_in_retina_admins_group(user):
    """Return True when *user* belongs to the retina admins group.

    :param user: Django User model
    :return: true/false
    """
    admins = user.groups.filter(name=settings.RETINA_ADMINS_GROUP_NAME)
    return admins.exists()
def is_in_retina_group(user):
    """Return True when *user* is a retina grader or a retina admin.

    :param user: Django User model
    :return: true/false
    """
    # Checked in this order so graders (the common case) short-circuit first.
    return any(check(user) for check in (is_in_retina_graders_group,
                                         is_in_retina_admins_group))
class RetinaAPIPermission(permissions.BasePermission):
    """DRF permission for retina APIViews.

    Grants access only to members of the retina graders or admins group.
    """

    def has_permission(self, request, view):
        # Delegate the group membership check to the shared helper.
        return is_in_retina_group(request.user)
class RetinaAPIPermissionMixin(AccessMixin):
    """Mixin for plain Django views in the retina app.

    Verifies that the current user is a retina grader or admin before
    dispatching; otherwise the request is rejected via handle_no_permission.
    """

    def dispatch(self, request, *args, **kwargs):
        # Reject up front when the user lacks the required group membership.
        if is_in_retina_group(request.user):
            return super().dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
|
thefirstwind/s3qloss | src/s3ql/backends/common.py | Python | gpl-3.0 | 38,276 | 0.003919 | '''
common.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
from ..common import QuietError, BUFSIZE
from abc import ABCMeta, abstractmethod
from base64 import b64decode, b64encode
from cStringIO import StringIO
from contextlib import contextmanager
from functools import wraps
from getpass import getpass
from pycryptopp.cipher import aes
from s3ql.common import ChecksumError
import ConfigParser
import bz2
import cPickle as pickle
import errno
import hashlib
import hmac
import httplib
import logging
import lzma
import math
import os
import re
import socket
import stat
import struct
import sys
import threading
import time
import zlib
# Not available in every pycryptopp version
if hasattr(aes, 'start_up_self_test'):
aes.start_up_self_test()
log = logging.getLogger("backend")
HMAC_SIZE = 32
RETRY_TIMEOUT = 60 * 60 * 24
def retry(fn):
    '''Decorator for retrying a method on some exceptions

    If the decorated method raises an exception for which the instance's
    `is_temp_failure(exc)` method is true, the decorated method is called again
    at increasing intervals. If this persists for more than *timeout* seconds,
    the most-recently caught exception is re-raised.
    '''

    @wraps(fn)
    def wrapped(self, *a, **kw):
        interval = 1 / 50
        waited = 0
        while True:
            try:
                return fn(self, *a, **kw)
            except Exception as exc:
                # Access to protected member ok
                #pylint: disable=W0212
                if not self.is_temp_failure(exc):
                    raise
                if waited > RETRY_TIMEOUT:
                    log.error('%s.%s(*): Timeout exceeded, re-raising %r exception',
                              self.__class__.__name__, fn.__name__, exc)
                    raise

                log.info('Encountered %s exception (%s), retrying call to %s.%s...',
                         type(exc).__name__, exc, self.__class__.__name__, fn.__name__)

                # Honor a server-supplied retry delay when present.
                if hasattr(exc, 'retry_after') and exc.retry_after:
                    interval = exc.retry_after
                time.sleep(interval)
                waited += interval
                # Exponential back-off, capped at five minutes.
                interval = min(5*60, 2*interval)

    # Use (wrapped.__doc__ or '') so that decorating a function *without* a
    # docstring does not raise TypeError (None + str) at decoration time.
    wrapped.__doc__ = (wrapped.__doc__ or '') + '''
This method has been decorated and will automatically recall itself in
increasing intervals for up to s3ql.backends.common.RETRY_TIMEOUT seconds if it
raises an exception for which the instance's `is_temp_failure` method returns
True.
'''
    return wrapped
def is_temp_network_error(exc):
    '''Return true if *exc* represents a potentially temporary network problem'''

    if isinstance(exc, (httplib.IncompleteRead, socket.timeout)):
        return True

    # Server closed the connection mid-request (empty status line).
    if isinstance(exc, httplib.BadStatusLine) and (not exc.line or exc.line == "''"):
        return True

    transient_errnos = (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT,
                        errno.EINTR)
    if isinstance(exc, IOError) and exc.errno in transient_errnos:
        return True

    # Formally this is a permanent error. However, it may also indicate
    # that there is currently no network connection to the DNS server
    if isinstance(exc, socket.gaierror) and exc.errno in (socket.EAI_AGAIN,
                                                          socket.EAI_NONAME):
        return True

    return False
def http_connection(hostname, port, ssl=False):
    '''Return http connection to *hostname*:*port*

    This method honors the http_proxy and https_proxy environment
    variables.
    '''

    log.debug('Connecting to %s...', hostname)

    if 'https_proxy' in os.environ:
        proxy = os.environ['https_proxy']
        hit = re.match(r'^(https?://)?([a-zA-Z0-9.-]+)(:[0-9]+)?/?$', proxy)
        if not hit:
            # Fall back to a direct connection instead of crashing: the
            # previous code went on to call hit.group() on None here, raising
            # AttributeError for any unparseable proxy setting.
            log.warn('Unable to parse proxy setting %s', proxy)
        else:
            if hit.group(1) == 'https://':
                log.warn('HTTPS connection to proxy is probably pointless and not supported, '
                         'will use standard HTTP')
            if hit.group(3):
                proxy_port = int(hit.group(3)[1:])
            else:
                proxy_port = 80
            proxy_host = hit.group(2)
            log.info('Using proxy %s:%d', proxy_host, proxy_port)
            if ssl:
                conn = httplib.HTTPSConnection(proxy_host, proxy_port)
            else:
                conn = httplib.HTTPConnection(proxy_host, proxy_port)
            # Tunnel all requests through the proxy to the real target.
            conn.set_tunnel(hostname, port)
            return conn

    if ssl:
        return httplib.HTTPSConnection(hostname, port)
    else:
        return httplib.HTTPConnection(hostname, port)
def sha256(s):
    """Return the raw (binary) SHA-256 digest of *s*."""
    digest = hashlib.sha256(s)
    return digest.digest()
class BackendPool(object):
    '''A pool of backends

    This class is threadsafe. All methods (except for internal methods
    starting with underscore) may be called concurrently by different
    threads.
    '''

    def __init__(self, factory):
        '''Init pool

        *factory* should be a callable that provides new
        connections.
        '''
        self.factory = factory
        self.pool = []
        self.lock = threading.Lock()

    def pop_conn(self):
        '''Pop connection from pool'''
        with self.lock:
            # Reuse an idle connection when available, else create a new one.
            return self.pool.pop() if self.pool else self.factory()

    def push_conn(self, conn):
        '''Push connection back into pool'''
        with self.lock:
            self.pool.append(conn)

    @contextmanager
    def __call__(self):
        '''Provide connection from pool (context manager)'''
        conn = self.pop_conn()
        try:
            yield conn
        finally:
            # Always return the connection, even if the body raised.
            self.push_conn(conn)
class AbstractBackend(object):
'''Functionality shared between all backends.
Instances behave similarly to dicts. They can be iterated over and
indexed into, but raise a separate set of exceptions.
The backend guarantees get after create consistency, i.e. a newly created
object will be immediately retrievable. Additional consistency guarantees
may or may not be available and can be queried for with instance methods.
'''
__metaclass__ = ABCMeta
needs_login = True
def __init__(self):
super(AbstractBackend, self).__init__()
def __getitem__(self, key):
return self.fetch(key)[0]
def __setitem__(self, key, value):
self.store(key, value)
def __delitem__(self, key):
self.delete(key)
def __iter__(self):
return self.list()
def __contains__(self, key):
return self.contains(key)
def iteritems(self):
for key in self.list():
yield (key, self[key])
@retry
def perform_read(self, fn, key):
'''Read object data using *fn*, retry on temporary failure
Open object for reading, call `fn(fh)` and close object. If | a temporary error (as defined by
`is_temp_failure`) occurs during opening, closing or execution of *fn*, the operation is
retried.
'''
with self.open_read(key) as fh:
return fn(fh)
@retry
def perform_write(self, fn, key, metadata=None, is_compressed=False):
'''Read object da | ta using *fn*, retry on temporary failure
Open object for writing, call `fn(fh)` and close object. If a temporary error (as defined by
`is_temp_failure`) occurs during opening, closing or execution of *fn*, the operation is
retried.
'''
with self.open_write(key, metadata, is_compressed) as fh:
return fn(fh)
def fetch(self, key):
"""Return data stored under `key`.
Returns a tuple with the data and metadata. If only the data itself is
required, ``backend[key]`` is a more concise notation for
``backend.fetch(key)[0]``.
"""
def do_read(fh):
data = |
laserson/luigi | test/import_test.py | Python | apache-2.0 | 1,296 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License") | ;
# you may not use this file except in compliance with the License.
# You may obt | ain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from helpers import unittest
class ImportTest(unittest.TestCase):

    def import_test(self):
        """Test that every module under the luigi package can be imported."""
        luigidir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            '..'
        )
        packagedir = os.path.join(luigidir, 'luigi')
        for root, subdirs, files in os.walk(packagedir):
            # Build the dotted package name portably: the previous
            # .replace('/', '.') left backslashes untouched on Windows and
            # produced invalid module names there.
            package = os.path.relpath(root, luigidir).replace(os.sep, '.')
            if '__init__.py' in files:
                __import__(package)
            for f in files:
                if f.endswith('.py') and not f.startswith('_'):
                    __import__(package + '.' + f[:-3])
|
hxp2k6/https-github.com-stamparm-maltrail | trails/feeds/badips.py | Python | mit | 636 | 0.001572 | #!/usr/bin/env python
"""
Copyright (c) 2014-2015 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying pe | rmission
"""
from core.common import retrieve_content
__url__ = "https://www.badips.com/get/list/any/2?age=7d"
__check__ = ".1"
__info__ = "attacker"
__reference__ = "badips.com"
def fetch():
    """Return {ip: (category, reference)} for recently reported attackers."""
    retval = {}
    content = retrieve_content(__url__)

    # Sanity-check the feed payload before parsing it.
    if __check__ not in content:
        return retval

    for line in content.split('\n'):
        line = line.strip()
        # Skip blanks, comments and anything without a dot (not an address).
        if not line or line.startswith('#') or '.' not in line:
            continue
        retval[line] = (__info__, __reference__)

    return retval
|
WorldViews/Spirals | play/tweet.py | Python | mit | 6,221 | 0.003376 | from flask import Flask, request, redirect, url_for, session, flash, g, \
render_template
from flask_oauth import OAuth
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# configuration
#DATABASE_URI = 'sqlite:////tmp/flask-oauth.db'
DATABASE_URI = 'sqlite:///flask-oauth.db'
#DATABASE_URI = 'flask-oauth.db'
# NOTE(review): the hard-coded secret key and OAuth consumer credentials
# below are acceptable for a demo, but must come from configuration or the
# environment in any real deployment.
SECRET_KEY = 'development key'
DEBUG = True

# setup flask
app = Flask(__name__)
app.debug = DEBUG
app.secret_key = SECRET_KEY
oauth = OAuth()

# Use Twitter as example remote application
twitter = oauth.remote_app('twitter',
    # unless absolute urls are used to make requests, this will be added
    # before all URLs. This is also true for request_token_url and others.
    base_url='https://api.twitter.com/1/',
    # where flask should look for new request tokens
    request_token_url='https://api.twitter.com/oauth/request_token',
    # where flask should exchange the token with the remote application
    access_token_url='https://api.twitter.com/oauth/access_token',
    # twitter knows two authorization URLs. /authorize and /authenticate.
    # they mostly work the same, but for sign on /authenticate is
    # expected because this will give the user a slightly different
    # user interface on the twitter side.
    authorize_url='https://api.twitter.com/oauth/authenticate',
    # the consumer keys from the twitter application registry.
    consumer_key='xBeXxg9lyElUgwZT6AZ0A',
    consumer_secret='aawnSpNTOVuDCjx7HMh6uSXetjNN8zWLpZwCEU4LBrk'
)

# setup sqlalchemy (scoped session shared across requests)
engine = create_engine(DATABASE_URI)
print "got engine:", engine
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
print "db_session:", db_session
Base = declarative_base()
Base.query = db_session.query_property()
print "Base.query:", Base.query
print "Base.metadata:", Base.metadata
print "Base.metadata:", Base.metadata
def init_db():
    """Create all tables registered on Base.metadata in the configured database."""
    Base.metadata.create_all(bind=engine)
class User(Base):
    """A signed-in Twitter user with their persisted OAuth credentials."""
    __tablename__ = 'users'
    id = Column('user_id', Integer, primary_key=True)  # stored in column 'user_id'
    name = Column(String(60))           # Twitter screen name
    oauth_token = Column(String(200))   # OAuth access token
    oauth_secret = Column(String(200))  # OAuth access token secret
    def __init__(self, name):
        self.name = name
@app.before_request
def before_request():
    """Load the logged-in user (if any) from the session onto flask.g."""
    g.user = None
    if 'user_id' in session:
        g.user = User.query.get(session['user_id'])
@app.after_request
def after_request(response):
    """Release the scoped SQLAlchemy session at the end of each request."""
    db_session.remove()
    return response
@twitter.tokengetter
def get_twitter_token():
    """Return the stored (token, secret) pair for the signed-in user, or None.

    flask-oauth calls this whenever it needs credentials for an API call.
    During the authorization handshake a temporary token/secret pair is
    used; afterwards the values persisted on the User row are returned.
    If you don't want to store them in the database, the session would
    work as well.
    """
    current = g.user
    if current is None:
        return None
    return (current.oauth_token, current.oauth_secret)
@app.route('/')
def index():
    """Render the home page; includes the user's timeline when signed in."""
    tweets = None
    if g.user is not None:
        # The OAuth client signs this request with the tokengetter's credentials.
        resp = twitter.get('statuses/home_timeline.json')
        if resp.status == 200:
            tweets = resp.data
        else:
            flash('Unable to load tweets from Twitter. Maybe out of '
                  'API calls or Twitter is overloaded.')
    return render_template('index.html', tweets=tweets)
@app.route('/tweet', methods=['POST'])
def tweet():
    """Calls the remote twitter API to create a new status update."""
    # Require a signed-in user; after login we come straight back here.
    # (Repaired: this line was corrupted by dataset extraction junk
    # splitting `request.url`.)
    if g.user is None:
        return redirect(url_for('login', next=request.url))
    status = request.form['tweet']
    if not status:
        return redirect(url_for('index'))
    resp = twitter.post('statuses/update.json', data={
        'status': status
    })
    if resp.status == 403:
        flash('Your tweet was too long.')
    elif resp.status == 401:
        flash('Authorization error with Twitter.')
    else:
        flash('Successfully tweeted your tweet (ID: #%s)' % resp.data['id'])
    return redirect(url_for('index'))
@app.route('/login')
def login():
    """Calling into authorize will cause the OpenID auth machinery to kick
    in. When all worked out as expected, the remote application will
    redirect back to the callback URL provided.
    """
    # 'next' is threaded through the OAuth round-trip so oauth_authorized
    # can send the user back to where they started.
    return twitter.authorize(callback=url_for('oauth_authorized',
        next=request.args.get('next') or request.referrer or None))
@app.route('/logout')
def logout():
    """Drop the user id from the session and bounce back to the referrer."""
    session.pop('user_id', None)
    flash('You were signed out')
    return redirect(request.referrer or url_for('index'))
@app.route('/oauth-authorized')
@twitter.authorized_handler
def oauth_authorized(resp):
    """Called after authorization. After this function finished handling,
    the OAuth information is removed from the session again. When this
    happened, the tokengetter from above is used to retrieve the oauth
    token and secret.
    Because the remote application could have re-authorized the application
    it is necessary to update the values in the database.
    If the application redirected back after denying, the response passed
    to the function will be `None`. Otherwise a dictionary with the values
    the application submitted. Note that Twitter itself does not really
    redirect back unless the user clicks on the application name.
    """
    next_url = request.args.get('next') or url_for('index')
    if resp is None:
        flash(u'You denied the request to sign in.')
        return redirect(next_url)
    # Look up the user by their Twitter screen name.
    user = User.query.filter_by(name=resp['screen_name']).first()
    # user never signed on
    if user is None:
        user = User(resp['screen_name'])
        db_session.add(user)
    # in any case we update the authentication token in the db
    # In case the user temporarily revoked access we will have
    # new tokens here.
    user.oauth_token = resp['oauth_token']
    user.oauth_secret = resp['oauth_token_secret']
    db_session.commit()
    session['user_id'] = user.id
    flash('You were signed in')
    return redirect(next_url)
if __name__ == '__main__':
app.run()
|
pf4d/dolfin-adjoint | dolfin_adjoint/adjglobals.py | Python | lgpl-3.0 | 3,298 | 0.005458 | import coeffstore
import expressions
import caching
import libadjoint
from dolfin_adjoint import backend
if backend.__name__ == "dolfin":
import lusolver
# Create the adjointer, the central object that records the forward solve
# as it happens.
adjointer = libadjoint.Adjointer()
mem_checkpoints = set()
disk_checkpoints = set()
adj_variables = coeffstore.CoeffStore()
def adj_start_timestep(time=0.0):
    '''Dolfin does not supply us with information about timesteps, and so more information
    is required from the user for certain features. This function should be called at the
    start of the time loop with the initial time (defaults to 0).
    See also: :py:func:`dolfin_adjoint.adj_inc_timestep`
    '''
    # Only record time information while annotation is active.
    if not backend.parameters["adjoint"]["stop_annotating"]:
        adjointer.time.start(time)
def adj_inc_timestep(time=None, finished=False):
    '''Dolfin does not supply us with information about timesteps, and so more information
    is required from the user for certain features. This function should be called at
    the end of the time loop with two arguments:
      - :py:data:`time` -- the time at the end of the timestep just computed
      - :py:data:`finished` -- whether this is the final timestep.
    With this information, complex functional expressions using the :py:class:`Functional` class
    can be used.
    The finished argument is necessary because the final step of a functional integration must perform
    additional calculations.
    See also: :py:func:`dolfin_adjoint.adj_start_timestep`
    '''
    # Only record time information while annotation is active.
    if not backend.parameters["adjoint"]["stop_annotating"]:
        adj_variables.increment_timestep()
        if time is not None:
            adjointer.time.next(time)
        if finished:
            # Repaired: `finish()` was corrupted by dataset extraction junk.
            adjointer.time.finish()
# A dictionary that saves the functionspaces of all checkpoint variables that have been saved to disk
checkpoint_fs = {}
# Names of functions registered with dolfin-adjoint; cleared by adj_reset().
# Repaired: `set()` was corrupted by dataset extraction junk.
function_names = set()
def adj_check_checkpoints():
    """Ask libadjoint to verify the consistency of its checkpointing strategy."""
    adjointer.check_checkpoints()
def adj_reset_cache():
    """Clear all cached assembled forms and factorised solvers.

    Called from adj_reset() so that stale forms/solvers are never reused
    against a fresh annotation tape.
    """
    if backend.parameters["adjoint"]["debug_cache"]:
        backend.info_blue("Resetting solver cache")
    caching.assembled_fwd_forms.clear()
    caching.assembled_adj_forms.clear()
    caching.lu_solvers.clear()
    caching.localsolvers.clear()
    caching.pis_fwd_to_tlm.clear()
    caching.pis_fwd_to_adj.clear()
    if backend.__name__ == "dolfin":
        # Invalidate cached LU factorisations while keeping the lists' lengths.
        lusolver.lu_solvers = [None] * len(lusolver.lu_solvers)
        lusolver.adj_lu_solvers = [None] * len(lusolver.adj_lu_solvers)
def adj_html(*args, **kwargs):
    '''This routine dumps the current state of the adjglobals.adjointer to a HTML visualisation.
    Use it like:
      - adj_html("forward.html", "forward") # for the equations recorded on the forward run
      - adj_html("adjoint.html", "adjoint") # for the equations to be assembled on the adjoint run

    All arguments are forwarded to libadjoint's ``Adjointer.to_html``.
    '''
    return adjointer.to_html(*args, **kwargs)
def adj_reset():
    '''Forget all annotation, and reset the entire dolfin-adjoint state.'''
    adjointer.reset()
    expressions.expression_attrs.clear()
    adj_variables.__init__()
    function_names.__init__()
    adj_reset_cache()
    # Re-enable annotation for the next forward run.
    backend.parameters["adjoint"]["stop_annotating"] = False
# Map from FunctionSpace to LUSolver that has factorised the fsp mass matrix
fsp_lu = {}
|
lduarte1991/edx-platform | lms/djangoapps/verify_student/services.py | Python | agpl-3.0 | 1,283 | 0.001559 | """
Implementation of "reverification" service to communicate with Reverification XBlock
"""
import logging
from django.core.urlresolvers import reverse
from student.models import User
from .models import SoftwareSecurePhotoVerification
log = logging.getLogger(__name__)
class VerificationService(object):
    """
    Learner verification XBlock service
    """
    def get_status(self, user_id):
        """
        Returns the user's current photo verification status.
        Args:
            user_id: the user's id
        Returns: one of the following strings
            'none' - no such verification exists
            'expired' - verification has expired
            'approved' - verification has been approved
            'pending' - verification process is still ongoing
            'must_reverify' - verification has been denied and user must resubmit photos
        """
        # Repaired: both lines below were corrupted by dataset extraction junk
        # splitting `user_id` and `user`.
        user = User.objects.get(id=user_id)
        # TODO: provide a photo verification abstraction so that this
        # isn't hard-coded to use Software Secure.
        return SoftwareSecurePhotoVerification.user_status(user)
    def reverify_url(self):
        """
        Returns the URL for a user to verify themselves.
        """
        return reverse('verify_student_reverify')
|
pylayers/pylayers | pylayers/util/project.py | Python | mit | 10,692 | 0.020675 | #-*- coding:Utf-8 -*-
from __future__ import print_function
"""
.. curentmodule:: pylayers.util.project
.. autosummary::
"""
import numpy as np
import os
import sys
import shutil
import pkgutil
import pdb
import seaborn as sns
import logging
class PyLayers(object):
    """ Generic PyLayers Meta Class

    Provides a small interactive ``help`` utility shared by PyLayers
    classes that inherit from it.
    """
#    sns.set_style("white")
    def help(self,letter='az',typ='mt'):
        """ generic help

        Parameters
        ----------
        letter : string
            a single letter, or a two-letter range such as 'az', used to
            filter names by their first character
        typ : string
            'mb' | 'mt'
            mb : print members
            mt : print methods (first docstring line of each)
        """
        # members: names stored on the instance; lmeth: all attributes, sorted.
        members = [ x for x in self.__dict__.keys() if x not in dict.__dict__ ]
        lmeth = [ x for x in np.sort(dir(self)) if x not in dict.__dict__]
        if typ=='mb':
            print(np.sort(self.__dict__.keys()))
        if typ=='mt':
            for s in lmeth:
                # Only non-private methods (skip members and names starting with '_').
                if s not in members:
                    if s[0]!='_':
                        if len(letter)>1:
                            # Two-letter argument: treat as an inclusive/exclusive range.
                            if (s[0]>=letter[0])&(s[0]<letter[1]):
                                try:
                                    # Print the first line of the docstring, if any.
                                    doc = eval('self.'+s+'.__doc__').split('\n')
                                    print(s+': '+ doc[0])
                                except:
                                    pass
                        else:
                            if (s[0]==letter[0]):
                                try:
                                    doc = eval('self.'+s+'.__doc__').split('\n')
                                    print(s+': '+ doc[0])
                                except:
                                    pass
def _writedotpylayers(typ,path):
    """ write .pylayers file

    Parameters
    ----------
    typ: string
        source : update the path to the pylayers' source directory
        project : update the path to the pylayers' project directory
    path : string
        path to typ

    NOTE(review): the file is opened in append mode, so repeated calls add
    duplicate (typ, path) pairs rather than replacing the old entry; the
    commented-out code below is the former replace-in-place logic.
    """
    home = os.path.expanduser('~')
#    with open(os.path.join(home,'.pylayers'),'r') as f:
#        lines = f.readlines()
    with open(os.path.join(home,'.pylayers'),'a') as f:
        f.write(typ+'\n')
        f.write(path+'\n')
        # replaceline=False
        # for l in lines:
        #     if replaceline :
        #         f.write(path+"\n")
        #         replaceline=False
        #     elif typ in l:
        #         f.write(l)
        #         replaceline=True
        #     else:
        #         f.write(l)
home = os.path.expanduser('~')
currentdir = os.getcwd()
#if .pylayers exists
if os.path.isfile(os.path.join(home,'.pylayers')):
with open(os.path.join(home,'.pylayers'),'r') as f:
lines = f.readlines()
#''.join... to remove the '\n' character
pylayersdir = ''.join(lines[1].splitlines())
basename = ''.join(lines[3].splitlines())
# BACKWARD COMPATIBILITY MODE (from now .pylayers is create each install)
else:
if os.getenv('PYLAYERS') != None:
pylayersdir = os.getenv('PYLAYERS')
_writedotpylayers('source',pylayersdir)
print('PYLAYERS environement variable detected: ~/.pylayers updated')
else :
raise EnvironmentError('pylayers source path not found. Try to re-run setup.py')
if os.getenv('BASENAME') != None:
basename = os.getenv('BASENAME')
_writedotpylayers('project',basename)
print('BASENAME environement variable detected: ~/.pylayers updated')
else :
raise EnvironmentError('pylayers source path not found. Try to re-run setup.py')
# =======
# # if os.path.isfile(os.path.join(home,'.pylayers')):
# # with open(os.path.join(home,'.pylayers'),'r') as f:
# # lines = f.readlines()
# # #[:-1] to remove the '\n' character
# # pylayersdir = lines[1][:-1]
# # basename = lines[3]
# # else :
# try:
# pylayersdir = os.environ['PYLAYERS']
# except:
# pylayersdir = currentdir.split('pylayers')[0] + 'pylayers'
# if pylayersdir[-1] == '/' or pylayersdir[-1] == '\\':
# pylayersdir = pylayersdir[:-1]
# if len(pylayersdir) == 1:
# raise EnvironmentError('Please verify that pylayers sources are into the "pylayers/" directory')
# try:
# basename = os.environ['BASENAME']
# except:
# raise EnvironmentError('Please position an environement variable $BASENAME where your pylayers project will be hosted')
# >>>>>>> master
try:
mesdir = os.environ['MESDIR']
except:
mesdir = os.path.join(basename ,'meas')
try:
datadir = os.environ['DATADIR']
except:
datadir = os.path.join(basename, 'meas')
try:
os.path.isdir(os.path.join(basename ,'figures'))
except:
os.mkdir(os.path.join(basename,'figures'))
# Dictionnary which associate PYLAYERS environment variable with sub directories
# of the project
#
pstruc = {}
pstruc['DIRSIMUL'] ='ini'
pstruc['DIRWRL'] =os.path.join('struc','wrl')
pstruc['DIRLAY'] =os.path.join('struc','lay')
pstruc['DIROSM'] =os.path.join('struc','osm')
pstruc['DIRFUR'] = os.path.join('struc','furnitures')
pstruc['DIRIMAGE'] = os.path.join('struc','images')
pstruc['DIRPICKLE'] = os.path.join('struc','gpickle')
pstruc['DIRRES'] = os.path.join('struc','res')
pstruc['DIRSTR'] = os.path.join('struc','str')
pstruc['DIRSLAB'] = 'ini'
pstruc['DIRSLAB2'] = 'ini'
pstruc['DIRMAT'] = 'ini'
pstruc['DIRMAT2'] = 'ini'
pstruc['DIRANT'] = 'ant'
pstruc['DIRTRA'] = 'output'
pstruc['DIRLCH'] = 'output'
pstruc['DIRTUD'] = 'output'
pstruc['DIRTx'] = os.path.join('output','Tx001')
pstruc['DIRGEOM'] = 'geom'
pstruc['DIRTRA'] = 'output'
pstruc['DIRCIR'] = 'output'
pstruc['DIRMES'] = 'meas'
pstruc['DIRNETSAVE'] = 'netsave'
# pstruc['DIRSIG'] = os.path.join('output','sig')
pstruc['DIRR2D'] = os.path.join('output','r2d')
pstruc['DIRR3D'] = os.path.join('output','r3d')
pstruc['DIRCT'] = os.path.join('output','Ct')
pstruc['DIRH'] = os.path.join('output','H')
pstruc['DIRLNK'] = 'output'
pstruc['DIRBODY'] = 'body'
pstruc['DIRGIS'] = 'gis'
pstruc['DIRC3D'] = os.path.join('body','c3d')
pstruc['DIROOSM'] = os.path.join('gis','osm')
pstruc['DIRWEAR'] = os.path.join('body','wear')
# if basename directory does not exit it is created
try:
os.chdir(basename)
except:
print("Create directory " + basename)
| os.mkdir(basename)
#
# write file project.conf
#
fd = open(os.path.join(basename,'project.conf'),'w')
fd.close()
#for nm in pstruc.keys():
for nm,nv in pstruc.items():
dirname = os.path.join(basename , pstruc[nm])
if not 'win' in sys.platform: |
spl = nv.split('/') # never again a variable called sp
else:
spl = nv.split('\\') # never again a variable called sp
if len(spl)>1:
if not os.path.isdir(os.path.join(basename ,spl[0])):
os.mkdir(os.path.join(basename ,spl[0]))
os.mkdir(os.path.join(basename,nv))
print("create ",os.path.join(basename ,nv))
else:
if not os.path.isdir(os.path.join(basename ,nv)):
os.mkdir(os.path.join(basename ,nv))
print("create ",os.path.join(basename ,nv))
else :
if not os.path.isdir(dirname):
try:
os.mkdir(dirname)
except:
# dictionnary is not necessarly ordonned !
# parent directory may not be created
dirtmp= os.path.dirname(dirname)
os.mkdir(dirtmp)
os.mkdir(dirname)
print("create ",dirname)
# try:
# os.chdir(dirname)
# os.chdir('..')
# except:
# pdb.set_trace()
# sp = nv.split('/')
# if len(sp)>1:
# try:
# os.chdir(basename + '/'+sp[0])
# os.chdir('..')
# except:
# os.mkdir(basename + '/'+sp[0])
# os.chdir(basename + '/'+sp[0])
# os.mkdir(basename + '/'+sp[1])
# os.chdir('..')
# else:
# print "create "+ dirname
# os.mkdir(dirname)
# os.chdir('..')
if nm == 'DIRANT':
antdir = dirname
if nm == 'DIRFUR':
furdir = dirname
if nm == 'DIRGEOM':
geomdir = dirname
if nm == 'DIRLCH':
lchdir = dirname
if nm == 'DIRTUD':
|
HarmeetDhillon/Socket-Programming | Client.py | Python | gpl-2.0 | 561 | 0.026738 | from so | cket import *
import sys
clientSocket = socket(AF_INET, SOCK_STREAM) #creates socket
server_address = ('127.0.0.1', 80)#create connection at this given port
print >>sys.stderr, 'CONNECTING TO %s AT PORT %s' % server_address
clientSocket.connect(server_address)#connect to server at given address
filename=raw_input("ENTER THE FILENAME: ")
f = open(filename[0:])
outputdata = f.read()#read the input file into variable
print "HTML CODE OF THE GIVEN FILE:", outputdata #display the html code of the | file
clientSocket.close() #close the connection
|
amaas-fintech/amaas-core-sdk-python | tests/unit/monitor/item.py | Python | apache-2.0 | 1,147 | 0.003487 | from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import unittest
from amaascore.monitor.item import Item
from amaascore.tools.generate_monitor_item import generate_item
class ItemTest(unittest.TestCase):
    """Unit tests for the monitor Item model and its dict/JSON serialisation."""
    def setUp(self):
        self.longMessage = True  # Print complete error message on failure
        self.item = generate_item()
        self.item_id = self.item.item_id
    def tearDown(self):
        pass
    def test_Item(self):
        self.assertEqual(type(self.item), Item)
    def test_ItemToDict(self):
        item_dict = self.item.__dict__
        self.assertEqual(type(item_dict), dict)
        self.assertEqual(item_dict.get('item_id'), self.item_id)
    def test_ItemToJSON(self):
        # Repaired: `self.item_id` and `json.dumps` were corrupted by dataset
        # extraction junk.
        item_json = self.item.to_json()
        self.assertEqual(item_json.get('item_id'), self.item_id)
        # If item_json is valid JSON, this will run without serialisation errors
        item_json_id = json.loads(json.dumps(item_json, ensure_ascii=False)).get('item_id')
        self.assertEqual(item_json_id, self.item_id)
|
arnomoonens/DeepRL | yarll/functionapproximation/tile_coding.py | Python | mit | 2,943 | 0.005097 | # -*- coding: utf8 -*-
from typing import List, Tuple
import numpy as np
from yarll.functionapproximation.function_approximator import FunctionApproximator
class TileCoding(FunctionApproximator):
    """Map 2-d states to tiles across several randomly offset tilings."""
    def __init__(self, x_low, x_high, y_low, y_high, n_tilings: int, n_y_tiles: int, n_x_tiles: int, n_actions: int) -> None:
        super(TileCoding, self).__init__(n_actions)
        self.x_low = x_low
        self.x_high = x_high
        # Repaired: the next two assignments were corrupted by dataset
        # extraction junk splitting `self`.
        self.y_low = y_low
        self.y_high = y_high
        self.n_x_tiles = n_x_tiles
        self.n_y_tiles = n_y_tiles
        self.n_tilings = n_tilings
        self.n_actions = n_actions
        if self.n_x_tiles % 1 != 0 or self.n_x_tiles <= 0:
            raise TypeError("Number of x tiles must be a positive natural number instead of {}".format(self.n_x_tiles))
        if self.n_y_tiles % 1 != 0 or self.n_y_tiles <= 0:
            raise TypeError("Number of y tiles must be a positive natural number instead of {}".format(self.n_y_tiles))
        self.tile_width = (self.x_high - self.x_low) / self.n_x_tiles
        self.tile_height = (self.y_high - self.y_low) / self.n_y_tiles
        self.tiling_width = self.tile_width * self.n_x_tiles
        self.tiling_height = self.tile_height * self.n_y_tiles
        self.tile_starts: List[Tuple[float, float]] = []
        # Each tiling starts at a random offset that is a fraction of the tile width and height
        for _ in range(self.n_tilings):
            self.tile_starts.append((
                self.x_low + np.random.rand() * self.tile_width,
                self.y_low + np.random.rand() * self.tile_height))
        self.features_shape = (self.n_tilings, self.n_y_tiles, self.n_x_tiles, self.n_actions)
        self.thetas = np.random.uniform(size=self.features_shape)  # Initialise randomly with values between 0 and 1
    def summed_thetas(self, state, action):
        """Theta values for features present for state and action."""
        summed = 0
        for i in range(self.n_tilings):
            shifted = state - self.tile_starts[i]  # Subtract the randomly chosen offsets
            x, y = shifted
            if (x >= 0 and x <= self.tiling_width) and (y >= 0 and y <= self.tiling_height):
                summed += self.thetas[i][int(y // self.tile_height)][int(x // self.tile_width)][action]
        return summed
    def present_features(self, state, action):
        """Features that are active for the given state and action."""
        result = np.zeros(self.thetas.shape)  # By default, all of them are inactive
        for i in range(self.n_tilings):
            shifted = state - self.tile_starts[i]
            x, y = shifted
            if(x >= 0 and x <= self.tiling_width) and(y >= 0 and y <= self.tiling_height):
                # Set the feature to active
                result[i][int(y // self.tile_height)][int(x // self.tile_width)][action] = 1
        return result
|
gizela/gizela | gizela/data/PointListSimple.py | Python | gpl-3.0 | 2,885 | 0.020104 | # gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <tomas.kubin@fsv.cvut.cz>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id: PointListSimple.py 68 2010-08-19 09:42:00Z tomaskubin $
'''
class for list of points without finding by id and without duplicity handling
'''
from gizela.util.Error import Error
from gizela.text.TextTable import TextTable
from gizela.data.PointBase import PointBase
class PointListSimpleError(Error): pass
class PointListSimple(object):
"""List of geodetic points CoordBase without searching by id"""
__slots__ = ["_list", "_textTable"]
def _get_textTable(self): return self._textTable
def _set_textTable(self,textTable):
if isinstance(textTable, TextTable):
self._textTable = textTable
for point in self._list: point.textTable = textTable
else:
raise PointListSimpleError, "TextTable instance expected"
textTable = property(_get_textTable, _set_textTable)
def __init__(self, textTable):
'''duplicity ... what to do with duplicit point
textTable ... format of table for text output
'''
self._list = [] # list of points
self.textTable = textTable # TextTable instance
def add_point(self, point):
'''adds CoordBase instance into list'''
if isinstance(point,PointBase):
self._list.append(point)
else:
raise PointListSimpleError("Requires PointBase or its inheritance")
# set text table
point.textTable = self._textTable
def __len__(self):
'''number of points in dictionary'''
return len(self._list)
def __iter__(self):
"""iterator"""
#return iter(self.pointDict)
#self.idi=0 # iterator reset
#return self
return iter(self._list)
#return iter(self._index)
def make_table(self):
"""makes text table of points in dictionary"""
if not self._list:
return "Empty pointList"
return self._list[0].make_header() +\
"".joi | n([point.make_table_row() for point in self._list]) +\
self._list[0].make_footer()
def __str__(self):
return self.make_table()
if __name__ = | = "__main__":
from gizela.data.PointCart import PointCart
c1 = PointCart(id="C",x=1,y=2,z=3)
c2 = PointCart(id="B",x=4,y=5,z=6)
#pd=PointList(textTable=c1.get_text_table())
# setting text table accoridng to PointCart instance
pd=PointListSimple(textTable=TextTable([("ID","%2s"),("XX","%2i"),("YY","%2i"),("ZZ","%2i")]))
print pd
pd.add_point(c1)
pd.add_point(c2)
pd.add_point(PointCart(id="A",x=7,y=8,z=9))
print pd
print "C" in pd
print "c" in pd
for point in pd: print point
|
huggingface/pytorch-transformers | tests/test_modeling_wav2vec2.py | Python | apache-2.0 | 23,117 | 0.002855 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Wav2Vec2 model. """
import math
import unittest
from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
from transformers import is_torch_available
from transformers.testing_utils import require_datasets, require_soundfile, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, _config_zero_init
if is_torch_available():
import torch
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2Model, Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
class Wav2Vec2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=4,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,  # this is most likely not correctly set yet
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        vocab_size=32,
        do_stable_layer_norm=False,
        scope=None,
    ):
        """Store test hyper-parameters and derive the expected encoder sequence length."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.scope = scope
        # Each conv layer shortens the sequence: (len - (kernel - 1)) / stride.
        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length
    def prepare_config_and_inputs(self):
        """Create a small Wav2Vec2Config plus random input values and attention mask."""
        input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = Wav2Vec2Config(
            hidden_size=self.hidden_size,
            feat_extract_norm=self.feat_extract_norm,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
        )
        return config, input_values, attention_mask
    def create_and_check_model(self, config, input_values, attention_mask):
        """Build a Wav2Vec2Model in eval mode and check the hidden-state output shape."""
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = Wav2Vec2Model(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in | range(len(input_le | ngths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
    def check_ctc_loss(self, config, input_values, *args):
        """Check that mean CTC loss times the label count approximates the sum CTC loss."""
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        # make sure that dropout is disabled
        model.eval()
        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0
        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss
        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss
        self.parent.assertTrue(abs(labels.shape[0] * labels.shape[1] * mean_loss.item() - sum_loss.item()) < 1e-3)
def check_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = Wav2Vec2ForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_extractor()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
|
thauser/pnc-cli | test/integration/test_buildconfigsetrecords_api.py | Python | apache-2.0 | 1,504 | 0.00266 | import pytest
__author__ = 'thauser'
from pnc_cli.swagger_client.apis import BuildconfigsetrecordsApi
from test import testutils
import pnc_cli.user_config as uc
bcsr_api = None
@pytest.fixture(scope='function', autouse=True)
def init_api():
    """Recreate the BuildconfigsetrecordsApi client before every test."""
    global bcsr_api
    bcsr_api = BuildconfigsetrecordsApi(uc.user.get_api_client())
def test_get_all_invalid_param():
    # Unknown keyword arguments to get_all must raise TypeError.
    testutils.assert_raises_typeerror(bcsr_api, 'get_all')
def test_get_all():
    """Fetch all build config set records and make sure content comes back."""
    bcrsets = bcsr_api.get_all(page_index=0, page_size=1000000, sort='', q='').content
    assert bcrsets is not None
def test_get_specific_no_id():
    # id=None must be rejected by client-side validation with ValueError.
    testutils.assert_raises_valueerror(bcsr_api, 'get_specific', id=None)
def test_get_specific_invalid_param():
    # Unknown keyword arguments to get_specific must raise TypeError.
    testutils.assert_raises_typeerror(bcsr_api, 'get_specific', id=1)
def test_get_specific():
    """Fetch one record by id taken from the full listing."""
    bcrsets = bcsr_api.get_all(page_index=0, page_size=1000000, sort='', q='').content
    bcrset = bcsr_api.get_specific(bcrsets[1].id).content
    assert bcrset is not None
def test_get_build_records_no_id():
    # id=None must be rejected by client-side validation with ValueError.
    # Repaired: 'get_build_records' was corrupted by dataset extraction junk.
    testutils.assert_raises_valueerror(bcsr_api, 'get_build_records', id=None)
def test_get_build_records_invalid_param():
    # Unknown keyword arguments to get_build_records must raise TypeError.
    testutils.assert_raises_typeerror(bcsr_api, 'get_build_records', id=1)
@pytest.mark.xpass(reason='none of the default record sets contain build records, and there is no api to add them.')
def test_get_build_records():
    """Expect no build records for the default record sets (see xpass reason)."""
    # Repaired: the function name was corrupted by dataset extraction junk.
    records = bcsr_api.get_build_records(id=1, page_index=0, page_size=1000000, sort='', q='').content
    assert records is None
|
chen2aaron/SnirteneCodes | StarterLearningPython/208-namespaces-2.py | Python | gpl-2.0 | 303 | 0.0033 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by xixijun
# Date: 15-5-7
# Blog: morningchen.com
def outer_foo():
a = 10
def inner_foo():
| global a
a = 20
print "inne | r_foo: a=", a
# inner_foo()
print "outer_foo: a=", a
a = 30
outer_foo()
print "a=", a
|
kgorman/WMG_speed | app/app.py | Python | mit | 5,559 | 0.004317 | #!/usr/bin/env python
from flask import Flask
from flask import render_template
import pandas as pd
import numpy as np
import datetime as datetime
app = Flask(__name__)
if not app.debug:
import logging
file_handler = logging.FileHandler('error.log')
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
def int_to_dow(dayno):
    """Convert an integer (0=Monday .. 6=Sunday) into its weekday name."""
    names = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
             'Friday', 'Saturday', 'Sunday')
    return names[int(dayno)]
def create_graph_strings(input_list):
    # Stub: not implemented yet; currently always returns None.
    return None
def get_raw_data():
    """Load the traffic CSV and parse its 'date' column as datetimes."""
    # NOTE(review): the helpers below expect columns date, peak_speed,
    # vehicle_count, violator_count, hour_of_day and weekday -- confirm
    # against the actual CSV.
    file_name = "static/files/all_data.csv"
    dataframe = pd.read_csv(file_name, header=0)
    dataframe['date'] = pd.to_datetime(dataframe['date'])
    return dataframe
def get_max_speed(df):
    """Largest observed peak speed, as a plain float."""
    speeds = df['peak_speed']
    return float(max(speeds))
def get_vehicle_count(df):
    """Total number of vehicles observed, as a plain float."""
    counts = df['vehicle_count']
    return float(sum(counts))
def get_violator_count(df):
    """Total number of speed violators observed, as a plain float."""
    counts = df['violator_count']
    return float(sum(counts))
def get_avg_speed(df):
    """Mean peak speed, rounded to two decimal places."""
    mean_speed = np.mean(df['peak_speed'])
    return round(mean_speed, 2)
def get_over_limit(df):
    """How far the mean speed sits above the 30 mph limit (negative if under)."""
    mean_speed = get_avg_speed(df)
    return (30 - mean_speed) * -1
def get_timeseries_by_year(df):
    """Aggregate vehicle and violator counts per year.

    Returns [keys_str, vehicle_str, violator_str], each a comma-separated
    string consumed by the front-end chart code.

    NOTE(review): the local names say "by_month" but the grouping key is
    the year -- confirm the intended granularity.
    """
    # Derive a string 'year' column from the parsed datetime column.
    df['year'] = df.date.map(lambda x: '{year}'.format(year=x.year))
    grouped = df.sort(['year'], ascending=1).groupby(['year'])  # legacy pandas sort API
    vehicle_count_by_month = grouped.aggregate(np.sum)['vehicle_count']
    violator_count_by_month = grouped.aggregate(np.sum)['violator_count']
    keys = vehicle_count_by_month.index.get_values()
    # convert to specially formatted strings
    vehicle_count_by_month_l = [str(i) for i in list(vehicle_count_by_month.get_values())]
    violator_count_by_month_l = [str(i) for i in list(violator_count_by_month.get_values())]
    keys_l = [str(i) for i in list(keys)]
    vehicle_count_by_month_str = ','.join(vehicle_count_by_month_l)
    violator_count_by_month_str = ','.join(violator_count_by_month_l)
    keys_str = ",".join(keys_l)
    return [keys_str, vehicle_count_by_month_str, violator_count_by_month_str]
def get_speed_by_hour(df):
    """Aggregate mean and max peak speed per hour of day.

    Returns [keys_str, mean_speed_str, max_speed_str], each a
    comma-separated string for the front-end charts.

    NOTE: reconstructed from a corrupted copy; the two damaged lines were
    restored by parallel with get_speed_by_day() below.
    """
    grouped = df.sort(['hour_of_day'], ascending=1).groupby(['hour_of_day'])  # legacy pandas sort API
    mean_speed = grouped.aggregate(np.mean)['peak_speed']
    max_speed = grouped.aggregate(np.max)['peak_speed']
    keys = mean_speed.index.get_values()
    # Stringify every series so the template can embed them verbatim.
    mean_speed_l = [str(i) for i in list(mean_speed.get_values())]
    max_speed_l = [str(i) for i in list(max_speed.get_values())]
    keys_l = [str(i) for i in list(keys)]
    mean_speed_str = ','.join(mean_speed_l)
    max_speed_str = ','.join(max_speed_l)
    keys_str = ",".join(keys_l)
    return [keys_str, mean_speed_str, max_speed_str]
def get_speed_by_day(df):
    """Aggregate mean and max peak speed per weekday.

    Returns [keys_str, mean_speed_str, max_speed_str].  The keys string is
    single-quoted ('Monday','Tuesday',...) so the template can paste it
    straight into a JavaScript label array.
    """
    grouped = df.sort(['weekday'], ascending=0).groupby(['weekday'])  # legacy pandas sort API
    mean_speed = grouped.aggregate(np.mean)['peak_speed']
    max_speed = grouped.aggregate(np.max)['peak_speed']
    keys = mean_speed.index.get_values()
    mean_dow_l = [str(i) for i in list(mean_speed.get_values())]
    max_dow_l = [str(i) for i in list(max_speed.get_values())]
    # Translate numeric weekday keys (0 == Monday, see int_to_dow) to names.
    dow_keys_l = [int_to_dow(i) for i in list(keys)]
    mean_speed_str = ','.join(mean_dow_l)
    max_speed_str = ','.join(max_dow_l)
    # Wrap each day name in single quotes for the chart label list.
    keys_str = "','".join(dow_keys_l)
    keys_str = "'"+keys_str+"'"
    return [keys_str, mean_speed_str, max_speed_str]
def car_count_by_hour(df):
    """Aggregate vehicle and violator counts per hour of day.

    Returns [keys_str, car_count_str, violator_count_str], each a
    comma-separated string for the front-end charts.
    """
    grouped = df.sort(['date'], ascending=0).groupby(['hour_of_day'])  # legacy pandas sort API
    # NOTE(review): car_count uses np.mean while violator_count uses
    # np.max -- confirm the mixed aggregations are intentional.
    car_count = grouped.aggregate(np.mean)['vehicle_count']
    violator_count = grouped.aggregate(np.max)['violator_count']
    keys = car_count.index.get_values()
    car_count_l = [str(i) for i in list(car_count.get_values())]
    violator_count_l = [str(i) for i in list(violator_count.get_values())]
    keys_l = [str(i) for i in list(keys)]
    car_count_str = ','.join(car_count_l)
    violator_count_str = ','.join(violator_count_l)
    keys_str = ",".join(keys_l)
    return [keys_str, car_count_str, violator_count_str]
@app.route("/")
def dashboard():
df = get_raw_data()
violator_pct = round((get_violator_count(df)/get_vehicle_count(df)*100), 2)
violator_graph = get_timeseries_by_year(df)
speed_graph = get_speed_by_hour(df)
dow_graph = get_speed_by_day(df)
car_count_graph = car_count_by_hour(df)
return render_template('index.html',
car_count=get_vehicle_count(df),
violator_count=get_violator_count(df),
violator_pct=violator_pct,
max_speed=get_max_speed(df),
avg_speed=get_avg_speed(df),
over_limit=get_over_limit(df),
ts_labels=violator_graph[0],
ts_vehicle=violator_graph[1],
ts_violator=violator_graph[2],
ts_speed_labels=speed_graph[0],
ts_mean_speed_data=speed_graph[1],
ts_max_speed_data=speed_graph[2],
ts_dow_labels=dow_graph[0],
ts_dow_mean=dow_graph[1],
ts_dow_max=dow_graph[2],
ts_car_count_labels=car_count_graph[0],
ts_car_count_count=car_count_graph[1],
ts_car_count_violators=car_count_graph[2]
)
@app.route("/about")
def about():
return render_template('about.html')
@app.route("/contact")
def contact():
return render_template('contact.html')
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
RazerM/logbook | logbook/ticketing.py | Python | bsd-3-clause | 19,231 | 0.000312 | # -*- coding: utf-8 -*-
"""
logbook.ticketing
~~~~~~~~~~~~~~~~~
Implements long handlers that write to remote data stores and assign
each logging message a ticket id.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from time import time
import json
from logbook.base import NOTSET, level_name_property, LogRecord
from logbook.handlers import Handler, HashingHandlerMixin
from logbook.helpers import cached_property, b, PY2, u
class Ticket(object):
    """Represents a ticket from the database.

    Column values from the backing row are copied straight into the
    instance ``__dict__``, so attributes such as ``ticket_id``, ``level``
    and ``solved`` mirror the backend's ticket schema.
    """
    # Derived property that maps the numeric ``level`` to its name.
    level_name = level_name_property()

    def __init__(self, db, row):
        # db: backend used for further queries; row: mapping of column
        # name -> value for this ticket.
        self.db = db
        self.__dict__.update(row)

    @cached_property
    def last_occurrence(self):
        """The last occurrence, or None when the ticket has none."""
        rv = self.get_occurrences(limit=1)
        if rv:
            return rv[0]

    def get_occurrences(self, order_by='-time', limit=50, offset=0):
        """Returns the occurrences for this ticket."""
        return self.db.get_occurrences(self.ticket_id, order_by, limit, offset)

    def solve(self):
        """Marks this ticket as solved (in the backend and locally)."""
        self.db.solve_ticket(self.ticket_id)
        self.solved = True

    def delete(self):
        """Deletes the ticket from the database."""
        self.db.delete_ticket(self.ticket_id)

    # Silence DeprecationWarning; also leaves instances unhashable since
    # __eq__ is defined below.
    __hash__ = None

    def __eq__(self, other):
        # NOTE(review): only keys present on *self* are compared, so the
        # comparison is asymmetric when attribute sets differ, and the
        # ``db`` reference participates too -- confirm this is intended.
        equal = True
        for key in self.__dict__.keys():
            if getattr(self, key) != getattr(other, key):
                equal = False
                break
        return equal

    def __ne__(self, other):
        return not self.__eq__(other)
class Occurrence(LogRecord):
    """Represents an occurrence of a ticket.

    Rebuilds a LogRecord from the JSON blob stored in the row's ``data``
    column and attaches the database bookkeeping fields.
    """
    def __init__(self, db, row):
        # The serialized record fields live in the JSON 'data' column.
        self.update_from_dict(json.loads(row['data']))
        self.db = db
        self.time = row['time']
        self.ticket_id = row['ticket_id']
        self.occurrence_id = row['occurrence_id']
class BackendBase(object):
    """Provides an abstract interface to various databases.

    Concrete subclasses receive backend-specific keyword options and must
    implement every method below.
    """

    def __init__(self, **options):
        # Stash the options, then let the subclass wire up its storage.
        self.options = options
        self.setup_backend()

    def setup_backend(self):
        """Setup the database backend."""
        raise NotImplementedError()

    def record_ticket(self, record, data, hash, app_id):
        """Records a log record as ticket."""
        raise NotImplementedError()

    def count_tickets(self):
        """Returns the number of tickets."""
        raise NotImplementedError()

    def get_tickets(self, order_by='-last_occurrence_time',
                    limit=50, offset=0):
        """Selects tickets from the database."""
        raise NotImplementedError()

    def solve_ticket(self, ticket_id):
        """Marks a ticket as solved."""
        raise NotImplementedError()

    def delete_ticket(self, ticket_id):
        """Deletes a ticket from the database."""
        raise NotImplementedError()

    def get_ticket(self, ticket_id):
        """Return a single ticket with all occurrences."""
        raise NotImplementedError()

    def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
        """Selects occurrences from the database for a ticket."""
        raise NotImplementedError()
class SQLAlchemyBackend(BackendBase):
"""Implements a backend that is writing into a database SQLAlchemy can
interface.
This backend takes some additional options:
`table_prefix`
an optional table prefix for | all tables created by
the logbook ticketing handler.
`metadata`
an optional SQLAlchemy metadata object for the table creation.
`autocreate_tables`
can be set to `False` to disable the automatic
creation of the logbook tables.
"""
def setup_backend(self):
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker, scoped_session
engine_or_uri = self.options.pop('uri', None)
metadata = self.options.pop('metadata', None)
table_prefix = self.options.pop('table_prefix', 'logbook_')
if hasattr(engine_or_uri, 'execute'):
self.engine = engine_or_uri
else:
# Pool recycle keeps connections from going stale,
# which happens in MySQL Databases
# Pool size is more custom for out stack
self.engine = create_engine(engine_or_uri, convert_unicode=True,
pool_recycle=360, pool_size=1000)
# Create session factory using session maker
session = sessionmaker()
# Bind to the engined
session.configure(bind=self.engine)
# Scoped session is a thread safe solution for
# interaction with the Database
self.session = scoped_session(session)
if metadata is None:
metadata = MetaData()
self.table_prefix = table_prefix
self.metadata = metadata
self.create_tables()
if self.options.get('autocreate_tables', True):
self.metadata.create_all(bind=self.engine)
def create_tables(self):
"""Creates the tables required for the handler on the class and
metadata.
"""
import sqlalchemy as db
def table(name, *args, **kwargs):
return db.Table(self.table_prefix + name, self.metadata,
*args, **kwargs)
self.tickets = table('tickets',
db.Column('ticket_id', db.Integer,
primary_key=True),
db.Column('record_hash', db.String(40),
unique=True),
db.Column('level', db.Integer),
db.Column('channel', db.String(120)),
db.Column('location', db.String(512)),
db.Column('module', db.String(256)),
db.Column('last_occurrence_time', db.DateTime),
db.Column('occurrence_count', db.Integer),
db.Column('solved', db.Boolean),
db.Column('app_id', db.String(80)))
self.occurrences = table('occurrences',
db.Column('occurrence_id',
db.Integer, primary_key=True),
db.Column('ticket_id', db.Integer,
db.ForeignKey(self.table_prefix +
'tickets.ticket_id')),
db.Column('time', db.DateTime),
db.Column('data', db.Text),
db.Column('app_id', db.String(80)))
def _order(self, q, table, order_by):
if order_by[0] == '-':
return q.order_by(table.c[order_by[1:]].desc())
return q.order_by(table.c[order_by])
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
# Can use the session instead engine.connection and transaction
s = self.session
try:
q = self.tickets.select(self.tickets.c.record_hash == hash)
row = s.execute(q).fetchone()
if row is None:
row = s.execute(self.tickets.insert().values(
record_hash=hash,
level=record.level,
channel=record.channel or u(''),
location=u('%s:%d') % (record.filename, record.lineno),
module=record.module or u('<unknown>'),
occurrence_count=0,
solved=False,
app_id=app_id
))
ticket_id = row.inserted_primary_key[0]
else:
ticket_id = row['ticket_id']
s.execute(self.occurrences.insert()
.values(ticket_id=ticket_id,
time=record.time,
|
openstack/ironic | ironic/objects/__init__.py | Python | apache-2.0 | 1,737 | 0 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(comstud): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Node,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.
def register_all():
    """Import every ironic.objects submodule so its objects register."""
    # NOTE(danms): You must make sure your object gets imported in this
    # function in order for it to be registered by services that may
    # need to receive it via RPC.
    __import__('ironic.objects.allocation')
    __import__('ironic.objects.bios')
    __import__('ironic.objects.chassis')
    __import__('ironic.objects.conductor')
    __import__('ironic.objects.deploy_template')
    __import__('ironic.objects.deployment')
    __import__('ironic.objects.node')
    __import__('ironic.objects.node_history')
    __import__('ironic.objects.port')
    __import__('ironic.objects.portgroup')
    __import__('ironic.objects.trait')
    __import__('ironic.objects.volume_connector')
    __import__('ironic.objects.volume_target')
gregbdunn/aws-ec2rescue-linux | tools/moduletests/unit/test_selinuxpermissive.py | Python | apache-2.0 | 8,783 | 0.004213 | # Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the selinuxpermissive module
"""
import os
import sys
import unittest
import mock
import moduletests.src.selinuxpermissive
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class Testselinuxpermissive(unittest.TestCase):
config_file_path = "/etc/selinux/config"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
def test_detect_no_selinux(self, isfile_mock):
self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
def test_detect_problem(self, isfile_mock):
self.assertTrue(moduletests.src.selinuxpermissive.detect(self.config_file_path))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=permissive"))
def test_detect_noproblem(self, isfile_mock):
self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
def test_fix_success(self):
self.assertTrue(moduletests.src.selinuxpermissive.fix(self.config_file_path))
@mock.patch("moduletests.src.selinuxpermissive.open", side_effect=IOError)
def test_fix_exception(self, open_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.selinuxpermissive.fix, self.config_file_path)
self.assertEqual(self.output.getvalue(), "[WARN] Unable to replace contents of /etc/selinux/config\n")
self.assertTrue(open_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", side_effe | ct=(True, False))
| @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
def test_run_success_fixed(self, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.selinuxpermissive.run())
self.assertTrue("[SUCCESS] selinux set to permissive" in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.detect", return_value=False)
def test_run_success(self, detect_mock, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.selinuxpermissive.run())
self.assertTrue("[SUCCESS] selinux is not set to enforcing" in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
def test_run_failure_isfile(self,
restore_mock,
fix_mock,
backup_mock,
isfile_mock,
detect_mock,
config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
self.assertTrue(restore_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=IOError)
@mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
def test_run_failure_exception(self, restore_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue(self.output.getvalue().endswith("Review the logs to deter |
gilch/drython | drython/expression.py | Python | apache-2.0 | 13,683 | 0.000292 | # Copyright 2016 Matthew Egan Odendahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module exports a set of expression replacement functions.
`In` substitutes for generator expressions (thus comprehensions also).
`generator` substitutes for `yield from` and `yield` in cases where
it would be incompatible with the statement module.
`Elif/Else`substitutes for nested `if`/`else`. (The expression form of
`if` lacks `elif`.)
Operator functions are already available in Python's included
`operator` module, so they are not provided here.
`entuple`, `enlist`, `enset`, and `edict` subsitute for tuple, list,
set, and dictionary displays. `efset` makes a frozenset, and `ordict`
makes an OrderedDict.
Unlike statements, expressions already work in lambdas and eval,
so why replace them too?
Besides being easier to use with higher-order functions, the stack
and s-expression modules work primarily with function calls, so these
substitutes have uses in metaprogramming. In many cases you can use
expressions directly anyway, or convert a non-call expression to a
call with a lambda, but sometimes you need to manipulate the code of
the expression itself, in which case it must be made of calls to
begin with.
The simple case of addition illustrates the three styles.
>>> from core import identity
>>> from s_expression import S
>>> from operator import add
When used directly it's like a constant as far as S is concerned.
>>> S(identity,1+2)()
3
Wrap in lambda and you can change the arguments
>>> S(lambda x,y:x+y,1,2)()
3
function call version is more natural for s-expressions
>>> S(add,1,2)()
3
A more advanced case with generator expressions.
>>> from expression import entuple; from macro import L1
Direct use acts like a constant
>>> S(identity,[(x,y) for x in (1,2) for y in 'abc'])()
[(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c')]
lambda version is adjustable with arguments.
>>> S(lambda z:[(x,y) for x in (1,2) for y in z],'abc')()
[(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c')]
>>> S(list, # function call version using expression.In
... S(In,(1,2),S(L1,S.x,
... S(In,'abc',S(L1,S.y,
... S(entuple,S(entuple,S.x,S.y)))))))()
[(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c')]
Why use the function call version when it's so much harder? Besides
the new `whilst` feature, the main advantage here is that you can
simplify it with a macro.
>>> from s_expression import macro
>>> @macro
... def genx(expr,*specs):
... if specs:
... return S(In,specs[1],S(L1,specs[0],S(genx,expr,*specs[2:])))
... else:
... return S(entuple,expr)
Now we've got generator s-expressions with arguments in familiar
Python order.
>>> S(list,
... S(genx, S(entuple, S.x, S.y), S.x, (1, 2), S.y, 'abc'))()
[(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c')]
A more advanced macro could include Python's other features like `if`
filters and unpacking. But more importantly, since you can
metaprogram this, you can add new features in the macro that raw
Python lacks, like whilst.
"""
import threading
import weakref
from collections import OrderedDict
from functools import wraps
from itertools import chain
import sys
from drython.core import partition
from drython.statement import Atom, Pass, Print
if sys.version_info[0] == 2: # pragma: no cover
import Queue as Q
else: # pragma: no cover
import queue as Q
# entuple = unstar(tuple)
def entuple(*args):
    """Return the positional arguments as a tuple.

    >>> entuple(1, 2, 3)
    (1, 2, 3)
    """
    # *args is already collected into a tuple by the call machinery, so it
    # can be returned directly (tuple(t) would hand back the same object).
    return args
# enlist = unstar(list)
def enlist(*args):
    """Return the positional arguments as a list.

    (Reconstructed from a corrupted copy: the return expression had a
    stray separator inside ``args``.)

    >>> enlist(1, 2, 3)
    [1, 2, 3]
    """
    return list(args)
# enset = unstar(set)
def enset(*args):
    """Return the positional arguments collected into a set.

    >>> enset(1, 2, 3) == {1, 2, 3}
    True
    """
    return {element for element in args}
# efset = unstar(frozenset)
def efset(*args):
    """Return the positional arguments as a frozenset.

    (Reconstructed from a corrupted copy: the ``def`` line had a stray
    separator inside the function name.)

    >>> efset(1, 2, 3) == frozenset([1, 2, 3])
    True
    """
    return frozenset(args)
def edict(*args, **kwargs):
    """
    pairs args and makes a dictionary with them

    >>> edict(1, 2)
    {1: 2}
    >>> edict(1, 2, 3, 4, 5, 6)[3]
    4
    >>> edict(1, 2,
    ...       3, 4) == {1: 2, 3: 4}
    True

    kwargs become string keys

    >>> edict(1,2, c=3) == {1:2, 'c':3}
    True
    """
    # partition() pairs consecutive positional args; keyword items are
    # chained on afterwards, so a kwarg wins over an equal positional key.
    return dict(chain(partition(args), kwargs.items()))
def ordict(*args):
    """
    pairs args (in order) and makes an OrderedDict with them

    >>> ordict(1,2, 3,4)
    OrderedDict([(1, 2), (3, 4)])
    """
    # Like edict() for positional args, but pair insertion order is kept.
    return OrderedDict(partition(args))
def In(target_list, comp_lambda):
    """Generator expressions built from function calls (list-monad bind).

    Iterates *target_list*, applies *comp_lambda* to each element, and
    yields every item of the iterable the lambda returns.  The lambda must
    ALWAYS return an iterable, even in the innermost position, which is
    what lets ``In`` calls nest like multi-``for`` comprehensions:

    >>> list(In('ab', lambda c:
    ...      In('xy', lambda d:
    ...          (c+d,) )))
    ['ax', 'ay', 'bx', 'by']

    Wrap the result with list()/set()/dict() to stand in for the
    corresponding comprehension display.
    """
    # A manual double loop is used instead of
    # itertools.chain.from_iterable(map(comp_lambda, target_list))
    # because whilst() signals early exit by raising StopIteration,
    # which chain cannot handle.
    for element in target_list:
        for produced in comp_lambda(element):
            yield produced
# the name "While" was already taken.
def whilst(b, x):
"""
Like using a takewhile in comprehensions. It aborts the remainder
of the iterable.
But unlike a StopIteration, the remaining other loops continue.
>>> from itertools import takewhile
>>> [(x,y) for x in takewhile(lambda x:x<3,range(10))
... for y in takewhile(lambda y:y<2,range(10))]
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
>>> list(In(range(10),lambda x:
... whilst(x<3, In(range(10), lambda y:
... whilst(y<2,((x,y),))))))
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
Notice that y has to be bound twice in the
list-comprehension/takewhile version, but not using In/whilst.
>>> [x+y for x in 'abc' for y in takewhile(lambda y: x!=y,'zabc')]
['az', 'bz', 'ba', 'cz', 'ca', 'cb']
>>> list(In('abc',lambda x:
... In('zabc',lambda y:
... whilst(x!=y, (x+y,) ))))
['az', 'bz', 'ba', 'cz', 'ca', 'cb']
This is different than if (or `when` inside `In`), which kee |
DistributedComponents/verdi-aggregation | extraction/aggregation-dynamic/script/client.py | Python | bsd-2-clause | 2,030 | 0.002463 | import socket
import re
import uuid
from struct import pack, unpack
from select import select
def poll(sock, timeout):
    """Return True if *sock* becomes readable within *timeout* seconds."""
    readable, _, _ = select([sock], [], [], timeout)
    return sock in readable
class SendError(Exception):
    """Raised when a length-prefixed message cannot be fully sent."""
    pass
class ReceiveError(Exception):
    """Raised when the peer closes the stream before a full message."""
    pass
class Client(object):
    """TCP client for the aggregation node protocol.

    Messages are framed with a 4-byte little-endian length prefix.

    Fixes in this revision: the corrupted ``re_level_response`` /
    ``__init__`` lines are reconstructed; ``send_msg`` uses sendall() so
    partial writes can no longer silently truncate a frame; ``recv_msg``
    no longer shadows the builtin ``len``.
    """

    re_aggregate_response = re.compile(r'AggregateResponse[^0-9-]+(-?[0-9]+)')
    re_level_response = re.compile(r'LevelResponse[^0-9-]+([0-9]+|-)')

    def __init__(self, host, port, sock=None):
        # An existing socket may be injected (useful for testing);
        # otherwise a fresh TCP connection is opened to host:port.
        if not sock:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((host, port))
        else:
            self.sock = sock

    def send_msg(self, msg):
        """Send *msg* preceded by its 4-byte length prefix.

        Raises SendError if the frame cannot be delivered.
        """
        try:
            self.sock.sendall(pack("<I", len(msg)))
            self.sock.sendall(msg)
        except socket.error:
            raise SendError

    def recv_msg(self, re):
        """Read one length-prefixed message and parse it with pattern *re*.

        Raises ReceiveError if the peer closes the stream early.
        """
        # Note: the parameter deliberately keeps its historical name 're',
        # shadowing the module, since callers pass a compiled pattern.
        len_bytes = self.sock.recv(4)
        if not len_bytes:
            raise ReceiveError
        msg_len = unpack("<I", len_bytes)[0]
        data = self.sock.recv(msg_len)
        if not data:
            raise ReceiveError
        return self.parse_response(data, re)

    def send_local(self, local):
        """Set the node's local aggregation value."""
        self.send_msg('Local' + ' ' + str(local))

    def send_send_aggregate(self):
        """Ask the node to forward its aggregate to its parent."""
        self.send_msg('SendAggregate')

    def send_broadcast(self):
        """Ask the node to broadcast."""
        self.send_msg('Broadcast')

    def send_aggregate_request(self):
        """Request the current aggregate and return the parsed value."""
        self.send_msg('AggregateRequest')
        return self.recv_msg(self.re_aggregate_response)

    def send_level_request(self):
        """Request the node's level; returns None when unknown ('-')."""
        self.send_msg('LevelRequest')
        return self.recv_msg(self.re_level_response)

    def deserialize(self, data):
        """'-' encodes None on the wire; everything else passes through."""
        if data == '-':
            return None
        return data

    def parse_response(self, data, re):
        """Extract and deserialize the first capture group of *re*."""
        try:
            match = re.match(data)
            return self.deserialize(match.group(1))
        except Exception as e:
            # Parenthesized print works under both Python 2 and 3.
            print("Parse error, data=%s" % data)
            raise e
|
EDRN/labcas-backend | workflows/src/main/resources/rnaseq/pges/rnaseq-task1.py | Python | apache-2.0 | 2,942 | 0.009857 | # python script to execute RNA Sequence Pipeline Task #1
# usage: python rnaseq-pipeline-task1.py --num_threads <num_threads> --gene_transcript_file <gene_transcript_file> --genome_index <genome_index >
# --data_dir <data_dir> --output_dir <output_dir> --sample_id <sample_id>
# example: python rnaseq-pipeline- | task1.py --num_threads 24 --gene_transcript_file genes.gtf --genome_index genome
# --data_dir /data/EDRN --output_dir thout --sample_id ERR164503
import argparse
import os
INPUT_FILE_EXTENSIONS = ['.sra','.fastq','.gtf','.fa','.bt2']
if __name__ == '__main__':
# parse command line arguments
parser = argparse.ArgumentParser(description="RNA Sequen | ce Pipeline Task #1")
parser.add_argument('--num_threads', dest='num_threads', type=int, help="Number of THreads", default=1)
parser.add_argument('--gene_transcript_file', dest='gene_transcript_file', type=str, help="Gene Transcript File", default=None)
parser.add_argument('--genome_index', dest='genome_index', type=str, help="Genome index", default=None)
parser.add_argument('--data_dir', dest='data_dir', type=str, help="Data directory", default=None)
parser.add_argument('--output_dir', dest='output_dir', type=str, help="Output directory", default=None)
parser.add_argument('--sample_id', dest='sample_id', type=str, help="Sample id", default=None)
args_dict = vars( parser.parse_args() )
print 'Number of threads=%s' % args_dict['num_threads']
print'Gene transcripit file=%s' % args_dict['gene_transcript_file']
print'Genome index=%s' % args_dict['genome_index']
print'Data Directory=%s' % args_dict['data_dir']
print'Output directory=%s' % args_dict['output_dir']
print'Sample Id=%s' % args_dict['sample_id']
# tophat -p 24 -G genes.gtf -o thout genome ERR164503_1.fastq ERR164503_2.fastq
command = "tophat -p %s -G %s -o %s %s" % (args_dict['num_threads'],
args_dict['gene_transcript_file'],
args_dict['output_dir'],
args_dict['genome_index'])
# symlink or copy input data into working directory
# look for .fastq files in sample directory
input_dir = os.path.join(args_dict['data_dir'], args_dict['sample_id'])
for f in os.listdir(input_dir):
input_file = os.path.join(input_dir, f)
if os.path.isfile(input_file):
file_name, file_extension = os.path.splitext(f)
if file_extension == '.fastq':
command += " %s" % f
if file_extension in INPUT_FILE_EXTENSIONS:
if not os.path.exists(f):
# symlink other files
os.symlink(input_file, f)
# execute command
print "Executing command: %s" % command
os.system( command ) |
MakingMexico/web_page | makingmx/settings/base.py | Python | gpl-3.0 | 3,343 | 0.001197 | """
Django settings for makingmx project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# This file lives in makingmx/settings/, so two dirname() calls put
# BASE_DIR at the makingmx package directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): os.getenv returns None when SECRET_KEY is unset, which makes
# Django refuse to start -- confirm every environment exports SECRET_KEY.
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Our apps
    'web_page',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'makingmx.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'makingmx.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
# '../static' compensates for BASE_DIR pointing at the inner package dir.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, '../static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = (
    os.path.join(BASE_DIR, "../media")
)
|
shantanu69/pygame-physics | PyParticleSystem/PyParticles/utils.py | Python | mit | 330 | 0.027273 | import math
def add_vectors(vector1, vector2):
    """Return the sum of two vectors given in polar form.

    Each vector is a tuple ``(angle, magnitude)``; the angle is measured
    from the positive y axis (x uses sin, y uses cos).
    """
    # Resolve both vectors into cartesian components and sum them.
    total_x = math.sin(vector1[0]) * vector1[1] + math.sin(vector2[0]) * vector2[1]
    total_y = math.cos(vector1[0]) * vector1[1] + math.cos(vector2[0]) * vector2[1]
    # Convert the combined components back to polar form.
    combined_magnitude = math.hypot(total_x, total_y)
    combined_angle = (math.pi / 2) - math.atan2(total_y, total_x)
    return (combined_angle, combined_magnitude)
CatherineH/adolphus | adolphus/yamlparser.py | Python | gpl-3.0 | 13,944 | 0.002438 | """\
YAML parser module.
@author: Aaron Mavrinac
@organization: University of Windsor
@contact: mavrin1@uwindsor.ca
@license: GPL-3
"""
import os
import yaml
from math import pi
import pkg_resources
from itertools import chain
from .robot import Robot
from .solid import Solid
from .laser import RangeModel
from .coverage import PointCache, Model
from .tensor import CameraTensor, TensorModel
from .posable import OcclusionTriangle, SceneObject
from .geometry import Angle, Point, DirectionalPoint, Pose, Rotation, Quaternion
modeltypes = {'standard': Model, 'range': RangeModel, 'tensor': TensorModel}
class YAMLParser(object):
"""\
YAML experiment parser class.
"""
    def __init__(self, filename):
        """\
        Constructor. Parses an experiment from YAML.

        @param filename: The YAML file to load from.
        @type filename: C{str}
        """
        # Remember the directory of the experiment file so external file
        # references inside it can be resolved relative to it.
        self._path = os.path.split(filename)[0]
        self._mounts = {}
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary YAML tags; acceptable for trusted experiment files, but
        # consider yaml.safe_load if input may be untrusted.
        experiment = yaml.load(open(filename))
        try:
            modeltype = modeltypes[experiment['type']]
        except KeyError:
            # No 'type' key, or an unknown type name: fall back to the
            # standard coverage model.
            modeltype = modeltypes['standard']
        self.model = self._parse_model(experiment['model'], modeltype)
        self.tasks = {}
        if 'tasks' in experiment:
            for task in experiment['tasks']:
                self.tasks[task['name']] = self._parse_task(task, modeltype)
    @property
    def experiment(self):
        """\
        Tuple containing the coverage model and task models for this experiment.

        @return: (coverage model, dict mapping task name to parsed task).
        @rtype: C{tuple}
        """
        return self.model, self.tasks
@staticmethod
def _external_path(basepath, filename):
"""\
Return the path to an external file specified inside another file.
Defaults to searching the package resources if the file is not found.
@param basepath: The current base path context.
@type basepath: C{str}
@param filename: The filename of the external file.
@type filename: C{str}
@return: The path to the external file.
@rtype: C{str}
"""
for path in [os.path.join(basepath, filename),
pkg_resources.resource_filename(__name__, 'resources/' + filename)]:
if os.path.exists(path):
return path
raise IOError('external file %s not found' % filename)
    @staticmethod
    def _parse_pose(pose):
        """\
        Parse a pose from YAML.

        @param pose: The YAML dict of the pose.
        @type pose: C{dict}
        @return: The parsed pose.
        @rtype: L{Pose}
        """
        # Translation component; defaults to the origin when absent.
        if 'T' in pose:
            T = Point(*pose['T'])
        else:
            T = Point(0, 0, 0)
        # Rotation component; 'Rformat' selects how 'R' is interpreted.
        if 'R' in pose:
            if pose['Rformat'] == 'quaternion':
                R = Rotation(Quaternion(pose['R'][0], Point(*pose['R'][1])))
            elif pose['Rformat'] == 'matrix':
                R = Rotation.from_rotation_matrix(pose['R'])
            elif pose['Rformat'].startswith('axis-angle'):
                # Format is 'axis-angle-<unit>'; R is [angle, axis].
                unit = pose['Rformat'].split('-')[2]
                angle = Angle(pose['R'][0], unit)
                R = Rotation.from_axis_angle(angle, Point(*pose['R'][1]))
            elif pose['Rformat'].startswith('euler'):
                # Format is 'euler-<convention>-<unit>'; R is three angles.
                convention, unit = pose['Rformat'].split('-')[1:]
                R = [Angle(r, unit) for r in pose['R']]
                R = Rotation.from_euler(convention, Point(R[0], R[1], R[2]))
            else:
                raise ValueError('unrecognized rotation format')
        else:
            # No rotation specified: identity rotation.
            R = Rotation()
        return Pose(T, R)
    def _parse_primitives(self, sprite):
        """\
        Parse the primitives of a sprite from YAML.

        @param sprite: The YAML dict or filename of the sprite.
        @type sprite: C{dict} or C{str}
        @return: The primitives list.
        @rtype: C{list} of C{dict}
        """
        path = self._path
        if isinstance(sprite, str):
            # The sprite is an external file reference; load it and switch
            # the path context to its directory.
            sprite_file = self._external_path(path, sprite)
            sprite = yaml.load(open(sprite_file, 'r'))
            path = os.path.split(sprite_file)[0]
        try:
            for primitive in sprite['primitives']:
                if 'texture' in primitive:
                    # Resolve texture names to concrete .tga file paths.
                    primitive['texture'] = \
                        self._external_path(path, primitive['texture'] + '.tga')
            return sprite['primitives']
        except KeyError:
            # No 'primitives' key in the sprite.
            return []
def _parse_triangles(self, sprite, path):
"""\
Parse the triangles of a sprite from YAML.
@param sprite: The YAML dict or filename of the sprite.
@type sprite: C{dict} or C{str}
@param path: The current path context.
@type path: C{str}
@return: The triangles list.
@rtype: C{list} of L{OcclusionTriangle}
"""
if isinstance(sprite, str):
sprite_file = self._external_path(path, sprite)
sprite = yaml.load(open(sprite_file, 'r'))
path = os.path.split(sprite_file)[0]
try:
triangles = sprite['triangles']
if isinstance(triangles, str):
# load from raw ASCII triangle mesh format
filename = triangles
triangles = []
with open(self._external_path(path, filename), 'r') as f:
for line in f.readlines():
triangles.append({'vertices': [[float(c) for c in \
line.rstrip().split(' ')][i * 3:i * 3 + 3] \
for i in range(3)]})
parsed_triangles = []
for triangle in triangles:
print triangle
try:
triangle['pose'] = self._parse_pose(triangle['pose'])
except KeyError:
pass
'''
try:
parsed_triangles.append(OcclusionTriangle(**triangle))
except ValueError:
pass
'''
parsed_triangles.append(OcclusionTriangle(**triangle))
return parsed_triangles
except KeyError:
return []
def _parse_links(self, robot):
"""\
Parse the links of a robot from YAML.
@param robot: The YAML dict or filename of the robot.
@type robot: C{dict} or C{str}
@return: The parsed robot links | .
@rtype: C{list}
"""
path = self._path
if isinstance(robot, str):
robot_file = self._external_path(path, robot)
robot = yaml.load(open(robot_file, 'r'))
path = os.path.split(robot_file) | [0]
links = robot['links']
print links
for link in links:
link['offset'] = self._parse_pose(link['offset'])
link['triangles'] = self._parse_triangles(link, path)
return links
def _parse_model(self, model, modeltype):
"""\
Parse a multi-camera model from YAML.
@param model: The YAML dict of the multi-camera model.
@type model: C{dict}
@param modeltype: The model class.
@type modeltype: C{type}
@return: The parsed multi-camera model.
@rtype: L{Model}
"""
# create model object
rmodel = modeltype()
mounts = {}
for objecttype in ['cameras', 'lasers', 'robots', 'scene']:
if not objecttype in model:
continue
for obj in model[objecttype]:
pose = self._parse_pose(obj['pose']) \
if 'pose' in obj else Pose()
mount_pose = self._parse_pose(obj['mount_pose']) \
if 'mount_pose' in obj else Pose()
if 'sprites' in obj:
primitives = list(chain.from_iterable(\
[self._parse_primitives(sprite) \
for sprite in obj['sprites']]))
else:
primitives = []
occlusion = bool(obj['occlusion']) \
if 'occlusion' in obj else True
_solid = False
if occlusion and 'sprites' in obj:
|
rochacbruno/flasgger | tests/test_examples.py | Python | mit | 2,399 | 0 | # coding: utf-8
from flex.core import validate
from flex import load as validate_fully
def test_validate_example_specs(test_data):
    """
    Generated by conftest.py for each app under examples; ``test_data`` is
    the tuple (mod, client, specs_data, opts) where ``specs_data`` maps a
    spec URL to its swagger dictionary and ``opts`` holds test metadata
    defined in the example.

    :expectedresults: every spec dictionary validates using flex
    """
    mod, client, specs_data, opts = test_data
    skip_full_validation = opts.get('skip_full_validation', False)
    for spec in specs_data.values():
        if 'openapi' in spec or skip_full_validation:
            # OpenAPI specs are not yet supported by flex's full validator,
            # so fall back to a fairly simple structural validation.
            validate(spec)
        else:
            # Flex can thoroughly validate Swagger 2.0 specs (the format's
            # name before it became OpenAPI).
            validate_fully(spec)
def test_required_attributes(test_data):
    """
    Generated by conftest.py for each app under examples; ``test_data`` is
    the tuple (mod, client, specs_data, opts) where ``specs_data`` maps a
    spec URL to its swagger dictionary.

    :expectedresults: every spec declares both 'paths' and 'info'
    """
    mod, client, specs_data, opts = test_data
    for spec in specs_data.values():
        for required in ('paths', 'info'):
            assert required in spec, '{0} is required'.format(required)
def test_example_swag(test_data):
    """
    Generated by conftest.py for each app under examples; ``test_data`` is
    the tuple (mod, client, specs_data, opts).

    :expectedresults: the example module's own ``test_swag`` hook, when it
        defines one, runs without raising
    """
    mod, client, specs_data, opts = test_data
    swag_hook = getattr(mod, 'test_swag', None)
    if swag_hook is not None:
        swag_hook(client, specs_data)
|
sanyi/awsutils | examples/tornado-sqs.py | Python | mit | 536 | 0.01306 |
import tornado.ioloop
#!! this example requires the file /test/setting.py with the folowing constants is created
from awsutils.tornado.sqsclient import SQSCl | ient
from test.settings import access_key, secret_key
sqsclient = SQSClient(endpoint='sqs.us-east-1.amazonaws.com', access_key=access_key, secret_key=secret_key, | secure=False)
def renderResult(data):
print("message received", data)
sqsclient.sendMessage(callback=renderResult, qName="test", messageBody="this is a test message")
tornado.ioloop.IOLoop.instance().start() |
Hellseher/ProjectEuler | src/pe_python/pe_050_problem.py | Python | gpl-2.0 | 2,384 | 0 | #! /usr/bin/env python
# _*_ coding: UTF-8 _*_
# File : 50-problem.py
# Created : Sat 11 Apr 2015 20:57:18
# Last Modified : Sat 18 Apr 2015 22:21:45
# Maintainer : sharlatan, <sharlatanus@gmail.com>
# Title : CONSECUTIVE PRIME SUM
# License : Same as Python (GPL)
# Credits : https://projecteuler.net/
#
# -=:[ Description ]:=-
# The prime 41, can be written as the sum of six consecutive primes:
# 41 = 2 + 3 + 5 + 7 + 11 + 13
# This is the longest sum of consecutive primes that adds to a prime below
# one-hundred. The longest sum of consecutive primes below one-thousand that
# adds to a prime, contains 21 terms, and is equal to 953.
# Which prime, below one-million, can be written as the sum of the most
# consecutive primes?
from termcolor import colored
import sys
import time
import math
INIT_TIME = time.time()
def isprime(num):
    """Return True if num is prime, False otherwise.

    Uses trial division up to sqrt(num). Numbers below 2 (0, 1 and
    negatives) are explicitly rejected; the original version accepted them
    because its trial range was empty.
    """
    if num < 2:
        return False
    search_lim = int(math.sqrt(num))
    # range instead of xrange keeps the module working under both
    # Python 2 (where it just builds a small list) and Python 3.
    for i in range(2, search_lim + 1):
        if num % i == 0:
            return False
    return True
def cons_prime_sum(prime_list):
    """ takes a list of prime, return a list with consecutive prime sum """
    # NOTE(review): despite the docstring this function returns nothing --
    # it only prints each accumulated window of consecutive primes together
    # with the candidate prime. The triple nested loop re-scans the list
    # with .index() each iteration and looks like exploratory code; verify
    # intent before reusing.
    pl = [] # working window of consecutive primes
    for p_check in prime_list:
        for p_step in xrange(0, prime_list.index(p_check)):
            for p_index in xrange(p_step, prime_list.index(p_check)):
                pl[::] = [] # clean up list
                get_that_prime = p_index
                # Grow the window until its sum exceeds the candidate prime.
                while sum(pl) <= p_check:
                    pl.append(prime_list[get_that_prime])
                    get_that_prime += 1
                print pl, p_check
def perf():
    """Return a human-readable string with the seconds elapsed since the
    module was imported (INIT_TIME)."""
    elapsed = time.time() - INIT_TIME
    return "Perfomance time: {0}".format(elapsed)
def error():
    """Print a coloured usage message and terminate the script."""
    print colored("..::Incorect amount of arguments::..", 'red')
    print colored("\tEnter just one integer", 'blue')
    # quit() raises SystemExit, aborting the program.
    quit()
def main():
    """Entry point: read the prime limit from argv, list all primes up to
    it, then hand them to cons_prime_sum and print the elapsed time."""
    if len(sys.argv) != 2:
        error()
    PRIME_LIMIT = int(sys.argv[1])
    PRIME_LIST = []
    # Collect every prime up to the limit via trial division.
    for i in xrange(2, PRIME_LIMIT + 1):
        if isprime(i):
            PRIME_LIST.append(i)
    # p_sum = 0
    # for p in PRIME_LIST:
    #     p_sum += p
    #     print p_sum, p
    #     if p_sum > 953:
    #         break
    # print PRIME_LIST
    cons_prime_sum(PRIME_LIST)
    print perf()
if __name__ == '__main__':
main()
|
pvanhorn/openthread | tests/scripts/thread-cert/Cert_6_6_02_KeyIncrementRollOver.py | Python | bsd-3-clause | 3,389 | 0.000885 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
LEADER = 1
ED = 2
class Cert_6_6_2_KeyIncrement1(unittest.TestCase):
    """Thread certification 6.6.2: an end device must stay attached and
    reachable after the leader increments the thread key sequence counter."""

    def setUp(self):
        # Two-node topology: a leader and an end device, whitelisted to
        # each other so no other traffic interferes.
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1, 3):
            self.nodes[i] = node.Node(i, (i == ED), simulator=self.simulator)
        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
        self.nodes[LEADER].enable_whitelist()
        # Guard time 0 allows the key sequence to be bumped immediately.
        self.nodes[LEADER].set_key_switch_guardtime(0)
        self.nodes[LEADER].set_key_sequence_counter(127)
        self.nodes[ED].set_panid(0xface)
        self.nodes[ED].set_mode('rsn')
        self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ED].enable_whitelist()
        self.nodes[ED].set_key_switch_guardtime(0)

    def tearDown(self):
        # Loop variable renamed: it previously shadowed the imported
        # `node` module.
        for device in list(self.nodes.values()):
            device.stop()
        del self.nodes
        del self.simulator

    def test(self):
        # Bring up the leader first, then attach the end device.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ED].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED].get_state(), 'child')
        # Baseline: the end device answers pings on all its addresses.
        addrs = self.nodes[ED].get_addrs()
        for addr in addrs:
            self.assertTrue(self.nodes[LEADER].ping(addr))
        # Increment the key sequence counter on the leader ...
        key_sequence_counter = self.nodes[LEADER].get_key_sequence_counter()
        self.nodes[LEADER].set_key_sequence_counter(key_sequence_counter + 1)
        # ... and verify the end device is still reachable afterwards.
        addrs = self.nodes[ED].get_addrs()
        for addr in addrs:
            self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
|
domofwk/domofwk-docs | source/formations/flask/fabfile.py | Python | gpl-2.0 | 122 | 0.008197 | from fabric.api import local
def html():
    """Fabric task: build the hovercraft slide deck from
    formation_flask.rst into ./build/ using the sixfeetup theme."""
    local('hovercraft -t ./sixfeetup_hovercraft formation_flask.rst ./build/')
alexforencich/python-ivi | ivi/rigol/rigolDS1000Z.py | Python | mit | 2,014 | 0.001986 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO, THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .rigolBaseScope import *
from .rigolDSSource import *
class rigolDS1000Z(rigolBaseScope, rigolDSSource):
    "Rigol DS1000Z series IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # Set a default instrument ID without clobbering one that a
        # subclass may already have placed in the instance dict.
        self.__dict__.setdefault('_instrument_id', '')
        super(rigolDS1000Z, self).__init__(*args, **kwargs)
        # Capabilities of the DS1000Z scope family.
        self._analog_channel_count = 4
        self._digital_channel_count = 16
        self._bandwidth = 100e6
        self._bandwidth_limit = {'20M': 20e6}
        self._max_averages = 1024
        self._horizontal_divisions = 12
        self._vertical_divisions = 8
        # Internal source
        self._output_count = 2
        self._identity_description = "Rigol DS1000Z series IVI oscilloscope driver"
        self._identity_supported_instrument_models = ['DS1074Z', 'DS1104Z', 'MSO1074Z', 'MSO1104Z']
        # Base-class hooks that materialize channel/output objects.
        self._init_channels()
        self._init_outputs()
|
zidik/Thermal_Camera | PC software/cmdparser.py | Python | mit | 1,830 | 0.003825 | import threading
import time
import queue
import logging
class CmdParser(threading.Thread):
    """Worker thread that drains a queue of serial messages and applies
    recognised commands to the shared thermal-data model."""

    def __init__(self, rx, thermal_data):
        """
        :param rx: queue.Queue of incoming message strings.
        :param thermal_data: object exposing set_datapoint(x, y, value).
        """
        threading.Thread.__init__(self)
        self.rx = rx
        self.thermal_data = thermal_data

    def run(self):
        """Main loop: keep parsing messages until the thread is stopped
        externally."""
        logging.info("Parser thread starting")
        try:
            while True:
                parsed = self.parse()
                # If there was nothing to do, sleep briefly to avoid a
                # busy loop.
                if not parsed:
                    time.sleep(0.01)
        finally:
            logging.error("Parser thread stopped")

    def parse(self):
        """
        Fetch one message from the queue and try to parse it.
        Returns True if a message was successfully parsed, False otherwise.
        """
        try:
            message = self.rx.get_nowait()
        except queue.Empty:
            # Nothing to parse.
            return False
        return self.parse_CMD(message)

    def parse_CMD(self, cmd_string):
        """
        Parse the supplied colon-separated command string and act on it.
        Returns True if the command was recognised and executed.
        """
        cmd_tokens = cmd_string.split(':')
        cmd = cmd_tokens[0]
        cmd_arguments = cmd_tokens[1:]
        if cmd == "Scan":
            if len(cmd_arguments) == 3:
                (x, y, value) = cmd_arguments
                self.thermal_data.set_datapoint(int(x), int(y), int(value))
                return True
            # Fix: the old message claimed "more arguments than needed"
            # even when there were too few.
            logging.warning(
                ("Serial:\"{}\"\nScan command has wrong number of arguments!").format(cmd_string))
        else:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning(("Unknown serial command recieved: \"{}\"").format(cmd_string))
        return False
cbgaindia/parsers | state_budget/sikkim/sikkim_budget_csv_generator.py | Python | mit | 4,866 | 0.007398 | import argparse
import csv
import glob
import logging
from logging.config import fileConfig
import os
import re
import sys
import xlrd
fileConfig('parsers/logging_config.ini')
logger = logging.getLogger()
reload(sys)
sys.setdefaultencoding('utf-8')
FILE_REGEX = "Demand|Budget at a glance|Receipt"
BUDGET_START_SLUG = "Actual|Budget"
CURRENCY_SLUG_REGEX = "\(In Thousands of Rupees\)|\(Rupees in thousand\)|\( In Lakhs of Rupees\)|\(Rs. in thousand\)"
HEADER_ROWS_NUM = 3
NOTES_SLUG = "Notes:|Note:"
class SikkimBudgetCSVGenerator():
    """Converts Sikkim budget XLS/XLSX workbooks into CSV files, one per
    workbook, written next to the input file."""
    def __init__(self):
        # Currency unit string (e.g. "(Rs. in thousand)") found in the
        # current workbook; appended to budget-value column headers.
        self.currency_handle = ""
    def process_budget_files(self, input_dir):
        # Convert every matching workbook under input_dir to CSV.
        budget_files = self.find_files_for_conversion(input_dir)
        for input_file in budget_files:
            logger.info("Processing input: %s" % input_file)
            workbook = xlrd.open_workbook(input_file)
            worksheet = workbook.sheet_by_index(0)
            file_name = "".join(worksheet.row_values(0)).strip() + " - " + "".join(worksheet.row_values(1)).strip()
            start_index = 0
            # Scan rows for the currency slug; the header begins just after.
            for row_index in xrange(2, worksheet.nrows):
                row_value_join = "".join([str(x).encode('string_escape') for x in worksheet.row_values(row_index)]).strip()
                matched_currency_handle = re.findall(r"%s" % CURRENCY_SLUG_REGEX, row_value_join)
                if matched_currency_handle:
                    self.currency_handle = matched_currency_handle[0]
                    start_index = row_index+1
                    break
            header_row = self.create_file_header(worksheet, start_index)
            self.create_csv_file(worksheet, start_index, header_row, file_name, input_file)
    def create_file_header(self, worksheet, start_index):
        # Merge HEADER_ROWS_NUM stacked header rows into one flat header,
        # propagating merged-cell labels and appending the currency unit to
        # the budget-value columns.
        header_row = []
        budget_values_start_index = 0
        for index in range(start_index, start_index+HEADER_ROWS_NUM):
            temp_header = worksheet.row_values(index)
            for col_index in range(len(temp_header)):
                # First column whose header matches BUDGET_START_SLUG marks
                # where the numeric budget columns begin.
                if not budget_values_start_index and re.findall(r"%s" % BUDGET_START_SLUG, str(temp_header[col_index]).strip()):
                    budget_values_start_index = col_index
                temp_header[col_index] = str(temp_header[col_index]).replace("\n", " ").strip()
                temp_header[col_index] = re.sub(r"\s{2,}", " ", temp_header[col_index])
                # Empty cells right of the budget start inherit the label to
                # their left (spreadsheet merged cells).
                if budget_values_start_index and col_index > budget_values_start_index and not temp_header[col_index].strip():
                    temp_header[col_index] = temp_header[col_index-1]
                if len(header_row)-1 < col_index:
                    header_row.append(temp_header[col_index])
                elif temp_header[col_index].strip():
                    header_row[col_index] += " " + temp_header[col_index]
        for index in range(budget_values_start_index, len(header_row)):
            header_row[index] = header_row[index] + " " + self.currency_handle
        # Fall back to fixed names when the first two headers are blank.
        if not header_row[0].strip():
            header_row[0] = "Major Head and Totals"
        if not header_row[1].strip():
            header_row[1] = "Budget Head"
        return header_row
    def create_csv_file(self, worksheet, start_index, header_row, file_name, input_file):
        # Write the header plus every data row, skipping blank rows, the
        # numeric column-index row and note rows.
        output_dir = "/".join(input_file.split("/")[0:-1])
        out_csv_file = open(output_dir + "/" + file_name + ".csv", "wb")
        csv_writer = csv.writer(out_csv_file, delimiter=',')
        csv_writer.writerow(header_row)
        col_index_join = "".join(str(float(x)) for x in range(1,len(header_row)+1))
        for row_index in xrange(start_index+HEADER_ROWS_NUM, worksheet.nrows):
            row_value_join = "".join([str(x).encode('string_escape') for x in worksheet.row_values(row_index)]).strip()
            if not row_value_join or row_value_join == col_index_join or re.findall(r"%s" % NOTES_SLUG, str(worksheet.row_values(row_index)[0])):
                continue
            else:
                csv_writer.writerow(worksheet.row_values(row_index))
        out_csv_file.close()
    def find_files_for_conversion(self, input_dir):
        # Collect .xls/.xlsx/.XLS workbooks two levels deep whose names
        # match FILE_REGEX.
        budget_files = []
        files = glob.glob('%s/**/**/*.xls*' % input_dir)
        files += glob.glob('%s/**/**/*.XLS' % input_dir)
        for file_name in files:
            if re.findall(r"%s" % FILE_REGEX, file_name):
                budget_files.append(file_name)
        return budget_files
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Generates CSV files for Sikkim Budget(from XLSX & XLS Documents)")
    parser.add_argument("input_dir", help="Input directory for budget documents")
    args = parser.parse_args()
    # NOTE(review): input_dir is a positional argument, so argparse already
    # rejects a missing value; this branch only fires for an empty string.
    if not args.input_dir:
        logger.error("Please input directory to begin CSV extraction")
    else:
        obj = SikkimBudgetCSVGenerator()
        obj.process_budget_files(args.input_dir)
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractEmergencyExitsReleaseBlog.py | Python | bsd-3-clause | 226 | 0.026549 | def extractEmergencyExitsReleaseBlog(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title | '])
if not (chp or vol or frag) or 'preview' in item['title'].l | ower():
return None
return False
|
samedder/azure-cli | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py | Python | mit | 108,353 | 0.004125 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,too-many-lines
from __future__ import print_function
import getpass
import json
import os
import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from six.moves.urllib.request import urlopen # noqa, pylint: disable=import-error,unused-import
from azure.cli.command_modules.vm._validators import _get_resource_group_from_vault_name
from azure.cli.core.commands.validators import validate_file_or_dict, DefaultStr, DefaultInt
from azure.keyvault import KeyVaultId
from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation
from azure.cli.core.commands.arm import parse_resource_id, resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
from azure.cli.core.util import CLIError
import azure.cli.core.azlogging as azlogging
from azure.cli.core.profiles import get_sdk, ResourceType, supported_api_version
from ._vm_utils import read_content_if_is_file
from ._vm_diagnostics_templates import get_default_diag_config
from ._actions import (load_images_from_aliases_doc,
load_extension_images_thru_services,
load_images_thru_services)
from ._client_factory import _compute_client_factory, cf_public_ip_addresses
logger = azlogging.get_az_logger(__name__)
_MSI_PORT = 50342
_MSI_EXTENSION_VERSION = '1.0'
CachingTypes, VirtualHardDisk, VirtualMachineScaleSet, VirtualMachineCaptureParameters, \
VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = get_sdk(
ResourceType.MGMT_COMPUTE,
'CachingTypes', 'VirtualHardDisk', 'VirtualMachineScaleSet', 'VirtualMachineCaptureParameters',
'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile',
mod='models')
def get_resource_group_location(resource_group_name):
    """Return the Azure location (region) of the given resource group."""
    client = get_mgmt_service_client(ResourceType.MGMT_RESOURCE_RESOURCES)
    # pylint: disable=no-member
    return client.resource_groups.get(resource_group_name).location
def get_vm(resource_group_name, vm_name, expand=None):
    '''Retrieves a VM'''
    # 'expand' is forwarded to the API (e.g. 'instanceView') for extra detail.
    client = _compute_client_factory()
    return client.virtual_machines.get(resource_group_name,
                                       vm_name,
                                       expand=expand)
def set_vm(instance, lro_operation=None, no_wait=False):
    '''Update the given Virtual Machine instance via create_or_update and
    wait on the poller (unless no_wait).'''
    instance.resources = None # Issue: https://github.com/Azure/autorest/issues/934
    client = _compute_client_factory()
    parsed_id = _parse_rg_name(instance.id)
    poller = client.virtual_machines.create_or_update(
        resource_group_name=parsed_id[0],
        vm_name=parsed_id[1],
        parameters=instance, raw=no_wait)
    if lro_operation:
        # Caller-supplied long-running-operation wrapper takes precedence.
        return lro_operation(poller)
    return LongRunningOperation()(poller)
def _parse_rg_name(strid):
    '''From an ID, extract the contained (resource group, name) tuple
    '''
    # parse_resource_id handles the full ARM resource ID grammar.
    parts = parse_resource_id(strid)
    return (parts['resource_group'], parts['name'])
# Use the same name by portal, so people can update from both cli and portal
# (VM doesn't allow multiple handlers for the same extension)
_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess'
_LINUX_ACCESS_EXT = 'VMAccessForLinux'
_WINDOWS_ACCESS_EXT = 'VMAccessAgent'
_LINUX_DIAG_EXT = 'LinuxDiagnostic'
_WINDOWS_DIAG_EXT = 'IaaSDiagnostics'
extension_mappings = {
_LINUX_ACCESS_EXT: {
'version': '1.4',
'publisher': 'Microsoft.OSTCExtensions'
},
_WINDOWS_ACCESS_EXT: {
'version': '2.0',
'publisher': 'Microsoft.Compute'
},
_LINUX_DIAG_EXT: {
'version': '3.0',
'publisher': 'Microsoft.Azure.Diagnostics'
},
_WINDOWS_DIAG_EXT: {
'version': '1.5',
'publisher': 'Microsoft.Azure.Diagnostics'
}
}
def _get_access_extension_upgrade_info(extensions, name):
    """Return (publisher, version, auto_upgrade) for the named VM access
    extension, based on the extension_mappings table and what is already
    installed on the VM."""
    version = extension_mappings[name]['version']
    publisher = extension_mappings[name]['publisher']
    auto_upgrade = None
    if extensions:
        extension = next((e for e in extensions if e.name == name), None)
        from distutils.version import LooseVersion  # pylint: disable=no-name-in-module,import-error
        if extension and LooseVersion(extension.type_handler_version) < LooseVersion(version):
            # Installed extension is older than the mapped version: request
            # an automatic minor-version upgrade.
            auto_upgrade = True
        elif extension and LooseVersion(extension.type_handler_version) > LooseVersion(version):
            # Installed extension is newer: keep using its version.
            version = extension.type_handler_version
    return publisher, version, auto_upgrade
def _get_storage_management_client():
    """Return an ARM storage management client for the current profile."""
    return get_mgmt_service_client(ResourceType.MGMT_STORAGE)
def _trim_away_build_number(version):
# workaround a known issue: the version must only contain "major.minor", even though
# "extension image list" gives more detail
return '.'.join(version.split('.')[0:2])
# Hide extension information from output as the info is not correct and unhelpful; also
# commands using it mean to hide the extension concept from users.
class ExtensionUpdateLongRunningOperation(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """Long-running-operation wrapper that waits for completion but
    deliberately suppresses the (unhelpful) extension payload."""
    def __call__(self, poller):
        super(ExtensionUpdateLongRunningOperation, self).__call__(poller)
        # That said, we surppress the output. Operation failures will still
        # be caught through the base class
        return None
def list_vm(resource_group_name=None, show_details=False):
    ''' List Virtual Machines. Without a resource group, lists across the
    whole subscription. '''
    ccf = _compute_client_factory()
    vm_list = ccf.virtual_machines.list(resource_group_name=resource_group_name) \
        if resource_group_name else ccf.virtual_machines.list_all()
    if show_details:
        # get_vm_details makes extra network calls per VM, so this is slower.
        return [get_vm_details(_parse_rg_name(v.id)[0], v.name) for v in vm_list]
    return list(vm_list)
def show_vm(resource_group_name, vm_name, show_details=False):
    """Show a VM; with show_details, include network and power-state info."""
    if show_details:
        return get_vm_details(resource_group_name, vm_name)
    return get_vm(resource_group_name, vm_name)
def get_vm_details(resource_group_name, vm_name):
    """Return the VM instance view augmented with power state and network info.

    Walks every NIC attached to the VM and collects MAC addresses, private IPs,
    and (where present) public IPs and FQDNs, then attaches them to the result
    as comma-separated string attributes.
    """
    result = get_instance_view(resource_group_name, vm_name)
    network_client = get_mgmt_service_client(ResourceType.MGMT_NETWORK)
    public_ips = []
    fqdns = []
    private_ips = []
    mac_addresses = []
    # pylint: disable=line-too-long,no-member
    for nic_ref in result.network_profile.network_interfaces:
        # nic_ref only carries the resource id; fetch the full NIC model.
        nic_parts = parse_resource_id(nic_ref.id)
        nic = network_client.network_interfaces.get(nic_parts['resource_group'], nic_parts['name'])
        if nic.mac_address:
            mac_addresses.append(nic.mac_address)
        for ip_configuration in nic.ip_configurations:
            private_ips.append(ip_configuration.private_ip_address)
            if ip_configuration.public_ip_address:
                # Public IP is a separate resource; resolve it for address/FQDN.
                res = parse_resource_id(ip_configuration.public_ip_address.id)
                public_ip_info = network_client.public_ip_addresses.get(res['resource_group'],
                                                                        res['name'])
                if public_ip_info.ip_address:
                    public_ips.append(public_ip_info.ip_address)
                if public_ip_info.dns_settings:
                    fqdns.append(public_ip_info.dns_settings.fqdn)
    # Only the 'PowerState/...' statuses are user-facing power information.
    setattr(result, 'power_state',
            ','.join([s.display_status for s in result.instance_view.statuses if s.code.startswith('PowerState/')]))
    setattr(result, 'public_ips', ','.join(public_ips))
    setattr(result, 'fqdns', ','.join(fqdns))
    setattr(result, 'private_ips', ','.join(private_ips))
    setattr(result, 'mac_addresses', ','.join(mac_addresses))
    del result.instance_view  # we don't need other instance_view info as people won't care
    return result
def list_ |
Chilledheart/chromium | tools/telemetry/telemetry/internal/platform/profiler/vtune_profiler.py | Python | bsd-3-clause | 5,290 | 0.008885 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
from telemetry.core import exceptions
from telemetry.internal.platform import profiler
from telemetry.internal.platform.profiler import android_profiling_helper
class _SingleProcessVTuneProfiler(object):
  """An internal class for using vtune for a given process.

  Spawns 'amplxe-cl -collect hotspots' against a single pid and, on
  CollectProfile(), stops the collection and post-processes the result.
  """
  def __init__(self, pid, output_file, browser_backend, platform_backend):
    self._pid = pid
    self._browser_backend = browser_backend
    self._platform_backend = platform_backend
    self._output_file = output_file
    # Unbuffered temp file capturing amplxe-cl's stdout/stderr for diagnostics.
    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
    cmd = ['amplxe-cl', '-collect', 'hotspots',
           '-target-pid', str(pid), '-r', self._output_file]
    self._is_android = platform_backend.GetOSName() == 'android'
    if self._is_android:
      cmd += ['-target-system', 'android']
    self._proc = subprocess.Popen(
        cmd, stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
  def CollectProfile(self):
    """Stop the VTune collection and return the profile result path."""
    if 'renderer' in self._output_file:
      try:
        # Raises if the renderer process no longer exists (swapped out).
        self._platform_backend.GetCommandLine(self._pid)
      except exceptions.ProcessGoneException:
        logging.warning('Renderer was swapped out during profiling. '
                        'To collect a full profile rerun with '
                        '"--extra-browser-args=--single-process"')
    subprocess.call(['amplxe-cl', '-command', 'stop', '-r', self._output_file])
    exit_code = self._proc.wait()
    try:
      # 1: amplxe: Error: Cannot find a running process with the specified ID.
      # Provide a valid PID.
      if exit_code not in (0, 1):
        raise Exception(
            'amplxe-cl failed with exit code %d. Output:\n%s' % (exit_code,
                                                                 self._GetStdOut()))
    finally:
      self._tmp_output_file.close()
    if exit_code:
      # The renderer process was swapped out. Now that we made sure VTune has
      # stopped, return without further processing the invalid profile.
      return self._output_file
    if self._is_android:
      # On Android the native libraries must be pulled into a local symfs so
      # amplxe-cl can resolve symbols during finalization.
      required_libs = \
          android_profiling_helper.GetRequiredLibrariesForVTuneProfile(
              self._output_file)
      device = self._browser_backend.device
      symfs_root = os.path.dirname(self._output_file)
      android_profiling_helper.CreateSymFs(device,
                                           symfs_root,
                                           required_libs,
                                           use_symlinks=True)
      logging.info('Resolving symbols in profile.')
      subprocess.call(['amplxe-cl', '-finalize', '-r', self._output_file,
                       '-search-dir', symfs_root])
    print 'To view the profile, run:'
    print ' amplxe-gui %s' % self._output_file
    return self._output_file
  def _GetStdOut(self):
    # Best-effort read of the captured amplxe-cl output; empty on failure.
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''
class VTuneProfiler(profiler.Profiler):
  """Profiler backend driving Intel VTune (amplxe-cl), one run per process.

  Profiles renderer processes when any exist, otherwise the 'browser0'
  process, via one _SingleProcessVTuneProfiler per target pid.
  """
  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(VTuneProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state)
    process_output_file_map = self._GetProcessOutputFileMap()
    self._process_profilers = []
    # Prefer renderer processes whenever at least one is present.
    has_renderer = False
    for pid, output_file in process_output_file_map.iteritems():
      if 'renderer' in output_file:
        has_renderer = True
        break
    for pid, output_file in process_output_file_map.iteritems():
      if has_renderer:
        if not 'renderer' in output_file:
          continue
      elif not 'browser0' in output_file:
        continue
      self._process_profilers.append(
          _SingleProcessVTuneProfiler(pid, output_file, browser_backend,
                                      platform_backend))
  @classmethod
  def name(cls):
    return 'vtune'
  @classmethod
  def is_supported(cls, browser_type):
    # Linux only ('linux2' is the Python 2 sys.platform value); CrOS excluded.
    if sys.platform != 'linux2':
      return False
    if browser_type.startswith('cros'):
      return False
    try:
      # Probe for a working amplxe-cl binary on PATH.
      proc = subprocess.Popen(['amplxe-cl', '-version'],
                              stderr=subprocess.STDOUT,
                              stdout=subprocess.PIPE)
      proc.communicate()
      if proc.returncode != 0:
        return False
      if browser_type.startswith('android'):
        # VTune checks if 'su' is available on the device.
        proc = subprocess.Popen(['adb', 'shell', 'su', '-c', 'id'],
                                stderr=subprocess.STDOUT,
                                stdout=subprocess.PIPE)
        return 'not found' not in proc.communicate()[0]
      return True
    except OSError:
      # amplxe-cl is not installed.
      return False
  @classmethod
  def CustomizeBrowserOptions(cls, browser_type, options):
    # NOTE(review): presumably the sandbox must be disabled for VTune to
    # attach to the target processes — confirm against VTune requirements.
    options.AppendExtraBrowserArgs([
        '--no-sandbox',
        '--allow-sandbox-debugging',
    ])
  def CollectProfile(self):
    print 'Processing profile, this will take a few minutes...'
    output_files = []
    for single_process in self._process_profilers:
      output_files.append(single_process.CollectProfile())
    return output_files
|
nomad-mystic/nomadmystic | fileSystem/school-projects/development/softwaredesignandcomputerlogiccis122/cis122lab3/python/Lab3.py | Python | mit | 18,030 | 0.00416 | # File = Lab3.py
# Programmer = Keith Murphy
# date created = 2-5-2015
# date Modified = 2-9-2015
__author__ = 'pather'
# Hello Mark,
# This is my first stab at the Lab3 assignment. Everything tested well logically and I tried to follow the pseudocode
# specs closely. Let me know if I can make any improvements. Thanks
# Input = name_of_continent , years_in_the_future
# Output = name_of_continent, year_population_50, year_population_100
# Declare Variables:
# Declare Real years_in_the_future
# Declare String name_of_continent
# Declare Real year_population_50
# Declare Real ye | ar_population_100
# Module welcome_message()
# Display String Welcome Message
# End module
def welcome_message():
    """Print the greeting banner for the population calculator."""
    banner = 'Welcome to the future continental population calculator!!'
    print(banner)
# Module try_again()
# Call main()
# End Module
def try_again():
    # Restart the program from the top by calling main() again.
    # NOTE(review): this restarts via recursion, so repeated bad input
    # deepens the call stack — confirm against main()'s definition.
    main()
# Function choose_continent()
# Declare Stri | ng name_of_continent
# Declare Real years_in_the_future
#
# Display string 'The six populated continents are...'
# Display 'Please type the name of the continent...'
# Input name_of_continent
# Display String 'please type 50 or 100 years from now...'
# Input years_in_the_future
#
# If name of the continent matches a known Then
# If years_in_the_future == 50.0 or years_in_the_future == 100.0 Then
# Return name_of_continent, years_in_the_future
# Else
# Display 'Exit Message'
# Call try_again()
# Else
# Display 'This is not a Continent I know About'
# Call try_again()
# End Else
# End Function
def choose_continent():
    """Prompt for a continent name and a 50/100-year horizon.

    Returns (name_of_continent, years_in_the_future) on valid input;
    otherwise prints an error and restarts the program via try_again().
    """
    print('The future population finder will find your chosen continents future population. '
          'The six populated continents are: Asia, Africa, Europe, South America, North America, or Oceania')
    name_of_continent = input('Please type the name of the continent you would like to know the future '
                              'population of: ')
    years_in_the_future = float(input('Look into the future please type 50 or 100: '))
    # Only these exact spellings are accepted (capitalized or all-lowercase).
    known_continents = ('Asia', 'asia', 'Africa', 'africa', 'Europe', 'europe',
                        'South America', 'south america',
                        'North America', 'north america',
                        'Oceania', 'oceania')
    if name_of_continent not in known_continents:
        print("That is not the name of a continent I know , Please try again")
        try_again()
    elif years_in_the_future not in (50.0, 100.0):
        print("That is not a year we looking at, Please try again")
        try_again()
    else:
        return name_of_continent, years_in_the_future
# Function string, real chosen_continent_pop_calculator(string name_of_continent, real years_in_the_future)
# Declare Real year_population_50
# Declare Real year_population_100
#
# If name_of_continent == 'Asia' or name_of_continent == 'asia' Then
# If years_in_the_future == 50.0 Then
# Set current_population = Real 4298723288
# Set current_rate_of_change = Real .0103 * 50
# Set year_population_50 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else Then
# Set current_population = Real 4298723288
# Set current_rate_of_change = Real .0103 * 100
# Set year_population_100 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else If name_of_continent == 'Africa' or name_of_continent == 'africa' Then
# If years_in_the_future == 50.0 Then
# Set current_population = Real 1110635062
# Set current_rate_of_change = Real .0245 * 50
# Set year_population_50 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else Then
# Set current_population = Real1110635062
# Set current_rate_of_change = Real .0245 * 100
# Set year_population_100 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else If name_of_continent == 'Europe' or name_of_continent == 'europe' Then
# If years_in_the_future == 50.0 Then
# Set current_population = Real 742452170
# Set current_rate_of_change = Real .0008 * 50
# Set year_population_50 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else Then
# Set current_population = Real 742452170
# Set current_rate_of_change = Real .0008 * 100
# Set year_population_100 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else If name_of_continent == 'South America' or name_of_continent == 'south america' Then
# If years_in_the_future == 50.0 Then
# Set current_population = Real 616644503
# Set current_rate_of_change = Real .00111 * 50
# Set year_population_50 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else Then
# Set current_population = Real 616644503
# Set current_rate_of_change = Real .00111 * 100
# Set year_population_100 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else If name_of_continent == 'Oceania' or name_of_continent == 'oceania' Then
# If years_in_the_future == 50.0 Then
# Set current_population = Real 38303620
# Set current_rate_of_change = Real .0142 * 50
# Set year_population_50 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else Then
# Set current_population = Real 38303620
# Set current_rate_of_change = Real .0142 * 100
# Set year_population_100 = current_population * current_rate_of_change
# Return name_of_continent, year_population_50, year_population_100
# Else Then
# Display 'Leaving Message'
# Call try_again()
#
# End If
# End Function
def chosen_continent_pop_calculator(name_of_continent, years_in_the_future):
year_population_50 = float()
year_population_100 = float()
if name_of_continent == 'Asia' or name_of_continent == 'asia':
if years_in_the_future == 50.0:
current_population = float(4298723288)
current_rate_of_change = float(.0103) * 50
year_population_50 = current_population * current_rate_of_change
return name_of_continent, year_population_50, year_population_100
else:
current_population = float(4298723288)
current_rate_of_change = float(.0103) * 100
year_population_100 = current_population * current_rate_of_change
return name_of_continent, year_population_50, year_population_100
elif name_of_continent == 'Africa' or name_of_continent == 'africa':
if years_in_the_future == 50.0:
current_population = float(1110635062)
current_rate_of_change = float(.0245) * 50
year_population_50 = current_population * current_rate_of_change
return name_of_continent, year_population_50, year_population_100
else:
current_population = float(1110635062)
current_rate_of_change = float(.0245) * 100
year_population_100 = current_population * current_rate_of_change
return name_of_continent, year_population_50, year_population_100
elif name_of_continent == 'Europe' or name_of_continent == 'europe':
if years_in_the_future == 50.0:
current_population = f |
sixninetynine/pex | pex/pex_info.py | Python | apache-2.0 | 9,898 | 0.011619 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import json
import os
import warnings
from collections import namedtuple
from .common import open_zip
from .compatibility import string as compatibility_string
from .compatibility import PY2
from .orderedset import OrderedSet
from .util import merge_split
from .variables import ENV
# Lightweight record describing a target interpreter/platform combination.
PexPlatform = namedtuple('PexPlatform', 'interpreter version strict')
# TODO(wickman) Split this into a PexInfoBuilder/PexInfo to ensure immutability.
# Issue #92.
class PexInfo(object):
"""PEX metadata.
# Build metadata:
build_properties: BuildProperties # (key-value information about the build system)
code_hash: str # sha1 hash of all names/code in the archive
distributions: {dist_name: str} # map from distribution name (i.e. path in
# the internal cache) to its cache key (sha1)
requirements: list # list of requirements for this environment
# Environment options
pex_root: string # root of all pex-related files eg: ~/.pex
entry_point: string # entry point into this pex
script: string # script to execute in this pex environment
# at most one of script/entry_point can be specified
zip_safe: True, default False # is this pex zip safe?
inherit_path: True, default False # should this pex inherit site-packages + PYTHONPATH?
ignore_errors: True, default False # should we ignore inability to resolve dependencies?
always_write_cache: False # should we always write the internal cache to disk first?
# this is useful if you have very large dependencies that
# do not fit in RAM constrained environments
.. versionchanged:: 0.8
Removed the ``repositories`` and ``indices`` information, as they were never
implemented.
"""
PATH = 'PEX-INFO'
INTERNAL_CACHE = '.deps'
  @classmethod
  def make_build_properties(cls):
    # Snapshot of the interpreter class/version and platform used at build
    # time; embedded into PEX-INFO under 'build_properties'.
    from .interpreter import PythonInterpreter
    from pkg_resources import get_platform
    pi = PythonInterpreter.get()
    return {
      'class': pi.identity.interpreter,
      'version': pi.identity.version,
      'platform': get_platform(),
    }
  @classmethod
  def default(cls):
    # Fresh, empty PexInfo seeded with the current build environment.
    pex_info = {
      'requirements': [],
      'distributions': {},
      'build_properties': cls.make_build_properties(),
    }
    return cls(info=pex_info)
  @classmethod
  def from_pex(cls, pex):
    # A pex may be a zip file or an unpacked directory; read the embedded
    # PEX-INFO file accordingly.
    if os.path.isfile(pex):
      with open_zip(pex) as zf:
        pex_info = zf.read(cls.PATH)
    else:
      with open(os.path.join(pex, cls.PATH)) as fp:
        pex_info = fp.read()
    return cls.from_json(pex_info)
return cls.from_json(pex_info)
@classmethod
def from_json(cls, content):
if isinstance(content, bytes):
content = content.decode('utf-8')
return cls(info=json.loads(content))
  @classmethod
  def from_env(cls, env=ENV):
    # Build a PexInfo overlay from PEX_* environment variables; only values
    # the user explicitly set survive, so they can override file contents.
    supplied_env = env.strip_defaults()
    zip_safe = None if supplied_env.PEX_FORCE_LOCAL is None else not supplied_env.PEX_FORCE_LOCAL
    pex_info = {
      'pex_root': supplied_env.PEX_ROOT,
      'entry_point': supplied_env.PEX_MODULE,
      'script': supplied_env.PEX_SCRIPT,
      'zip_safe': zip_safe,
      'inherit_path': supplied_env.PEX_INHERIT_PATH,
      'ignore_errors': supplied_env.PEX_IGNORE_ERRORS,
      'always_write_cache': supplied_env.PEX_ALWAYS_CACHE,
    }
    # Filter out empty entries not explicitly set in the environment.
    return cls(info=dict((k, v) for (k, v) in pex_info.items() if v is not None))
  @classmethod
  def _parse_requirement_tuple(cls, requirement_tuple):
    # Accepts either the modern string form or the deprecated pre-0.8.x
    # 3-tuple form (in which case only the requirement string is kept).
    if isinstance(requirement_tuple, (tuple, list)):
      if len(requirement_tuple) != 3:
        raise ValueError('Malformed PEX requirement: %r' % (requirement_tuple,))
      # pre 0.8.x requirement type:
      warnings.warn('Attempting to use deprecated PEX feature.  Please upgrade past PEX 0.8.x.')
      return requirement_tuple[0]
    elif isinstance(requirement_tuple, compatibility_string):
      return requirement_tuple
    raise ValueError('Malformed PEX requirement: %r' % (requirement_tuple,))
  def __init__(self, info=None):
    """Construct a new PexInfo.  This should not be used directly."""
    if info is not None and not isinstance(info, dict):
      raise ValueError('PexInfo can only be seeded with a dict, got: '
                       '%s of type %s' % (info, type(info)))
    self._pex_info = info or {}
    # Map of distribution name (path in the internal cache) -> cache key (sha1).
    self._distributions = self._pex_info.get('distributions', {})
    requirements = self._pex_info.get('requirements', [])
    if not isinstance(requirements, (list, tuple)):
      raise ValueError('Expected requirements to be a list, got %s' % type(requirements))
    # Requirements are normalized to strings and deduplicated in order.
    self._requirements = OrderedSet(self._parse_requirement_tuple(req) for req in requirements)
  def _get_safe(self, key):
    # Fetch a raw info value; on Python 2 return bytes for compatibility.
    if key not in self._pex_info:
      return None
    value = self._pex_info[key]
    return value.encode('utf-8') if PY2 else value
  @property
  def build_properties(self):
    """Information about the system on which this PEX was generated.

    :returns: A dictionary containing metadata about the environment used to build this PEX.
    """
    return self._pex_info.get('build_properties', {})
  @build_properties.setter
  def build_properties(self, value):
    if not isinstance(value, dict):
      raise TypeError('build_properties must be a dictionary!')
    # Start from the current build environment, then overlay caller values.
    self._pex_info['build_properties'] = self.make_build_properties()
    self._pex_info['build_properties'].update(value)
  @property
  def zip_safe(self):
    """Whether or not this PEX should be treated as zip-safe.

    If set to false and the PEX is zipped, the contents of the PEX will be unpacked into a
    directory within the PEX_ROOT prior to execution.  This allows code and frameworks depending
    upon __file__ existing on disk to operate normally.

    By default zip_safe is True.  May be overridden at runtime by the $PEX_FORCE_LOCAL environment
    variable.
    """
    return self._pex_info.get('zip_safe', True)
  @zip_safe.setter
  def zip_safe(self, value):
    # Coerced to bool so PEX-INFO always serializes a true/false value.
    self._pex_info['zip_safe'] = bool(value)
  @property
  def pex_path(self):
    """A colon separated list of other pex files to merge into the runtime environment.

    This pex info property is used to persist the PEX_PATH environment variable into the pex info
    metadata for reuse within a built pex.
    """
    return self._pex_info.get('pex_path')
  @pex_path.setter
  def pex_path(self, value):
    self._pex_info['pex_path'] = value
  @property
  def inherit_path(self):
    """Whether or not this PEX should be allowed to inherit system dependencies.

    By default, PEX environments are scrubbed of all system distributions prior to execution.
    This means that PEX files cannot rely upon preexisting system libraries.

    By default inherit_path is False.  This may be overridden at runtime by the $PEX_INHERIT_PATH
    environment variable.
    """
    return self._pex_info.get('inherit_path', False)
  @inherit_path.setter
  def inherit_path(self, value):
    self._pex_info['inherit_path'] = bool(value)
  @property
  def ignore_errors(self):
    # Whether unresolvable dependencies should be ignored (default False).
    return self._pex_info.get('ignore_errors', False)
  @ignore_errors.setter
  def ignore_errors(self, value):
    self._pex_info['ignore_errors'] = bool(value)
  @property
  def code_hash(self):
    # sha1 hash of all names/code in the archive; None when unset.
    return self._pex_info.get('code_hash')
  @code_hash.setter
  def code_hash(self, value):
    self._pex_info['code_hash'] = value
  @property
  def entry_point(self):
    # Entry point into this pex; bytes on Python 2 (see _get_safe).
    return self._get_safe('entry_point')
  @entry_point.setter
  def entry_point(self, value):
    self._pex_info['entry_point'] = value
  @property
  def script(self):
    # Script to execute in this pex environment; mutually exclusive with
    # entry_point per the class docstring.
    return self._get_safe('script')
  @script.setter
  def script(self, value):
    self._pex_info['script'] = value
  def add_requirement(self, requirement):
    # Requirements are stored as strings; OrderedSet dedupes while keeping order.
    self._requirements.add(str(requirement))
@property
def requirements(self):
return self |
graywizardx/slacksocket | slacksocket/webclient.py | Python | mit | 1,363 | 0.000734 | import logging
import requests
import slacksocket.errors as errors
log = logging.getLogger('slacksocket')
class WebClient(requests.Session):
    """ Minimal client for connecting to Slack web API """

    def __init__(self, token):
        self._token = token
        super(WebClient, self).__init__()

    def get(self, url, method='GET', max_attempts=3, **params):
        """Call a Slack web API endpoint and return the decoded JSON body.

        Retries up to max_attempts times while Slack reports
        'migration_in_progress'; raises SlackAPIError on HTTP errors,
        API-level errors, or when the retry budget is exhausted, and
        ValueError when max_attempts is negative.
        """
        if max_attempts == 0:
            raise errors.SlackAPIError('Max retries exceeded')
        elif max_attempts < 0:
            message = 'Expected max_attempts >= 0, got {0}'\
                    .format(max_attempts)
            raise ValueError(message)

        # The API token accompanies every request as a query parameter.
        params['token'] = self._token
        res = self.request(method, url, params=params)

        try:
            res.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise errors.SlackAPIError(e)

        rj = res.json()
        if rj['ok']:
            return rj

        # process error: only a team migration is considered transient.
        if rj['error'] == 'migration_in_progress':
            log.info('socket in migration state, retrying')
            # Fix: 'time' was previously unimported here, so this retry
            # path raised NameError instead of sleeping.
            time.sleep(2)
            return self.get(url,
                            method=method,
                            max_attempts=max_attempts - 1,
                            **params)
        else:
            raise errors.SlackAPIError('Error from slack api:\n%s' % res.text)
|
skywind3000/collection | script/pocketsnes_cht.py | Python | mit | 8,764 | 0.007188 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#======================================================================
#
# pocketsnes_cht.py -
#
# Created by skywind on 2020/06/17
# Last Modified: 2020/06/17 20:26:39
#
#======================================================================
from __future__ import print_function, unicode_literals
import sys
import os
import time
import struct
import codecs
#----------------------------------------------------------------------
# CheatItem
#----------------------------------------------------------------------
class CheatItem (object):
    """One SNES cheat: a single RAM address patched with one byte.

    Binary layout is an 8-byte header (flags, value, 24-bit little-endian
    address, saved byte, magic 0xFE 0xFC) followed by a NUL-padded name
    field: 48 bytes for PocketSNES format, 20 bytes for the legacy format.
    """

    def __init__ (self, name = '', address = 0, byte = 0, enable = False):
        self.name = name            # human readable cheat description
        self.address = address      # 24-bit SNES memory address
        self.byte = byte            # byte value written to the address
        self.enable = enable        # whether the cheat is active
        self.saved = False          # True when the original byte was captured
        self.saved_byte = 0         # original byte value at the address

    def encode (self, pocketsnes = True):
        """Serialize this cheat to its binary record form."""
        name_field = 48 if pocketsnes else 20
        flags = 0
        if not self.enable:
            flags |= 4
        if self.saved:
            flags |= 8
        header = struct.pack('<8B', flags, self.byte,
                             (self.address >> 0) & 0xff,
                             (self.address >> 8) & 0xff,
                             (self.address >> 16) & 0xff,
                             self.saved_byte, 254, 252)
        label = self.name
        if not isinstance(label, bytes):
            label = label.encode('utf-8', 'ignore')
        label = label[:name_field].ljust(name_field, b'\x00')
        return header + label

    def decode (self, data, pocketsnes = True):
        """Parse a binary record in place; validates size and magic trailer."""
        if not isinstance(data, bytes):
            data = data.encode('ascii', 'ignore')
        name_field = 48 if pocketsnes else 20
        if len(data) != name_field + 8:
            raise ValueError('invalid binary cheat size')
        fields = struct.unpack('<8B', data[:8])
        if fields[6] != 254 or fields[7] != 252:
            raise ValueError('invalid binary cheat data')
        self.address = fields[2] | (fields[3] << 8) | (fields[4] << 16)
        self.byte = fields[1]
        self.saved_byte = fields[5]
        self.saved = bool(fields[0] & 8)
        self.enable = not (fields[0] & 4)
        raw = data[8:]
        nul = raw.find(b'\x00')
        if nul > 0:
            raw = raw[:nul]
        self.name = raw.decode('utf-8', 'ignore')
        return 0

    def code (self):
        """Return the cheat in "aaaaaa=vv" hex notation."""
        return '%06x=%02x' % (self.address, self.byte)

    def __repr__ (self):
        return 'CheatItem(%s, %d, %d, %s)' % (repr(self.name), self.address,
                                              self.byte, repr(self.enable))

    def __str__ (self):
        return '%s:%s' % (self.name, self.code())
#----------------------------------------------------------------------
# CheatFile
#----------------------------------------------------------------------
class CheatFile (object):
    """A list of CheatItem records with load/save support for several cheat
    file formats: snes9x text, PocketSNES binary (new and legacy sizes) and
    Gold Finger text.
    """

    def __init__ (self):
        self.cheats = []

    def __iter__ (self):
        return self.cheats.__iter__()

    def __getitem__ (self, key):
        return self.cheats[key]

    def __len__ (self):
        return len(self.cheats)

    def snes9x_load (self, filename):
        """Parse a snes9x "cheat" text file.

        Only simple single-address codes of the form AAAAAA=VV are kept;
        multi-part ('+') and malformed codes are skipped.
        """
        self.cheats = []
        cheats = {}
        index = -1
        avail = False
        with codecs.open(filename, 'r', encoding = 'utf-8') as fp:
            for line in fp:
                line = line.rstrip('\r\n\t ')
                if not line:
                    continue
                # Count leading whitespace: unindented lines start sections.
                space = 0
                while space < len(line):
                    if line[space].isspace():
                        space += 1
                    else:
                        break
                if space == 0:
                    if line == 'cheat':
                        index += 1
                        avail = True
                    else:
                        avail = False
                elif avail:
                    text = line.strip('\r\n\t ')
                    if not text:
                        continue
                    if index not in cheats:
                        cheats[index] = {}
                    if text == 'enable':
                        cheats[index]['enable'] = True
                    elif ':' in text:
                        key, _, val = text.partition(':')
                        key = key.strip()
                        val = val.strip()
                        cheats[index][key] = val
        size = index
        for i in range(size + 1):
            if i not in cheats:
                continue
            ni = cheats[i]
            if 'name' not in ni:
                continue
            if 'code' not in ni:
                continue
            code = ni['code']
            if '+' in code:
                continue
            if '=' not in code:
                continue
            if '?' in code:
                code = code[:code.find('?')]
            if len(code) != 9:
                continue
            if code[6] != '=':
                continue
            address = int(code[:6], 16)
            byte = int(code[7:9], 16)
            enable = ni.get('enable', False) and True or False
            cc = CheatItem(ni['name'].strip(), address, byte, enable)
            self.cheats.append(cc)
        return 0

    def snes9x_save (self, filename):
        """Write the cheats in snes9x text format."""
        with codecs.open(filename, 'w', encoding = 'utf-8') as fp:
            for cheat in self.cheats:
                fp.write('cheat\n')
                # Fix: the name line previously lacked its newline, fusing
                # "name:" and "code:" onto one line and breaking round-trips.
                fp.write(' name: %s\n'%cheat.name)
                fp.write(' code: %s\n'%(cheat.code(),))
                if cheat.enable:
                    fp.write(' enable\n')
                fp.write('\n')
        return 0

    def pocketsnes_load (self, filename, legacy = False):
        """Read fixed-size binary records (56 bytes new, 28 bytes legacy)."""
        self.cheats = []
        newfmt = (not legacy) and True or False
        namesize = newfmt and 48 or 20
        cheatsize = namesize + 8
        with open(filename, 'rb') as fp:
            while True:
                data = fp.read(cheatsize)
                if len(data) == cheatsize:
                    cc = CheatItem()
                    cc.decode(data, newfmt)
                    self.cheats.append(cc)
                else:
                    break
        return 0

    def pocketsnes_save (self, filename, legacy = False):
        """Write the cheats as concatenated binary records."""
        with open(filename, 'wb') as fp:
            for cheat in self.cheats:
                data = cheat.encode((not legacy) and True or False)
                fp.write(data)
        return 0

    def goldfinger_save (self, filename):
        """Write cheats as "aaaaaa-vv-ss #name" Gold Finger lines."""
        with codecs.open(filename, 'w', encoding = 'utf-8') as fp:
            for cc in self.cheats:
                text = '%06x-%02x-%02x'%(cc.address, cc.byte, cc.saved_byte)
                fp.write('%s #%s\n'%(text, cc.name))
        return 0

    def goldfinger_load (self, filename):
        # Not implemented yet.
        return -1

    def load (self, filename):
        """Auto-detect the file format and dispatch to the matching loader."""
        with open(filename, 'rb') as fp:
            data = fp.read()
        mark = b'\xfe\xfc'
        if data[6:8] == mark:
            # Magic trailer at record offset 6-7: candidate binary file.
            # Distance between consecutive markers reveals the record size.
            p2 = data.find(mark, 10)
            if p2 >= 0:
                size = p2 - 6
            else:
                size = len(data)
            if size in (28, 48 + 8):
                return self.pocketsnes_load(filename, (size == 28))
        if b'#' in data:
            return self.goldfinger_load(filename)
        return self.snes9x_load(filename)
#----------------------------------------------------------------------
# testing suite
#----------------------------------------------------------------------
if __name__ == '__main__':
    def test1():
        # Encode one CheatItem and decode the bytes into a second instance,
        # printing both; uses the external "ascmini" helper for a hex dump.
        c1 = CheatItem('hello', 12345, 99)
        data = c1.encode()
        print(c1)
        c2 = CheatItem('life', 11, 2)
        c2.decode(data)
        print(c2)
        print(repr(c2))
        import ascmini
        ascmini.utils.print_binary(data)
        return 0
def test2():
cf = CheatFile()
cf.snes9x_load('d:/games/emulator |
mrshu/err | tests/repo_manager_tests.py | Python | gpl-3.0 | 4,299 | 0.002559 | import tempfile
import shutil
import os
import pytest
from errbot import repo_manager
from errbot.storage.memory import MemoryStoragePlugin
assets = os.path.join(os.path.dirname(__file__), 'assets')
@pytest.fixture
def plugdir_and_storage(request):
plugins_dir = tempfile.mkdtemp()
storage_plugin = MemoryStoragePlugin('repomgr')
def on_finish():
shutil.rmtree(plugins_dir)
request.addfinalizer(on_finish)
return plugins_dir, storage_plugin
def test_index_population(plugdir_and_storage):
plugdir, storage = plugdir_and_storage
manager = repo_manager.BotRepoManager(storage,
plugdir,
(os.path.join(assets, 'repos', 'simple.json'),))
manager.index_update()
index_entry = manager[repo_manager.REPO_INDEX]
assert repo_manager.LAST_UPDATE in index_entry
assert 'pluginname1' in index_entry['name1/err-reponame1']
assert 'pluginname2' in index_entry['name2/err-reponame2']
def test_index_merge(plugdir_and_storage):
plugdir, storage = plugdir_and_storage
manager = repo_manager.BotRepoManager(storage,
plugdir,
(os.path.join(assets, 'repos', 'b.json'),
os.path.join(assets, 'repos', 'a.json'),))
manager.index_update()
index_entry = manager[repo_manager.REPO_INDEX]
# First they should be all here
assert 'pluginname1' in index_entry['name1/err-reponame1']
assert 'pluginname2' in index_entry['name2/err-reponame2']
assert 'pluginname3' in index_entry['name3/err-reponame3']
# then it must be the correct one of the overriden one
assert index_entry['name2/err-reponame2']['pluginname2']['name'] == 'NewPluginName2'
def test_reverse_merge(plugdir_and_storage):
plugdir, storage = plugdir_and_storage
manager = repo_manager.BotRepoManager(storage,
plugdir,
(os.path.join(assets, 'repos', 'a.json'),
os.path.join(assets, 'repos', 'b.json'),))
manager.index_update()
index_entry = manager[repo_manager.REPO_INDEX]
assert not index_entry['name2/err-reponame2']['pluginname2']['name'] == 'NewPluginName2'
def test_no_update_if_one_fails(plugdir_and_storage):
plugdir, storage = plugdir_and_storage
manager = repo_manager.BotRepoManager(storage,
plugdir,
(os.path.join(assets, 'repos', 'a.json'),
os.path.join(assets | , 'repos', 'doh.json'),))
manager.index_update()
assert repo_manager.REPO_INDEX not in manager
def test_tokenization():
e = {
"python": "2+",
"repo": "https://github.com/name/err-reponame1",
"path": "/plugin1.plug",
"avatar_url": "https://avatars.githubusercont | ent.com/u/588833?v=3",
"name": "PluginName1",
"documentation": "docs1"
}
words = {'https',
'com',
'name',
'err',
'docs1',
'reponame1',
'plug',
'2',
'plugin1',
'avatars',
'github',
'githubusercontent',
'u',
'v',
'3',
'588833',
'pluginname1'
}
assert repo_manager.tokenizeJsonEntry(e) == words
def test_search(plugdir_and_storage):
plugdir, storage = plugdir_and_storage
manager = repo_manager.BotRepoManager(storage,
plugdir,
(os.path.join(assets, 'repos', 'simple.json'),))
a = [p for p in manager.search_repos('docs2')]
assert len(a) == 1
assert a[0].name == 'pluginname2'
a = [p for p in manager.search_repos('zorg')]
assert len(a) == 0
a = [p for p in manager.search_repos('plug')]
assert len(a) == 2
def test_git_url_name_guessing():
assert repo_manager.human_name_for_git_url('https://github.com/errbotio/err-imagebot.git') \
== 'errbotio/err-imagebot'
|
martijnvermaat/rpclib | src/rpclib/test/test_service.py | Python | lgpl-2.1 | 8,487 | 0.003064 | #!/usr/bin/env python
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
#
# Most of the service tests are performed through the interop tests.
#
import datetime
import unittest
from lxml import etree
from rpclib.application import Application
from rpclib.auxproc.thread import ThreadAuxProc
from rpclib.auxproc.sync import SyncAuxProc
from rpclib.decorator import rpc
from rpclib.decorator import srpc
from rpclib.interface.wsdl import Wsdl11
from rpclib.model.complex import Array
from rpclib.model.complex import ComplexModel
from rpclib.model.primitive import DateTime
from rpclib.model.primitive import Float
from rpclib.model.primitive import Integer
from rpclib.model.primitive import String
from rpclib.protocol.soap import Soap11
from rpclib.protocol.http import HttpRpc
from rpclib.server.null import NullServer
from rpclib.server.wsgi import WsgiApplication
from rpclib.service import ServiceBase
Application.transport = 'test'
class Address(ComplexModel):
    """A postal address with geolocation, used as a nested complex type."""
    # Repaired: the namespace string was garbled by a stray separator.
    __namespace__ = "TestService"

    street = String
    city = String
    zip = Integer
    since = DateTime
    laditude = Float  # (sic) kept as-is: renaming would change the generated WSDL
    longitude = Float
class Person(ComplexModel):
    """A person owning an array of Address values; exercises nested arrays."""
    # Repaired: the namespace string was garbled by a stray separator.
    __namespace__ = "TestService"

    name = String
    birthdate = DateTime
    age = Integer
    addresses = Array(Address)
    titles = Array(String)
class Request(ComplexModel):
    # Simple two-field input message type used by the interop tests.
    __namespace__ = "TestService"
    param1 = String
    param2 = Integer
class Response(ComplexModel):
    # Single-field output message type paired with Request.
    __namespace__ = "TestService"
    param1 = Float
class TypeNS1(ComplexModel):
    # Lives in a sub-namespace distinct from TypeNS2 to exercise
    # multi-namespace WSDL generation.
    __namespace__ = "TestService.NS1"
    s = String
    i = Integer
class TypeNS2(ComplexModel):
    # Second sub-namespace counterpart of TypeNS1.
    __namespace__ = "TestService.NS2"
    d = DateTime
    f = Float
class MultipleNamespaceService(ServiceBase):
    # Service whose single operation mixes parameter types from two
    # different XML namespaces.
    @rpc(TypeNS1, TypeNS2)
    def a(ctx, t1, t2):
        return "OK"
class TestService(ServiceBase):
    # Exercises a variety of decorator options: plain rpc, async,
    # callback, and reserved-word parameter renaming.

    @rpc(String, _returns=String)
    def aa(ctx, s):
        # Plain echo operation.
        return s

    @rpc(String, Integer, _returns=DateTime)
    def a(ctx, s, i):
        return datetime.datetime.now()

    @rpc(Person, String, Address, _returns=Address)
    def b(ctx, p, s, a):
        return Address()

    @rpc(Person, isAsync=True)
    def d(ctx, Person):
        pass

    @rpc(Person, isCallback=True)
    def e(ctx, Person):
        pass

    # Python keywords cannot be used as parameter names, so the decorator
    # maps the underscored Python names onto the wire-level names
    # 'from', 'self', 'import' and 'return'.
    @rpc(String, String, String, _returns=String,
         _in_variable_names={'_from': 'from', '_self': 'self',
                             '_import': 'import'},
         _out_variable_name="return")
    def f(ctx, _from, _self, _import):
        return '1234'
class MultipleReturnService(ServiceBase):
    # One operation declaring three return values; exercised by TestMultiple.
    @rpc(String, _returns=(String, String, String))
    def multi(ctx, s):
        return s, 'a', 'b'
class TestSingle(unittest.TestCase):
    """Checks the WSDL generated for a single-service application."""

    def setUp(self):
        self.app = Application([TestService], 'tns', Soap11(), Soap11())
        self.app.transport = 'null.rpclib'
        self.srv = TestService()

        wsdl = Wsdl11(self.app.interface)
        wsdl.build_interface_document('URL')
        self.wsdl_str = wsdl.get_interface_document()
        self.wsdl_doc = etree.fromstring(self.wsdl_str)

    def test_portypes(self):
        porttype = self.wsdl_doc.find('{http://schemas.xmlsoap.org/wsdl/}portType')
        # One portType operation per public method.
        # (assertEquals is a deprecated alias; use assertEqual.)
        self.assertEqual(
            len(self.srv.public_methods), len(porttype.getchildren()))

    def test_override_param_names(self):
        for n in ['self', 'import', 'return', 'from']:
            # BUG FIX: the failure message previously lacked the '% n'
            # interpolation, so it always printed the literal '%s'.
            assert n in self.wsdl_str, '"%s" not in self.wsdl_str' % n
class TestMultiple(unittest.TestCase):
    # Covers services whose operations declare more than one return value.

    def setUp(self):
        self.app = Application([MultipleReturnService], 'tns', Soap11(), Soap11())
        self.app.transport = 'none'
        self.wsdl = Wsdl11(self.app.interface)
        self.wsdl.build_interface_document('URL')

    def test_multiple_return(self):
        message_class = list(MultipleReturnService.public_methods.values())[0].out_message
        message = message_class()

        # The out message must wrap all three declared return values.
        self.assertEquals(len(message._type_info), 3)

        sent_xml = etree.Element('test')
        self.app.out_protocol.to_parent_element(message_class, ('a', 'b', 'c'),
                                    MultipleReturnService.get_tns(), sent_xml)
        sent_xml = sent_xml[0]
        print(etree.tostring(sent_xml, pretty_print=True))

        # Round-trip: serialized values must come back unchanged.
        response_data = self.app.out_protocol.from_element(message_class, sent_xml)
        self.assertEquals(len(response_data), 3)
        self.assertEqual(response_data[0], 'a')
        self.assertEqual(response_data[1], 'b')
        self.assertEqual(response_data[2], 'c')
class MultipleMethods1(ServiceBase):
    # Deliberately shares the operation name 'multi' with MultipleMethods2;
    # registering both services in one Application must fail.
    @srpc(String)
    def multi(s):
        return "%r multi 1" % s
class MultipleMethods2(ServiceBase):
    # Counterpart of MultipleMethods1 with the same operation name.
    @srpc(String)
    def multi(s):
        return "%r multi 2" % s
class TestMultipleMethods(unittest.TestCase):
def test_single_method(self):
try:
app = Application([MultipleMethods1,MultipleMethods2], 'tns', Soap11(), Soap11())
except ValueError:
pass
else:
raise Exception('must fail.')
def test_simple_aux_nullserver(self):
data = []
class Service(ServiceBase):
@srpc(String)
def call(s):
data.append(s)
class AuxService(ServiceBase):
__aux__ = SyncAuxProc()
@srpc(String)
def call(s):
data.append(s)
app = Application([Service, AuxService], 'tns', Soap11(), Soap11())
server = NullServer(app)
server.service.call("hey")
assert data == ['hey', 'hey']
def test_simple_aux_wsgi(self):
data = []
class Service(ServiceBase):
@srpc(String, _returns=String)
def call(s):
data.append(s)
class AuxService(ServiceBase):
__aux__ = SyncAuxProc()
@srpc(String, _returns=String)
def call(s):
data.append(s)
def start_response(code, headers):
print code, headers
app = Application([Service, AuxService], 'tns', HttpRpc(), HttpRpc())
server = WsgiApplication(app)
server({
'QUERY_STRING': 's=hey',
'PATH_INFO': '/call',
'REQUEST_METHOD': 'GET',
}, start_response, "http://null")
assert data == ['hey', 'hey']
def test_thread_aux_wsgi(self):
import logging
logging.basicConfig(level=logging.DEBUG)
data = set()
class Service(ServiceBase):
@srpc(String, _returns=String)
def call(s):
data.add(s)
class AuxService(ServiceBase):
__aux__ = ThreadAuxProc()
@srpc(String, _returns=String)
def call(s):
data.add(s + "aux")
def start_response(code, headers):
print code, headers
app = Application([Service, AuxService], 'tns', HttpRpc(), HttpRpc())
server = WsgiApplication(app)
server({
'QUERY_STRING': 's=hey',
'PATH_INFO': '/call',
'REQUEST_METHOD': 'GET',
}, start_response, "http://null")
import time
time.sleep(1)
assert data == set(['hey', 'heyaux'])
def test_mixing_primary_and_aux_methods(self):
try:
class Service(ServiceBase):
@srpc(String, _returns=String, _aux=ThreadAuxProc())
def call(s):
pass
@srpc(String |
jawilson/home-assistant | homeassistant/components/tellduslive/entry.py | Python | apache-2.0 | 4,213 | 0 | """Base Entity for all TelldusLive entities."""
from datetime import datetime
import logging
from tellduslive import BATTERY_LOW, BATTERY_OK, BATTERY_UNKNOWN
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_VIA_DEVICE,
DEVICE_DEFAULT_NAME,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers. | entity import DeviceInfo, Entity
from .const import SIGNAL_UPDATE_ENTITY
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATED = "time_last_updated"
class TelldusLiveEntity(Entity):
    """Base class for all Telldus Live entities.

    Wraps a TelldusLive client device and pushes state updates via the
    dispatcher signal SIGNAL_UPDATE_ENTITY.
    """

    def __init__(self, client, device_id):
        """Initialize the entity."""
        self._id = device_id
        self._client = client
        self._name = self.device.name
        self._async_unsub_dispatcher_connect = None

    # Repaired: the method name was garbled ('async | _added_to_hass'),
    # which was a syntax error and broke the HA added-to-hass hook.
    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        _LOGGER.debug("Created device %s", self)
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback
        )

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()

    @callback
    def _update_callback(self):
        """Handle a dispatcher update: refresh the cached name and state."""
        if self.device.name:
            self._name = self.device.name
        self.async_write_ha_state()

    @property
    def device_id(self):
        """Return the id of the device."""
        return self._id

    @property
    def device(self):
        """Return the representation of the device."""
        return self._client.device(self.device_id)

    @property
    def _state(self):
        """Return the state of the device."""
        return self.device.state

    @property
    def should_poll(self):
        """Return the polling state; updates arrive via dispatcher."""
        return False

    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True

    @property
    def name(self):
        """Return name of device."""
        return self._name or DEVICE_DEFAULT_NAME

    @property
    def available(self):
        """Return true if device is not offline."""
        return self._client.is_available(self.device_id)

    @property
    def extra_state_attributes(self):
        """Return the state attributes (battery level, last update time)."""
        attrs = {}
        if self._battery_level:
            attrs[ATTR_BATTERY_LEVEL] = self._battery_level
        if self._last_updated:
            attrs[ATTR_LAST_UPDATED] = self._last_updated
        return attrs

    @property
    def _battery_level(self):
        """Return the battery level of a device."""
        if self.device.battery == BATTERY_LOW:
            return 1
        if self.device.battery == BATTERY_UNKNOWN:
            return None
        if self.device.battery == BATTERY_OK:
            return 100
        return self.device.battery  # Percentage

    @property
    def _last_updated(self):
        """Return the last update of a device."""
        return (
            str(datetime.fromtimestamp(self.device.lastUpdated))
            if self.device.lastUpdated
            else None
        )

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._id

    @property
    def device_info(self) -> DeviceInfo:
        """Return device info."""
        device = self._client.device_info(self.device.device_id)
        device_info = DeviceInfo(
            identifiers={("tellduslive", self.device.device_id)},
            name=self.device.name,
        )
        if (model := device.get("model")) is not None:
            device_info[ATTR_MODEL] = model.title()
        if (protocol := device.get("protocol")) is not None:
            device_info[ATTR_MANUFACTURER] = protocol.title()
        if (client := device.get("client")) is not None:
            device_info[ATTR_VIA_DEVICE] = ("tellduslive", client)
        return device_info
|
Ziqi-Li/bknqgis | numpy/numpy/core/tests/test_multiarray.py | Python | gpl-2.0 | 259,483 | 0.000748 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
from contextlib import contextmanager
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
from .test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order | ="C", align=None):
"""Alloc | ate a new ndarray with aligned memory."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
class TestFlags(object):
    # Checks ndarray.flags behaviour: writeability toggling and layout flags.

    def setup(self):
        self.a = np.arange(10)

    def test_writeable(self):
        mydict = locals()
        # Writes through any route must fail while writeable is False...
        self.a.flags.writeable = False
        assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
        assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        # ...and succeed again once re-enabled.
        self.a.flags.writeable = True
        self.a[0] = 5
        self.a[0] = 0

    def test_otherflags(self):
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        assert_equal(self.a.flags.updateifcopy, False)

    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed byte-wise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)

    def test_void_align(self):
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)
class TestHash(object):
    # see #3793
    # hash() of a numpy scalar integer must equal hash() of the equal
    # Python int, checked at the boundary values of each fixed-width type.
    def test_int(self):
        for st, ut, s in [(np.int8, np.uint8, 8),
                          (np.int16, np.uint16, 16),
                          (np.int32, np.uint32, 32),
                          (np.int64, np.uint64, 64)]:
            for i in range(1, s):
                assert_equal(hash(st(-2**i)), hash(-2**i),
                             err_msg="%r: -2**%d" % (st, i))
                assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (st, i - 1))
                assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (st, i))

                # Unsigned types: clamp the exponent so values stay in range.
                i = max(i - 1, 1)
                assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (ut, i - 1))
                assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raise |
homeworkprod/byceps | tests/unit/services/shop/cart/test_cart_repr.py | Python | bsd-3-clause | 1,558 | 0 | """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from decimal import Decimal
from byceps.database import ge | nerate_uuid
from byceps.services.shop.article.transfer.models import (
Article,
ArticleNumber,
ArticleID,
ArticleNumber,
Arti | cleType,
)
from byceps.services.shop.cart.models import Cart
from byceps.services.shop.shop.transfer.models import ShopID
def test_cart_empty_repr():
    """A cart without items reports zero items in its repr."""
    assert repr(Cart()) == '<Cart(0 items)>'
def test_cart_filled_repr():
    """Each distinct article added becomes one cart item in the repr."""
    article1 = create_article(
        ArticleNumber('a-001'), 'Article #1', Decimal('19.99'), Decimal('0.19')
    )
    article2 = create_article(
        ArticleNumber('a-002'), 'Article #2', Decimal('24.99'), Decimal('0.19')
    )

    cart = Cart()
    cart.add_item(article1, 5)
    # BUG FIX: article2 was created but never used -- the second add_item
    # call re-added article1.  Add the second article as clearly intended.
    cart.add_item(article2, 3)

    assert repr(cart) == '<Cart(2 items)>'
# helpers


def create_article(
    item_number: ArticleNumber,
    description: str,
    price: Decimal,
    tax_rate: Decimal,
) -> Article:
    """Build an Article transfer object with fixed test-friendly defaults."""
    return Article(
        id=ArticleID(generate_uuid()),
        shop_id=ShopID('any-shop'),
        item_number=item_number,
        type_=ArticleType.other,
        description=description,
        price=price,
        tax_rate=tax_rate,
        available_from=None,
        available_until=None,
        total_quantity=99,
        quantity=1,
        max_quantity_per_order=10,
        not_directly_orderable=False,
        separate_order_required=False,
        shipping_required=False,
    )
|
amit-bansil/netsci | robocompviz/trueskill/trueskillhelpers.py | Python | mit | 5,897 | 0.001357 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from contextlib import contextmanager
import functools
import inspect
import logging
import trueskill
from trueskill.backends import available_backends
from trueskill.factorgraph import (Variable, Factor, PriorFactor,
LikelihoodFactor, SumFactor)
from trueskill.mathematics import Gaussian
__all__ = ['substituted_trueskill', 'calc_dynamic_draw_probability',
'factor_graph_logging']
@contextmanager
def substituted_trueskill(*args, **kwargs):
    """Set up the global TrueSkill environment only within the context::

        assert Rating().mu == 25
        with substituted_trueskill(mu=0):
            assert Rating().mu == 0
    """
    env = trueskill.global_env()
    # Start from the current environment's settings...
    names = ('mu', 'sigma', 'beta', 'tau', 'draw_probability', 'backend')
    params = {name: getattr(env, name) for name in names}
    # ...override them positionally in declaration order...
    for index, value in enumerate(args):
        params[names[index]] = value
    # ...and finally let keyword arguments win.
    params.update(kwargs)
    try:
        # Install the substituted environment.
        yield trueskill.setup(**params)
    finally:
        # Restore the previous environment.
        trueskill.setup(env=env)
def calc_dynamic_draw_probability(rating_group1, rating_group2, env=None):
    """Compute the draw probability between two teams from their ratings.

    Builds the prior/likelihood/sum factor-graph layers to obtain each
    team's performance variable, then evaluates the environment's
    draw-probability function on the two team performances.

    :param rating_group1: iterable of ratings of the first team.
    :param rating_group2: iterable of ratings of the second team.
    :param env: a TrueSkill environment; defaults to the global one.
    """
    if env is None:
        # BUG FIX: 'global_env' was called unqualified but never imported,
        # raising NameError; it lives on the trueskill module.
        env = trueskill.global_env()
    team_perf_vars = []
    for rating_group in [rating_group1, rating_group2]:
        team_perf_var = Variable()
        team_perf_vars.append(team_perf_var)
        perf_vars = []
        for rating in rating_group:
            rating_var, perf_var = Variable(), Variable()
            perf_vars.append(perf_var)
            # Prior layer: rating -> rating variable.
            PriorFactor(rating_var, rating, env.tau).down()
            # Likelihood layer: rating variable -> performance variable.
            LikelihoodFactor(rating_var, perf_var, env.beta ** 2).down()
        # Team performance is the sum of the members' performances.
        SumFactor(team_perf_var, perf_vars, [1] * len(perf_vars)).down()
    return env.draw_probability(trueskill.Rating(team_perf_vars[0]),
                                trueskill.Rating(team_perf_vars[1]), env)
@contextmanager
def factor_graph_logging(color=False):
    """In the context, a factor graph prints logs as DEBUG level. It will help
    to follow factor graph running schedule::

        with factor_graph_logging() as logger:
            logger.setLevel(DEBUG)
            logger.addHandler(StreamHandler(sys.stderr))
            rate_1vs1(Rating(), Rating())
    """
    import inspect
    # color mode uses the termcolor module
    if color:
        try:
            from termcolor import colored
        except ImportError:
            raise ImportError('To enable color mode, install termcolor')
    else:
        colored = lambda s, *a, **k: s
    logger = logging.getLogger('TrueSkill')
    # Keep the originals so they can be restored on exit.
    orig_factor_init = Factor.__init__
    orig_variable_set = Variable.set

    def repr_factor(factor):
        return '{0}@{1}'.format(type(factor).__name__, id(factor))

    def repr_gauss(gauss):
        return 'N(mu=%.3f, sigma=%.3f, pi=%r, tau=%r)' % \
               (gauss.mu, gauss.sigma, gauss.pi, gauss.tau)

    def r(val):
        if isinstance(val, Factor):
            return repr_factor(val)
        elif isinstance(val, Gaussian):
            return repr_gauss(val)
        else:
            return repr(val)

    def factor_init(self, *args, **kwargs):
        # Derive a human-readable layer name from the build_*_layer caller.
        frames = inspect.getouterframes(inspect.currentframe())
        layer_builder_name = frames[2][3]
        assert (layer_builder_name.startswith('build_') and
                layer_builder_name.endswith('_layer'))
        self._layer_name = layer_builder_name[6:].replace('_', ' ').title()
        return orig_factor_init(self, *args, **kwargs)

    def variable_set(self, val):
        # Snapshot the value and per-factor messages before the update.
        old_value = Gaussian(pi=self.pi, tau=self.tau)
        old_messages = dict((fac, Gaussian(pi=msg.pi, tau=msg.tau))
                            for fac, msg in self.messages.items())
        delta = orig_variable_set(self, val)
        # inspect outer frames to find which factor/method triggered the set
        # (repaired: two lines here were garbled by stray separators)
        frames = inspect.getouterframes(inspect.currentframe())
        methods = [None, None]
        for frame in frames:
            method = frame[3]
            if method.startswith('update_'):
                methods[0] = method
            elif method in ('up', 'down'):
                methods[1] = method
                break
        factor = frame[0].f_locals['self']
        # helpers for logging
        logs = []
        l = logs.append
        bullet = lambda changed: colored(' * ', 'red') if changed else ' '
        # print layer
        if getattr(logger, '_prev_layer_name', None) != factor._layer_name:
            logger._prev_layer_name = factor._layer_name
            l(colored('[{0}]'.format(factor._layer_name), 'blue'))
        # print factor
        l(colored('<{0}.{1}>'.format(r(factor), methods[1]), 'cyan'))
        # print value
        if old_value == self:
            line = '{0}'.format(r(self))
        else:
            line = '{0} -> {1}'.format(r(old_value), r(self))
        l(bullet(methods[0] == 'update_value') + line)
        # print messages
        fmt = '{0}: {1} -> {2}'.format
        for fac, msg in self.messages.items():
            old_msg = old_messages[fac]
            changed = fac is factor and methods[0] == 'update_message'
            if old_msg == msg:
                line = '{0}: {1}'.format(r(fac), r(msg))
            else:
                line = '{0}: {1} -> {2}'.format(r(fac), r(old_msg), r(msg))
            l(bullet(changed) + line)
        # BUG FIX: 'map(logger.debug, logs)' is lazy on Python 3, so the
        # buffered logs were never emitted; iterate explicitly (this also
        # behaves identically on Python 2).
        for log_line in logs:
            logger.debug(log_line)
        return delta

    try:
        Factor.__init__, Variable.set = factor_init, variable_set
        yield logger
    finally:
        Factor.__init__, Variable.set = orig_factor_init, orig_variable_set
|
ProjetPP/PPP-CAS | ppp_cas/calchasLex.py | Python | mit | 876 | 0.026256 | import ply.lex as lex
tokens = (
'NUMBER',
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'LPAREN',
'RPAREN',
'COMMA',
'ID',
'EXCL',
'POW',
'DTIMES',
'MOD',
'EQ',
'APOSTROPHE',
'AND',
'OR',
'NOT',
)
t_DTIMES = r'\*\*'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = r','
t_EXCL | = r'!'
t_POW = r'\^'
t_MOD = r'%'
t_EQ = r'='
t_APOSTROPHE = r'\''
t_AND = r'&'
t_OR = r'\|'
t_NOT = r'~'
# PLY token rule for numeric literals (decimals and integers).
# NOTE: in PLY the DOCSTRING is the regular expression, so it must not be
# edited as documentation.
def t_NUMBER(t):
    r"""([0-9]*\.[0-9]+|[0-9]+)"""
    return t
# PLY token rule for identifiers; the docstring is the regex (see t_NUMBER).
def t_ID(t):
    r"""[a-zA-Z_][a-zA-Z0-9_]*"""
    return t
# PLY token rule for newlines; the docstring is the regex.  Keeps the
# lexer's line counter accurate for error reporting.
def t_newline(t):
    r"""\n+"""
    # Repaired: the attribute access was garbled ('t.va | lue').
    t.lexer.lineno += len(t.value)
# Characters silently skipped between tokens.
t_ignore = ' \t'


# On an illegal character, skip it and continue lexing.
def t_error(t):
    t.lexer.skip(1)


# Build the lexer from the rules defined in this module.
calchasLexer = lex.lex()
|
Informatorio/Sis_LegajosV2 | courses/migrations/0003_course_usuario.py | Python | gpl-3.0 | 692 | 0.001445 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-12-05 20:53
from __future__ import unicode_literals
from django.conf import set | tings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a one-to-one 'usuario' link from Course to the configured user model.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('courses', '0002_auto_20161204_2330'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='usuario',
            # default='' only backfills existing rows during the migration;
            # preserve_default=False drops it from the model afterwards.
            field=models.OneToOneField(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
|
Mog333/DeepRL | network/DeepQNetwork.py | Python | mit | 7,211 | 0.012342 | """
Author: Robert Post
Based on code from Nathan Sprague
from: https://github.com/spragunr/deep_q_rl
"""
import DeepNetworks
import lasagne
import numpy as np
import theano
import theano.tensor as T
import cPickle
import imp
class DeepQNetwork(object):
    """Deep Q-Network built on Theano/Lasagne.

    Compiles a training function minimizing a clipped (Huber-style) TD
    loss and an inference function for Q-values.  Optionally keeps a
    delayed copy of the network (target network) that is refreshed every
    ``networkUpdateDelay`` updates.
    """

    def __init__(self, batchSize, numFrames, inputHeight, inputWidth, numActions,
                 discountRate, learningRate, rho, rms_epsilon, momentum,
                 networkUpdateDelay, useSARSAUpdate, kReturnLength,
                 networkType="conv", updateRule="deepmind_rmsprop",
                 batchAccumulator="sum", clipDelta=1.0, inputScale=255.0):
        self.batchSize = batchSize
        self.numFrames = numFrames
        self.inputWidth = inputWidth
        self.inputHeight = inputHeight
        self.inputScale = inputScale
        self.numActions = numActions
        self.discountRate = discountRate
        self.learningRate = learningRate
        self.rho = rho
        self.rms_epsilon = rms_epsilon
        self.momentum = momentum
        self.networkUpdateDelay = networkUpdateDelay
        self.useSARSAUpdate = useSARSAUpdate
        self.kReturnLength = kReturnLength
        self.networkType = networkType
        self.updateRule = updateRule
        self.batchAccumulator = batchAccumulator
        self.clipDelta = clipDelta
        self.updateCounter = 0

        # Symbolic inputs for one training batch.
        states = T.tensor4("states")
        nextStates = T.tensor4("nextStates")
        rewards = T.col("rewards")
        actions = T.icol("actions")
        nextActions = T.icol("nextActions")
        terminals = T.icol("terminals")

        # Shared buffers the compiled functions read their batch from.
        self.statesShared = theano.shared(np.zeros((self.batchSize, self.numFrames, self.inputHeight, self.inputWidth), dtype=theano.config.floatX))
        self.nextStatesShared = theano.shared(np.zeros((self.batchSize, self.numFrames, self.inputHeight, self.inputWidth), dtype=theano.config.floatX))
        self.rewardsShared = theano.shared(np.zeros((self.batchSize, 1), dtype=theano.config.floatX), broadcastable=(False, True))
        self.actionsShared = theano.shared(np.zeros((self.batchSize, 1), dtype='int32'), broadcastable=(False, True))
        self.nextActionsShared = theano.shared(np.zeros((self.batchSize, 1), dtype='int32'), broadcastable=(False, True))
        self.terminalsShared = theano.shared(np.zeros((self.batchSize, 1), dtype='int32'), broadcastable=(False, True))

        self.qValueNetwork = DeepNetworks.buildDeepQNetwork(
            self.batchSize, self.numFrames, self.inputHeight, self.inputWidth, self.numActions, self.networkType)
        qValues = lasagne.layers.get_output(self.qValueNetwork, states / self.inputScale)

        if self.networkUpdateDelay > 0:
            # Separate, periodically-synced target network.
            self.nextQValueNetwork = DeepNetworks.buildDeepQNetwork(
                self.batchSize, self.numFrames, self.inputHeight, self.inputWidth, self.numActions, self.networkType)
            self.resetNextQValueNetwork()
            nextQValues = lasagne.layers.get_output(self.nextQValueNetwork, nextStates / self.inputScale)
        else:
            # Single network: block gradients through the bootstrap target.
            nextQValues = lasagne.layers.get_output(self.qValueNetwork, nextStates / self.inputScale)
            nextQValues = theano.gradient.disconnected_grad(nextQValues)

        # k-step bootstrap target; SARSA uses the taken next action,
        # Q-learning the greedy one.  (Repaired two garbled lines here.)
        if self.useSARSAUpdate:
            target = rewards + terminals * (self.discountRate ** self.kReturnLength) * nextQValues[T.arange(self.batchSize), nextActions.reshape((-1,))].reshape((-1, 1))
        else:
            target = rewards + terminals * (self.discountRate ** self.kReturnLength) * T.max(nextQValues, axis=1, keepdims=True)

        targetDifference = target - qValues[T.arange(self.batchSize), actions.reshape((-1,))].reshape((-1, 1))

        # Huber-style loss: quadratic within clipDelta, linear outside.
        quadraticPart = T.minimum(abs(targetDifference), self.clipDelta)
        linearPart = abs(targetDifference) - quadraticPart

        if self.batchAccumulator == "sum":
            loss = T.sum(0.5 * quadraticPart ** 2 + self.clipDelta * linearPart)
        elif self.batchAccumulator == "mean":
            loss = T.mean(0.5 * quadraticPart ** 2 + self.clipDelta * linearPart)
        else:
            raise ValueError("Bad Network Accumulator. {sum, mean} expected")

        networkParameters = lasagne.layers.helper.get_all_params(self.qValueNetwork)

        if self.updateRule == "deepmind_rmsprop":
            updates = DeepNetworks.deepmind_rmsprop(loss, networkParameters, self.learningRate, self.rho, self.rms_epsilon)
        elif self.updateRule == "rmsprop":
            updates = lasagne.updates.rmsprop(loss, networkParameters, self.learningRate, self.rho, self.rms_epsilon)
        elif self.updateRule == "sgd":
            updates = lasagne.updates.sgd(loss, networkParameters, self.learningRate)
        else:
            raise ValueError("Bad update rule. {deepmind_rmsprop, rmsprop, sgd} expected")

        if self.momentum > 0:
            # BUG FIX: was 'updates.lasagne.updates.apply_momentum(updates,
            # None, self.momentum)', which raises AttributeError and would
            # discard the result anyway; the momentum-wrapped update
            # dictionary must be assigned back.
            updates = lasagne.updates.apply_momentum(updates, None, self.momentum)

        lossGivens = {
            states: self.statesShared,
            nextStates: self.nextStatesShared,
            rewards: self.rewardsShared,
            actions: self.actionsShared,
            nextActions: self.nextActionsShared,
            terminals: self.terminalsShared
        }

        self.__trainNetwork = theano.function([], [loss, qValues], updates=updates, givens=lossGivens, on_unused_input='warn')
        self.__computeQValues = theano.function([], qValues, givens={states: self.statesShared})

    def trainNetwork(self, stateBatch, actionBatch, rewardBatch, nextStateBatch, nextActionBatch, terminalBatch):
        """Run one gradient step on a batch and return the RMS of the loss."""
        self.statesShared.set_value(stateBatch)
        self.nextStatesShared.set_value(nextStateBatch)
        self.actionsShared.set_value(actionBatch)
        self.nextActionsShared.set_value(nextActionBatch)
        self.rewardsShared.set_value(rewardBatch)
        self.terminalsShared.set_value(terminalBatch)

        # Periodically refresh the frozen target network.
        if self.networkUpdateDelay > 0 and self.updateCounter % self.networkUpdateDelay == 0:
            self.resetNextQValueNetwork()

        loss, qValues = self.__trainNetwork()
        self.updateCounter += 1
        return np.sqrt(loss)

    def computeQValues(self, state):
        """Return the Q-values for a single state (placed in batch slot 0)."""
        stateBatch = np.zeros((self.batchSize, self.numFrames, self.inputHeight, self.inputWidth), dtype=theano.config.floatX)
        stateBatch[0, ...] = state
        self.statesShared.set_value(stateBatch)
        return self.__computeQValues()[0]

    def chooseAction(self, state, epsilon):
        """Epsilon-greedy action selection over the current Q-values."""
        if np.random.rand() < epsilon:
            return np.random.randint(0, self.numActions)
        qValues = self.computeQValues(state)
        return np.argmax(qValues)

    def resetNextQValueNetwork(self):
        """Copy the online network's parameters into the target network."""
        networkParameters = lasagne.layers.helper.get_all_param_values(self.qValueNetwork)
        lasagne.layers.helper.set_all_param_values(self.nextQValueNetwork, networkParameters)
alexm92/sentry | tests/sentry/api/serializers/test_group.py | Python | bsd-3-clause | 3,998 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from mock import patch
from sentry.api.serializers | import serialize
from sentry.models import (
GroupResolution, GroupResolutionStatus, GroupSnooze, GroupSubscription,
GroupStatus, Release
)
from sentry.testutils import TestCase
class GroupSerializerTest(TestCase):
    """Covers status details and subscription flags in group serialization.

    (Repaired one line that was garbled by a stray separator character.)
    """

    def test_is_ignored_with_expired_snooze(self):
        now = timezone.now().replace(microsecond=0)

        user = self.create_user()
        group = self.create_group(
            status=GroupStatus.IGNORED,
        )
        # A snooze that ended a minute ago: the ignore no longer applies.
        GroupSnooze.objects.create(
            group=group,
            until=now - timedelta(minutes=1),
        )

        result = serialize(group, user)
        assert result['status'] == 'unresolved'
        assert result['statusDetails'] == {}

    def test_is_ignored_with_valid_snooze(self):
        now = timezone.now().replace(microsecond=0)

        user = self.create_user()
        group = self.create_group(
            status=GroupStatus.IGNORED,
        )
        snooze = GroupSnooze.objects.create(
            group=group,
            until=now + timedelta(minutes=1),
        )

        result = serialize(group, user)
        assert result['status'] == 'ignored'
        assert result['statusDetails'] == {'ignoreUntil': snooze.until}

    def test_resolved_in_next_release(self):
        release = Release.objects.create(
            project=self.project,
            version='a',
        )
        user = self.create_user()
        group = self.create_group(
            status=GroupStatus.RESOLVED,
        )
        GroupResolution.objects.create(
            group=group,
            release=release,
        )

        result = serialize(group, user)
        assert result['status'] == 'resolved'
        assert result['statusDetails'] == {'inNextRelease': True}

    def test_resolved_in_next_release_expired_resolution(self):
        release = Release.objects.create(
            project=self.project,
            version='a',
        )
        user = self.create_user()
        group = self.create_group(
            status=GroupStatus.RESOLVED,
        )
        # An already-RESOLVED resolution no longer contributes details.
        GroupResolution.objects.create(
            group=group,
            release=release,
            status=GroupResolutionStatus.RESOLVED,
        )

        result = serialize(group, user)
        assert result['status'] == 'resolved'
        assert result['statusDetails'] == {}

    @patch('sentry.models.Group.is_over_resolve_age')
    def test_auto_resolved(self, mock_is_over_resolve_age):
        mock_is_over_resolve_age.return_value = True

        user = self.create_user()
        group = self.create_group(
            status=GroupStatus.UNRESOLVED,
        )

        result = serialize(group, user)
        assert result['status'] == 'resolved'
        assert result['statusDetails'] == {'autoResolved': True}

    def test_subscribed(self):
        user = self.create_user()
        group = self.create_group()
        GroupSubscription.objects.create(
            user=user,
            group=group,
            project=group.project,
            is_active=True,
        )

        result = serialize(group, user)
        assert result['isSubscribed']

    def test_explicit_unsubscribed(self):
        user = self.create_user()
        group = self.create_group()
        GroupSubscription.objects.create(
            user=user,
            group=group,
            project=group.project,
            is_active=False,
        )

        result = serialize(group, user)
        assert not result['isSubscribed']

    def test_implicit_subscribed(self):
        # No explicit subscription row defaults to subscribed.
        user = self.create_user()
        group = self.create_group()

        result = serialize(group, user)
        assert result['isSubscribed']

    def test_no_user_unsubscribed(self):
        # Anonymous serialization is never subscribed.
        group = self.create_group()

        result = serialize(group)
        assert not result['isSubscribed']
|
sumeetsk/NEXT-1 | next/apps/test_utils.py | Python | apache-2.0 | 5,270 | 0.007211 | """
This file is to provide a unifed testing framework for NEXT.
"""
import requests
import os
import json
import time
import numpy as np
HOSTNAME = os.environ.get('NEXT_BACKEND_GLOBAL_HOST', 'localhost') \
+ ':' + os.environ.get('NEXT_BACKEND_GLOBAL_PORT', '8000')
def initExp(initExp_args_dict, assert_200=True):
url = "http://"+HOSTNAME+"/api/experiment"
response = requests.post(url, json.dumps(initExp_args_dict),
headers={'content-type':'application/json'})
print("POST initExp response =",response.text, response.status_code)
if assert_200: assert response.status_code is 200
initExp_response_dict = json.loads(response.text)
exp_uid = initExp_response_dict['exp_uid']
#################################################
# Test initExperiment
#################################################
url = "http://"+HOSTNAME+"/api/experiment/"+exp_uid
response = requests.get(url)
print "GET experiment response =", response.text, response.status_code
if assert_200: assert response.status_code is 200
initExp_response_dict = json.loads(response.text)
return initExp_response_dict, {'exp_uid': exp_uid}
def getQuery(getQuery_args_dict, assert_200=True, verbose=False):
url = 'http://'+HOSTNAME+'/api/experiment/getQuery'
response,dt = timeit(requests.post)(url, json.dumps(getQuery_args_dict),headers={'content-type':'application/json'})
if verbose:
print "POST getQuery response = ", response.text, response.status_code
if assert_200: assert response.status_code is 200
if verbose:
print "POST getQuery duration = ", dt
query_dict = json.loads(response.text)
return query_dict, dt
def processAnswer(processAnswer_args_dict, assert_200=True, verbose=False):
url = 'http://'+HOSTNAME+'/api/experiment/processAnswer'
if verbose:
print "POST processAnswer args = ", processAnswer_args_dict
response,dt = timeit(requests.post)(url, json.dumps(processAnswer_args_dict), headers={'content-type':'application/json'})
if verbose:
print "POST processAnswer response", response.text, response.status_code
if assert_200: assert response.status_code is 200
if verbose:
print "POST processAnswer duration = ", dt
print
processAnswer_json_response = eval(response.text)
return processAnswer_json_response, dt
def timeit(f):
"""
Refer to next.utils.timeit for further documentation
"""
def timed(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
if type(result)==tuple:
return result + ((te-ts),)
else:
return result,(te-ts)
return timed
def getModel(exp_uid, app_id, supported_alg_ids, alg_list, assert_200=True):
# Test loading the dashboard
dashboard_url = ("http://" + HOSTNAME + "/dashboard"
"/experiment_dashboard/{}/{}".format(exp_uid, app_id))
response = requests.get(dashboard_url)
if assert_200: assert response.status_code is 200
stats_url = ("http://" + HOSTNAME + "/dashboard"
"/get_stats".format(exp_uid, app_id))
args = {'exp_uid': exp_uid, 'args': {'params': {'alg_label':
supported_alg_ids[0]}}}
args = {'exp_uid': exp_uid, 'args': {'params': {}}}
alg_label = alg_list[0]['alg_label']
params = {'api_activity_histogram': {},
'compute_duration_multiline_plot': {'task': 'getQuery'},
'compute_duration_detailed_stacked_area_plot': {'alg_label': alg_label, 'task': 'getQuery'},
'response_time_histogram': {'alg_label': alg_label},
'network_delay_histogram': {'alg_label': alg_label}}
for stat_id in ['api_activity_histogram',
'compute_duration_multiline_plot',
'compute_duration_detailed_stacked_area_plot',
'response_time_histogram',
'network_delay_histogram']:
args['args']['params'] = params[stat_id]
args['args']['stat_id'] = stat_id
response = requests.post(stats_url, json=args)
if assert_200: assert response.status_code is 200
def getExp(exp_uid, assert_200=True):
url = "http://"+HOSTNAME+"/api/experiment/"+exp_uid
r | esponse = requests.get(url)
print "GET experiment response =",response.text, response.status_code
if assert_200: assert response.status_code is 200
initExp_response_dict = json.loads(response.text)
return initExp_response_dict
def forma | t_times(getQuery_times, processAnswer_times, total_pulls,
participant_uid):
processAnswer_times.sort()
getQuery_times.sort()
return_str = '%s \n\t getQuery\t : %f (5), %f (50), %f (95)\n\t processAnswer\t : %f (5), %f (50), %f (95)\n' % (participant_uid,getQuery_times[int(.05*total_pulls)],getQuery_times[int(.50*total_pulls)],getQuery_times[int(.95*total_pulls)],processAnswer_times[int(.05*total_pulls)],processAnswer_times[int(.50*total_pulls)],processAnswer_times[int(.95*total_pulls)])
return return_str
def response_delay(std=0.05, mean=0.1):
ts = time.time()
sleep_time = np.abs(np.random.randn()*std + mean)
time.sleep(sleep_time)
return ts
|
colloquium/spacewalk | backend/server/test/unit-test/rhnSQL/test_executemany.py | Python | gpl-2.0 | 1,833 | 0.007092 | #!/usr/bin/python
# Copyright (c) 2005--2010 Red Hat, Inc.
#
#
#
# $Id$
raise Exception("""
This test is no more valid; see the bug
https://bugzilla.redhat.com/show_bug.cgi?id=423351
""")
import os
import unittest
from spacewalk.server import rhn | SQL
DB = 'rhnuser/rhnuser@webdev'
class ExecutemanyTest(unittest.TestCase):
def setUp(self):
self.table_name = "misatest_%d" % os.getpid()
rhnSQL.initDB(DB)
self._cleanup()
rhnSQL.execute("create table %s (id int, val varchar2(10))" %
self.table_name)
def _cleanup(self):
try:
rhn | SQL.execute("drop table %s" % self.table_name)
except rhnSQL.SQLStatementPrepareError:
pass
def tearDown(self):
self._cleanup()
rhnSQL.commit()
def test_executemany(self):
"""
Tests the case of passing an integer as a value into a VARCHAR2 column
(executemany makes it more interesting because the driver generally
verifies the param types; passing a string and an Int takes it one
step further)
"""
h = rhnSQL.prepare("""
insert into %s (id, val) values (:id, :val)
""" % self.table_name)
params = {
'id' : [1, 2],
'val' : ['', 3],
}
apply(h.executemany, (), params)
h = rhnSQL.prepare("select id, val from %s" % self.table_name)
h.execute()
rows = h.fetchall_dict()
self.assertEqual(len(rows), 2)
v_id, v_val = rows[0]['id'], rows[0]['val']
self.assertEqual(v_id, 1)
self.assertEqual(v_val, None)
v_id, v_val = rows[1]['id'], rows[1]['val']
self.assertEqual(v_id, 2)
self.assertEqual(v_val, '3')
if __name__ == '__main__':
unittest.main()
|
research-team/NEUCOGAR | NEST/cube/noradrenaline/scripts-2/keys.py | Python | gpl-2.0 | 700 | 0.017143 | # Keys for connection type
GABA = 0
Glu = 1
ACh = 2
NA_ex = 3
NA_in = 4
DA_ex = 3
DA_in = 4
# Keys for synapse models
model = 0
basic_weight = 1
# Keys for parts dictionary
k_outer = 'Outer ID'
k_outer_ids = 'Outer children IDs'
k_inner = 'Inner ID'
k_inner_ids = 'Inner children IDs'
k_name = 'Name'
k_NN = 'NN'
k_NN_inner = 'Inner NN'
k_NN_outer = 'Outer NN'
k_mod | el = 'Model'
# Synapse models
glu_synapse = 'glu_synapse'
gaba_syna | pse = 'gaba_synapse'
ach_synapse = 'ach_synapse'
nora_synapse_ex = 'nora_synapse_ex'
nora_synapse_in = 'nora_synapse_in'
gen_static_syn = 'noise_conn'
#########dopa
dopa_synapse_ex = 'dopa_synapse_ex'
dopa_synapse_in = 'dopa_synapse_in' |
mfnch/pyrtist | pyrtist/examples/arcs.py | Python | lgpl-2.1 | 1,134 | 0.005291 | #!PYRTIST:VERSION:0:0:1
from pyrtist.lib2d import Point, Tri
#!PYRTIST:REFPOINTS:BEGIN
bbox1 = Point(0, 50); bbox2 = Point(100, 0)
p1 = Point(25.7575757576, 30.3787878788)
p2 = Point(19.814314022, 27.0345941837)
p3 = Point(27.7318108704, 19.2297513487)
p4 = Point(47.8787878788, 18.4090909091)
p5 = Point(67.7272727273, 24.7727272727)
p6 = Point(49.3939393939, 36.2878787879)
p7 = Point(28.4848484848, 38.8636363636); p8 = Point(40.4545454545, 42.5)
p9 = Point(38.9393939394, 35.98 | 48484848)
#!PYRTIST:REFPOINTS:END
# Example showing how to draw arcs and compute their bounding box.
from pyrtist.lib2d import *
w = Window()
w << BBox(bbox1, bbox2)
# Define three arcs.
arc1 = Arc(Through(p1, p2, p3), Close.no)
arc2 = Arc(Through(p4, p5, p6), Color.red, StrokeStyle(1, Color.blue, Cap.round))
arc3 = Arc(Through(p7, p8, p9))
# Draw them.
w << Stroke(StrokeStyle(0.5), Path(arc1)) << arc2 << arc3
# Find the bounding box of arc3 and draw it as a rectangle.
bb | = BBox(arc3)
w << Stroke(0.2, Color.red, Rectangle(*bb))
# Find the point in the middle of arc1 using 0.5 as an index.
w << Circle(Color(0, 0.5, 0), 1, arc1[0.5])
gui(w)
|
MoiTux/pyramid-request-log | pyramid_request_log/config.py | Python | mit | 1,497 | 0 | from __future__ import absolute_import
import logging
import re
from pyramid.settings import aslist
from . import request_log
log = logging.getLogger(__name__)
def includeme(config):
settings = config.registry.settings
config.scan('pyramid_request_log.request_log')
if 'pyramid_request_log.pattern' in settings:
unlog_pattern = aslist(settings['pyramid_request_log.pattern'])
if not unlog_pattern:
log.info('Pyramid-Request-Log will ignore no key: '
'variable define but empty')
else:
log.info('Pyramid-Request-Log will ignore keys: %s', unlog_pattern)
re_compile = re.compile('({})'.format(')|('.join(unlog_pattern)))
request_log.unlog_pattern = re_compile
else:
log.warning('No pyramid_request_log.pattern found in settings')
if 'pyramid_request_log.ignore_route' in settings:
unlog_route = aslist(settings['pyramid_request_log.ignore_route'])
if not unlog_route:
log.info('Pyramid-Request-Log will ignore no route: '
'variable define but empty')
else:
| log.info('Pyramid-Request-Log will ignore routes: %s',
unlog_route)
re_compile = re.compile('({})'.format(')|('.join(unlog_route)))
request_log.unlog_route = re_compile
key = 'pyramid_request_log.authenticated_id'
if key in settings:
request_log.authenticated_id = settings[key]
| |
drix00/pymcxray | pymcxray/Simulation.py | Python | apache-2.0 | 58,934 | 0.003954 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pymcxray.Simulation
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
MCXRay simulation parameters.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import logging
import os.path
from itertools import product
# Third party modules.
import numpy as np
# Local modules.
from pymcxray.ElementProperties import getAtomicMass_g_mol
# Project modules
import pymcxray.AtomData as AtomData
import pymcxray.FileFormat.SimulationInputs as SimulationInputs
import pymcxray.FileFormat.Specimen as Specimen
import pymcxray.FileFormat.Region as Region
import pymcxray.FileFormat.RegionType as RegionType
import pymcxray.FileFormat.RegionDimensions as RegionDimensions
import pymcxray.FileFormat.Element as Element
import pymcxray.FileFormat.Models as Models
import pymcxray.FileFormat.MicroscopeParameters as MicroscopeParameters
import pymcxray.FileFormat.SimulationParameters as SimulationParameters
import pymcxray.FileFormat.ResultsParameters as ResultsParameters
from pymcxray.SimulationsParameters import PARAMETER_INCIDENT_ENERGY_keV, PARAMETER_NUMBER_ELECTRONS, \
PARAMETER_NUMBER_XRAYS, PARAMETER_TIME_s, PARAMETER_CURRENT_nA, PARAMETER_BEAM_DIAMETER_nm, \
PARAMETER_BEAM_TILT_deg, PARAMETER_BEAM_POSITION_nm, PARAMETER_DETECTOR_DISTANCE_cm, \
PARAMETER_DETECTOR_RADIUS_cm, PARAMETER_DETECTOR_THICKNESS_cm, PARAMETER_DETECTOR_NOISE_eV, \
PARAMETER_DETECTOR_CHANNEL_WIDTH_eV, PARAMETER_TOA_deg, PARAMETER_DETECTOR_AZIMUTHAL_ANGLE_deg, \
PARAMETER_NUMBER_WINDOWS, PARAMETER_ELASTIC_CROSS_SECTION_SCALING_FACTOR, PARAMETER_ENERGY_LOSS_SCALING_FACTOR, \
PARAMETER_REPETITION, PARAMETER_MODEL_SAMPLE_ENERGY_LOSS, PARAMETER_MODEL_XRAY_CHARACTERISTIC, \
PARAMETER_MODEL_XRAY_BREMSSTRAHLUNG, PARAMETER_MODEL_ATOM_CROSS_SECTION, PARAMETER_MODEL_ATOM_COLLISION, \
PARAMETER_MODEL_ATOM_MAC, PARAMETER_NUMBER_LAYERS_X, PARAMETER_NUMBER_LAYERS_Y, PARAMETER_NUMBER_LAYERS_Z
# Globals and constants variables.
def createPureBulkSample(atomic_number):
specimen = Specimen.Specimen()
specimen.name = AtomData.getAtomSymbol(atomic_number)
specimen.numberRegions = 1
region = Region.Region()
region.numberElements = 1
element = Element.Element(atomic_number)
region.elements.append(element)
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, 20000000000.0]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
return specimen
def createAlloyBulkSample(elements, sampleName=None):
specimen = Specimen.Specimen()
specimen.numberRegions = 1
region = Region.Region()
region.numberElements = len(elements)
name = ""
for atomicNumber, weightFraction in elements:
name += "%s%0.6f" % (AtomData.getAtomSymbol(atomicNumber), weightFraction)
element = Element.Element(atomicNumber, massFraction=weightFraction)
region.elements.append(element)
if sampleName is None:
specimen.name = name
else:
specimen.name = sampleName
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, 20000000000.0]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
return specimen
def createAlloyThinFilm(elements, filmThickness_nm):
specimen = Specimen.Specimen()
specimen.numberRegions = 1
region = Region.Region()
region.numberElements = len(elements)
name = ""
for atomicNumber, weightFraction in elements:
name += "%s%0.6f" % (AtomData.getAtomSymbol(atomicNumber), weightFraction)
element = Element.Element(atomicNumber, massFraction=weightFraction)
region.elements.append(element)
film_thickness_A = filmThickness_nm*10.0
name += "T%iA" % (film_thickness_A)
specimen.name = name
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, film_thickness_A]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
return specimen
def creat | eAlloyThinFilm2(elements, filmThickness_nm):
specimen = Specimen.Specimen()
specimen.numberRegions = 2
# Region 0
region = Region.Region()
region.numberElements = 0
region.regionType = RegionTyp | e.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, 20000000000.0]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 1
region = Region.Region()
region.numberElements = len(elements)
name = ""
for atomicNumber, weightFraction in elements:
name += "%s%0.6f" % (AtomData.getAtomSymbol(atomicNumber), weightFraction)
element = Element.Element(atomicNumber, massFraction=weightFraction)
region.elements.append(element)
filmThickness_A = filmThickness_nm*10.0
name += "T%iA" % (filmThickness_A)
specimen.name = name
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, filmThickness_A]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
return specimen
def createFilmOverSubstrate(atomicNumberFilm, atomicNumberSubstrate,
filmThickness_nm=10.0):
specimen = Specimen.Specimen()
symbolFilm = AtomData.getAtomSymbol(atomicNumberFilm)
symbolSubstrate = AtomData.getAtomSymbol(atomicNumberSubstrate)
name = "%s_T%inm_%s" % (symbolFilm, filmThickness_nm, symbolSubstrate)
specimen.name = name
specimen.numberRegions = 2
region = Region.Region()
region.numberElements = 1
element = Element.Element(atomicNumberSubstrate)
region.elements.append(element)
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, 20000000000.0]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
region = Region.Region()
region.numberElements = 1
element = Element.Element(atomicNumberFilm)
region.elements.append(element)
region.regionType = RegionType.REGION_TYPE_BOX
filmThickness_A = filmThickness_nm*1.0e1
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, filmThickness_A]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
return specimen
def createAlloyFilmOverSubstrate(film_elements, substrate_elements, film_thickness_nm=10.0,
film_mass_density_g_cm3=None, substrate_mass_density_g_cm3=None):
specimen = Specimen.Specimen()
name = ""
specimen.numberRegions = 2
region = Region.Region()
region.numberElements = len(substrate_elements)
for atomicNumber, weightFraction in substrate_elements:
name += "%s%i" % (AtomData.getAtomSymbol(atomicNumber), weightFraction*100)
element = Element.Element(atomicNumber, massFraction=weightFraction)
region.elements.append(el |
RasaHQ/rasa_core | rasa/core/tracker_store.py | Python | apache-2.0 | 13,497 | 0 | import itertools
import json
import logging
import pickle
# noinspection PyPep8Naming
from typing import Iterator, KeysView, List, Optional, Text
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.broker import EventChannel
from rasa.core.domain import Domain
from rasa.core.trackers import (
ActionExecuted, DialogueStateTracker, EventVerbosity)
from rasa.core.utils import class_from_module_path
logger = logging.getLogger(__name__)
class TrackerStore(object):
def __init__(self,
domain: Optional[Domain],
event_broker: Optional[EventChannel] = None) -> None:
self.domain = domain
self.event_broker = event_broker
self.max_event_history = None
@staticmethod
| def find_tracker_store(domain, store=None, event_broker=None):
if store is None or store.type is None:
return InMemoryTrackerStore(domain, event_broker=event_broker)
elif store.type == 'redis':
return RedisTrackerStore(domain=domain,
host=store.url,
event_broker=event_broker,
**store.kwargs)
elif store.type == 'mongod':
| return MongoTrackerStore(domain=domain,
host=store.url,
event_broker=event_broker,
**store.kwargs)
elif store.type.lower() == 'sql':
return SQLTrackerStore(domain=domain,
url=store.url,
event_broker=event_broker,
**store.kwargs)
else:
return TrackerStore.load_tracker_from_module_string(domain, store)
@staticmethod
def load_tracker_from_module_string(domain, store):
custom_tracker = None
try:
custom_tracker = class_from_module_path(store.type)
except (AttributeError, ImportError):
logger.warning("Store type '{}' not found. "
"Using InMemoryTrackerStore instead"
.format(store.type))
if custom_tracker:
return custom_tracker(domain=domain,
url=store.url, **store.kwargs)
else:
return InMemoryTrackerStore(domain)
def get_or_create_tracker(self, sender_id, max_event_history=None):
tracker = self.retrieve(sender_id)
self.max_event_history = max_event_history
if tracker is None:
tracker = self.create_tracker(sender_id)
return tracker
def init_tracker(self, sender_id):
if self.domain:
return DialogueStateTracker(
sender_id,
self.domain.slots,
max_event_history=self.max_event_history)
else:
return None
def create_tracker(self, sender_id, append_action_listen=True):
"""Creates a new tracker for the sender_id.
The tracker is initially listening."""
tracker = self.init_tracker(sender_id)
if tracker:
if append_action_listen:
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
self.save(tracker)
return tracker
def save(self, tracker):
raise NotImplementedError()
def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]:
raise NotImplementedError()
def stream_events(self, tracker: DialogueStateTracker) -> None:
old_tracker = self.retrieve(tracker.sender_id)
offset = len(old_tracker.events) if old_tracker else 0
evts = tracker.events
for evt in list(itertools.islice(evts, offset, len(evts))):
body = {
"sender_id": tracker.sender_id,
}
body.update(evt.as_dict())
self.event_broker.publish(body)
def keys(self):
# type: () -> Optional[List[Text]]
raise NotImplementedError()
@staticmethod
def serialise_tracker(tracker):
dialogue = tracker.as_dialogue()
return pickle.dumps(dialogue)
def deserialise_tracker(self, sender_id, _json):
dialogue = pickle.loads(_json)
tracker = self.init_tracker(sender_id)
tracker.recreate_from_dialogue(dialogue)
return tracker
class InMemoryTrackerStore(TrackerStore):
def __init__(self,
domain: Domain,
event_broker: Optional[EventChannel] = None
) -> None:
self.store = {}
super(InMemoryTrackerStore, self).__init__(domain, event_broker)
def save(self, tracker: DialogueStateTracker) -> None:
if self.event_broker:
self.stream_events(tracker)
serialised = InMemoryTrackerStore.serialise_tracker(tracker)
self.store[tracker.sender_id] = serialised
def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]:
if sender_id in self.store:
logger.debug('Recreating tracker for '
'id \'{}\''.format(sender_id))
return self.deserialise_tracker(sender_id, self.store[sender_id])
else:
logger.debug('Creating a new tracker for '
'id \'{}\'.'.format(sender_id))
return None
def keys(self) -> KeysView[Text]:
return self.store.keys()
class RedisTrackerStore(TrackerStore):
def keys(self):
pass
def __init__(self, domain, host='localhost',
port=6379, db=0, password=None, event_broker=None,
record_exp=None):
import redis
self.red = redis.StrictRedis(host=host, port=port, db=db,
password=password)
self.record_exp = record_exp
super(RedisTrackerStore, self).__init__(domain, event_broker)
def save(self, tracker, timeout=None):
if self.event_broker:
self.stream_events(tracker)
if not timeout and self.record_exp:
timeout = self.record_exp
serialised_tracker = self.serialise_tracker(tracker)
self.red.set(tracker.sender_id, serialised_tracker, ex=timeout)
def retrieve(self, sender_id):
stored = self.red.get(sender_id)
if stored is not None:
return self.deserialise_tracker(sender_id, stored)
else:
return None
class MongoTrackerStore(TrackerStore):
def __init__(self,
domain,
host="mongodb://localhost:27017",
db="rasa",
username=None,
password=None,
auth_source="admin",
collection="conversations",
event_broker=None):
from pymongo.database import Database
from pymongo import MongoClient
self.client = MongoClient(host,
username=username,
password=password,
authSource=auth_source,
# delay connect until process forking is done
connect=False)
self.db = Database(self.client, db)
self.collection = collection
super(MongoTrackerStore, self).__init__(domain, event_broker)
self._ensure_indices()
@property
def conversations(self):
return self.db[self.collection]
def _ensure_indices(self):
self.conversations.create_index("sender_id")
def save(self, tracker, timeout=None):
if self.event_broker:
self.stream_events(tracker)
state = tracker.current_state(EventVerbosity.ALL)
self.conversations.update_one(
{"sender_id": tracker.sender_id},
{"$set": state},
upsert=True)
def retrieve(self, sender_id):
stored = self.conversations.find_one({"sender_id": sender_id})
# look for conversations which have used an `int` sender_id in the past
# and update them.
if stored is None and sender_id |
airekans/paxosim | simulator.py | Python | mit | 10,270 | 0.001071 | import sys
import importlib
import inspect
class Process(object):
def __init__(self):
pass
def process(self, _in):
pass
def get_id(self):
return self._id
class Simulator(object):
def __init__(self, processes, links, commands):
self._cur_time = 0
self._processes = processes
self._stopped_processes = [None for _ in xrange(len(processes))]
self._links = {}
for link, distance in links.iteritems():
id1, id2 = link
self._links[(id1, id2)] = distance
self._links[(id2, id1)] = distance
self._removed_links = {}
self._input_queues = [[{}] for _ in processes] # Initial input
self._commands = commands
# this is the main method to start the simulation
def run(self):
skip_round_num = 1
while True:
for process in self._processes:
if process:
proc_id = process.get_id()
proc_input = self.get_input(proc_id)
output = process.process(proc_input | , self._cur_time)
if output:
self.sent_output(proc_id, output)
skip_round_num -= 1
if skip_round_num <= 0:
round_num = self.process_command()
if round_num is None:
break
else:
skip_round_num = round_num
self.goto_next_round(1)
def get_input(self, proc_id):
if len(self._input_queu | es[proc_id]) > 0:
return self._input_queues[proc_id][0]
else:
return {}
def sent_output(self, process_id, output):
for target_process, msg in output.iteritems():
try:
distance = self._links[(process_id, target_process)]
except KeyError:
print 'SIM_ERROR: no link from %d to %d, drop msg' % (process_id, target_process)
continue
queue_size = len(self._input_queues[target_process])
if queue_size <= distance:
for _ in range(queue_size, distance + 1):
self._input_queues[target_process].append({})
queue = self._input_queues[target_process][distance]
if process_id in queue:
queue[process_id].append(msg)
else:
queue[process_id] = [msg]
def process_command(self):
try:
while True:
command = self.get_next_commands()
if command == '':
return 1
elif command == 'exit':
return None
elif command.startswith('next'):
args = command.split(' ')
if len(args) > 1:
try:
skip_round = int(args[1])
except ValueError:
skip_round = 1
return skip_round
elif command.startswith('kill '):
proc_id = Simulator.get_process_id_from_command(command)
if proc_id >= 0:
self.kill_process(proc_id)
elif command.startswith('stop '):
proc_id = Simulator.get_process_id_from_command(command)
if proc_id >= 0:
self.stop_process(proc_id)
elif command.startswith('recover '):
proc_id = Simulator.get_process_id_from_command(command)
if proc_id >= 0:
self.recover_process(proc_id)
elif command.startswith('rmlink '):
source, target = Simulator.get_link_from_command(command)
if source >= 0:
self.remove_link(source, target)
elif command.startswith('addlink '):
source, target = Simulator.get_link_from_command(command)
if source >= 0:
self.add_link(source, target)
elif command.startswith('rmmsg '):
target, source = Simulator.get_msg_target_source_from_command(command)
if target >= 0:
self.remove_msg(target, source)
elif command == 'read': # change to read from stdin
self._commands = None
elif command == 'status':
self.print_status()
elif command == 'history':
pass
else:
print 'Unknown command:', command
except EOFError:
return None
@staticmethod
def print_process(process):
if hasattr(process.__class__, 'print_status') and \
inspect.ismethod(process.print_status):
process.print_status()
else:
print '%s(%d)' % (process.__class__.__name__, process.get_id())
@staticmethod
def get_process_id_from_command(command):
args = command.split(' ')
if len(args) == 1:
print 'Please give a process id to kill'
return -1
try:
return int(args[1])
except ValueError:
print 'Please give a process id to kill'
return -1
@staticmethod
def get_link_from_command(command):
args = command.split(' ')
if len(args) < 3:
print 'Please give a link in format (from, to)'
return -1, -1
try:
return int(args[1]), int(args[2])
except ValueError:
print 'Please give a process id to kill'
return -1, -1
@staticmethod
def get_msg_target_source_from_command(command):
args = command.split(' ')
try:
target = int(args[1])
except ValueError:
print 'Please give a target process id to delete msg'
return -1, -1
if len(args) < 3:
return target, -1
try:
source = int(args[2])
return target, source
except ValueError:
return target, -1
def kill_process(self, proc_id):
if proc_id >= len(self._processes):
return False
process = self._processes[proc_id]
if process is not None:
assert proc_id == process.get_id()
self._processes[proc_id] = None
return True
def stop_process(self, proc_id):
if proc_id >= len(self._processes):
return False
process = self._processes[proc_id]
if process is not None:
assert proc_id == process.get_id()
self._processes[proc_id] = None
self._stopped_processes[proc_id] = process
return True
else:
return False
def recover_process(self, proc_id):
if proc_id >= len(self._processes):
return False
process = self._stopped_processes[proc_id]
if process is not None:
assert proc_id == process.get_id()
self._stopped_processes[proc_id] = None
self._processes[proc_id] = process
return True
else:
return False
def remove_link(self, source, target):
if source < 0 or target < 0:
print 'Please give a link in format (from, to)'
return False
try:
distance = self._links[(source, target)]
del self._links[(source, target)]
del self._links[(target, source)]
self._removed_links[(source, target)] = distance
self._removed_links[(target, source)] = distance
return True
except KeyError:
return False
def add_link(self, source, target):
if source < 0 or target < 0:
print 'Please give a link in format (from, to)'
return False
try:
distance = self._removed_links[(source, target)]
del self._removed_links[(source, target)]
|
CIRALabs/DSAP | manage.py | Python | mit | 249 | 0.004016 | #!/usr/bin/env python
import os
import sys
if | __name__ == "__main__":
| os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.web.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv) |
tgbugs/hypush | test/memex/util/markdown_test.py | Python | mit | 3,893 | 0.001798 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from hyputils.memex.util import markdown
class TestRender(object):
def t | est_it_renders_markdown(self):
actual = markdown.render("_emphasis_ **bold**")
assert "<p><em>emphasis</em> <strong>bold</strong></p>\n" == actual
def test_it_ignores_math_block(self):
actual = markdown.render("$$1 + 1 = 2$$")
assert "<p>$$1 + 1 = 2$$</p>\n" == actual
def test_it_ignores_inline_match(self):
actual = markdown.render(r"Fooba | r \(1 + 1 = 2\)")
assert "<p>Foobar \\(1 + 1 = 2\\)</p>\n" == actual
def test_it_sanitizes_the_output(self, markdown_render, sanitize):
markdown.render("foobar")
sanitize.assert_called_once_with(markdown_render.return_value)
@pytest.fixture
def markdown_render(self, patch):
return patch("hyputils.memex.util.markdown.markdown")
@pytest.fixture
def sanitize(self, patch):
return patch("hyputils.memex.util.markdown.sanitize")
class TestSanitize(object):
    """Tests for markdown.sanitize(), the HTML whitelist filter."""
    # Each pair is (input, expected); expected=None means the input must
    # pass through sanitize() unchanged.
    @pytest.mark.parametrize(
        "text,expected",
        [
            (
                '<a href="https://example.org">example</a>',
                '<a href="https://example.org" rel="nofollow noopener" target="_blank">example</a>',
            ),
            # Don't add rel and target attrs to mailto: links
            ('<a href="mailto:foo@example.net">example</a>', None),
            ('<a title="foobar">example</a>', None),
            (
                '<a href="https://example.org" rel="nofollow noopener" target="_blank" title="foobar">example</a>',
                None,
            ),
            ("<blockquote>Foobar</blockquote>", None),
            ("<code>foobar</code>", None),
            ("<em>foobar</em>", None),
            ("<hr>", None),
            ("<h1>foobar</h1>", None),
            ("<h2>foobar</h2>", None),
            ("<h3>foobar</h3>", None),
            ("<h4>foobar</h4>", None),
            ("<h5>foobar</h5>", None),
            ("<h6>foobar</h6>", None),
            ('<img src="http://example.com/img.jpg">', None),
            ('<img src="/img.jpg">', None),
            ('<img alt="foobar" src="/img.jpg">', None),
            ('<img src="/img.jpg" title="foobar">', None),
            ('<img alt="hello" src="/img.jpg" title="foobar">', None),
            ("<ol><li>foobar</li></ol>", None),
            ("<p>foobar</p>", None),
            ("<pre>foobar</pre>", None),
            ("<strong>foobar</strong>", None),
            ("<ul><li>foobar</li></ul>", None),
        ],
    )
    def test_it_allows_markdown_html(self, text, expected):
        if expected is None:
            expected = text
        assert markdown.sanitize(text) == expected
    # NOTE(review): the first case's expected value reads identical to its
    # input here -- it was likely `&lt;script&gt;...` HTML-escaped output
    # before text extraction un-escaped the entities; confirm upstream.
    @pytest.mark.parametrize(
        "text,expected",
        [
            ("<script>evil()</script>", "<script>evil()</script>"),
            (
                '<a href="#" onclick="evil()">foobar</a>',
                '<a href="#" rel="nofollow noopener" target="_blank">foobar</a>',
            ),
            (
                '<a href="#" onclick=evil()>foobar</a>',
                '<a href="#" rel="nofollow noopener" target="_blank">foobar</a>',
            ),
            ("<a href=\"javascript:alert('evil')\">foobar</a>", "<a>foobar</a>"),
            ('<img src="/evil.jpg" onclick="evil()">', '<img src="/evil.jpg">'),
            ("<img src=\"javascript:alert('evil')\">", "<img>"),
        ],
    )
    def test_it_escapes_evil_html(self, text, expected):
        assert markdown.sanitize(text) == expected
    def test_it_adds_target_blank_and_rel_nofollow_to_links(self):
        actual = markdown.sanitize('<a href="https://example.org">Hello</a>')
        expected = '<a href="https://example.org" rel="nofollow noopener" target="_blank">Hello</a>'
        assert actual == expected
|
tensorflow/tensorflow | tensorflow/python/estimator/canned/prediction_keys.py | Python | apache-2.0 | 1,219 | 0.001641 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in wri | ting, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""prediction_keys python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from tensorflow_estimator.python.estimator.canned import prediction_keys
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
# Rebuild the implementation module's __all__ so the wildcard import below
# re-exports every name (including single-underscore ones) through this
# legacy tensorflow.python.estimator path.
prediction_keys.__all__ = [
    s for s in dir(prediction_keys) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.canned.prediction_keys import *
|
kirbyj/vPhon | rules.py | Python | gpl-3.0 | 11,430 | 0.084898 | #rules.py
#coding: utf-8
# Vietnamese orthographic onset (initial consonant/cluster) -> IPA symbol.
onsets = { 'b' : 'ɓ',
           'c' : 'k',
           'ch' : 'c',
           'd' : 'j',
           'đ' : 'ɗ',
           'g' : 'ɣ',
           'gh' : 'ɣ',
           'gi' : 'z',
           'h' : 'h',
           'k' : 'k',
           'kh' : 'x',
           'l' : 'l',
           'm' : 'm',
           'n' : 'n',
           'ng' : 'ŋ',
           'ngh' : 'ŋ',
           'nh' : 'ɲ',
           'ph' : 'f',
           'p' : 'p',
           'qu' : 'kʷ',
           'r' : 'r',
           's' : 'ʂ',
           't' : 't',
           'th' : 'tʰ',
           'tr' : 'ʈ',
           'v' : 'v',
           'x' : 's'}
# Special-case whole syllables: 'gi'/'qu' plus tone marks map to fixed forms.
gi = { 'gi' : 'zi', 'gí': 'zi', 'gì' : 'zi', 'gỉ' : 'zi', 'gĩ' : 'zi', 'gị' : 'zi'}
qu = {'quy' : 'kʷi', 'quý' : 'kʷi', 'quỳ' : 'kʷi', 'quỷ' : 'kʷi', 'quỹ' : 'kʷi', 'quỵ' : 'kʷi'}
# Vietnamese vowel nucleus (with tone diacritics) -> IPA.
# Repaired: a dataset separator token had corrupted the 'ửa' entry.
# NOTE(review): some keys repeat ('ia', 'oo', 'ôô'); Python keeps the last
# occurrence -- here the duplicates carry the same value, so it is benign.
nuclei = { 'a' : 'aː', 'á' : 'aː', 'à' : 'aː', 'ả' : 'aː', 'ã' : 'aː', 'ạ' : 'aː',
           'â' : 'ə', 'ấ' : 'ə', 'ầ' : 'ə', 'ẩ' : 'ə', 'ẫ' : 'ə', 'ậ' : 'ə',
           'ă' : 'a', 'ắ' : 'a', 'ằ' : 'a', 'ẳ' : 'a', 'ẵ' : 'a', 'ặ' : 'a',
           'e' : 'ɛ', 'é' : 'ɛ', 'è' : 'ɛ', 'ẻ' : 'ɛ', 'ẽ' : 'ɛ', 'ẹ' : 'ɛ',
           'ê' : 'e', 'ế' : 'e', 'ề' : 'e', 'ể' : 'e', 'ễ' : 'e', 'ệ' : 'e',
           'i' : 'i', 'í' : 'i', 'ì' : 'i', 'ỉ' : 'i', 'ĩ' : 'i', 'ị' : 'i',
           'o' : 'ɔ', 'ó' : 'ɔ', 'ò' : 'ɔ', 'ỏ' : 'ɔ', 'õ' : 'ɔ', 'ọ' : 'ɔ',
           'ô' : 'o', 'ố' : 'o', 'ồ' : 'o', 'ổ' : 'o', 'ỗ' : 'o', 'ộ' : 'o',
           'ơ' : 'əː', 'ớ' : 'əː', 'ờ' : 'əː', 'ở' : 'əː', 'ỡ' : 'əː', 'ợ' : 'əː',
           'u' : 'u', 'ú' : 'u', 'ù' : 'u', 'ủ' : 'u', 'ũ' : 'u', 'ụ' : 'u',
           'ư' : 'ɨ', 'ứ' : 'ɨ', 'ừ' : 'ɨ', 'ử' : 'ɨ', 'ữ' : 'ɨ', 'ự' : 'ɨ',
           'y' : 'i', 'ý' : 'i', 'ỳ' : 'i', 'ỷ' : 'i', 'ỹ' : 'i', 'ỵ' : 'i',
           'ia' : 'iə', 'ía' : 'iə', 'ìa' : 'iə', 'ỉa' : 'iə', 'ĩa' : 'iə', 'ịa' : 'iə',
           'ia' : 'iə', 'iá' : 'iə', 'ià' : 'iə', 'iả' : 'iə', 'iã' : 'iə', 'iạ' : 'iə',
           'iê' : 'iə', 'iế' : 'iə', 'iề' : 'iə', 'iể' : 'iə', 'iễ' : 'iə', 'iệ' : 'iə',
           'oo' : 'ɔː', 'óo' : 'ɔː', 'òo' : 'ɔː', 'ỏo' : 'ɔː', 'õo' : 'ɔː', 'ọo' : 'ɔː',
           'oo' : 'ɔː', 'oó' : 'ɔː', 'oò' : 'ɔː', 'oỏ' : 'ɔː', 'oõ' : 'ɔː', 'oọ' : 'ɔː',
           'ôô' : 'oː', 'ốô' : 'oː', 'ồô' : 'oː', 'ổô' : 'oː', 'ỗô' : 'oː', 'ộô' : 'oː',
           'ôô' : 'oː', 'ôố' : 'oː', 'ôồ' : 'oː', 'ôổ' : 'oː', 'ôỗ' : 'oː', 'ôộ' : 'oː',
           'ua' : 'uə', 'úa' : 'uə', 'ùa' : 'uə', 'ủa' : 'uə', 'ũa' : 'uə', 'ụa' : 'uə',
           'uô' : 'uə', 'uố' : 'uə', 'uồ' : 'uə', 'uổ' : 'uə', 'uỗ' : 'uə', 'uộ' : 'uə',
           'ưa' : 'ɨə', 'ứa' : 'ɨə', 'ừa' : 'ɨə', 'ửa' : 'ɨə', 'ữa' : 'ɨə', 'ựa' : 'ɨə',
           'ươ' : 'ɨə', 'ướ' : 'ɨə', 'ườ' : 'ɨə', 'ưở' : 'ɨə', 'ưỡ' : 'ɨə', 'ượ' : 'ɨə',
           'yê' : 'iə', 'yế' : 'iə', 'yề' : 'iə', 'yể' : 'iə', 'yễ' : 'iə', 'yệ' : 'iə',
           'uơ' : 'uə', 'uở' : 'uə', 'uờ': 'uə', 'uở' : 'uə', 'uỡ' : 'uə', 'uợ' : 'uə'
           }
# Nucleus + off-glide (vowel followed by /j/ or /w/) -> IPA.
# Repaired: a stray dataset separator token trailed the first line.
offglides = { 'ai' : 'aːj', 'ái' : 'aːj', 'ài' : 'aːj', 'ải' : 'aːj', 'ãi' : 'aːj', 'ại' : 'aːj',
              'ay' : 'aj', 'áy' : 'aj', 'ày' : 'aj', 'ảy' : 'aj', 'ãy' : 'aj', 'ạy' : 'aj',
              'ao' : 'aːw', 'áo' : 'aːw', 'ào' : 'aːw', 'ảo' : 'aːw', 'ão' : 'aːw', 'ạo' : 'aːw',
              'au' : 'aw', 'áu' : 'aw', 'àu' : 'aw', 'ảu' : 'aw', 'ãu' : 'aw', 'ạu' : 'aw',
              'ây' : 'əj', 'ấy' : 'əj', 'ầy' : 'əj', 'ẩy' : 'əj', 'ẫy' : 'əj', 'ậy' : 'əj',
              'âu' : 'əw', 'ấu' : 'əw', 'ầu' : 'əw', 'ẩu' : 'əw', 'ẫu' : 'əw', 'ậu' : 'əw',
              'eo' : 'ɛw', 'éo' : 'ɛw', 'èo' : 'ɛw', 'ẻo' : 'ɛw', 'ẽo' : 'ɛw', 'ẹo' : 'ɛw',
              'êu' : 'ew', 'ếu' : 'ew', 'ều' : 'ew', 'ểu' : 'ew', 'ễu' : 'ew', 'ệu' : 'ew',
              'iu' : 'iw', 'íu' : 'iw', 'ìu' : 'iw', 'ỉu' : 'iw', 'ĩu' : 'iw', 'ịu' : 'iw',
              'oi' : 'ɔj', 'ói' : 'ɔj', 'òi' : 'ɔj', 'ỏi' : 'ɔj', 'õi' : 'ɔj', 'ọi' : 'ɔj',
              'ôi' : 'oj', 'ối' : 'oj', 'ồi' : 'oj', 'ổi' : 'oj', 'ỗi' : 'oj', 'ội' : 'oj',
              'ui' : 'uj', 'úi' : 'uj', 'ùi' : 'uj', 'ủi' : 'uj', 'ũi' : 'uj', 'ụi' : 'uj',
              'uy' : 'uj', 'úy' : 'uj', 'ùy' : 'uj', 'ủy' : 'uj', 'ũy' : 'uj', 'ụy' : 'uj',
              'ơi' : 'əːj', 'ới' : 'əːj', 'ời' : 'əːj', 'ởi' : 'əːj', 'ỡi' : 'əːj', 'ợi' : 'əːj',
              'ưi' : 'ɨj', 'ứi' : 'ɨj', 'ừi' : 'ɨj', 'ửi' : 'ɨj', 'ữi' : 'ɨj', 'ựi' : 'ɨj',
              'ưu' : 'ɨw', 'ứu' : 'ɨw', 'ừu' : 'ɨw', 'ửu' : 'ɨw', 'ữu' : 'ɨw', 'ựu' : 'ɨw',
              'iêu' : 'iəw', 'iếu' : 'iəw', 'iều' : 'iəw', 'iểu' : 'iəw', 'iễu' : 'iəw', 'iệu' : 'iəw',
              'yêu' : 'iəw', 'yếu' : 'iəw', 'yều' : 'iəw', 'yểu' : 'iəw', 'yễu' : 'iəw', 'yệu' : 'iəw',
              'uôi' : 'uəj', 'uối' : 'uəj', 'uồi' : 'uəj', 'uổi' : 'uəj', 'uỗi' : 'uəj', 'uội' : 'uəj',
              'ươi' : 'ɨəj', 'ưới' : 'ɨəj', 'ười' : 'ɨəj', 'ưởi' : 'ɨəj', 'ưỡi' : 'ɨəj', 'ượi' : 'ɨəj',
              'ươu' : 'ɨəw', 'ướu' : 'ɨəw', 'ườu' : 'ɨəw', 'ưởu' : 'ɨəw', 'ưỡu' : 'ɨəw', 'ượu' : 'ɨəw'
              }
# On-glide (/w/-initial) vowel sequences -> IPA nucleus.
onglides = { 'oa' : 'aː', 'oá' : 'aː', 'oà' : 'aː', 'oả' : 'aː', 'oã' : 'aː', 'oạ' : 'aː',
             'óa' : 'aː', 'òa' : 'aː', 'ỏa' : 'aː', 'õa' : 'aː', 'ọa' : 'aː',
             'oă' : 'a', 'oắ' : 'a', 'oằ' : 'a', 'oẳ' : 'a', 'oẵ' : 'a', 'oặ' : 'a',
             'oe' : 'ɛ', 'oé' : 'ɛ', 'oè' : 'ɛ', 'oẻ' : 'ɛ', 'oẽ' : 'ɛ', 'oẹ' : 'ɛ',
             'oe' : 'ɛ', 'óe' : 'ɛ', 'òe' : 'ɛ', 'ỏe' : 'ɛ', 'õe' : 'ɛ', 'ọe' : 'ɛ',
             'ua' : 'aː', 'uá' : 'aː', 'uà' : 'aː', 'uả' : 'aː', 'uã' : 'aː', 'uạ' : 'aː',
             'uă' : 'a', 'uắ' : 'a', 'uằ' : 'a', 'uẳ' : 'a', 'uẵ' : 'a', 'uặ' : 'a',
             'uâ' : 'ə', 'uấ' : 'ə', 'uầ' : 'ə', 'uẩ' : 'ə', 'uẫ' : 'ə', 'uậ' : 'ə',
             'ue' : 'ɛ', 'ué' : 'ɛ', 'uè' : 'ɛ', 'uẻ' : 'ɛ', 'uẽ' : 'ɛ', 'uẹ' : 'ɛ',
             'uê' : 'e', 'uế' : 'e', 'uề' : 'e', 'uể' : 'e', 'uễ' : 'e', 'uệ' : 'e',
             'uy' : 'i', 'uý' : 'i', 'uỳ' : 'i', 'uỷ' : 'i', 'uỹ' : 'i', 'uỵ' : 'i',
             'uya' : 'iə', 'uyá' : 'iə', 'uyà' : 'iə', 'uyả' : 'iə', 'uyã' : 'iə', 'uyạ' : 'iə',
             'uyê' : 'iə', 'uyế' : 'iə', 'uyề' : 'iə', 'uyể' : 'iə', 'uyễ' : 'iə', 'uyệ' : 'iə',
             'oen' : 'ɛn', 'oén' : 'ɛn', 'oèn' : 'ɛn', 'oẻn' : 'ɛn', 'oẽn' : 'ɛn', 'oẹn' : 'ɛn',
             'oet' : 'ɛt', 'oét' : 'ɛt', 'oèt' : 'ɛt', 'oẻt' : 'ɛt', 'oẽt' : 'ɛt', 'oẹt' : 'ɛt'
             }
# On-glide + nucleus + off-glide sequences -> IPA.
onoffglides = { 'oai' : 'aːj', 'oái' : 'aːj', 'oài' : 'aːj', 'oải' : 'aːj', 'oãi' : 'aːj', 'oại' : 'aːj',
                'oay' : 'aj', 'oáy' : 'aj', 'oày' : 'aj', 'oảy' : 'aj', 'oãy' : 'aj', 'oạy' : 'aj',
                'oao' : 'aw', 'oáo' : 'aw', 'oào' : 'aw', 'oảo' : 'aw', 'oão' : 'aw', 'oạo' : 'aw',
                'oeo' : 'ɛw', 'oéo' : 'ɛw', 'oèo' : 'ɛw', 'oẻo' : 'ɛw', 'oẽo' : 'ɛw', 'oẹo' : 'ɛw',
                'oeo' : 'ɛw', 'óeo' : 'ɛw', 'òeo' : 'ɛw', 'ỏeo' : 'ɛw', 'õeo' : 'ɛw', 'ọeo' : 'ɛw',
                'ueo' : 'ɛw', 'uéo' : 'ɛw', 'uèo' : 'ɛw', 'uẻo' : 'ɛw', 'uẽo' : 'ɛw', 'uẹo' : 'ɛw',
                'uêu' : 'ew', 'uếu' : 'ew', 'uều' : 'ew', 'uểu' : 'ew', 'uễu' : 'ew', 'uệu' : 'ew',
                'uyu' : 'iw', 'uyú' : 'iw', 'uyù' : 'iw', 'uyủ' : 'iw', 'uyũ' : 'iw', 'uyụ' : 'iw',
                'uyu' : 'iw', 'uýu' : 'iw', 'uỳu' : 'iw', 'uỷu' : 'iw', 'uỹu' : 'iw', 'uỵu' : 'iw',
                'uai' : 'aːj', 'uái' : 'aːj', 'uài' : 'aːj', 'uải' : 'aːj', 'uãi' : 'aːj', 'uại' : 'aːj',
                'uay' : 'aj', 'uáy' : 'aj', 'uày' : 'aj', 'uảy' : 'aj', 'uãy' : 'aj', 'uạy' : 'aj',
                'uây' : 'əj', 'uấy' : 'əj', 'uầy' : 'əj', 'uẩy' : 'əj', 'uẫy' : 'əj', 'uậy' : 'əj'
                }
# Orthographic syllable coda (final consonant) -> IPA.
codas = { 'c' : 'k',
          'ch' : 'c',
          'k' : 'k',
          'm' : 'm',
          'n' : 'n',
          'ng' : 'ŋ',
          'nh' : 'ɲ',
          'p' : 'p',
          't' : 't'}
tones = { 'á' : 'B1', 'à' : 'A2', 'ả' : 'C1', 'ã' : 'C2', 'ạ' : 'B2',
'ấ' : 'B1', 'ầ' : 'A2', 'ẩ' : 'C1', 'ẫ' : 'C2', 'ậ' : 'B2',
'ắ' : 'B1', 'ằ' : 'A2', 'ẳ' : 'C1', 'ẵ' : 'C2', 'ặ' : 'B2',
'é' : 'B1', 'è' : 'A2', 'ẻ' : 'C1', 'ẽ' : 'C2', 'ẹ' : 'B2',
'ế' : 'B1', 'ề' : 'A2', 'ể' : 'C1', 'ễ' : 'C2', 'ệ' : 'B2',
'í' : 'B1', 'ì' : 'A2', 'ỉ' : 'C1', 'ĩ' : 'C2', 'ị' : 'B2', |
dirmeier/dataframe | dataframe/callable.py | Python | gpl-3.0 | 1,564 | 0.001279 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = 'mail@simon-dirmeier.net'
class Callable:
    """
    Super-class for all classes that should be callable.
    E.g.: whenever you want to use ``modify`` or ``aggregate`` you need to
    write a class that extends ``Callable`` and overrides ``__call__``.
    ``__call__`` has to return a scalar or a list, depending on whether you
    want to aggregate columns or modify them: a class that modifies a
    column returns a list, while a class that aggregates returns a scalar.
    """
    def __call__(self, *args):
        """
        Call method. Is used when object is called like this: object()
        :param args: tuple of columns
        :type args: tuple
        :return: returns a list/scalar
        :rtype: list(any)/scalar
        """
        pass
|
mastizada/kuma | vendor/packages/ipython/IPython/Magic.py | Python | mpl-2.0 | 135,645 | 0.005964 | # -*- coding: utf-8 -*-
"""Magic functions for InteractiveShell.
"""
#*****************************************************************************
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
#****************************************************************************
# Modules and globals
# Python standard modules
import __builtin__
import bdb
import inspect
import os
import pdb
import pydoc
import sys
# Repaired: dataset separator tokens had corrupted `import tempfile` and
# `from pprint import pprint, pformat`. (Python 2 file: cPickle/cStringIO.)
import re
import tempfile
import time
import cPickle as pickle
import textwrap
from cStringIO import StringIO
from getopt import getopt,GetoptError
from pprint import pprint, pformat
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile,pstats
except ImportError:
profile = pstats = None
# Homebrewed
import IPython
from IPython import Debugger, OInspect, wildcard
from IPython.FakeModule import FakeModule
from IPython.Itpl import Itpl, itpl, printpl,itplns
from IPython.PyColorize import Parser
from IPython.ipstruct import Struct
from IPython.macro import Macro
from IPython.genutils import *
from IPython import platutils
import IPython.generics
import IPython.ipapi
from IPython.ipapi import UsageError
from IPython.testing import decorators as testdec
#***************************************************************************
# Utility functions
def on_off(tag):
    """Translate a 1/0 (or True/False) flag into 'ON'/'OFF'."""
    states = ('OFF', 'ON')
    return states[tag]
class Bunch: pass  # generic attribute bag: instances carry ad-hoc attributes
def compress_dhist(dh):
    """Compress a directory history list.

    The last 10 entries of *dh* are preserved verbatim; every earlier
    entry is kept only at its first occurrence, in original order.
    """
    older, recent = dh[:-10], dh[-10:]
    seen = set()
    deduped = []
    for entry in older:
        if entry not in seen:
            seen.add(entry)
            deduped.append(entry)
    return deduped + recent
#***************************************************************************
# Main class implementing Magic functionality
class Magic:
"""Magic functions for InteractiveShell.
Shell functions which can be reached as %function_name. All magic
functions should accept a string, which they can parse for their own
needs. This can make some functions easier to type, eg `%cd ../`
vs. `%cd("../")`
ALL definitions MUST begin with the prefix magic_. The user won't need it
at the command line, but it is is needed in the definition. """
# class globals
auto_status = ['Automagic is OFF, % prefix IS needed for magic functions.',
'Automagic is ON, % prefix NOT needed for magic functions.']
#......................................................................
# some utility functions
    def __init__(self,shell):
        """Keep a reference to the shell and set up per-instance state."""
        # Per-magic default option strings, filled by default_option().
        self.options_table = {}
        # %prun needs the (license-encumbered, possibly absent) profile
        # module; substitute an explanatory stub when it is missing.
        if profile is None:
            self.magic_prun = self.profile_missing_notice
        self.shell = shell
        # namespace for holding state we may need
        self._magic_state = Bunch()
    def profile_missing_notice(self, *args, **kwargs):
        """Stand-in for %prun when the profile module is not installed."""
        error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
    def default_option(self,fn,optstr):
        """Make an entry in the options_table for fn, with value optstr"""
        # NOTE(review): error() only reports; the option string is stored
        # below even when fn is not a known magic -- confirm intended.
        if fn not in self.lsmagic():
            error("%s is not a magic function" % fn)
        self.options_table[fn] = optstr
    def lsmagic(self):
        """Return a list of currently available magic functions.
        Gives a list of the bare names after mangling (['ls','cd', ...], not
        ['magic_ls','magic_cd',...]"""
        # FIXME. This needs a cleanup, in the way the magics list is built.
        # magics in class definition
        class_magic = lambda fn: fn.startswith('magic_') and \
                      callable(Magic.__dict__[fn])
        # in instance namespace (run-time user additions)
        inst_magic = lambda fn: fn.startswith('magic_') and \
                     callable(self.__dict__[fn])
        # and bound magics by user (so they can access self):
        inst_bound_magic = lambda fn: fn.startswith('magic_') and \
                           callable(self.__class__.__dict__[fn])
        # NOTE(review): Python 2 -- filter() returns lists here, so `+`
        # concatenates; under Python 3 this would need list() wrappers.
        magics = filter(class_magic,Magic.__dict__.keys()) + \
                 filter(inst_magic,self.__dict__.keys()) + \
                 filter(inst_bound_magic,self.__class__.__dict__.keys())
        # De-duplicate, strip the 'magic_' prefix and sort alphabetically.
        out = []
        for fn in set(magics):
            out.append(fn.replace('magic_','',1))
        out.sort()
        return out
    def extract_input_slices(self,slices,raw=False):
        """Return as a string a set of input history slices.
        Inputs:
        - slices: the set of slices is given as a list of strings (like
        ['1','4:8','9'], since this function is for use by magic functions
        which get their arguments as strings.
        Optional inputs:
        - raw(False): by default, the processed input is used. If this is
        true, the raw input history is used instead.
        Note that slices can be called with two notations:
        N:M -> standard python form, means including items N...(M-1).
        N-M -> include items N..M (closed endpoint)."""
        if raw:
            hist = self.shell.input_hist_raw
        else:
            hist = self.shell.input_hist
        cmds = []
        for chunk in slices:
            if ':' in chunk:
                # Python-style half-open range N:M.
                ini,fin = map(int,chunk.split(':'))
            elif '-' in chunk:
                # Dash form N-M includes the endpoint M.
                ini,fin = map(int,chunk.split('-'))
                fin += 1
            else:
                # Bare index: a single history entry.
                ini = int(chunk)
                fin = ini+1
            cmds.append(hist[ini:fin])
        return cmds
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
alias_ns = None
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.shell.user_ns),
('IPython internal', self.shell.internal_ns),
('Python builtin', __builtin__.__dict__),
('Alias', self.shell.alias_table),
]
alias_ns = self.shell.alias_table
# initialize results to 'null'
found = 0; obj = None; ospace = None; ds = None;
ismagic = 0; isalias = 0; parent = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
#print 'oname_rest:', oname_rest # dbg
for part in oname_rest:
try:
parent = obj
obj = getattr(obj,part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found |
Tendrl/commons | tendrl/commons/flows/import_cluster/__init__.py | Python | lgpl-2.1 | 5,760 | 0.000174 | import etcd
import json
import re
import sys
import traceback
from tendrl.commons import flows
from tendrl.commons.flows.exceptions import FlowExecutionFailedError
from tendrl.commons.objects import AtomExecutionFailedError
from tendrl.commons.utils import etcd_utils
class ImportCluster(flows.BaseFlow):
    """Flow that brings a discovered cluster under Tendrl management.

    Repaired dataset-separator corruption on the uniqueness-check `for`
    line and the short-name comparison `if` line.
    """

    def __init__(self, *args, **kwargs):
        super(ImportCluster, self).__init__(*args, **kwargs)

    def run(self):
        """Validate, mark the cluster "importing", fan out to nodes, then
        run the base flow and record success/failure on the Cluster."""
        if "Node[]" not in self.parameters:
            # Parent invocation: validate input and resolve member nodes.
            integration_id = self.parameters['TendrlContext.integration_id']
            short_name = self.parameters.get('Cluster.short_name', None)
            if short_name:
                # Short name: alphanumeric/underscore only, max 64 chars.
                if not re.match('^[a-zA-Z0-9][A-Za-z0-9_]*$',
                                short_name) or \
                        len(short_name) > 64:
                    raise FlowExecutionFailedError(
                        "Invalid cluster short_name: %s. "
                        "Only alpha-numeric and underscore "
                        "allowed for short name, max length 64 chars" %
                        short_name
                    )
            # Check for uniqueness of cluster short name
            _clusters = NS._int.client.read(
                '/clusters'
            )
            for entry in _clusters.leaves:
                _cluster = NS.tendrl.objects.Cluster(
                    integration_id=entry.key.split('/')[-1]
                ).load()
                # NOTE(review): comparison lower-cases/strips the requested
                # name but it is stored un-normalized below -- confirm.
                if _cluster.short_name and short_name and \
                        _cluster.is_managed == 'yes' and \
                        _cluster.short_name == short_name.strip().lower():
                    raise FlowExecutionFailedError(
                        "Cluster with name: %s already exists" % short_name
                    )
            _cluster = NS.tendrl.objects.Cluster(
                integration_id=NS.tendrl_context.integration_id).load()
            # Refuse to start while another state-changing job is running.
            if (_cluster.status is not None and
                    _cluster.status != "" and
                    _cluster.current_job['status'] == 'in_progress' and
                    _cluster.status in
                    ["importing", "unmanaging", "expanding"]):
                raise FlowExecutionFailedError(
                    "Another job in progress for cluster, please wait till "
                    "the job finishes (job_id: %s) (integration_id: %s) " % (
                        _cluster.current_job['job_id'],
                        _cluster.integration_id
                    )
                )
            # Fall back to the integration id when no short name was given.
            if short_name not in [None, ""]:
                _cluster.short_name = short_name
            else:
                _cluster.short_name = integration_id
            _cluster.status = "importing"
            _cluster.current_job = {
                'job_id': self.job_id,
                'job_name': self.__class__.__name__,
                'status': 'in_progress'
            }
            _cluster.save()
            try:
                # Fan out: every node tagged with this integration id.
                integration_id_index_key = \
                    "indexes/tags/tendrl/integration/%s" % integration_id
                _node_ids = etcd_utils.read(
                    integration_id_index_key).value
                self.parameters["Node[]"] = json.loads(_node_ids)
            except etcd.EtcdKeyNotFound:
                _cluster = NS.tendrl.objects.Cluster(
                    integration_id=NS.tendrl_context.integration_id).load()
                _cluster.status = ""
                _cluster.current_job['status'] = 'failed'
                _cluster.save()
                raise FlowExecutionFailedError("Cluster with "
                                               "integration_id "
                                               "(%s) not found, cannot "
                                               "import" % integration_id)
        else:
            # Child invocation: just record the volume profiling choice.
            _cluster = NS.tendrl.objects.Cluster(
                integration_id=NS.tendrl_context.integration_id
            ).load()
            _cluster.volume_profiling_flag = self.parameters[
                'Cluster.volume_profiling_flag']
            _cluster.save()
        try:
            super(ImportCluster, self).run()
            # Check if this job is parent and then only set status
            # This could be called from parent import cluster or
            # even from expand cluster flow. We should not set the
            # cluster's current job status from child jobs
            _job = NS.tendrl.objects.Job(job_id=self.job_id).load()
            if 'parent' not in _job.payload and _job.status != "failed":
                _cluster = NS.tendrl.objects.Cluster(
                    integration_id=NS.tendrl_context.integration_id
                ).load()
                _cluster.status = ""
                _cluster.current_job['status'] = "finished"
                _cluster.is_managed = "yes"
                _cluster.save()
        except (FlowExecutionFailedError,
                AtomExecutionFailedError,
                Exception) as ex:
            # Record the failure (message + traceback) on the cluster.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            _cluster = NS.tendrl.objects.Cluster(
                integration_id=NS.tendrl_context.integration_id).load()
            _cluster.status = ""
            _cluster.current_job['status'] = 'failed'
            _errors = []
            if hasattr(ex, 'message'):
                _errors = [ex.message]
            else:
                _errors = [str(ex)]
            if _errors:
                _cluster.errors = _errors
            _cluster.save()
            raise FlowExecutionFailedError(str(
                traceback.format_exception(exc_type,
                                           exc_value,
                                           exc_traceback)
            ))
|
gjover/Lima_subtree | python/__init__.py | Python | gpl-3.0 | 2,173 | 0.016107 | ############################################################################
# This file is part of LImA, a Library for Image Acquisition
#
# Copyright (C) : 2009-2011
# European Synchrotron Radiation Facility
#                      BP 220, Grenoble 38043
# FRANCE
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
############################################################################
import os
# Package root on disk; __path__ is the package's module search path.
root_name = __path__[0]
# Site-specific csadmin locations holding the platform-detection helper.
csadmin_dirs = ['/csadmin/local', '/csadmin/common']
script_get_os = 'scripts/get_compat_os.share'
get_os = None
# Use the first csadmin helper script that actually exists on this host.
for d in csadmin_dirs:
    aux_get_os = os.path.join(d, script_get_os)
    if os.path.exists(aux_get_os):
        get_os = aux_get_os
        break
if get_os is not None:
    # Helper prints the compatible platform names; pick the first one that
    # has a matching subdirectory inside this package.
    compat_plat = os.popen(get_os).readline().strip()
    plat = None
    compat_plat_list = compat_plat.split()
    for aux_plat in compat_plat_list:
        if aux_plat.strip() in os.listdir(root_name):
            plat = aux_plat
            break
    if plat is None:
        # NOTE: Python 2 raise syntax; this file predates Python 3.
        raise ImportError, ('Could not find Lima directory for %s '
                            '(nor compat. %s) platform(s) at %s' %
                            (compat_plat_list[0],
                             compat_plat_list[1:], root_name))
    # Prepend the platform directory so its binary modules are found first.
    lima_plat = os.path.join(root_name, plat)
    __path__.insert(0, lima_plat)
# This mandatory variable is systematically overwritten by 'make install'
os.environ['LIMA_LINK_STRICT_VERSION'] = 'MINOR'
if get_os is not None:
    # Export every platform subdirectory except 'lib' as a submodule name.
    all_dirs = os.listdir(lima_plat)
    all_dirs.remove('lib')
    __all__ = all_dirs
    del plat, compat_plat, aux_plat, lima_plat, all_dirs
# Scrub temporaries from the package namespace.
# NOTE(review): `del d, aux_get_os` raises NameError if csadmin_dirs were
# empty; harmless as written but worth confirming.
del root_name, csadmin_dirs, get_os, script_get_os, d, aux_get_os
del os
|
foursquare/pants | tests/python/pants_test/cache/test_artifact.py | Python | apache-2.0 | 2,540 | 0.015354 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from pants.cache.artifact import DirectoryArtifact, TarballArtifact
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open
class TarballArtifactTest(unittest.TestCase):
  """Tests for TarballArtifact: collect/get_paths round trip and exists()."""
  def test_get_paths_after_collect(self):
    with temporary_dir() as tmpdir:
      artifact_root = os.path.join(tmpdir, 'artifacts')
      cache_root = os.path.join(tmpdir, 'cache')
      safe_mkdir(cache_root)
      file_path = self.touch_file_in(artifact_root)
      artifact = TarballArtifact(artifact_root, os.path.join(cache_root, 'some.tar'))
      artifact.collect([file_path])
      # Collected paths must round-trip through the tarball.
      self.assertEquals([file_path], list(artifact.get_paths()))
  def test_does_not_exist_when_no_tar_file(self):
    with temporary_dir() as tmpdir:
      artifact_root = os.path.join(tmpdir, 'artifacts')
      cache_root = os.path.join(tmpdir, 'cache')
      safe_mkdir(cache_root)
      # No collect() call, so the tar file was never written.
      artifact = TarballArtifact(artifact_root, os.path.join(cache_root, 'some.tar'))
      self.assertFalse(artifact.exists())
  def test_exists_true_when_exists(self):
    with temporary_dir() as tmpdir:
      artifact_root = os.path.join(tmpdir, 'artifacts')
      cache_root = os.path.join(tmpdir, 'cache')
      safe_mkdir(cache_root)
      path = self.touch_file_in(artifact_root)
      artifact = TarballArtifact(artifact_root, os.path.join(cache_root, 'some.tar'))
      artifact.collect([path])
      self.assertTrue(artifact.exists())
  # Helper: create an empty file under artifact_root and return its path.
  def touch_file_in(self, artifact_root):
    path = os.path.join(artifact_root, 'some.file')
    with safe_open(path, 'w') as f:
      f.write('')
    return path
class DirectoryArtifactTest(unittest.TestCase):
  """Tests for DirectoryArtifact.exists().

  Repaired dataset-separator corruption in the first test's method name
  and in the safe_mkdir(artifact_dir) call.
  """

  def test_exists_when_dir_exists(self):
    with temporary_dir() as tmpdir:
      artifact_root = os.path.join(tmpdir, 'artifacts')
      artifact_dir = os.path.join(tmpdir, 'cache')
      safe_mkdir(artifact_dir)
      artifact = DirectoryArtifact(artifact_root, artifact_dir)
      self.assertTrue(artifact.exists())

  def test_does_not_exist_when_dir_missing(self):
    with temporary_dir() as tmpdir:
      artifact_root = os.path.join(tmpdir, 'artifacts')
      artifact_dir = os.path.join(tmpdir, 'cache')
      artifact = DirectoryArtifact(artifact_root, artifact_dir)
      self.assertFalse(artifact.exists())
|
DGA-MI-SSI/YaCo | deps/swig-3.0.7/Examples/python/std_map/runme.py | Python | gpl-3.0 | 1,089 | 0 | # file: runme.py
import example
pmap = example.pymap()
pmap["hi"] = 1
pmap["hello"] = 2
dmap = {}
dmap["hello"] = 1.0
dmap["hi"] = 2.0
print dmap.items()
print dmap.keys()
print dmap.values()
print dmap
hmap = example. | halfd | (dmap)
dmap = hmap
print dmap
for i in dmap.iterkeys():
print "key", i
for i in dmap.itervalues():
print "val", i
for k, v in dmap.iteritems():
print "item", k, v
dmap = example.DoubleMap()
dmap["hello"] = 1.0
dmap["hi"] = 2.0
for i in dmap.iterkeys():
print "key", i
for i in dmap.itervalues():
print "val", i
for k, v in dmap.iteritems():
print "item", k, v
print dmap.items()
print dmap.keys()
print dmap.values()
hmap = example.halfd(dmap)
print hmap.keys()
print hmap.values()
dmap = {}
dmap["hello"] = 2
dmap["hi"] = 4
hmap = example.halfi(dmap)
print hmap
print hmap.keys()
print hmap.values()
dmap = hmap
for i in dmap.iterkeys():
print "key", i
for i in dmap.itervalues():
print "val", i
for i in dmap.iteritems():
print "item", i
for k, v in dmap.iteritems():
print "item", k, v
print dmap
|
alejandro-mc/trees | testWeightedSPR.py | Python | mit | 4,022 | 0.021631 | #randSPRwalk.py
#writes random SPR walks to files
#calls GTP on each SPR random walk file to get
#the ditances between each tree and the first tree of the sequence
#the results are written to csv files with lines delimited by \t
import tree_utils as tu
import w_tree_utils as wtu
import os
import sys
import numpy as np
import random
from math import sqrt
# Module state: __pid__ is set to os.getpid() in __main__ before use, so
# temp-file names from concurrent runs do not collide.
__pid__ = 0
# Output-file prefix; __main__ rewrites it per algorithm/weighting choice.
__prefix__="SPR_"
#daf: distance algorithm file
#daf: distance algorithm file
def randSPRwalk(daf,size,steps,runs,seed,weighted=False):
    """Generate `runs` random SPR tree sequences of length `steps` on trees
    with `size` leaves, run the distance jar `daf` on each sequence, and
    append the distances to a prefix_size_steps_runs_seed output file.
    """
    global __pid__
    global __prefix__
    #set the seed
    random.seed(seed)
    np.random.seed(seed)
    #select tree utils module
    # Weighted trees draw branch lengths from an exponential distribution.
    if weighted:
        tum = wtu
        genRandBinTree = lambda leaves: wtu.genRandBinTree(leaves,np.random.exponential)
    else:
        tum = tu
        genRandBinTree = lambda leaves: tu.genRandBinTree(leaves)
    # NOTE(review): treeNorm is monkey-patched to a constant 0.25 for BOTH
    # modules, so all "norms" below are 2*sqrt(0.25) -- confirm intended.
    tum.treeNorm = lambda x: 0.25
    out_file_name = __prefix__ + str(size) + "_" + str(steps) + "_" +\
                    str(runs) + "_" + str(seed)
    normsfile_name = out_file_name + '.norms'
    #create a file for each spr sequence
    for k in range(runs):
        rand_tree = genRandBinTree(list(range(size)))
        total_nodes = size - 1
        #write current sequence to file
        infile_prefix = "tmpsprseq" + str(__pid__)
        infile = infile_prefix + str(k)
        # NOTE(review): normsfile_name is the same for every run k and is
        # opened 'w' each iteration, so earlier runs' norms are overwritten.
        with open(infile,'w') as treefile, open(normsfile_name,'w') as nrmfile:
            treefile.write(tum.toNewickTree(rand_tree) + "\n")
            #write tree norms-----
            #save norm of first tree
            norm1 = sqrt(tum.treeNorm(rand_tree))
            walknorms = ''
            # NOTE(review): rand_tree is never advanced, so every step is a
            # single SPR from the START tree rather than a walk -- confirm.
            for i in range(steps):
                current_tree = tum.randSPR(rand_tree,total_nodes)[0]
                treefile.write(tum.toNewickTree(current_tree) + "\n")
                #write ||T1|| + ||T2||
                walknorms += str(norm1 + sqrt(tum.treeNorm(current_tree))) + ','
            #write norms sequence
            nrmfile.write(walknorms[0:-1] + '\n')
        #assumes GTP file is in current working directory
        outfile = "tempseq" + str(__pid__) + ".csv"
        infile_prefix = "tmpsprseq" + str(__pid__)
        infile = infile_prefix + str(k)
        os.system("java -jar " + daf + " -r 0 -o " + outfile + " " + infile)
        #append output to final sequence file
        os.system("cat " + outfile + " | ./toLines.py >> " + out_file_name)
        #cleanup
        os.system("rm " + outfile)
        os.system("rm " + infile_prefix + "*")
if __name__=='__main__':
    # CLI: [-w] <jar> <size|start:end> <steps> <runs> <seed|start:end>
    # Repaired dataset-separator corruption on the 'U' prefix assignment
    # and removed two stray separator tokens.
    if len(sys.argv)<6:
        print ("Too few arguments!!")
        print ("Usage: [-w] <distance algorithm file .jar> <size or size range> <no. SPR steps> <no. runs> <seed or seed range>")
        sys.exit(-1)
    WEIGHTED = False
    if len(sys.argv) == 7:
        WEIGHTED = sys.argv.pop(1) == '-w'
    dist_algo_file = sys.argv[1]
    # Non-GTP jars get the RSP_ prefix; W/U marks weighted vs unweighted.
    if dist_algo_file != "gtp.jar":
        __prefix__ = "RSP_"
    if WEIGHTED:
        __prefix__ = 'W' + __prefix__
    else:
        __prefix__ = 'U' + __prefix__
    #take a single size or a range of sizes
    if ":" in sys.argv[2]:
        size_start, size_end = map(lambda x: int(x),sys.argv[2].split(':'))
    else:
        size_start = int(sys.argv[2])
        size_end = size_start + 1
    size_range = range(size_start,size_end)
    steps = int(sys.argv[3])
    runs = int(sys.argv[4])
    #take a single seed or a range of seeds
    if ":" in sys.argv[5]:
        seed_start,seed_end = map(lambda x: int(x),sys.argv[5].split(':'))
    else:
        seed_start = int(sys.argv[5])
        seed_end = seed_start + 1
    seed_range = range(seed_start,seed_end)
    #set pid property before calling randSPRWalk
    __pid__ = os.getpid()
    for size in size_range:
        for seed in seed_range:
            randSPRwalk(dist_algo_file,size,steps,runs,seed,WEIGHTED)
dparaujo/projeto | app_administrativo/usuario/forms.py | Python | gpl-3.0 | 1,398 | 0.046662 | # -*- coding: utf-8 -*-
from django import forms
from app_academico.curso.models import *
from app_academico.turno.models import TblAcademicoTurno
class UsuarioModelForm(forms.ModelForm):
# def __init__(self, *args, **kwargs):
# super(CursoModelForm, self).__init__(*args, **kwargs)
descricao = forms.CharField(
label = "Descrição",
widget = forms.TextInput(
attrs = {
'placeholder': 'Descrição do curso',
'required': 'required',
'class': 'form-control'
}
)
)
quant_vaga = forms.CharField(
label = "Vagas",
widget = forms.TextInput(
attrs = {
'placeholder': 'Quantidade de vagas',
'required': 'required',
'class': 'form-control'
}
)
)
turno = forms.IntegerField(
label="Turno",
widget=forms.TextInput(
attrs={
'placeholder': 'Nome da Equipe',
'required': 'required',
'class': 'form-control'
}
)
)
# turno = forms.ModelMultipleChoiceField(queryset=TblAcademicoTurno.objects.all())
pre_requisito = forms.ChoiceField(
label="Pré-Requisito",
choices=CONST.PREREQUISITO.choices,
)
ativo = forms.ChoiceField(
label="Ativo",
choices=CONST.ATIVO.choices,
)
# ativo = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(
# choices=CONST.ATIVO.choices), help_text="Ativo"
# )
class Meta:
model = | TblAcademicoCurso
fields = | ('descricao','quant_vaga','turno','pre_requisito', 'ativo') |
sigvef/elma | docs/conf.py | Python | mit | 365 | 0 | import sys
import os
sys.path.insert(0, os.path.abspath('..'))
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
source_suffix = '.rst'
ma | ster_doc = 'index'
project = u'Elma Python Library'
copyright = u'2015, Elma Library Contributors'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme | = 'sphinx_rtd_theme'
|
kubevirt/client-python | test/test_k8s_io_apimachinery_pkg_apis_meta_v1_root_paths.py | Python | apache-2.0 | 1,105 | 0.002715 | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.k8s_io_apimachinery_pkg_apis_meta_v1_root_paths import K8sIoApimachineryPkgApisMetaV1RootPaths
class TestK8sIoApimachineryPkgApisMetaV1RootPaths(unittest.TestCase):
""" K8sIoApimachineryPkgApisMetaV1RootP | aths unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testK8sIoApimachineryPkgApisMetaV1RootPaths(self):
"""
Test K8sIoApimachineryPkgApisMetaV1RootPaths
| """
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.k8s_io_apimachinery_pkg_apis_meta_v1_root_paths.K8sIoApimachineryPkgApisMetaV1RootPaths()
pass
if __name__ == '__main__':
unittest.main()
|
mgax/zechat | migrations/versions/52ceb70dfec2_message.py | Python | mit | 1,158 | 0.002591 | revision = '52ceb70dfec2'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
'message',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('sender', sa.String() | , nullable=False),
sa.Column('recipient', sa.String(), nullable=False),
sa.Column('hash', sa.String(), nullable=False),
sa.Column('payload', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
op.create_index(
op.f('ix_message_sender'),
'message',
['sender'],
unique=Fa | lse,
)
op.create_index(
op.f('ix_message_recipient'),
'message',
['recipient'],
unique=False,
)
op.create_index(
op.f('ix_message_hash'),
'message',
['hash'],
unique=False,
)
def downgrade():
op.drop_index(op.f('ix_message_hash'), table_name='message')
op.drop_index(op.f('ix_message_recipient'), table_name='message')
op.drop_index(op.f('ix_message_sender'), table_name='message')
op.drop_table('message')
|
ThunderGemios10/Corpse-Party-2U-Script-Editor | eboot_patch.py | Python | gpl-3.0 | 25,393 | 0.039601 | # -*- coding: utf-8 -*-
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
### Copyright © 2012-2013 /a/nonymous scanlations
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
from bitstring import ConstBitStream, BitStream
# from enum import Enum
import common
import clt
NAME = "name"
ENABLED = "enabled"
CFG_ID = "cfg_id"
DATA = "data"
POS = "pos"
ORIG = "orig"
PATCH = "patch"
# LANGUAGES = [u"Japanese", u"English", u"French", u"Spanish", u"German", u"Italian", u"Dutch", u"Portuguese", u"Russian", u"Ko | rean", u"Traditional Chinese", u"Simplified Chinese"]
LANGUAGES = [u"日本語", u"English", u"Français", u"Español", u"Deutsch", u"Italiano", u"Nederlands", u"Português", u"Русский", u"한국어", u"繁體中文", u"简体中文"]
LANG_CFG_ID = "sys_menu_lang"
CLT_CFG_ID = "custom_clt"
EBOOT_PATCHES = [
{NAME: "Extend EBOOT", ENABLED: True, CFG_ID: None, DATA:
[
{POS: 0x0000002C, | ORIG: ConstBitStream(hex = "0x0300"), PATCH: ConstBitStream(hex = "0x0400")},
{POS: 0x00000054, ORIG: ConstBitStream(hex = "0x0100000040CB200080CA20000000000028E40000842A2E000600000040000000"), PATCH: ConstBitStream(hex = "0x0100000040CB200020F54E000000000000200100002001000700000010000000")},
{POS: 0x00000074, ORIG: ConstBitStream(hex = "0xA000007070AF21000000000000000000D8560D000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000"), PATCH: ConstBitStream(hex = "0x0100000040EB210080CA20000000000028E40000842A2E000600000040000000A000007070CF22000000000000000000D8560D00000000000000000010000000")},
]
},
{NAME: "Game Button Order", ENABLED: True, CFG_ID: "01_game_btn", DATA:
[
{POS: 0x0007A4C8, ORIG: ConstBitStream(hex = "0x0400B18F"), PATCH: ConstBitStream(hex = "0x48CD330A")},
{POS: 0x0020CB40, ORIG: ConstBitStream(hex = "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), PATCH: ConstBitStream(hex = "0x0400B18F21202002002025320200A014004031360040313A0040843002008014002031360020313A03F9210A00000000")},
]
},
{NAME: "Home/Save Button Order", ENABLED: True, CFG_ID: "03_home_btn", DATA:
[
{POS: 0x0008CCF8, ORIG: ConstBitStream(hex = "0x21108000"), PATCH: ConstBitStream(hex = "0x01000224")},
]
},
{NAME: "Fix Error Code 80020148", ENABLED: True, CFG_ID: "04_fix_80020148", DATA:
[
{POS: 0x00000004, ORIG: ConstBitStream(hex = "0x00"), PATCH: ConstBitStream(hex = "0x01")},
{POS: 0x00000007, ORIG: ConstBitStream(hex = "0x01"), PATCH: ConstBitStream(hex = "0x00")},
]
},
{NAME: "Increase Dialog Line Limit to 96 Characters", ENABLED: True, CFG_ID: "05_dialog_line_96", DATA:
[
# Move current line buffer: raw addresses (0x08CAF3C6 -> 0x08CF3550)
{POS: 0x000CB528, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000CB534, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000D1BD0, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000D1BD4, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000D25D8, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000D25DC, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000D962C, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000D9638, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000D963C, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000DB820, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000DB824, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000DB828, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000DB82C, ORIG: ConstBitStream(hex = "0x26EA"), PATCH: ConstBitStream(hex = "0xD02D")},
{POS: 0x000DB95C, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000DB960, ORIG: ConstBitStream(hex = "0x26EA"), PATCH: ConstBitStream(hex = "0xD02D")},
{POS: 0x000DB96C, ORIG: ConstBitStream(hex = "0x3800"), PATCH: ConstBitStream(hex = "0xC000")},
{POS: 0x000DCFE0, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000DCFEC, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000DE5E8, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000DE5F4, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000DE92C, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000DE938, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000DE93C, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000FF1DC, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000FF230, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000FF1E4, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000FF2D0, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000FF2D8, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000FFD34, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000FFD38, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x000FFE44, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x000FFE48, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x001000B0, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x00100430, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x00100598, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x0010059C, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
{POS: 0x00100724, ORIG: ConstBitStream(hex = "0x2A00"), PATCH: ConstBitStream(hex = "0x2E00")},
{POS: 0x0010072C, ORIG: ConstBitStream(hex = "0x46E9"), PATCH: ConstBitStream(hex = "0xD02A")},
# Move current line buffer: relative addresses
{POS: 0x0020CFF0, ORIG: ConstBitStream(hex = "0x00000000000000000000000000000000"), PATCH: ConstBitStream(hex = "0xCF08043CEA3284240800E00321104400")},
{POS: 0x000CE600, ORIG: ConstBitStream(hex = "0x4010050021104800"), PATCH: ConstBitStream(hex = "0x74CE330E40100500")},
{POS: 0x0020D000, ORIG: ConstBitStream(hex = "0x00000000000000000000000000000000"), PATCH: ConstBitStream(hex = "0xCF08073CEA32E7240800E00321104700")},
{POS: 0x000CE754, ORIG: ConstBitStream(hex = "0x4010060021104800"), PATCH: ConstBitStream(hex = "0x78CE330E40100600")},
{POS: 0x0020D010, ORIG: ConstBitStream(hex = "0x0000000 |
jmptrader/dirigible-spreadsheet | dirigible/fts/tests/test_2774_ExportCSV.py | Python | mit | 7,349 | 0.003402 | # Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
from __future__ import with_statement
from os import path
import urllib2
from urlparse import urljoin
from functionaltest import FunctionalTest, PAGE_LOAD_TIMEOUT
class Test_2774_ExportCSV(FunctionalTest):
def test_can_export_csv(self):
# * Harold logs in to Dirigible and creates a nice shiny new sheet
sheet_id = self.login_and_create_new_sheet()
# * He puts some data including formulae and usercode-calculated stuff into a spreadsheet.
# Some of the data has commas and the like in it, and there is a blank column to the
# left of the data and a blank row above it. There are also non-string cell contents.
self.enter_cell_text(2, 2,
"Data at A2, from a constant")
self.enter_cell_text(2, 3,
"Data at A3, with a \\n that might need escaping")
self.enter_cell_text(2, 4,
"Data at A4, with 'single quotes'")
self.enter_cell_text(2, 5,
'Data at A5, with "double quotes"')
self.enter_cell_text(2, 6,
u'Data at A6, with some unicode: Sacr\xe9 bleu! The \xa3 is expensive, compared to the \u20ac!')
self.enter_cell_text(3, 2, "=2+2")
self.append_usercode("worksheet.E4.value = 'hellooooo there!'")
self.append_usercode(
"worksheet.E5.value = ['list item 1', 'list item 2', 3]")
self.append_usercode(
"worksheet.E6.value = {'oats': 'a cereal which in England is fed to horses, but in Scotland forms the sustenance of the nation'}")
self.append_usercode(
"worksheet.E7.value = lambda x : 2 * x")
self.wait_for_spinner_to_stop()
# * He sees a button that talks about exporting CSVs
self.wait_for_element_visibility('id=id_export_button', True)
self.assertEquals(
self.selenium.get_attribute('id=id_export_button@alt'),
"Download as CSV file"
)
self.assertEquals(
self.selenium.get_attribute('id=id_export_button@title'),
"Download as CSV file"
)
# * He clicks on it, and sees a popup dialog with two links and a close button
self.selenium.click('id=id_export_button')
self.wait_for_element_visibility('id=id_export_dialog', True)
self.wait_for_element_visibility('id=id_export_csv_excel_version', True)
self.wait_for_element_visibility('id=id_export_csv_unicode_version', True)
self.wait_for_element_visibility('id=id_export_dialog_close_button', True)
self.assertEquals(self.selenium.get_value('id=id_export_dialog_close_button'), "Close")
# Were he to click on the former, his browser would do whatever it normally
# does when a user starts a download. This is too hard to test with
# Selenium. Lets go shopping^W^W use urllib2
download_url = self.selenium.get_attribute('id=id_export_csv_excel_version@href')
download_url = urljoin(self.browser.current_url, download_url)
stream = self.get_url_with_session_cookie(download_url)
self.assertEquals(stream.info().gettype(), "text/csv")
sheet_name = 'Sheet %s' % (sheet_id,)
self.assertEquals(
stream.info()['Content-Disposition'],
'attachment; filename=%s.csv' % (sheet_name,)
)
expected_file_name = path.join(
path.dirname(__file__),
"test_data", "expected_csv_file.csv"
)
with open(expected_file_name) as expected_file:
self.assertEquals(
stream.read().replace("\r\n", "\n"),
expected_file.read().replace("\r\n", "\n")
)
# The file downloaded, he closes the dialog.
self.selenium.click('id=id_export_dialog_close_button')
self.wait_for_element_visibility('id=id_export_dialog', False)
def test_can_export_unicode(self):
#Harold-san has a sheet-u-des which has some zugoi kanji in:
sheet_id = self.login_and_create_new_sheet()
some_kanji = u'\u30bc\u30ed\u30a6\u30a3\u30f3\u30b0'
self.enter_cell_text(1, 1, some_kanji)
self.wait_for_spinner_to_stop()
page_url = self.browser.current_url
# * He clicks on a button that clearly allows him to export CSV data.
self.wait_for_element_visibility('id=id_export_button', True)
# * He clicks on it, and sees a popup dialog with two links
self.selenium.click('id=id_export_button')
self.wait_for_element_visibility('id=id_export_dialog', True)
self.wait_for_element_visibility('id=id_export_csv_excel_version', True)
self.wait_for_element_visibility('id=id_export_csv_unicode_version', True)
# * He likes excel, so he tries to download the excel version
self.click_link('id_export_csv_excel_version')
# He is taken to an error page, with a helpful message suggesting he
# tries again using the international version
self.assertEquals(self.browser.title, "CSV Export Error: Dirigible")
self.assertEquals(
self.get_text("id=id_server_error_title"),
"Could not export CSV file"
)
error_text = self.get_text("id=id_server_error_text")
msg = "Sorry, your spreadsheet contains characters that cannot be saved in Excel CSV format"
self.assertTrue(msg in error_text)
msg = "Please try again using the international version"
self.assertTrue(msg in error_text)
# * He notes there is a link back to the sheet page
self.wait_for_element_visibility('id=id_sheet_link', True)
# * But he spots a helpful link to the documentation, which he follows
self.selenium.click('css=a[href="/documentation/import_export.html"]')
self.selenium.wait_for_page_to_load(PAGE_LOAD_TIMEOUT)
self.assertTrue(
'Importing and Exporting' in self.browser.title
)
# He goes back to his sheet page
self.selenium.open(page_url)
self.wait_for_grid_to_appear()
# * And tries his export again
self.selenium.click('id=id_export_button')
# test unicode download
download_url = self.selenium.get_attribute('id=id_export_csv_unicode_version@href')
download_url = urljoin(self.browser.current_url, download_url)
opener = urllib2.build_opener()
session_cookie = self.selenium.get_cookie_by_name('sessionid')
opener.addheaders.append(('Cookie', 'sessionid=%s' % (session_cookie, )))
stream = opener.open(download_url)
self.assertEquals(stream.info().gettype(), "text/csv")
sheet_name = 'Sheet %s' % (sheet_id,)
self.assertEquals(
| stream.in | fo()['Content-Disposition'],
'attachment; filename=%s.csv' % (sheet_name,)
)
expected_file_name = path.join(
path.dirname(__file__),
"test_data", "expected_unicode_csv.csv"
)
with open(expected_file_name) as expected_file:
self.assertEquals(
stream.read().replace("\r\n", "\n"),
expected_file.read().replace("\r\n", "\n")
)
|
DoubleNegativeVisualEffects/gaffer | python/GafferTest/ExecutableOpHolderTest.py | Python | bsd-3-clause | 5,467 | 0.049387 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class TestOp (IECore.Op) :
def __init__( self ) :
IECore.Op.__init__( self, "Test op", IECore.IntParameter( "result", "", 0 ) )
self.counter = 0
def doOperation( self, args ) :
self.counter += 1
return IECore.IntData( self.counter )
class ExecutableOpHolderTest( unittest.TestCase ) :
def testType( self ) :
n = Gaffer.ExecutableOpHolder()
self.assertEqual( n.typeName(), "Gaffer::ExecutableOpHolder" )
self.failUnless( n.isInstanceOf( Gaffer.ParameterisedHolderNode.staticTypeId() ) )
self.failUnless( n.isInstanceOf( Gaffer.Node.staticTypeId() ) )
def testIsExecutable( self ) :
self.assertTrue( Gaffer.ExecutableNode.isExecutable( Gaffer.ExecutableOpHolder ) )
n = Gaffer.ExecutableOpHolder()
self.assertTrue( Gaffer.ExecutableNode.isExecutable( n ) )
def testExecutablePlugs( self ) :
n = Gaffer.ExecutableOpHolder()
self.assertEqual( n['requirement'].direction(), Gaffer.Plug.Direction.Out )
self.assertEqual( n['requirements'].direction(), Gaffer.Plug.Direction.In )
def testSetOp( self ) :
n = Gaffer.ExecutableOpHolder()
opSpec = GafferTest.ParameterisedHolderTest.classSpecification( "primitive/renameVariables", "IECORE_OP_PATHS" )[:-1]
n.setOp( *opSpec )
def testExecutableMethods( self ) :
n = Gaffer.ExecutableOpHolder()
opSpec = GafferTest.ParameterisedHolderTest.classSpecification( "primitive/renameVariables", "IECORE_OP_PATHS" )[:-1]
n.setOp( *opSpec )
c = Gaffer.Context()
h = n.executionHash(c)
self.assertEqual( n.executionHash(c), h )
def testSetParameterised( self ) :
n = Gaffer.ExecutableOpHolder()
op = TestOp()
n.setParameterised( op )
self.assertEqual( op, n.getOp() )
def testExecute( self ) :
n = Gaffer.ExecutableOpHolder()
op = TestOp()
n.setParameterised( op )
script = n.scriptNode()
self.assertEqual( op.counter, 0 )
n.execute( [ Gaffer.Context() ] )
self.assertEqual( op.counter, 1 )
def testRequirements( self ) :
n1 = Gaffer.ExecutableOpHolder()
n2 = Gaffer.ExecutableOpHolder()
n2a = Gaffer.ExecutableOpHolder()
n2b = Gaffer.ExecutableOpHolder()
r1 = Gaffer.Plug( name = "r1" )
n1['requirements'].addChild( r1 )
r1.setInput( n2['requirement'] )
r1 = Gaffer.Plug( name = "r1" )
n2['requirements'].addChild( r1 )
r1.setInput( n2a['requirement'] )
r2 = Gaffer.Plug( name = "r2" )
n2['requirements'].addChild( r2 )
r2.setInput( n2b['requirement'] )
c = Gaffer.Context()
self.assertEqual( n2a.executionRequirements(c), [] )
self.assertEqual( n2b.executionRequirements(c), [] )
n2Requirements = n2.executionRequirements(c)
self.assertEqual( n2Requirements[0].node, n2a )
self.assertEqual( n2Requirements[0].context, c )
self | .assertEqual( n2Requirements[1].node, n2b )
self.assertEqual( n2Requirements[1].context, c )
t1 = Gaffer.ExecutableNode.Task(n2a,c)
t2 = Gaffer.ExecutableNode.Task(n2b,c)
self.assertEqual( n2Requirements[0], t1 )
self.assertEqual( n2Requirements[1], t2 )
self.assertEqual( len(set(n2.executionRequirements(c)).difference([ t1, t2])), 0 )
self.assertEqual( | n1.executionRequirements(c), [ Gaffer.ExecutableNode.Task(n2,c) ] )
def testSerialise( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.ExecutableOpHolder()
opSpec = GafferTest.ParameterisedHolderTest.classSpecification( "primitive/renameVariables", "IECORE_OP_PATHS" )[:-1]
s["n"].setOp( *opSpec )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s["n"]["parameters"].keys(), s2["n"]["parameters"].keys() )
if __name__ == "__main__":
unittest.main()
|
danrg/RGT-tool | src/RGT/XML/SVG/clipPathNode.py | Python | mit | 3,223 | 0.003413 | from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from RGT.XML.SVG.Attribs.conditionalProcessingAttributes import ConditionalProcessingAttributes
from RGT.XML.SVG.Attribs.presentationAttributes import PresentationAttributes
from RGT.XML.SVG.Attribs.classAttribute import ClassAttribute
from RGT.XML.SVG.Attribs.styleAttribute import StyleAttribute
from types import StringType
class ClipPathNode(BasicSvgNode, ConditionalProcessingAttributes, PresentationAttributes, ClassAttribute,
StyleAttribute):
svgNodeType = BasicSvgNode.SVG_CLIP_PATH_NODE
ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED = 'externalResourcesRequired'
ATTRIBUTE_TRANSFORM = 'transform'
ATTRIBUTE_CLIP_PATH_UNITS = 'clipPathUnits'
def __init__(self, ownerDoc):
BasicSvgNode.__init__(self, ownerDoc, 'clipPath')
ConditionalProcessingAttributes.__init__(self)
PresentationAttributes.__init__(self)
ClassAttribute.__init__(self)
StyleAttribute.__init__(self)
#add groups
self._allowedSvgChildNodes.update(self.SVG_GROUP_ANIMATION_ELEMENTS, self.SVG_GROUP_DESCRIPTIVE_ELEMENTS,
self.SVG_GROUP_SHAPE_ELEMENTS)
#ad individual nodes
self._allowedSvgChildNodes.update({self.SVG_TEXT_NODE, self.SVG_USE_NODE})
def setExternalResourcesRequired(self, data):
allowedValues = ['true', 'false']
| if data is not None:
if data not in allowedValues:
values = ''
for val | ue in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
raise ValueError('Value not allowed, only ' + values + 'are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED, data)
def setTransform(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_TRANSFORM, data)
def setClipPathUnits(self, data):
allowedValues = ['userSpaceOnUse', 'objectBoundingBox']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
raise ValueError('Value not allowed, only ' + values + 'are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_CLIP_PATH_UNITS, data)
def getExternalResourcesRequired(self):
node = self._getNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED)
if node is not None:
return node.nodeValue
return None
def getTransform(self):
node = self._getNodeAttribute(self.ATTRIBUTE_TRANSFORM)
if node is not None:
return node.nodeValue
return None
def getClipPathUnits(self):
node = self._getNodeAttribute(self.ATTRIBUTE_CLIP_PATH_UNITS)
if node is not None:
return node.nodeValue
return None |
Alignak-monitoring-contrib/alignak-app | test/test_all_items.py | Python | agpl-3.0 | 12,482 | 0.000801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
import unittest2
from alignak_app.utils.config import settings
from alignak_app.locales.locales import init_localization
from alignak_app.backend.backend import app_backend
from alignak_app.items.daemon import Daemon
from alignak_app.items.event import Event
from alignak_app.items.history import History
from alignak_app.items.host import Host
from alignak_app.items.item import *
from alignak_app.items.livesynthesis import LiveSynthesis
from alignak_app.items.service import Service
from alignak_app.items.user import User
from alignak_app.items.realm import Realm
class TestAllItems(unittest2.TestCase):
"""
This file test methods of ItemModel class objects
"""
settings.init_config()
init_localization()
app_backend.login()
# Host data test
host_list = []
for i in range(0, 10):
host = Host()
host.create(
'_id%d' % i,
{
'name': 'host%d' % i,
'ls_downtimed': True,
'ls_acknowledged': True,
'ls_state': 'UNKNOWN',
'_overall_state_id': 4,
'passive_checks_enabled': False,
'active_checks_enabled': True
},
'host%d' % i
)
host_list.append(host)
# Service data test
service_list = []
for i in range(0, 10):
service = Service()
service.create(
'_id%d' % i,
{
'name': 'service%d' % i,
'alias': 'Service %d' % i,
'host': '_id%d' % i,
'ls_acknowledged': False,
'ls_downtimed': False,
'ls_state': 'CRITICAL',
'aggregation': 'disk',
'_overall_s | tate_id': 4,
'passive_checks_enabled': False,
'active_checks_enabled': True
},
'service%d' % i
)
service_list.append(service)
def test_item_ | model(self):
"""Create ItemModel"""
under_test = Item()
under_test.create('_id', {'ls_state': 'DOWN'}, 'name')
self.assertTrue('_id' == under_test.item_id)
self.assertTrue('ls_state' in under_test.data)
self.assertTrue('DOWN' == under_test.data['ls_state'])
self.assertTrue('name' == under_test.name)
def test_item_model_get_data(self):
"""Get Data ItemModel"""
under_test = Item()
under_test.create('_id', {'ls_state': 'DOWN', 'ls_acknowledged': True}, 'name')
data_test = under_test.data['ls_state']
self.assertTrue('DOWN' == data_test)
def test_item_model_update_data(self):
"""Update Data ItemModel"""
under_test = Item()
under_test.create('_id', {'ls_state': 'DOWN', 'ls_acknowledged': True}, 'name')
under_test.update_data('ls_acknowledged', False)
data_test = under_test.data['ls_acknowledged']
self.assertTrue(data_test is False)
def test_get_icon_name(self):
"""Get Icon Name"""
under_test = get_icon_name(
'host', 'UP', acknowledge=False, downtime=False, monitored=1)
self.assertEqual('hosts_up', under_test)
under_test = get_icon_name(
'service', 'WARNING', acknowledge=False, downtime=False, monitored=1)
self.assertEqual('services_warning', under_test)
under_test = get_icon_name(
'host', 'DOWN', acknowledge=True, downtime=False, monitored=1)
self.assertEqual('acknowledge', under_test)
under_test = get_icon_name(
'service', 'UNREACHABLE', acknowledge=True, downtime=True, monitored=2)
self.assertEqual('downtime', under_test)
under_test = get_icon_name(
'host', 'WRONG_STATUS', acknowledge=False, downtime=False, monitored=1)
self.assertEqual('error', under_test)
under_test = get_icon_name(
'host', 'UP', acknowledge=False, downtime=False, monitored=False + False)
self.assertEqual('hosts_not_monitored', under_test)
def test_get_icon_name_from_state(self):
"""Get Icon Name from State"""
under_test = get_icon_name_from_state('host', 'UP')
self.assertEqual('hosts_up', under_test)
under_test = get_icon_name_from_state('service', 'CRITICAL')
self.assertEqual('services_critical', under_test)
under_test = get_icon_name_from_state('host', 'ACKNOWLEDGE')
self.assertEqual('acknowledge', under_test)
under_test = get_icon_name_from_state('service', 'DOWNTIME')
self.assertEqual('downtime', under_test)
def test_get_real_host_state_icon(self):
"""Get Real Host State Icon"""
# Service data test
services_test = []
for i in range(0, 5):
service = Service()
service.create(
'_id%d' % i,
{'name': 'service%d' % i, '_overall_state_id': i},
'service%d' % i
)
services_test.append(service)
service = Service()
service.create(
'other_id2%d' % i,
{'name': 'other_service2%d' % i, '_overall_state_id': i},
'other_service%d' % i
)
services_test.append(service)
under_test = get_overall_state_icon(services_test, 0)
self.assertEqual('all_services_critical', under_test)
# Overall state id of 10 does not exist
under_test = get_overall_state_icon([], 10)
self.assertEqual('all_services_none', under_test)
def test_get_request_history_model(self):
"""Get History Request Model"""
under_test = History.get_request_model()
self.assertTrue('endpoint' in under_test)
self.assertEqual('history', under_test['endpoint'])
self.assertTrue('params' in under_test)
self.assertTrue('projection' in under_test)
def test_get_history_icon_name_from_message(self):
"""Get History Icon from State"""
under_test = History.get_history_icon_name('UNKNOWN', 'downtime')
self.assertEqual('downtime', under_test)
under_test = History.get_history_icon_name('UP', 'ack')
self.assertEqual('acknowledge', under_test)
under_test = History.get_history_icon_name('UP', 'event_type')
self.assertEqual('hosts_up', under_test)
under_test = History.get_history_icon_name('DOWN', 'event_type')
self.assertEqual('hosts_down', under_test)
under_test = History.get_history_icon_name('UNREACHABLE', 'event_type')
self.assertEqual('services_unreachable', under_test)
under_test = History.get_history_icon_name('OK', 'event_type')
self.assertEqual('services_ok', under_test)
under_test = History.get_history_icon_name('WARNING', 'event_type')
self.assertEqual('services_warning', under_test)
under_test = History.get_history_icon_name('CRITICAL', 'event_type')
self.assertEqual('services_critical', under_test)
under_test = History.get_history_icon_name('UNKNOWN', 'event_type')
self.assertEqual('services_unknown', under_test)
under_test = History.get_history_icon_name('error', 'event_type')
self.assertEqual('error', under_test)
def test_get_request_user_model(self):
""" |
jaggu303619/asylum | openerp/addons/mail/wizard/invite.py | Python | agpl-3.0 | 4,418 | 0.004301 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
class invite_wizard(osv.osv_memory):
    """ Wizard to invite partners and make them followers of a document.

    Optionally sends each newly-added follower an invitation e-mail when
    the user wrote a personal message.
    """
    _name = 'mail.wizard.invite'
    _description = 'Invite wizard'

    def default_get(self, cr, uid, fields, context=None):
        """Prefill the invitation message, naming the followed document
        when its model and id are available in the defaults."""
        result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
        if 'message' in fields and result.get('res_model') and result.get('res_id'):
            # Use the document's display name in the default message.
            document_name = self.pool.get(result.get('res_model')).name_get(cr, uid, [result.get('res_id')], context=context)[0][1]
            message = _('<div>You have been invited to follow %s.</div>') % document_name
            result['message'] = message
        elif 'message' in fields:
            result['message'] = _('<div>You have been invited to follow a new document.</div>')
        return result

    _columns = {
        'res_model': fields.char('Related Document Model', size=128,
                                 required=True, select=1,
                                 help='Model of the followed resource'),
        'res_id': fields.integer('Related Document ID', select=1,
                                 help='Id of the followed resource'),
        'partner_ids': fields.many2many('res.partner', string='Partners'),
        'message': fields.html('Message'),
    }

    def add_followers(self, cr, uid, ids, context=None):
        """Subscribe the selected partners to the wizard's document and,
        if a personal message exists, e-mail it to each new follower.

        :return: action dict closing the wizard window.
        """
        for wizard in self.browse(cr, uid, ids, context=context):
            model_obj = self.pool.get(wizard.res_model)
            document = model_obj.browse(cr, uid, wizard.res_id, context=context)
            # filter partner_ids to get the new followers, to avoid sending
            # email to already following partners
            new_follower_ids = [p.id for p in wizard.partner_ids if p.id not in document.message_follower_ids]
            model_obj.message_subscribe(cr, uid, [wizard.res_id], new_follower_ids, context=context)
            # send an email only if a personal message exists
            if wizard.message and not wizard.message == '<br>':  # when deleting the message, cleditor keeps a <br>
                # add the sender's signature to the message body
                user_id = self.pool.get("res.users").read(cr, uid, [uid], fields=["signature"], context=context)[0]
                signature = user_id and user_id["signature"] or ''
                if signature:
                    wizard.message = tools.append_content_to_html(wizard.message, signature, plaintext=True, container_tag='div')
                # FIXME 8.0: use notification_email_send, send a wall message
                # and let mail handle email notification + message box
                mail_mail = self.pool.get('mail.mail')  # loop-invariant: fetch once
                for follower_id in new_follower_ids:
                    # the invite wizard should create a private message not
                    # related to any object -> no model, no res_id
                    mail_id = mail_mail.create(cr, uid, {
                        'model': wizard.res_model,
                        'res_id': wizard.res_id,
                        'subject': _('Invitation to follow %s') % document.name_get()[0][1],
                        'body_html': '%s' % wizard.message,
                        'auto_delete': True,
                        }, context=context)
                    mail_mail.send(cr, uid, [mail_id], recipient_ids=[follower_id], context=context)
        return {'type': 'ir.actions.act_window_close'}
|
psusloparov/sneeze | pocket_change/pocket_change/rest/components/jira_extensions.py | Python | apache-2.0 | 5,279 | 0.007388 | from pocket_change.rest.util import Preserializer, JiraRelatedResource
from pocket_change.rest.components import kaichu
from flask import g, request, current_app
from pocket_change import sqlalchemy_db
from flask.ext.restful import Resource
from flask.ext.restful.reqparse import RequestParser
from flask.ext.login import current_user
from pocket_change.auth import get_user_from_token, PocketChangeUser
from datetime import timedelta
@Preserializer.test_cycle.expand_handler('jira_issue')
@Preserializer.case_execution.expand_handler('jira_issue')
def expand_issue(issue_row, expand_tree=None):
    """Serialize the JIRA issue linked to *issue_row* into a plain dict.

    Returns an empty dict when no issue is linked.  *expand_tree* is
    accepted for handler-interface compatibility but not used here.
    """
    if expand_tree is None:
        expand_tree = {}
    if not (issue_row and issue_row.issue_id):
        return {}
    issue = g.jira.issue(str(issue_row.issue_id))
    data = {'id': issue.id, 'key': issue.key,
            'summary': issue.fields.summary,
            'description': issue.fields.description,
            'status': {'id': issue.fields.status.id,
                       'name': issue.fields.status.name}}
    # Resolution is optional on open issues.
    if issue.fields.resolution:
        data['resolution'] = {'id': issue.fields.resolution.id,
                              'name': issue.fields.resolution.name}
    return data
@JiraRelatedResource.plugin('process_search_data')
def filter_by_jira_issue(resource, expand=None, query=None, **kwargs):
    """Narrow a resource search query by JIRA ``issue_id`` and/or
    ``issue_key`` taken from the request's query string.

    Returns the plugin-chain dict ``{'data': ..., 'continue': bool}``.
    ``continue`` is False (with an empty ``search_result``) when the issue
    key cannot be resolved or contradicts the given issue id.
    """
    if not query:
        query = sqlalchemy_db.session.query(resource.__class__.db_model)
    try:
        # NOTE(review): a non-numeric issue_id raises ValueError here
        # (only KeyError is handled) -- presumably intentional; confirm.
        issue_id = int(request.args['issue_id'])
    except KeyError:
        issue_id = None
    else:
        query = (query.join(resource.__class__.jira_issue_db_model)
                      .filter(resource.__class__.jira_issue_db_model.issue_id==issue_id))
    try:
        issue_key = request.args['issue_key']
    except KeyError:
        pass
    else:
        try:
            issue_id_from_key = g.jira.issue(issue_key).id
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any JIRA lookup failure still means
        # "no such issue" -> empty result set.
        except Exception:
            kwargs['search_result'] = []
            return {'data' : (resource, expand, query, kwargs),
                    'continue' : False}
        else:
            query = (query.join(resource.__class__.jira_issue_db_model)
                          .filter(resource.__class__.jira_issue_db_model.issue_id==issue_id_from_key))
            # Conflicting id/key filters can never match anything.
            if issue_id and issue_id != issue_id_from_key:
                kwargs['search_result'] = []
                return {'data' : (resource, expand, query, kwargs),
                        'continue' : False}
    return {'data' : (resource, expand, query, kwargs),
            'continue' : True}
@kaichu.endpoint('jira_auth_data')
@kaichu.route('/jira_auth_data')
class JiraAuthData(Resource):
    """REST endpoint handing out a user's JIRA OAuth credentials.

    Requires the application key plus either a valid single-use token or
    a username/password pair; responds with the RSA key and OAuth
    secret/token, or a 400 with an explanatory message.
    """

    def get(self):
        app_key = request.args.get('app_key', None)
        username = request.args.get('username', None)
        password = request.args.get('password', None)
        token = request.args.get('token', None)
        user = None
        if app_key and app_key == current_app.config['JIRA_APP_KEY']:
            if username:
                if token:
                    user = get_user_from_token(token, username)
                    used = False
                    if user:
                        # Consume one use of the single-use token.
                        used = user.token.use()
                    if used:
                        sqlalchemy_db.session.merge(user.token)
                        sqlalchemy_db.session.commit()
                    elif not password and not used:
                        # Bad token and no password fallback available.
                        return {'message' : 'Invalid token.'}, 400
                if password:
                    user = PocketChangeUser(None, username, password)
                    if user.is_authenticated():
                        User = sqlalchemy_db.models['User']
                        try:
                            db_user = (sqlalchemy_db.session.query(User)
                                       .filter(User.name==user.name).one())
                        # Narrowed from a bare `except:`; a missing row
                        # (NoResultFound) means we create the user record.
                        except Exception:
                            db_user = User(name=user.name, password=password)
                            sqlalchemy_db.session.add(db_user)
                            sqlalchemy_db.session.commit()
                        # Issue a short-lived, single-use token and consume
                        # one use immediately for this request.
                        user.token = db_user.get_new_token(current_app.secret_key[:16],
                                                           expires=timedelta(hours=6),
                                                           max_uses=1)
                        user.token.use()
                        sqlalchemy_db.session.commit()
                    else:
                        return {'message' : 'Invalid username/password.'}, 400
                if not user:
                    return {'message' : 'Must provide password or token.'}, 400
            else:
                return {'message' : 'Must provide username.'}, 400
        else:
            return {'message' : 'app_key missing or invalid.'}, 400
        with open(current_app.config['JIRA_RSA_KEY_FILE'], 'r') as rsa_file:
            rsa_data = rsa_file.read()
        if hasattr(user.user, 'jira') and user.user.jira and user.user.jira.active:
            return {'rsa_key' : rsa_data,
                    'oauth_secret' : user.user.jira.oauth_secret,
                    'oauth_token' : user.user.jira.oauth_token}
        else:
            return {'message' : "User's token is expired or revoked."}, 400
nicememory/pie | pyglet/pyglet/input/__init__.py | Python | apache-2.0 | 7,513 | 0.001863 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Joystick, tablet and USB HID device support.
This module provides a unified interface to almost any input device, besides
the regular mouse and keyboard support provided by
:py:class:`~pyglet.window.Window`. At the lowest
level, :py:func:`get_devices` can be used to retrieve a list of all supported
devices, including joysticks, tablets, space controllers, wheels, pedals, remote
controls, keyboards and mice. The set of returned devices varies greatly
depending on the operating system (and, of course, what's plugged in).
At this level pyglet does not try to interpret *what* a particular device is,
merely what controls it provides. A :py:class:`Control` can be either a button,
whose value is either ``True`` or ``False``, or a relative or absolute-valued
axis, whose value is a float. Sometimes the name of a control can be provided
(for example, ``x``, representing the horizontal axis of a joystick), but often
not. In these cases the device API may still be useful -- the user will have
to be asked to press each button in turn or move each axis separately to
identify them.
Higher-level interfaces are provided for joysticks, tablets and the Apple
remote control. These devices can usually be identified by pyglet positively,
and a base level of functionality for each one provided through a common
interface.
To use an input device:
1. Call :py:func:`get_devices`, :py:func:`get_apple_remote` or
:py:func:`get_joysticks`
to retrieve and identify the device.
2. For low-level devices (retrieved by :py:func:`get_devices`), query the
devices list of controls and determine which ones you are interested in. For
high-level interfaces the set of controls is provided by the interface.
3. Optionally attach event handlers to controls on the device.
4. Call :py:meth:`Device.open` to begin receiving events on the device. You can
begin querying the control values after this time; they will be updated
asynchronously.
5. Call :py:meth:`Device.close` when you are finished with the device (not
needed if your application quits at this time).
To use a tablet, follow the procedure above using :py:func:`get_tablets`, but
note that no control list is available; instead, calling :py:meth:`Tablet.open`
returns a :py:class:`TabletCanvas` onto which you should set your event
handlers.
.. versionadded:: 1.2
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
from .base import Device, Control, RelativeAxis, AbsoluteAxis, Button
from .base import Joystick, AppleRemote, Tablet
from .base import DeviceException, DeviceOpenException, DeviceExclusiveException
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
def get_apple_remote(display=None):
    """Get the Apple remote control device.

    The Apple remote is the small white 6-button remote control shipped
    with most recent Apple desktops and laptops; it can only be used on
    Mac OS X.

    :Parameters:
        display : `~pyglet.canvas.Display`
            Currently ignored.

    :rtype: AppleRemote
    :return: The remote device, or `None` if the computer does not support it.
    """
    # Default (non-darwin) implementation: no remote available.  The
    # platform-specific imports at the end of this module replace this
    # function on Mac OS X.
    return None
if _is_epydoc:
    # Documentation build: declare stub signatures only, so epydoc can pick
    # up the docstrings without importing any platform-specific back-end.
    def get_devices(display=None):
        """Get a list of all attached input devices.

        :Parameters:
            display : `~pyglet.canvas.Display`
                The display device to query for input devices.  Ignored on Mac
                OS X and Windows.  On Linux, defaults to the default display
                device.

        :rtype: list of :py:class:`Device`
        """

    def get_joysticks(display=None):
        """Get a list of attached joysticks.

        :Parameters:
            display : `~pyglet.canvas.Display`
                The display device to query for input devices.  Ignored on Mac
                OS X and Windows.  On Linux, defaults to the default display
                device.

        :rtype: list of :py:class:`Joystick`
        """

    def get_tablets(display=None):
        """Get a list of tablets.

        This function may return a valid tablet device even if one is not
        attached (for example, it is not possible on Mac OS X to determine if
        a tablet device is connected).  Despite returning a list of tablets,
        pyglet does not currently support multiple tablets, and the behaviour
        is undefined if more than one is attached.

        :Parameters:
            display : `~pyglet.canvas.Display`
                The display device to query for input devices.  Ignored on Mac
                OS X and Windows.  On Linux, defaults to the default display
                device.

        :rtype: list of :py:class:`Tablet`
        """
else:
    # Fallback for platforms without tablet support; the platform-specific
    # imports below override this where a real implementation exists.
    def get_tablets(display=None):
        return []

    from pyglet import compat_platform

    if compat_platform.startswith('linux'):
        # Linux: combine evdev devices (joysticks, gamepads) with XInput
        # devices; tablets come from the XInput tablet back-end.
        from .x11_xinput import get_devices as xinput_get_devices
        from .x11_xinput_tablet import get_tablets
        from .evdev import get_devices as evdev_get_devices
        from .evdev import get_joysticks

        def get_devices(display=None):
            # Merge both device sources into a single list.
            return (evdev_get_devices(display) +
                    xinput_get_devices(display))
    elif compat_platform in ('cygwin', 'win32'):
        # Windows: DirectInput for generic devices and joysticks; Wintab
        # for tablets, keeping the empty fallback if wintab is unavailable.
        from .directinput import get_devices, get_joysticks
        try:
            from .wintab import get_tablets
        except:
            pass
    elif compat_platform == 'darwin':
        # Mac OS X: choose the HID back-end matching the windowing layer
        # (Cocoa vs. Carbon); only Carbon provides a tablet implementation.
        from pyglet import options as pyglet_options
        if pyglet_options['darwin_cocoa']:
            from .darwin_hid import get_devices, get_joysticks, get_apple_remote
        else:
            from .carbon_hid import get_devices, get_joysticks, get_apple_remote
            from .carbon_tablet import get_tablets
|
Alwnikrotikz/kegbot | pykeg/src/pykeg/core/migrations/0047_add_system_stats.py | Python | gpl-2.0 | 26,712 | 0.007637 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the ``core_systemstats`` table."""
        # Adding model 'SystemStats'
        db.create_table('core_systemstats', (
            ('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('revision', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            # Aggregated statistics stored as a JSON blob, empty by default.
            ('stats', self.gf('pykeg.core.jsonfield.JSONField')(default='{}')),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            # Each stats row belongs to one KegbotSite.
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.KegbotSite'])),
        ))
        db.send_create_signal('core', ['SystemStats'])
    def backwards(self, orm):
        """Reverse the migration: drop the ``core_systemstats`` table."""
        # Deleting model 'SystemStats'
        db.delete_table('core_systemstats')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [] | , {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'T | rue', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beerdb.beerimage': {
'Meta': {'object_name': 'BeerImage'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beerstyle': {
'Meta': {'object_name': 'BeerStyle'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beertype': {
'Meta': {'object_name': 'BeerType'},
'abv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'brewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.Brewer']"}),
'calories_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'carbs_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'edition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'beers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'specific_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerStyle']"})
},
'beerdb.brewer': {
'Meta': {'object_name': 'Brewer'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'country': ('pykeg.core.fields.CountryField', [], {'default': "'USA'", 'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'brewers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'origin_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'production': ('django.db.models.fields.CharField', [], {'default': "'commercial'", 'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '10 |
luoguizhou/gooderp_addons | core/models/res_company.py | Python | agpl-3.0 | 2,323 | 0.003017 | # -*- coding: utf-8 -*-
from odoo import api, fields, models, tools
from odoo.exceptions import UserError
import os
from odoo.tools import misc
import re
# Inventory cost-calculation methods; FIFO is implemented.
# (Selection values: monthly weighted average / standard cost / FIFO.)
CORE_COST_METHOD = [('average', u'全月一次加权平均法'),
                    ('std',u'定额成本'),
                    ('fifo', u'先进先出法'),
                    ]
class ResCompany(models.Model):
    """GoodERP extensions of ``res.company``: start date, inventory
    costing method, default tax rates, default bank account and logo."""
    _inherit = 'res.company'

    @api.one
    @api.constrains('email')
    def _check_email(self):
        """Validate the e-mail address format, raising UserError on failure."""
        if self.email:
            # Raw string avoids the invalid-escape warning on `\.`;
            # the pattern itself is unchanged.
            res = re.match(r'^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$', self.email)
            if not res:
                raise UserError(u'请检查邮箱格式是否正确: %s' % self.email)

    start_date = fields.Date(u'启用日期',
                             required=True,
                             default=lambda self: fields.Date.context_today(self))
    cost_method = fields.Selection(CORE_COST_METHOD, u'存货计价方法',
                                   help=u'''GoodERP仓库模块使用先进先出规则匹配
每次出库对应的入库成本和数量,但不实时记账。
财务月结时使用此方法相应调整发出成本''', default='average', required=True)
    draft_invoice = fields.Boolean(u'根据发票确认应收应付',
                                   help=u'勾选这里,所有新建的结算单不会自动记账')
    import_tax_rate = fields.Float(string=u"默认进项税税率")
    output_tax_rate = fields.Float(string=u"默认销项税税率")
    bank_account_id = fields.Many2one('bank.account', string=u'开户行')

    def _get_logo(self):
        # Indirection kept so subclasses can override the implementation.
        return self._get_logo_impl()

    def _get_logo_impl(self):
        """Return core/static/description/logo.png, base64-encoded, as the
        default company logo."""
        return open(misc.file_open('core/static/description/logo.png').name, 'rb').read().encode('base64')

    logo = fields.Binary(related='partner_id.image',
                         default=_get_logo, attachment=True)
    company_id = fields.Many2one(
        'res.company',
        string=u'公司',
        change_default=True,
        default=lambda self: self.env['res.company']._company_default_get())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.