| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 values) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 value) | ratio (float64, 1.5-13.6) | config_test (bool, 2 values) | has_no_keywords (bool, 2 values) | few_assignments (bool, 1 value) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mozman/ezdxf
|
tests/test_01_dxf_entities/test_131_field_list.py
|
1
|
2369
|
# Copyright (c) 2019 Manfred Moitzi
# License: MIT License
from typing import cast
import pytest
import ezdxf
from ezdxf.entities.idbuffer import FieldList
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
FIELDLIST = """0
FIELDLIST
5
0
102
{ACAD_REACTORS
330
0
102
}
330
0
100
AcDbIdSet
90
12
100
AcDbFieldList
"""
@pytest.fixture
def entity():
return FieldList.from_text(FIELDLIST)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'FIELDLIST' in ENTITY_CLASSES
def test_default_init():
entity = FieldList()
assert entity.dxftype() == 'FIELDLIST'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = FieldList.new(handle='ABBA', owner='0', dxfattribs={
})
assert entity.dxf.flags == 0
assert len(entity.handles) == 0
def test_load_from_text(entity):
assert entity.dxf.flags == 12
assert len(entity.handles) == 0
def test_write_dxf():
entity = FieldList.from_text(FIELDLIST)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(FIELDLIST)
assert result == expected
@pytest.fixture(scope='module')
def doc():
return ezdxf.new('R2007')
def test_generic_field_list(doc):
field_list = doc.objects.new_entity('FIELDLIST', {})
assert field_list.dxftype() == 'FIELDLIST'
assert len(field_list.handles) == 0
def test_set_get_field_list(doc):
field_list = doc.objects.new_entity('FIELDLIST', {})
assert field_list.dxftype() == 'FIELDLIST'
field_list.handles = ['FF', 'EE', 'DD']
handles = field_list.handles
assert len(handles) == 3
assert handles == ['FF', 'EE', 'DD']
handles.append('FFFF')
assert handles[-1] == 'FFFF'
def test_dxf_tags(doc):
buffer = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
tags = TagCollector.dxftags(buffer)[-4:]
assert len(tags) == 4
assert tags[0] == (330, 'FF')
assert tags[-1] == (330, 'CC')
def test_clone(doc):
buffer = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
buffer2 = cast(FieldList, buffer.copy())
buffer2.handles[-1] = 'ABCD'
assert buffer.handles[:-1] == buffer2.handles[:-1]
assert buffer.handles[-1] != buffer2.handles[-1]
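# Illustrative sketch (not part of the original test module): exporting a
# populated FIELDLIST through TagCollector, mirroring test_dxf_tags() above.
def test_export_two_handles(doc):
    field_list = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
    field_list.handles = ['1A', '1B']
    tags = TagCollector.dxftags(field_list)[-2:]
    assert tags[0] == (330, '1A')
    assert tags[-1] == (330, '1B')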
|
mit
| -7,176,212,482,512,180,000
| 21.561905
| 69
| 0.662727
| false
| 3.133598
| true
| false
| false
|
krautradio/PyRfK
|
lib/rfk/database/base.py
|
1
|
14479
|
import time
import hashlib
from datetime import timedelta
from passlib.hash import bcrypt
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref, exc
from sqlalchemy.dialects.mysql import INTEGER as Integer
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql.expression import case
import re
import os
from flask.ext.login import AnonymousUserMixin
import rfk.database
from rfk.types import SET, ENUM
from rfk import exc as rexc
from rfk import CONFIG
from rfk.database import Base, UTCDateTime
from rfk.database.show import UserShow, Show
from rfk.helper import now, get_path
class Anonymous(AnonymousUserMixin):
def __init__(self):
AnonymousUserMixin.__init__(self)
self.locale = 'de'
self.timezone = 'Europe/Berlin'
def get_locale(self):
return self.locale
def get_timezone(self):
return self.timezone
def has_permission(self, code=None, permission=None):
return False
class User(Base):
__tablename__ = 'users'
user = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
username = Column(String(50), unique=True)
password = Column(String(64))
mail = Column(String(255))
country = Column(String(3))
register_date = Column(UTCDateTime, default=now)
last_login = Column(UTCDateTime, default=None)
def get_id(self):
return unicode(self.user)
def is_anonymous(self):
return False
def is_active(self):
return True
def is_authenticated(self):
return True
def get_locale(self):
return self.get_setting(code='locale')
def get_timezone(self):
return self.get_setting(code='timezone')
@staticmethod
def authenticate(username, password):
"""shorthand function for authentication a user
returns the user object
Keyword arguments:
username -- username
password -- unencrypted password
"""
user = User.get_user(username=username)
if user.check_password(password):
return user
else:
raise rexc.base.InvalidPasswordException()
@staticmethod
def get_user(id=None, username=None):
assert id or username
try:
if username is None:
return User.query.filter(User.user == id).one()
else:
return User.query.filter(User.username == username).one()
except exc.NoResultFound:
raise rexc.base.UserNotFoundException
@staticmethod
def check_username(username):
if re.match('^[0-9a-zA-Z_-]{3,}$', username) is None:
return False
else:
return True
@staticmethod
def make_password(password):
return bcrypt.encrypt(password)
@staticmethod
def add_user(username, password):
if not User.check_username(username):
raise rexc.base.InvalidUsernameException
try:
User.query.filter(User.username == username).one()
raise rexc.base.UserNameTakenException()
except exc.NoResultFound:
user = User(username=username, password=User.make_password(password))
rfk.database.session.add(user)
rfk.database.session.flush()
return user
def check_password(self, password):
try:
return bcrypt.verify(password, self.password)
except ValueError:
if hashlib.sha1(password).hexdigest() == self.password:
self.password = User.make_password(password)
return True
else:
return False
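    # Illustrative sketch (not part of the original module): a typical login
    # check built on authenticate() and check_password() above; the exception
    # names follow the rfk.exc usage elsewhere in this class.
    #
    #     try:
    #         user = User.authenticate(username, password)
    #     except rexc.base.UserNotFoundException:
    #         ...  # unknown username
    #     except rexc.base.InvalidPasswordException:
    #         ...  # wrong password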
def add_permission(self, code=None, permission=None):
assert code or permission
if permission is None:
permission = Permission.get_permission(code)
try:
UserPermission.query.filter(UserPermission.user == self,
UserPermission.permission == permission) \
.one()
return False
except exc.NoResultFound:
self.permissions.append(UserPermission(permission))
return True
def has_permission(self, code=None, permission=None):
assert code or permission
if permission is None:
permission = Permission.get_permission(code)
try:
UserPermission.query.filter(UserPermission.user == self,
UserPermission.permission == permission) \
.one()
return True
except exc.NoResultFound:
return False
def get_setting(self, setting=None, code=None):
assert setting or code
if setting is None:
setting = Setting.get_setting(code)
try:
us = UserSetting.query.filter(UserSetting.user == self,
UserSetting.setting == setting).one()
return us.get_value()
except exc.NoResultFound:
return None
def set_setting(self, value, setting=None, code=None):
assert setting or code
if setting is None:
setting = Setting.get_setting(code)
UserSetting.set_value(self, setting, value)
rfk.database.session.flush()
def get_total_streamtime(self):
"""Returns a timedelta Object with the users total time streamed"""
try:
            return timedelta(seconds=float(
                rfk.database.session
                .query(func.sum(func.time_to_sec(func.timediff(Show.end, Show.begin))))
                .join(UserShow)
                .filter(UserShow.status == UserShow.STATUS.STREAMED,
                        UserShow.user == self)
                .first()[0]))
except TypeError:
return timedelta(seconds=0)
def __repr__(self):
return "<USER username={0}>".format(self.username)
class Setting(Base):
__tablename__ = 'settings'
setting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
code = Column(String(25), unique=True)
name = Column(String(50))
val_type = Column(Integer(unsigned=True))
TYPES = ENUM(['INT', 'STR'])
@staticmethod
def get_setting(code):
return Setting.query.filter(Setting.code == code).one()
@staticmethod
def add_setting(code, name, val_type):
try:
return Setting.query.filter(Setting.code == code).one()
except exc.NoResultFound:
return Setting(code=code, name=name, val_type=val_type)
class UserSetting(Base):
__tablename__ = 'user_settings'
userSetting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('settings'))
setting_id = Column("setting", Integer(unsigned=True),
ForeignKey('settings.setting',
onupdate="CASCADE",
ondelete="RESTRICT"))
setting = relationship("Setting")
val_int = Column(Integer)
val_str = Column(String(255))
def get_value(self):
if self.setting.val_type == Setting.TYPES.INT:
return self.val_int
elif self.setting.val_type == Setting.TYPES.STR:
return self.val_str
@staticmethod
def set_value(user, setting, value):
if value == True:
value = 1
elif value == False:
value = 0
try:
us = UserSetting.query.filter(UserSetting.user == user,
UserSetting.setting == setting).one()
except exc.NoResultFound:
us = UserSetting(user=user, setting=setting)
if us.setting.val_type == Setting.TYPES.INT:
us.val_int = value
elif us.setting.val_type == Setting.TYPES.STR:
us.val_str = value
class Permission(Base):
__tablename__ = 'permissions'
permission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
code = Column(String(25), unique=True)
name = Column(String(50))
@staticmethod
def get_permission(code):
return Permission.query.filter(Permission.code == code).one()
@staticmethod
def add_permission(code, name):
try:
return Permission.query.filter(Permission.code == code).one()
except exc.NoResultFound:
return Permission(code=code, name=name)
class UserPermission(Base):
__tablename__ = 'user_permissions'
userPermission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('permissions', cascade="all, delete-orphan"))
permission_id = Column("permission", Integer(unsigned=True),
ForeignKey('permissions.permission',
onupdate="CASCADE",
ondelete="RESTRICT"))
permission = relationship("Permission", backref=backref('users', cascade="all, delete-orphan"))
def __init__(self, permission):
self.permission = permission
class Ban(Base):
__tablename__ = 'bans'
ban = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('bans'))
range = Column(String(50))
expiration = Column(UTCDateTime)
class News(Base):
__tablename__ = 'news'
news = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
time = Column(UTCDateTime, default=now())
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User")
title = Column(String(255))
content = Column(Text)
class ApiKey(Base):
__tablename__ = 'apikeys'
apikey = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref="apikeys")
key = Column(String(128))
counter = Column(Integer(unsigned=True), default=0)
access = Column(UTCDateTime, default=now())
application = Column(String(128))
description = Column(String(255))
flag = Column(Integer(unsigned=True), default=0)
FLAGS = SET(['DISABLED', 'FASTQUERY', 'KICK', 'BAN', 'AUTH'])
    def gen_key(self):
        c = 0
        while True:
            key = hashlib.sha1("%s%s%d%d" % (self.application, self.description, time.time(), c)).hexdigest()
            if ApiKey.query.filter(ApiKey.key == key).first() is None:
                break
            c += 1
        self.key = key
@staticmethod
def check_key(key):
try:
apikey = ApiKey.query.filter(ApiKey.key == key).one()
except (exc.NoResultFound, exc.MultipleResultsFound):
raise rexc.api.KeyInvalidException()
if apikey.flag & ApiKey.FLAGS.DISABLED:
raise rexc.api.KeyDisabledException()
elif not apikey.flag & ApiKey.FLAGS.FASTQUERY:
if now() - apikey.access <= timedelta(seconds=1):
raise rexc.api.FastQueryException(last_access=apikey.access)
apikey.counter += 1
apikey.access = now()
return apikey
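    # Illustrative sketch (not part of the original module): typical use in a
    # request handler -- resolve the key, then branch on its flag bits
    # (`request_key` is a placeholder for whatever the endpoint received).
    #
    #     apikey = ApiKey.check_key(request_key)
    #     if apikey.flag & ApiKey.FLAGS.AUTH:
    #         ...  # this key may authenticate users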
class Log(Base):
__tablename__ = 'log'
log = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
timestamp = Column(UTCDateTime, default=now)
severity = Column(Integer(unsigned=True))
module = Column(String(50))
message = Column(Text)
class Loop(Base):
__tablename__ = 'loops'
loop = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
begin = Column(Integer(unsigned=True), default=0)
end = Column(Integer(unsigned=True), default=1440)
filename = Column(String(50))
@hybrid_property
def length(self):
if (self.end >= self.begin):
return abs(self.end - self.begin)
else:
return abs((self.end + 2400) - self.begin)
@length.expression
def length(cls):
return func.abs(cast(case([(cls.begin <= cls.end, cls.end),
(cls.begin >= cls.end, cls.end + 2400)]), Integer) - cast(cls.begin, Integer))
@hybrid_method
def contains(self, point):
return case([(self.begin <= self.end, (self.begin <= point) & (self.end >= point)),
(self.begin >= self.end, (self.begin <= point) | (self.end >= point))])
@hybrid_property
def file_exists(self):
if self.filename is None:
return False
return os.path.exists(os.path.join(get_path(CONFIG.get('liquidsoap', 'looppath')), self.filename))
@staticmethod
def get_current_loop():
"""
returns the current loop to be scheduled
@todo maybe broken ;_;
"""
n = now()
        # try to find a loop that should be running
        loops = Loop.query.filter(Loop.contains(int(n.hour * 100 + (n.minute / 60.) * 100))).order_by(
            Loop.length.asc()).all()
        for loop in loops:
            if loop.file_exists:
                return loop
        # we found no loop, so just try to find the longest one
        loops = Loop.query.order_by(Loop.length.asc()).all()
        for loop in loops:
            if loop.file_exists:
                return loop
        # okay, now we have a problem; just return None
        return None
|
bsd-3-clause
| 1,369,849,253,642,008,600
| 34.662562
| 113
| 0.58243
| false
| 4.344134
| false
| false
| false
|
albertoferna/compmech
|
setup.py
|
1
|
1198
|
from glob import glob
import sys
import os
from subprocess import Popen
import numpy
#params = 'build_ext -inplace -IC:\clones\cubature\cubature ' + ' '.join(sys.argv[1:])
params = 'build_ext --inplace -I%s ' % numpy.get_include() + ' '.join(sys.argv[1:]) + ' clean'
cwd = os.getcwd()
if os.name == 'nt':
use_sdk = 'DISTUTILS_USE_SDK'
if not use_sdk in os.environ.keys():
os.environ[use_sdk] = '1'
print('####################')
print('Compiling modules...')
print('####################')
print('')
basedirs = [
os.path.join('compmech', 'conecyl', 'clpt'),
os.path.join('compmech', 'conecyl', 'fsdt'),
os.path.join('compmech', 'integrate'),
os.path.join('compmech', 'conecyl', 'imperfections'),
os.path.join('compmech', 'aero', 'pistonplate', 'clpt'),
os.path.join('compmech', 'aero', 'pistonstiffpanel', 'clpt'),
]
for basedir in basedirs:
print('Compiling setup.py in %s' % basedir)
basedir = os.path.sep.join([cwd, basedir])
os.chdir(basedir)
for fname in glob('setup*.py'):
p = Popen(('python {} '.format(fname) + params), shell=True)
p.wait()
os.chdir(cwd)
|
bsd-3-clause
| 8,871,118,216,028,705,000
| 29.717949
| 93
| 0.569282
| false
| 3.040609
| false
| false
| false
|
MicBrain/Scheme-Interpreter
|
scheme.py
|
1
|
21214
|
"""This module implements the core Scheme interpreter functions, including the
eval/apply mutual recurrence, environment model, and read-eval-print loop.
"""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env):
"""Evaluate Scheme expression EXPR in environment ENV. If ENV is None,
simply returns EXPR as its value without further evaluation.
>>> expr = read_line("(+ 2 2)")
>>> expr
Pair('+', Pair(2, Pair(2, nil)))
>>> scheme_eval(expr, create_global_frame())
scnum(4)
"""
while env is not None:
# Note: until extra-credit problem 22 is complete, env will
# always be None on the second iteration of the loop, so that
# the value of EXPR is returned at that point.
if expr is None:
raise SchemeError("Cannot evaluate an undefined expression.")
# Evaluate Atoms
if scheme_symbolp(expr):
expr, env = env.lookup(expr).get_actual_value(), None
elif scheme_atomp(expr):
env = None
# All non-atomic expressions are lists.
elif not scheme_listp(expr):
raise SchemeError("malformed list: {0}".format(str(expr)))
else:
first, rest = scheme_car(expr), scheme_cdr(expr)
# Evaluate Combinations
if (scheme_symbolp(first) # first might be unhashable
and first in SPECIAL_FORMS):
if proper_tail_recursion:
expr, env = SPECIAL_FORMS[first](rest, env)
else:
expr, env = SPECIAL_FORMS[first](rest, env)
expr, env = scheme_eval(expr, env), None
else:
procedure = scheme_eval(first, env)
args = procedure.evaluate_arguments(rest, env)
if proper_tail_recursion:
expr, env = procedure.apply(args, env)
else:
# UPDATED 4/14/2014 @ 19:08
expr, env = scheme_apply(procedure, args, env), None
return expr
proper_tail_recursion = True
##################################################################
# Tail call optimization is enabled; set proper_tail_recursion   #
# to False above to disable it.                                  #
##################################################################
def scheme_apply(procedure, args, env):
"""Apply PROCEDURE (type Procedure) to argument values ARGS
in environment ENV. Returns the resulting Scheme value."""
# UPDATED 4/14/2014 @ 19:08
# Since .apply is allowed to do a partial evaluation, we finish up
# with a call to scheme_eval to complete the evaluation. scheme_eval
# will simply return expr if its env argument is None.
expr, env = procedure.apply(args, env)
return scheme_eval(expr, env)
################
# Environments #
################
class Frame:
"""An environment frame binds Scheme symbols to Scheme values."""
def __init__(self, parent):
"""An empty frame with a PARENT frame (that may be None)."""
self.bindings = {}
self.parent = parent
def __repr__(self):
if self.parent is None:
return "<Global Frame>"
else:
s = sorted('{0}: {1}'.format(k,v) for k,v in self.bindings.items())
return "<{{{0}}} -> {1}>".format(', '.join(s), repr(self.parent))
def __eq__(self, other):
return isinstance(other, Frame) and \
self.parent == other.parent
def lookup(self, symbol):
"""Return the value bound to SYMBOL. Errors if SYMBOL is not found.
As a convenience, also accepts Python strings, which it turns into
symbols."""
if type(symbol) is str:
symbol = intern(symbol)
if symbol in self.bindings:
return self.bindings[symbol]
if self.parent is not None:
return self.parent.lookup(symbol)
raise SchemeError("unknown identifier: {0}".format(str(symbol)))
def global_frame(self):
"""The global environment at the root of the parent chain."""
e = self
while e.parent is not None:
e = e.parent
return e
def make_call_frame(self, formals, vals):
"""Return a new local frame whose parent is SELF, in which the symbols
in the Scheme formal parameter list FORMALS are bound to the Scheme
values in the Scheme value list VALS. Raise an error if too many or too
few arguments are given.
>>> env = create_global_frame()
>>> formals, vals = read_line("(a b c)"), read_line("(1 2 3)")
>>> env.make_call_frame(formals, vals)
<{a: 1, b: 2, c: 3} -> <Global Frame>>
"""
frame = Frame(self)
if len(formals) != len(vals):
raise SchemeError
for expression in range(len(formals)):
frame.define(formals[expression], vals[expression])
return frame
def define(self, sym, val):
"""Define Scheme symbol SYM to have value VAL in SELF. As a
convenience, SYM may be Python string, which is converted first
to a Scheme symbol. VAL must be a SchemeValue."""
assert isinstance(val, SchemeValue), "values must be SchemeValues"
if type(sym) is str:
sym = intern(sym)
self.bindings[sym] = val
#####################
# Procedures #
#####################
class Procedure(SchemeValue):
"""The superclass of all kinds of procedure in Scheme."""
# Arcane Technical Note: The odd placement of the import from scheme in
# evaluate_arguments is necessary because it introduces mutually recursive
# imports between this file and scheme.py. The effect of putting it
# here is that we delay attempting to access scheme.scheme_eval until
# after the scheme module's initialization is finished.
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
from scheme import scheme_eval
return arg_list.map(lambda operand: scheme_eval(operand, env))
class PrimitiveProcedure(Procedure):
"""A Scheme procedure defined as a Python function."""
def __init__(self, fn, use_env=False):
self.fn = fn
self.use_env = use_env
def __str__(self):
return '#[primitive]'
def __repr__(self):
return "PrimitiveProcedure({})".format(str(self))
def apply(self, args, env):
"""Apply a primitive procedure to ARGS in ENV. Returns
a pair (val, None), where val is the resulting value.
>>> twos = Pair(SchemeInt(2), Pair(SchemeInt(2), nil))
>>> plus = PrimitiveProcedure(scheme_add, False)
>>> plus.apply(twos, None)
(scnum(4), None)
"""
try:
converted_list = []
while args != nil:
converted_list.append(args.first)
args = args.second
if self.use_env:
converted_list.append(env)
val = self.fn(*converted_list)
return val, None
except TypeError:
raise SchemeError
class LambdaProcedure(Procedure):
"""A procedure defined by a lambda expression or the complex define form."""
def __init__(self, formals, body, env = None):
"""A procedure whose formal parameter list is FORMALS (a Scheme list),
whose body is the single Scheme expression BODY, and whose parent
environment is the Frame ENV. A lambda expression containing multiple
expressions, such as (lambda (x) (display x) (+ x 1)) can be handled by
using (begin (display x) (+ x 1)) as the body."""
self.formals = formals
self.body = body
self.env = env
def _symbol(self):
return 'lambda'
def __str__(self):
# UPDATED 4/16/2014 @ 13:20
return "({0} {1} {2})".format(self._symbol(),
str(self.formals), str(self.body))
def __repr__(self):
args = (self.formals, self.body, self.env)
return "{0}Procedure({1}, {2}, {3})".format(self._symbol().capitalize(),
*(repr(a) for a in args))
def __eq__(self, other):
return type(other) is type(self) and \
self.formals == other.formals and \
self.body == other.body and \
self.env == other.env
def apply(self, args, env):
environment = self.env.make_call_frame(self.formals, args)
if proper_tail_recursion:
return self.body, self.env.make_call_frame(self.formals, args)
else:
return scheme_eval(self.body, self.env.make_call_frame(self.formals, args)), None
class MuProcedure(LambdaProcedure):
"""A procedure defined by a mu expression, which has dynamic scope.
"""
def _symbol(self):
return 'mu'
def apply(self, args, env):
if proper_tail_recursion:
return self.body, env.make_call_frame(self.formals, args)
else:
return scheme_eval(self.body, env.make_call_frame(self.formals, args)), None
# Call-by-name (nu) extension.
class NuProcedure(LambdaProcedure):
"""A procedure whose parameters are to be passed by name."""
def _symbol(self):
return 'nu'
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
return arg_list.map(lambda operand: Thunk(nil, operand, env))
class Thunk(LambdaProcedure):
"""A by-name value that is to be called as a parameterless function when
its value is fetched to be used."""
def get_actual_value(self):
return scheme_eval(self.body, self.env)
#################
# Special forms #
#################
# All of the 'do_..._form' methods return a value and an environment,
# as for the 'apply' method on Procedures. That is, they either return
# (V, None), indicating that the value of the special form is V, or they
# return (Expr, Env), indicating that the value of the special form is what
# you would get by evaluating Expr in the environment Env.
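# Illustrative sketch (not part of the original interpreter): a minimal extra
# special form following the contract described above. It defers the body to
# the main eval loop by returning (Expr, Env) when the test is true, and
# returns (okay, None) otherwise; it is not registered in SPECIAL_FORMS.
def do_when_form_example(vals, env):
    """Evaluate (when <test> <expr> ...) with parameters VALS in ENV."""
    check_form(vals, 1)
    if scheme_eval(vals.first, env):
        return Pair("begin", vals.second), env
    return okay, None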
def do_lambda_form(vals, env, function_type=LambdaProcedure):
"""Evaluate a lambda form with formals VALS[0] and body VALS.second
    in environment ENV, creating a procedure of type FUNCTION_TYPE
(a subtype of Procedure)."""
check_form(vals, 2)
operands = vals.first
check_formals(operands)
body = vals.second
    if len(body) != 1:
return function_type(operands, Pair("begin", body), env), None
return function_type(operands, body.first, env), None
def do_mu_form(vals, env):
"""Evaluate a mu (dynamically scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=MuProcedure)
def do_nu_form(vals, env):
"""Evaluate a mu (call-by-name scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=NuProcedure)
def do_define_form(vals, env):
"""Evaluate a define form with parameters VALS in environment ENV."""
check_form(vals, 2)
target = vals[0]
if scheme_symbolp(target):
check_form(vals, 2, 2)
env.define(target, scheme_eval(vals[1], env))
return (target, None)
elif scheme_pairp(target):
func_name = target.first
if isinstance(func_name, SchemeNumber) or isinstance(func_name, SchemeFloat):
raise SchemeError("bad argument to define")
lambda_vals = Pair(target.second, vals.second)
lambda_func = do_lambda_form(lambda_vals, env)[0]
env.define(func_name, lambda_func)
return func_name, None
else:
raise SchemeError("bad argument to define")
def do_quote_form(vals, env):
"""Evaluate a quote form with parameters VALS. ENV is ignored."""
check_form(vals, 1, 1)
return vals[0], None
def do_let_form(vals, env):
"""Evaluate a let form with parameters VALS in environment ENV."""
check_form(vals, 2)
bindings = vals[0]
exprs = vals.second
if not scheme_listp(bindings):
raise SchemeError("bad bindings list in let form")
# Add a frame containing bindings
names, values = nil, nil
for item in bindings:
values = Pair(scheme_eval(item.second.first, env), values)
names = Pair(item.first, names)
new_env = env.make_call_frame(names, values)
# Evaluate all but the last expression after bindings, and return the last
last = len(exprs)-1
for i in range(0, last):
scheme_eval(exprs[i], new_env)
return exprs[last], new_env
#########################
# Logical Special Forms #
#########################
def do_if_form(vals, env):
"""Evaluate if form with parameters VALS in environment ENV."""
check_form(vals, 2, 3)
if (scheme_eval(vals.first, env)):
return vals.second.first, env
elif len(vals) == 2:
return okay, None
return vals.second.second.first, env
def do_and_form(vals, env):
"""Evaluate short-circuited and with parameters VALS in environment ENV."""
if len(vals):
for i in range(len(vals) - 1):
if not(scheme_eval(vals[i], env)):
return scheme_false, None
return vals[len(vals) - 1], env
return scheme_true, None
def quote(value):
"""Return a Scheme expression quoting the Scheme VALUE.
>>> s = quote('hello')
>>> print(s)
(quote hello)
>>> scheme_eval(s, Frame(None)) # "hello" is undefined in this frame.
intern('hello')
"""
return Pair("quote", Pair(value, nil))
def do_or_form(vals, env):
"""Evaluate short-circuited or with parameters VALS in environment ENV."""
for value in vals:
eval_expression = scheme_eval(value, env)
if eval_expression:
return eval_expression, None
return scheme_false, None
def do_cond_form(vals, env):
"""Evaluate cond form with parameters VALS in environment ENV."""
num_clauses = len(vals)
for i, clause in enumerate(vals):
check_form(clause, 1)
if clause.first is else_sym:
if i < num_clauses-1:
raise SchemeError("else must be last")
test = scheme_true
if clause.second is nil:
raise SchemeError("badly formed else clause")
else:
test = scheme_eval(clause.first, env)
if test:
if len(clause.second) == 0:
return test, None
if len(clause.second) >= 2:
return Pair('begin', clause.second), env
return clause.second.first, env
return okay, None
def do_begin_form(vals, env):
"""Evaluate begin form with parameters VALS in environment ENV."""
check_form(vals, 0)
if scheme_nullp(vals):
return okay, None
for i in range(len(vals) - 1):
scheme_eval(vals[i], env)
return vals[len(vals) - 1], env
# Collected symbols with significance to the interpreter
and_sym = intern("and")
begin_sym = intern("begin")
cond_sym = intern("cond")
define_macro_sym = intern("define-macro")
define_sym = intern("define")
else_sym = intern("else")
if_sym = intern("if")
lambda_sym = intern("lambda")
let_sym = intern("let")
mu_sym = intern("mu")
nu_sym = intern("nu")
or_sym = intern("or")
quasiquote_sym = intern("quasiquote")
quote_sym = intern("quote")
set_bang_sym = intern("set!")
unquote_splicing_sym = intern("unquote-splicing")
unquote_sym = intern("unquote")
# Collected special forms
SPECIAL_FORMS = {
and_sym: do_and_form,
begin_sym: do_begin_form,
cond_sym: do_cond_form,
define_sym: do_define_form,
if_sym: do_if_form,
lambda_sym: do_lambda_form,
let_sym: do_let_form,
mu_sym: do_mu_form,
nu_sym: do_nu_form,
or_sym: do_or_form,
quote_sym: do_quote_form,
}
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max = None):
"""Check EXPR (default SELF.expr) is a proper list whose length is
at least MIN and no more than MAX (default: no maximum). Raises
a SchemeError if this is not the case."""
if not scheme_listp(expr):
raise SchemeError("badly formed expression: " + str(expr))
length = len(expr)
if length < min:
raise SchemeError("too few operands in form")
elif max is not None and length > max:
raise SchemeError("too many operands in form")
def check_formals(formals):
"""Check that FORMALS is a valid parameter list, a Scheme list of symbols
in which each symbol is distinct. Raise a SchemeError if the list of formals
is not a well-formed list of symbols or if any symbol is repeated.
>>> check_formals(read_line("(a b c)"))
"""
seen_symbols = []
while len(formals):
if not(scheme_symbolp(formals.first)) or formals.first in seen_symbols:
raise SchemeError
seen_symbols.append(formals.first)
formals = formals.second
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, quiet=False, startup=False,
interactive=False, load_files=()):
"""Read and evaluate input until an end of file or keyboard interrupt."""
if startup:
for filename in load_files:
scheme_load(scstr(filename), True, env)
while True:
try:
src = next_line()
while src.more_on_line:
expression = scheme_read(src)
result = scheme_eval(expression, env)
if not quiet and result is not None:
scheme_print(result)
except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
if (isinstance(err, RuntimeError) and
'maximum recursion depth exceeded' not in err.args[0]):
raise
print("Error:", err)
except KeyboardInterrupt: # <Control>-C
if not startup:
raise
print("\nKeyboardInterrupt")
if not interactive:
return
except EOFError: # <Control>-D, etc.
return
def scheme_load(*args):
"""Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
determined by QUIET (default true)."""
if not (2 <= len(args) <= 3):
vals = args[:-1]
raise SchemeError("wrong number of arguments to load: {0}".format(vals))
sym = args[0]
quiet = args[1] if len(args) > 2 else True
env = args[-1]
if (scheme_stringp(sym)):
sym = intern(str(sym))
check_type(sym, scheme_symbolp, 0, "load")
with scheme_open(str(sym)) as infile:
lines = infile.readlines()
args = (lines, None) if quiet else (lines,)
def next_line():
return buffer_lines(*args)
read_eval_print_loop(next_line, env.global_frame(), quiet=quiet)
return okay
def scheme_open(filename):
"""If either FILENAME or FILENAME.scm is the name of a valid file,
return a Python file opened to it. Otherwise, raise an error."""
try:
return open(filename)
except IOError as exc:
if filename.endswith('.scm'):
raise SchemeError(str(exc))
try:
return open(filename + '.scm')
except IOError as exc:
raise SchemeError(str(exc))
def create_global_frame():
"""Initialize and return a single-frame environment with built-in names."""
env = Frame(None)
env.define("eval", PrimitiveProcedure(scheme_eval, True))
env.define("apply", PrimitiveProcedure(scheme_apply, True))
env.define("load", PrimitiveProcedure(scheme_load, True))
for names, fn in get_primitive_bindings():
for name in names:
proc = PrimitiveProcedure(fn)
env.define(name, proc)
return env
@main
def run(*argv):
next_line = buffer_input
interactive = True
load_files = ()
if argv:
try:
filename = argv[0]
if filename == '-load':
load_files = argv[1:]
else:
input_file = open(argv[0])
lines = input_file.readlines()
def next_line():
return buffer_lines(lines)
interactive = False
except IOError as err:
print(err)
sys.exit(1)
read_eval_print_loop(next_line, create_global_frame(), startup=True,
interactive=interactive, load_files=load_files)
tscheme_exitonclick()
|
apache-2.0
| 1,368,474,208,198,917,400
| 35.139693
| 93
| 0.593335
| false
| 3.926337
| false
| false
| false
|
beefoo/still-i-rise
|
collect_sound_data.py
|
1
|
2076
|
# -*- coding: utf-8 -*-
# Description: collect pitch and pulse data for the input audio via Praat
import argparse
import json
import os
from pprint import pprint
import re
import subprocess
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="still_i_rise.wav", help="Path to input audio file file")
parser.add_argument('-pitch', dest="OUTPUT_PITCH_FILE", default="data/still_i_rise.Pitch", help="Path to output pitch data file")
parser.add_argument('-pulse', dest="OUTPUT_PULSE_FILE", default="data/still_i_rise.PointProcess", help="Path to output pulse data file")
parser.add_argument('-ts', dest="TIME_STEP", default="0.01", help="Time step in seconds")
parser.add_argument('-p0', dest="PITCH_FLOOR", default="70", help="Pitch floor in Hz")
parser.add_argument('-mc', dest="MAX_CANDIDATES", default="4", help="Maximum candidates per frame")
parser.add_argument('-va', dest="VERY_ACCURATE", default="on", help="Very accurate, on/off")
parser.add_argument('-st', dest="SILENCE_THRESHOLD", default="0.01", help="Silence threshold")
parser.add_argument('-vt', dest="VOICING_THRESHOLD", default="0.3", help="Voicing threshold")
parser.add_argument('-oc', dest="OCTAVE_COST", default="0.001", help="Octave cost")
parser.add_argument('-ojc', dest="OCTAVE_JUMP_COST", default="0.3", help="Octave jump cost")
parser.add_argument('-vc', dest="VOICED_COST", default="0.2", help="Voiced cost")
parser.add_argument('-p1', dest="PITCH_CEILING", default="400", help="Pitch ceiling in Hz")
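# Example invocation (illustrative; the flags are defined above):
#   python collect_sound_data.py -in still_i_rise.wav -ts 0.005 -p0 75 -p1 500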
# init input
args = parser.parse_args()
# build and run the Praat analysis command
command = ['Praat', '--run', 'collect_sound_data.praat', args.INPUT_FILE, args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE, args.TIME_STEP, args.PITCH_FLOOR, args.MAX_CANDIDATES, args.VERY_ACCURATE, args.SILENCE_THRESHOLD, args.VOICING_THRESHOLD, args.OCTAVE_COST, args.OCTAVE_JUMP_COST, args.VOICED_COST, args.PITCH_CEILING]
print "Running %s" % " ".join(command)
finished = subprocess.check_call(command)
print "Wrote data to %s and %s" % (args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE)
|
mit
| 2,398,684,887,728,343,000
| 55.108108
| 323
| 0.725915
| false
| 2.969957
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/job_service/transports/grpc.py
|
1
|
23359
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.talent_v4beta1.types import job
from google.cloud.talent_v4beta1.types import job as gct_job
from google.cloud.talent_v4beta1.types import job_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
class JobServiceGrpcTransport(JobServiceTransport):
"""gRPC backend transport for JobService.
A service handles job management, including job CRUD,
enumeration and search.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
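    # Illustrative note (not part of the generated file): this transport is
    # normally created for you by the client, but it can also be built and
    # injected explicitly, e.g.:
    #
    #     transport = JobServiceGrpcTransport(host='jobs.googleapis.com')
    #     client = JobServiceClient(transport=transport)
    #
    # where JobServiceClient comes from google.cloud.talent_v4beta1.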
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_job(self) -> Callable[
[job_service.CreateJobRequest],
gct_job.Job]:
r"""Return a callable for the create job method over gRPC.
Creates a new job.
Typically, the job becomes searchable within 10 seconds,
but it may take up to 5 minutes.
Returns:
Callable[[~.CreateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_job' not in self._stubs:
self._stubs['create_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/CreateJob',
request_serializer=job_service.CreateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['create_job']
@property
def batch_create_jobs(self) -> Callable[
[job_service.BatchCreateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch create jobs method over gRPC.
Begins executing a batch create jobs operation.
Returns:
Callable[[~.BatchCreateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_create_jobs' not in self._stubs:
self._stubs['batch_create_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchCreateJobs',
request_serializer=job_service.BatchCreateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_create_jobs']
@property
def get_job(self) -> Callable[
[job_service.GetJobRequest],
job.Job]:
r"""Return a callable for the get job method over gRPC.
Retrieves the specified job, whose status is OPEN or
recently EXPIRED within the last 90 days.
Returns:
Callable[[~.GetJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_job' not in self._stubs:
self._stubs['get_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/GetJob',
request_serializer=job_service.GetJobRequest.serialize,
response_deserializer=job.Job.deserialize,
)
return self._stubs['get_job']
@property
def update_job(self) -> Callable[
[job_service.UpdateJobRequest],
gct_job.Job]:
r"""Return a callable for the update job method over gRPC.
Updates specified job.
Typically, updated contents become visible in search
results within 10 seconds, but it may take up to 5
minutes.
Returns:
Callable[[~.UpdateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_job' not in self._stubs:
self._stubs['update_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/UpdateJob',
request_serializer=job_service.UpdateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['update_job']
@property
def batch_update_jobs(self) -> Callable[
[job_service.BatchUpdateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch update jobs method over gRPC.
Begins executing a batch update jobs operation.
Returns:
Callable[[~.BatchUpdateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_update_jobs' not in self._stubs:
self._stubs['batch_update_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchUpdateJobs',
request_serializer=job_service.BatchUpdateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_update_jobs']
@property
def delete_job(self) -> Callable[
[job_service.DeleteJobRequest],
empty_pb2.Empty]:
r"""Return a callable for the delete job method over gRPC.
Deletes the specified job.
Typically, the job becomes unsearchable within 10
seconds, but it may take up to 5 minutes.
Returns:
Callable[[~.DeleteJobRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_job' not in self._stubs:
self._stubs['delete_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/DeleteJob',
request_serializer=job_service.DeleteJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_job']
@property
def batch_delete_jobs(self) -> Callable[
[job_service.BatchDeleteJobsRequest],
empty_pb2.Empty]:
r"""Return a callable for the batch delete jobs method over gRPC.
Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by
filter.
Returns:
Callable[[~.BatchDeleteJobsRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_delete_jobs' not in self._stubs:
self._stubs['batch_delete_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchDeleteJobs',
request_serializer=job_service.BatchDeleteJobsRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['batch_delete_jobs']
@property
def list_jobs(self) -> Callable[
[job_service.ListJobsRequest],
job_service.ListJobsResponse]:
r"""Return a callable for the list jobs method over gRPC.
Lists jobs by filter.
Returns:
Callable[[~.ListJobsRequest],
~.ListJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_jobs' not in self._stubs:
self._stubs['list_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/ListJobs',
request_serializer=job_service.ListJobsRequest.serialize,
response_deserializer=job_service.ListJobsResponse.deserialize,
)
return self._stubs['list_jobs']
@property
def search_jobs(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs that the caller
has permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs' not in self._stubs:
self._stubs['search_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobs',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs']
@property
def search_jobs_for_alert(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs for alert method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This API call is intended for the use case of targeting passive
job seekers (for example, job seekers who have signed up to
receive email alerts about potential job opportunities), and has
different algorithmic adjustments that are targeted to passive
job seekers.
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs the caller has
permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs_for_alert' not in self._stubs:
self._stubs['search_jobs_for_alert'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobsForAlert',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs_for_alert']
__all__ = (
'JobServiceGrpcTransport',
)
|
apache-2.0
| 5,724,087,368,380,103,000
| 42.580224
| 87
| 0.609615
| false
| 4.638403
| false
| false
| false
|
chungjjang80/FRETBursts
|
fretbursts/burstlib.py
|
1
|
133746
|
#
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2013-2016 The Regents of the University of California,
# Antonino Ingargiola <tritemio@gmail.com>
#
"""
This module contains all the main FRETBursts analysis functions.
`burstlib.py` defines the fundamental object `Data()` that contains both the
experimental data (attributes) and the high-level analysis routines (methods).
Furthermore it loads all the remaining **FRETBursts** modules (except for
`loaders.py`).
For usage example see the IPython Notebooks in sub-folder "notebooks".
"""
from __future__ import print_function, absolute_import, division
from future.utils import raise_from
from builtins import range, zip
import os
import hashlib
import numpy as np
import copy
from numpy import zeros, size, r_
import scipy.stats as SS
from .utils.misc import pprint, clk_to_s, deprecate
from .poisson_threshold import find_optimal_T_bga
from . import fret_fit
from . import bg_cache
from .ph_sel import Ph_sel
from .fretmath import gamma_correct_E, gamma_uncorrect_E
from .phtools import burstsearch as bslib
from .phtools.burstsearch import (
# Burst search function
bsearch,
# Photon counting function,
mch_count_ph_in_bursts
)
from .phtools import phrates
from . import background as bg
from . import select_bursts
from . import fit
from .fit.gaussian_fitting import (gaussian_fit_hist,
gaussian_fit_cdf,
two_gaussian_fit_hist,
two_gaussian_fit_hist_min,
two_gaussian_fit_hist_min_ab,
two_gaussian_fit_EM,
two_gauss_mix_pdf,
two_gauss_mix_ab,)
# Redefine some old functions that have been renamed so old scripts will not
# break but will print a warning
bg_calc_exp = deprecate(bg.exp_fit, 'bg_calc_exp', 'bg.exp_fit')
bg_calc_exp_cdf = deprecate(bg.exp_cdf_fit, 'bg_calc_exp_cdf', 'bg.exp_cdf_fit')
def _get_bsearch_func(pure_python=False):
if pure_python:
# return the python version
return bslib.bsearch_py
else:
# or what is available
return bsearch
def _get_mch_count_ph_in_bursts_func(pure_python=False):
if pure_python:
# return the python version
return bslib.mch_count_ph_in_bursts_py
else:
# or what is available
return mch_count_ph_in_bursts
def isarray(obj):
"""Test if the object support the array interface.
Returns True for numpy arrays and pandas sequences.
"""
return hasattr(obj, '__array__')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# BURST SELECTION FUNCTIONS
#
def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
"""Uses `filter_fun` to select a sub-set of bursts from `d_orig`.
This function is deprecated. Use :meth:`Data.select_bursts` instead.
"""
d_sel = d_orig.select_bursts(filter_fun, negate=negate,
computefret=not nofret,
**kwargs)
return d_sel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
fraction = (on_range[1] - on_range[0]) / alex_period
else:
fraction = (alex_period + on_range[1] - on_range[0]) / alex_period
return fraction
def top_tail(nx, a=0.1):
"""Return for each ch the mean size of the top `a` fraction.
nx is one of nd, na, nt from Data() (list of burst size in each ch).
"""
assert a > 0 and a < 1
return np.r_[[n[n > n.max() * (1 - a)].mean() for n in nx]]
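# Minimal sketch of `top_tail` on synthetic per-channel burst sizes (the
# numbers below are made up for illustration, not measurement data).
def _example_top_tail():
    sizes_ch1 = np.array([10, 20, 90, 100])
    sizes_ch2 = np.array([5, 50, 60])
    # With a=0.5 only sizes above half of the per-channel maximum are kept,
    # giving means of 95 (ch1) and 55 (ch2).
    return top_tail([sizes_ch1, sizes_ch2], a=0.5)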
##
# Per-burst quantities from ph-data arrays (timestamps, lifetime, etc...)
#
def _excitation_width(excitation_range, alex_period):
"""Returns duration of alternation period outside selected excitation.
"""
if excitation_range[1] > excitation_range[0]:
return alex_period - excitation_range[1] + excitation_range[0]
elif excitation_range[1] < excitation_range[0]:
return excitation_range[0] - excitation_range[1]
def _ph_times_compact(ph_times_sel, alex_period, excitation_width):
"""Compact ph_times inplace by removing gaps between alternation periods.
Arguments:
ph_times_sel (array): array of timestamps from one alternation period.
alex_period (scalar): period of alternation in timestamp units.
        excitation_width (float): duration, in timestamp units, of the part
            of the alternation period outside the current photon selection
            (i.e. the gap removed for each period).
    Returns the timestamps with the gaps removed; the input array is not
    modified.
"""
# The formula is
#
# gaps = (ph_times_sel // alex_period)*excitation_width
# ph_times_sel = ph_times_sel - gaps
#
# As a memory optimization the `-gaps` array is reused inplace
times_minusgaps = (ph_times_sel // alex_period) * (-1 * excitation_width)
# The formula is ph_times_sel = ph_times_sel - "gaps"
times_minusgaps += ph_times_sel
return times_minusgaps
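# Minimal sketch of the compaction performed by `_ph_times_compact`
# (synthetic clock values; assumes a 50-clock alternation period of which
# 25 clocks fall outside the selected excitation).
def _example_ph_times_compact():
    ph = np.array([10, 60, 110, 160], dtype=np.int64)
    # Each timestamp is shifted back by (period_index * 25), yielding
    # array([10, 35, 60, 85]): the selected windows become contiguous.
    return _ph_times_compact(ph, alex_period=50, excitation_width=25)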
def iter_bursts_start_stop(bursts):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
arr_istart = bursts.istart
arr_istop = bursts.istop + 1
for istart, istop in zip(arr_istart, arr_istop):
yield istart, istop
def iter_bursts_ph(ph_data, bursts, mask=None, compact=False,
alex_period=None, excitation_width=None):
"""Iterator over arrays of photon-data for each burst.
Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
mask (boolean mask or None): if not None, is a boolean mask
to select photons in `ph_data` (for example Donor-ch photons).
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
alex_period (scalar): period of alternation in timestamp units.
        excitation_width (float): duration, in timestamp units, of the part
            of the alternation period outside the current photon selection
            (only used when `compact=True`).
Yields an array with a selection of "photons" for each burst.
"""
if isinstance(mask, slice) and mask == slice(None):
mask = None
if compact:
assert alex_period is not None
assert excitation_width is not None
assert mask is not None
for start, stop in iter_bursts_start_stop(bursts):
ph = ph_data[start:stop]
if mask is not None:
ph = ph[mask[start:stop]]
if compact:
ph = _ph_times_compact(ph, alex_period, excitation_width)
yield ph
def bursts_ph_list(ph_data, bursts, mask=None):
"""Returna list of ph-data for each burst.
ph_data can be either the timestamp array on which the burst search
has been performed or any other array with same size (boolean array,
nanotimes, etc...)
"""
return [ph for ph in iter_bursts_ph(ph_data, bursts, mask=mask)]
def burst_ph_stats(ph_data, bursts, func=np.mean, func_kw=None, **kwargs):
"""Reduce burst photons (timestamps, nanotimes) to a scalar using `func`.
Arguments
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
func (callable): function that takes the burst photon timestamps
as first argument and returns a scalar.
        func_kw (dict): additional keyword arguments passed to `func`
            (beyond the photon-data array).
        **kwargs: additional arguments passed to :func:`iter_bursts_ph`.
    Returns
        Array with one element per burst.
"""
if func_kw is None:
func_kw = {}
burst_stats = []
for burst_ph in iter_bursts_ph(ph_data, bursts, **kwargs):
burst_stats.append(func(burst_ph, **func_kw))
return np.asfarray(burst_stats) # NOTE: asfarray converts None to nan
def ph_in_bursts_mask(ph_data_size, bursts):
"""Return bool mask to select all "ph-data" inside any burst."""
mask = zeros(ph_data_size, dtype=bool)
for start, stop in iter_bursts_start_stop(bursts):
mask[start:stop] = True
return mask
def fuse_bursts_direct(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-seconds).
This function is a direct implementation using a single loop.
For a faster implementation see :func:`fuse_bursts_iter`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
fused_bursts_list = []
fused_burst = None
for burst1, burst2 in zip(bursts[:-1], bursts[1:]):
if fused_burst is not None:
burst1c = fused_burst
else:
burst1c = bslib.BurstGap.from_burst(burst1)
separation = burst2.start - burst1c.stop
if separation <= max_delay_clk:
gap = burst2.start - burst1c.stop
gap_counts = burst2.istart - burst1c.istop - 1
if burst1c.istop >= burst2.istart:
gap = 0
gap_counts = 0
fused_burst = bslib.BurstGap(
start = burst1c.start,
istart = burst1c.istart,
stop = burst2.stop,
istop = burst2.istop,
gap = burst1c.gap + gap,
gap_counts = burst1c.gap_counts + gap_counts)
else:
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
fused_burst = None
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst1c))
    # Append the last burst (either a fused or an isolated one)
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst2))
fused_bursts = bslib.BurstsGap.from_list(fused_bursts_list)
init_num_bursts = bursts.num_bursts
delta_b = init_num_bursts - fused_bursts.num_bursts
pprint(" --> END Fused %d bursts (%.1f%%)\n\n" %
(delta_b, 100 * delta_b / init_num_bursts), mute=not verbose)
return fused_bursts
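# Worked example of the fusing rule (synthetic values, clk_p = 12.5e-9 s):
# a burst ending at clock 1000 and the next one starting at clock 1100 are
# separated by 100 clocks = 1.25 us, so they are fused for any ms >= 0.00125.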
def fuse_bursts_iter(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-secs).
This function calls iteratively :func:`b_fuse` until there are no more
bursts to fuse. For a slower but more readable version see
:func:`fuse_bursts_direct`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
init_nburst = bursts.num_bursts
bursts = bslib.BurstsGap(bursts.data)
z = 0
new_nburst, nburst = 0, 1 # starting condition
while new_nburst < nburst:
z += 1
nburst = bursts.num_bursts
bursts = b_fuse(bursts, ms=ms, clk_p=clk_p)
new_nburst = bursts.num_bursts
delta_b = init_nburst - nburst
pprint(" --> END Fused %d bursts (%.1f%%, %d iter)\n\n" %
(delta_b, 100 * delta_b / init_nburst, z), mute=not verbose)
return bursts
def b_fuse(bursts, ms=0, clk_p=12.5e-9):
"""Fuse bursts separated by less than `ms` (milli-secs).
This is a low-level function which fuses pairs of consecutive
bursts separated by less than `ms` millisec.
If there are 3 or more consecutive bursts separated by less than `ms`
only the first 2 are fused.
See :func:`fuse_bursts_iter` or :func:`fuse_bursts_direct` for
higher level functions.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
# Nearby bursts masks
delays_below_th = (bursts.separation <= max_delay_clk)
if not np.any(delays_below_th):
return bursts
buffer_mask = np.hstack([(False,), delays_below_th, (False,)])
first_bursts = buffer_mask[1:]
second_bursts = buffer_mask[:-1]
# Keep only the first pair in case of more than 2 consecutive bursts
first_bursts ^= (second_bursts * first_bursts)
# note that previous in-place operation also modifies `second_bursts`
both_bursts = first_bursts + second_bursts
# istart is from the first burst, istop is from the second burst
fused_bursts1 = bursts[first_bursts]
fused_bursts2 = bursts[second_bursts]
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
overlaping = fused_bursts1.istop >= fused_bursts2.istart
gap[overlaping] = 0
gap_counts[overlaping] = 0
# Assign the new burst data
    # fused_bursts1 already has the right start and istart
fused_bursts1.istop = fused_bursts2.istop
fused_bursts1.stop = fused_bursts2.stop
fused_bursts1.gap += gap
fused_bursts1.gap_counts += gap_counts
# Join fused bursts with the remaining bursts
new_burst = fused_bursts1.join(bursts[~both_bursts], sort=True)
return new_burst
def mch_fuse_bursts(MBurst, ms=0, clk_p=12.5e-9, verbose=True):
"""Multi-ch version of `fuse_bursts`. `MBurst` is a list of Bursts objects.
"""
mburst = [b.copy() for b in MBurst] # safety copy
new_mburst = []
ch = 0
for mb in mburst:
ch += 1
pprint(" - - - - - CHANNEL %2d - - - - \n" % ch, not verbose)
if mb.num_bursts == 0:
new_bursts = bslib.Bursts.empty()
else:
new_bursts = fuse_bursts_iter(mb, ms=ms, clk_p=clk_p,
verbose=verbose)
new_mburst.append(new_bursts)
return new_mburst
def burst_stats(mburst, clk_p):
"""Compute average duration, size and burst-delay for bursts in mburst.
"""
nans = [np.nan, np.nan]
width_stats = np.array([[b.width.mean(), b.width.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
height_stats = np.array([[b.counts.mean(), b.counts.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
mean_burst_delay = np.array([b.separation.mean() if b.num_bursts > 0
else np.nan for b in mburst])
return (clk_to_s(width_stats, clk_p) * 1e3, height_stats,
clk_to_s(mean_burst_delay, clk_p))
def print_burst_stats(d):
"""Print some bursts statistics."""
nch = len(d.mburst)
width_ms, height, delays = burst_stats(d.mburst, d.clk_p)
s = "\nNUMBER OF BURSTS: m = %d, L = %d" % (d.m, d.L)
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\n#: "+"%7d "*nch % tuple([b.num_bursts for b in d.mburst])
s += "\nT (us) [BS par] "+"%7d "*nch % tuple(np.array(d.T)*1e6)
s += "\nBG Rat T (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel('all')])
s += "\nBG Rat D (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Dem')])
s += "\nBG Rat A (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Aem')])
s += "\n\nBURST WIDTH STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (ms): "+"%7.3f "*nch % tuple(width_ms[0, :])
s += "\nStd.dev (ms): "+"%7.3f "*nch % tuple(width_ms[1, :])
s += "\n\nBURST SIZE STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (# ph): "+"%7.2f "*nch % tuple(height[0, :])
s += "\nStd.dev (# ph): "+"%7.2f "*nch % tuple(height[1, :])
s += "\n\nBURST MEAN DELAY"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nDelay (s): "+"%7.3f "*nch % tuple(delays)
return s
def ES_histog(E, S, bin_step=0.05, E_bins=None, S_bins=None):
"""Returns 2D (ALEX) histogram and bins of bursts (E,S).
"""
if E_bins is None:
E_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
if S_bins is None:
S_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
H, E_bins, S_bins = np.histogram2d(E, S, bins=[E_bins, S_bins])
return H, E_bins, S_bins
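# Minimal sketch of `ES_histog` on synthetic E, S values (made-up numbers).
def _example_ES_histog():
    E = np.array([0.10, 0.15, 0.80, 0.85])
    S = np.array([0.50, 0.55, 0.50, 0.45])
    H, E_bins, S_bins = ES_histog(E, S, bin_step=0.1)
    # H has shape (E_bins.size - 1, S_bins.size - 1); each cell counts the
    # bursts falling in the corresponding (E, S) bin.
    return H, E_bins, S_bins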
def delta(x):
"""Return x.max() - x.min()"""
return x.max() - x.min()
def mask_empty(mask):
"""Returns True if `mask` is empty, otherwise False.
`mask` can be a boolean array or a slice object.
"""
if isinstance(mask, slice):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
        # Boolean array
return not mask.any()
class DataContainer(dict):
"""
Generic class for storing data.
It's a dictionary in which each key is also an attribute d['nt'] or d.nt.
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
for k in self:
dict.__setattr__(self, k, self[k])
def add(self, **kwargs):
"""Adds or updates elements (attributes and/or dict entries). """
self.update(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def delete(self, *args, **kwargs):
"""Delete an element (attribute and/or dict entry). """
warning = kwargs.get('warning', True)
for name in args:
try:
self.pop(name)
except KeyError:
if warning:
print(' WARNING: Name %s not found (dict).' % name)
try:
delattr(self, name)
except AttributeError:
if warning:
print(' WARNING: Name %s not found (attr).' % name)
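# Minimal usage sketch for DataContainer with synthetic fields (illustrative
# only; real Data objects are created by the loader functions).
def _example_datacontainer():
    dc = DataContainer(nch=1, clk_p=12.5e-9)
    assert dc.nch == dc['nch'] == 1       # dict keys double as attributes
    dc.add(gamma=1.0)                     # add/update entries and attributes
    dc.delete('gamma', warning=False)     # remove them again
    return dc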
class Data(DataContainer):
"""
Container for all the information (timestamps, bursts) of a dataset.
Data() contains all the information of a dataset (name, timestamps, bursts,
correction factors) and provides several methods to perform analysis
(background estimation, burst search, FRET fitting, etc...).
When loading a measurement file a Data() object is created by one
of the loader functions in `loaders.py`. Data() objects can be also
created with :meth:`Data.copy`, :meth:`Data.fuse_bursts()` or
:meth:`Data.select_bursts`.
To add or delete data-attributes use `.add()` or `.delete()` methods.
All the standard data-attributes are listed below.
Note:
Attributes of type "*list*" contain one element per channel.
Each element, in turn, can be an array. For example `.ph_times_m[i]`
is the array of timestamps for channel `i`; or `.nd[i]` is the array
of donor counts in each burst for channel `i`.
**Measurement attributes**
Attributes:
fname (string): measurements file name
nch (int): number of channels
clk_p (float): clock period in seconds for timestamps in `ph_times_m`
ph_times_m (list): list of timestamp arrays (int64). Each array
contains all the timestamps (donor+acceptor) in one channel.
A_em (list): list of boolean arrays marking acceptor timestamps. Each
array is a boolean mask for the corresponding ph_times_m array.
leakage (float or array of floats): leakage (or bleed-through) fraction.
May be scalar or same size as nch.
gamma (float or array of floats): gamma factor.
May be scalar or same size as nch.
D_em (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` for donor emission
D_ex, A_ex (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` during donor or acceptor
excitation
D_ON, A_ON (2-element tuples of int ): **[ALEX-only]**
start-end values for donor and acceptor excitation selection.
alex_period (int): **[ALEX-only]**
duration of the alternation period in clock cycles.
**Background Attributes**
The background is computed with :meth:`Data.calc_bg`
and is estimated in chunks of equal duration called *background periods*.
Estimations are performed in each spot and photon stream.
The following attributes contain the estimated background rate.
Attributes:
bg (dict): background rates for the different photon streams,
channels and background periods. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period) of background rates.
bg_mean (dict): mean background rates across the entire measurement
for the different photon streams and channels. Keys are `Ph_sel`
objects and values are lists (one element per channel) of
background rates.
nperiods (int): number of periods in which timestamps are split for
background calculation
bg_fun (function): function used to compute the background rates
Lim (list): each element of this list is a list of index pairs for
`.ph_times_m[i]` for **first** and **last** photon in each period.
Ph_p (list): each element in this list is a list of timestamps pairs
for **first** and **last** photon of each period.
bg_ph_sel (Ph_sel object): photon selection used by Lim and Ph_p.
See :mod:`fretbursts.ph_sel` for details.
Th_us (dict): thresholds in us used to select the tail of the
interphoton delay distribution. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period).
    Additionally, there are a few deprecated attributes (`bg_dd`, `bg_ad`,
`bg_da`, `bg_aa`, `rate_dd`, `rate_ad`, `rate_da`, `rate_aa` and `rate_m`)
which will be removed in a future version.
Please use :attr:`Data.bg` and :attr:`Data.bg_mean` instead.
**Burst search parameters (user input)**
These are the parameters used to perform the burst search
(see :meth:`burst_search`).
Attributes:
ph_sel (Ph_sel object): photon selection used for burst search.
See :mod:`fretbursts.ph_sel` for details.
m (int): number of consecutive timestamps used to compute the
local rate during burst search
L (int): min. number of photons for a burst to be identified and saved
P (float, probability): valid values [0..1].
Probability that a burst-start is due to a Poisson background.
The employed Poisson rate is the one computed by `.calc_bg()`.
F (float): `(F * background_rate)` is the minimum rate for burst-start
**Burst search data (available after burst search)**
When not specified, parameters marked as (list of arrays) contains arrays
with one element per bursts. `mburst` arrays contain one "row" per burst.
`TT` arrays contain one element per `period` (see above: background
attributes).
Attributes:
mburst (list of Bursts objects): list Bursts() one element per channel.
See :class:`fretbursts.phtools.burstsearch.Bursts`.
TT (list of arrays): list of arrays of *T* values (in sec.). A *T*
value is the maximum delay between `m` photons to have a
burst-start. Each channels has an array of *T* values, one for
each background "period" (see above).
T (array): per-channel mean of `TT`
nd, na (list of arrays): number of donor or acceptor photons during
donor excitation in each burst
        nt (list of arrays): total number of photons (nd+na+naa)
naa (list of arrays): number of acceptor photons in each burst
during acceptor excitation **[ALEX only]**
nar (list of arrays): number of acceptor photons in each burst
during donor excitation, not corrected for D-leakage and
A-direct-excitation. **[PAX only]**
bp (list of arrays): time period for each burst. Same shape as `nd`.
This is needed to identify the background rate for each burst.
bg_bs (list): background rates used for threshold computation in burst
search (is a reference to `bg`, `bg_dd` or `bg_ad`).
fuse (None or float): if not None, the burst separation in ms below
which bursts have been fused (see `.fuse_bursts()`).
E (list): FRET efficiency value for each burst:
E = na/(na + gamma*nd).
S (list): stoichiometry value for each burst:
S = (gamma*nd + na) /(gamma*nd + na + naa)
"""
# Attribute names containing per-photon data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per photon).
ph_fields = ['ph_times_m', 'nanotimes', 'particles',
'A_em', 'D_em', 'A_ex', 'D_ex']
# Attribute names containing background data.
# The attribute `bg` is a dict with photon-selections as keys and
# list of arrays as values. Each list contains one element per channel and
# each array one element per background period.
# The attributes `.Lim` and `.Ph_p` are lists with one element per channel.
# Each element is a lists-of-tuples (one tuple per background period).
# These attributes do not exist before computing the background.
bg_fields = ['bg', 'Lim', 'Ph_p']
# Attribute names containing per-burst data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per burst).
    # They do not necessarily exist. For example 'naa' exists only for ALEX
    # data. Also, none of them exist before performing a burst search.
burst_fields = ['E', 'S', 'mburst', 'nd', 'na', 'nt', 'bp', 'nda', 'naa',
'max_rate', 'sbr', 'nar']
# Quantities (scalars or arrays) defining the current set of bursts
burst_metadata = ['m', 'L', 'T', 'TT', 'F', 'FF', 'P', 'PP', 'rate_th',
'bg_bs', 'ph_sel', 'bg_corrected', 'leakage_corrected',
'dir_ex_corrected', 'dithering', 'fuse', 'lsb']
# List of photon selections on which the background is computed
_ph_streams = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'),
Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')]
@property
def ph_streams(self):
if self.alternated:
return self._ph_streams
else:
return [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
def __init__(self, leakage=0., gamma=1., dir_ex=0., **kwargs):
# Default values
init_kw = dict(ALEX=False, _leakage=float(leakage), _gamma=float(gamma),
_dir_ex=float(dir_ex), _beta=1., _chi_ch=1., s=[])
# Override with user data
init_kw.update(**kwargs)
DataContainer.__init__(self, **init_kw)
# def __getattr__(self, name):
# """Single-channel shortcuts for per-channel fields.
#
# Appending a '_' to a per-channel field avoids specifying the channel.
# For example use d.nd_ instead if d.nd[0].
# """
# msg_missing_attr = "'%s' object has no attribute '%s'" %\
# (self.__class__.__name__, name)
# if name.startswith('_') or not name.endswith('_'):
# raise AttributeError(msg_missing_attr)
#
# field = name[:-1]
# try:
# value = self.__getitem__(field)
# except KeyError:
# raise AttributeError(msg_missing_attr)
# else:
# # Support lists, tuples and object with array interface
# if isinstance(value, (list, tuple)) or isarray(value):
# if len(value) == self.nch:
# return value[0]
# raise ValueError('Name "%s" is not a per-channel field.' % field)
def copy(self, mute=False):
"""Copy data in a new object. All arrays copied except for ph_times_m
"""
pprint('Deep copy executed.\n', mute)
        new_d = Data(**self)  # this makes a shallow copy (like a pointer)
        # Deep copy (not just reference) of array data
for field in self.burst_fields + self.bg_fields:
            # Making sure the field is present
if field in self:
# Make a deepcopy of the per-channel lists
new_d[field] = copy.deepcopy(self[field])
# Set the attribute: new_d.k = new_d[k]
setattr(new_d, field, new_d[field])
return new_d
##
# Methods for photon timestamps (ph_times_m) access
#
def ph_times_hash(self, hash_name='md5', hexdigest=True):
"""Return an hash for the timestamps arrays.
"""
m = hashlib.new(hash_name)
for ph in self.iter_ph_times():
if isinstance(ph, np.ndarray):
m.update(ph.data)
else:
# TODO Handle ph_times in PyTables files
raise NotImplementedError
if hexdigest:
return m.hexdigest()
else:
return m
@property
def ph_data_sizes(self):
"""Array of total number of photons (ph-data) for each channel.
"""
if not hasattr(self, '_ph_data_sizes'):
# This works both for numpy arrays and pytables arrays
self._ph_data_sizes = np.array([ph.shape[0] for ph in
self.ph_times_m])
return self._ph_data_sizes
def _fix_ph_sel(self, ph_sel):
"""For non-ALEX data fix Aex to allow stable comparison."""
msg = 'Photon selection must be of type `Ph_sel` (it was `%s` instead).'
assert isinstance(ph_sel, Ph_sel), (msg % type(ph_sel))
if self.alternated or ph_sel.Dex != 'DAem':
return ph_sel
else:
return Ph_sel(Dex=ph_sel.Dex, Aex='DAem')
def _is_allph(self, ph_sel):
"""Return whether a photon selection `ph_sel` covers all photon."""
if self.alternated:
return ph_sel == Ph_sel(Dex='DAem', Aex='DAem')
else:
return ph_sel.Dex == 'DAem'
def get_ph_mask(self, ich=0, ph_sel=Ph_sel('all')):
"""Returns a mask for `ph_sel` photons in channel `ich`.
The masks are either boolean arrays or slices (full or empty). In
both cases they can be used to index the timestamps of the
corresponding channel.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
assert isinstance(ich, int)
if self._is_allph(ph_sel):
# Note that slice(None) is equivalent to [:].
# Also, numpy arrays are not copied when sliced.
# So getting all photons with this mask is efficient
# Note: the drawback is that the slice cannot be indexed
# (where a normal boolean array would)
return slice(None)
# Handle the case when A_em contains slice objects
if isinstance(self.A_em[ich], slice):
if self.A_em[ich] == slice(None):
if ph_sel.Dex == 'Dem':
return slice(0)
if ph_sel.Dex == 'Aem':
return slice(None)
elif self.A_em[ich] == slice(0):
if ph_sel.Dex == 'Dem':
return slice(None)
if ph_sel.Dex == 'Aem':
return slice(0)
else:
msg = 'When a slice, A_em can only be slice(None) or slice(0).'
raise NotImplementedError(msg)
# Base selections
elif ph_sel == Ph_sel(Dex='Dem'):
return self.get_D_em_D_ex(ich)
elif ph_sel == Ph_sel(Dex='Aem'):
return self.get_A_em_D_ex(ich)
elif ph_sel == Ph_sel(Aex='Dem'):
return self.get_D_em(ich) * self.get_A_ex(ich)
elif ph_sel == Ph_sel(Aex='Aem'):
return self.get_A_em(ich) * self.get_A_ex(ich)
# Selection of all photon in one emission ch
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
return self.get_D_em(ich)
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
return self.get_A_em(ich)
# Selection of all photon in one excitation period
elif ph_sel == Ph_sel(Dex='DAem'):
return self.get_D_ex(ich)
elif ph_sel == Ph_sel(Aex='DAem'):
return self.get_A_ex(ich)
# Selection of all photons except for Dem during Aex
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
return self.get_D_ex(ich) + self.get_A_em(ich) * self.get_A_ex(ich)
else:
raise ValueError('Photon selection not implemented.')
def iter_ph_masks(self, ph_sel=Ph_sel('all')):
"""Iterator returning masks for `ph_sel` photons.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
for ich in range(self.nch):
yield self.get_ph_mask(ich, ph_sel=ph_sel)
def get_ph_times(self, ich=0, ph_sel=Ph_sel('all'), compact=False):
"""Returns the timestamps array for channel `ich`.
This method always returns in-memory arrays, even when ph_times_m
is a disk-backed list of arrays.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
"""
ph = self.ph_times_m[ich]
        # If it is not an in-memory numpy array, it is an on-disk array to load
if not isinstance(ph, np.ndarray):
if hasattr(self, '_ph_cache') and self._ph_cache_ich == ich:
ph = self._ph_cache
else:
ph = ph.read()
self._ph_cache = ph
self._ph_cache_ich = ich
ph = ph[self.get_ph_mask(ich, ph_sel=ph_sel)]
if compact:
ph = self._ph_times_compact(ph, ph_sel)
return ph
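    # Usage sketch (assuming `d` is a Data object created by one of the
    # functions in `loaders.py`): timestamps and mask of DexAem photons
    # in the first channel.
    #
    #     ph_ad = d.get_ph_times(0, ph_sel=Ph_sel(Dex='Aem'))
    #     mask_ad = d.get_ph_mask(0, ph_sel=Ph_sel(Dex='Aem'))
    #
    # The mask can be a boolean array or a slice; both index d.ph_times_m[0].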
def iter_ph_times(self, ph_sel=Ph_sel('all'), compact=False):
"""Iterator that returns the arrays of timestamps in `.ph_times_m`.
Arguments:
Same arguments as :meth:`get_ph_mask` except for `ich`.
"""
for ich in range(self.nch):
yield self.get_ph_times(ich, ph_sel=ph_sel, compact=compact)
def _get_ph_mask_single(self, ich, mask_name, negate=False):
"""Get the bool array `mask_name` for channel `ich`.
If the internal "bool array" is a scalar return a slice (full or empty)
"""
mask = np.asarray(getattr(self, mask_name)[ich])
if negate:
mask = np.logical_not(mask)
if len(mask.shape) == 0:
# If mask is a boolean scalar, select all or nothing
mask = slice(None) if mask else slice(0)
return mask
def get_A_em(self, ich=0):
"""Returns a mask to select photons detected in the acceptor ch."""
return self._get_ph_mask_single(ich, 'A_em')
def get_D_em(self, ich=0):
"""Returns a mask to select photons detected in the donor ch."""
return self._get_ph_mask_single(ich, 'A_em', negate=True)
def get_A_ex(self, ich=0):
"""Returns a mask to select photons in acceptor-excitation periods."""
return self._get_ph_mask_single(ich, 'A_ex')
def get_D_ex(self, ich=0):
"""Returns a mask to select photons in donor-excitation periods."""
if self.alternated:
return self._get_ph_mask_single(ich, 'D_ex')
else:
return slice(None)
def get_D_em_D_ex(self, ich=0):
"""Returns a mask of donor photons during donor-excitation."""
if self.alternated:
return self.get_D_em(ich) * self.get_D_ex(ich)
else:
return self.get_D_em(ich)
def get_A_em_D_ex(self, ich=0):
"""Returns a mask of acceptor photons during donor-excitation."""
if self.alternated:
return self.get_A_em(ich) * self.get_D_ex(ich)
else:
return self.get_A_em(ich)
def iter_ph_times_period(self, ich=0, ph_sel=Ph_sel('all')):
"""Iterate through arrays of ph timestamps in each background period.
"""
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period in range(self.nperiods):
yield self.get_ph_times_period(period, ich=ich, mask=mask)
def get_ph_times_period(self, period, ich=0, ph_sel=Ph_sel('all'),
mask=None):
"""Return the array of ph_times in `period`, `ich` and `ph_sel`.
"""
istart, iend = self.Lim[ich][period]
period_slice = slice(istart, iend + 1)
ph_times = self.get_ph_times(ich=ich)
if mask is None:
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
if isinstance(mask, slice) and mask == slice(None):
ph_times_period = ph_times[period_slice]
else:
ph_times_period = ph_times[period_slice][mask[period_slice]]
return ph_times_period
def _assert_compact(self, ph_sel):
msg = ('Option compact=True requires a photon selection \n'
'from a single excitation period (either Dex or Aex).')
if not self.alternated:
raise ValueError('Option compact=True requires ALEX data.')
if ph_sel.Dex is not None and ph_sel.Aex is not None:
raise ValueError(msg)
def _excitation_width(self, ph_sel, ich=0):
"""Returns duration of alternation period outside selected excitation.
"""
self._assert_compact(ph_sel)
if ph_sel.Aex is None:
excitation_range = self._D_ON_multich[ich]
elif ph_sel.Dex is None:
excitation_range = self._A_ON_multich[ich]
return _excitation_width(excitation_range, self.alex_period)
def _ph_times_compact(self, ph, ph_sel):
"""Return timestamps in one excitation period with "gaps" removed.
It takes timestamps in the specified alternation period and removes
gaps due to time intervals outside the alternation period selection.
        This allows correcting the photon-rate distortion caused by alternation.
Arguments:
ph (array): timestamps array from which gaps have to be removed.
                The input array is not modified.
ph_sel (Ph_sel object): photon selection to be compacted.
Note that only one excitation must be specified, but the
emission can be 'Dem', 'Aem' or 'DAem'.
See :mod:`fretbursts.ph_sel` for details.
Returns:
Array of timestamps in one excitation periods with "gaps" removed.
"""
excitation_width = self._excitation_width(ph_sel)
return _ph_times_compact(ph, self.alex_period, excitation_width)
def _get_tuple_multich(self, name):
"""Get a n-element tuple field in multi-ch format (1 row per ch)."""
field = np.array(self[name])
if field.ndim == 1:
field = np.repeat([field], self.nch, axis=0)
return field
@property
def _D_ON_multich(self):
return self._get_tuple_multich('D_ON')
@property
def _A_ON_multich(self):
return self._get_tuple_multich('A_ON')
@property
def _det_donor_accept_multich(self):
return self._get_tuple_multich('det_donor_accept')
##
# Methods and properties for burst-data access
#
@property
def num_bursts(self):
"""Array of number of bursts in each channel."""
return np.array([bursts.num_bursts for bursts in self.mburst])
@property
def burst_widths(self):
"""List of arrays of burst duration in seconds. One array per channel.
"""
return [bursts.width * self.clk_p for bursts in self.mburst]
def burst_sizes_pax_ich(self, ich=0, gamma=1., add_aex=True,
beta=1., donor_ref=True, aex_corr=True):
r"""Return corrected burst sizes for channel `ich`. PAX-only.
When `donor_ref = False`, the formula for PAX-enhanced burst size is:
.. math::
\gamma(F_{D_{ex}D_{em}} + F_{DA_{ex}D_{em}}) +
\frac{1}{\alpha} F_{FRET}
where :math:`\alpha` is the Dex duty-cycle (0.5 if alternation
periods are equal) and :math:`F_{FRET}` is `na`, the AemAex
signal after leakage and direct-excitation corrections.
        If `add_aex = True`, we add the term:
.. math::
\tilde{F}_{A_{ex}A_{em}} / (\alpha\beta)
        where :math:`\tilde{F}_{A_{ex}A_{em}}` is the A emission due to
        A excitation (and not due to FRET).
If `aex_corr = False`, then :math:`\alpha` is fixed to 1.
If `donor_ref = True`, the above burst size expression is divided by
:math:`\gamma`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
donor_ref (bool): True or False select different conventions
for burst size correction. For details see
:meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
add_aex (boolean): when True, the returned burst size also
includes photons detected during the DAex. Default is True.
aex_corr (bool): If True, and `add_aex == True`, then divide
the DAexAem term (naa) by the Dex duty cycle. For example,
if Dex and DAex alternation periods are equal, naa is
multiplied by 2. This correction makes the returned value
equal to the denominator of the stoichiometry ratio S_pax
(PAX-enhanced formula). If False, naa is not divided by
the Dex duty-cycle (gamma and beta corrections may still be
applied). If `add_aex == False`, `aex_corr` is ignored.
beta (float): beta correction factor used for the DAexAem term
(naa) of the burst size.
If `add_aex == False` this argument is ignored. Default 1.
Returns
Array of burst sizes for channel `ich`.
See also:
:meth:`Data.burst_sizes_ich`
"""
assert 'PAX' in self.meas_type
naa = self._get_naa_ich(ich) # nar-subtracted
aex_dex_ratio = self._aex_dex_ratio()
alpha = 1
if aex_corr:
alpha = 1 - self._aex_fraction() # Dex duty-cycle
burst_size_dex = self.nd[ich] * gamma + self.na[ich]
burst_size_aex = (self.nda[ich] * gamma +
self.na[ich] * aex_dex_ratio +
naa / (alpha * beta))
burst_size = burst_size_dex
if add_aex:
burst_size += burst_size_aex
if donor_ref:
burst_size /= gamma
return burst_size
def burst_sizes_ich(self, ich=0, gamma=1., add_naa=False,
beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for channel `ich`.
If `donor_ref == True` (default) the gamma corrected burst size is
computed according to::
1) nd + na / gamma
Otherwise, if `donor_ref == False`, the gamma corrected burst size is::
2) nd * gamma + na
With the definition (1) the corrected burst size is equal to the raw
        burst size for zero-FRET or D-only bursts (hence the name `donor_ref`).
With the definition (2) the corrected burst size is equal to the raw
burst size for 100%-FRET bursts.
In an ALEX measurement, use `add_naa = True` to add counts from
AexAem stream to the returned burst size. The argument `gamma` and
        `beta` are used to correctly scale `naa` so that it becomes
commensurate with the Dex corrected burst size. In particular,
when using definition (1) (i.e. `donor_ref = True`), the total
burst size is::
(nd + na/gamma) + naa / (beta * gamma)
Conversely, when using definition (2) (`donor_ref = False`), the
total burst size is::
(nd * gamma + na) + naa / beta
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
add_naa (boolean): when True, add a term for AexAem photons when
computing burst size. Default False.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
beta (float): beta correction factor used for the AexAem term
of the burst size. Default 1. If `add_naa = False` or
measurement is not ALEX this argument is ignored.
For more info see explanation above.
donor_ref (bool): select the convention for burst size correction.
See details above in the function description.
Returns
Array of burst sizes for channel `ich`.
See also :meth:`fretbursts.burstlib.Data.get_naa_corrected`.
"""
if donor_ref:
burst_size = self.nd[ich] + self.na[ich] / gamma
else:
burst_size = self.nd[ich] * gamma + self.na[ich]
if add_naa and self.alternated:
kws = dict(ich=ich, gamma=gamma, beta=beta, donor_ref=donor_ref)
burst_size += self.get_naa_corrected(**kws)
return burst_size
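    # Worked example of the two conventions (made-up counts, gamma = 0.8):
    # for a burst with nd = 80 and na = 20,
    #     donor_ref=True  ->  nd + na / gamma  = 80 + 20 / 0.8  = 105
    #     donor_ref=False ->  nd * gamma + na  = 80 * 0.8 + 20  = 84
    # The two definitions differ only by the overall factor gamma (105 * 0.8 = 84).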
def get_naa_corrected(self, ich=0, gamma=1., beta=1., donor_ref=True):
"""Return corrected naa array for channel `ich`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
gamma (floats): gamma-factor to use in computing the corrected naa.
beta (float): beta-factor to use in computing the corrected naa.
donor_ref (bool): Select the convention for `naa` correction.
If True (default), uses `naa / (beta * gamma)`. Otherwise,
uses `naa / beta`. A consistent convention should be used
for the corrected Dex burst size in order to make it
commensurable with naa.
See also :meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
"""
naa = self._get_naa_ich(ich) # with eventual duty-cycle correction
if donor_ref:
correction = (gamma * beta)
else:
correction = beta
return naa / correction
def _get_naa_ich(self, ich=0):
"""Return naa for `ich` both in ALEX and PAX measurements.
In case of PAX, returns naa using the duty-cycle correction::
naa = self.naa - aex_dex_ratio * self.nar
where `self.nar` is equal to `self.na` before leakage and direct
excitation correction, and `aex_dex_ratio` is the Aex duty-cycle.
"""
naa = self.naa[ich]
if 'PAX' in self.meas_type:
# ATTENTION: do not modify naa inplace
naa = naa - self._aex_dex_ratio() * self.nar[ich]
return naa
def burst_sizes(self, gamma=1., add_naa=False, beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for all the channel.
Compute burst sizes by calling, for each channel,
:meth:`burst_sizes_ich`.
See :meth:`burst_sizes_ich` for description of the arguments.
Returns
List of arrays of burst sizes, one array per channel.
"""
kwargs = dict(gamma=gamma, add_naa=add_naa, beta=beta,
donor_ref=donor_ref)
bsize_list = [self.burst_sizes_ich(ich, **kwargs) for ich in
range(self.nch)]
return np.array(bsize_list)
def iter_bursts_ph(self, ich=0):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
for istart, istop in iter_bursts_start_stop(self.mburst[ich]):
yield istart, istop
def bursts_slice(self, N1=0, N2=-1):
"""Return new Data object with bursts between `N1` and `N2`
`N1` and `N2` can be scalars or lists (one per ch).
"""
if np.isscalar(N1): N1 = [N1] * self.nch
if np.isscalar(N2): N2 = [N2] * self.nch
assert len(N1) == len(N2) == self.nch
d = Data(**self)
d.add(mburst=[b[n1:n2].copy() for b, n1, n2 in zip(d.mburst, N1, N2)])
d.add(nt=[nt[n1:n2] for nt, n1, n2 in zip(d.nt, N1, N2)])
d.add(nd=[nd[n1:n2] for nd, n1, n2 in zip(d.nd, N1, N2)])
d.add(na=[na[n1:n2] for na, n1, n2 in zip(d.na, N1, N2)])
for name in ('naa', 'nda', 'nar'):
if name in d:
d.add(**{name:
[x[n1:n2] for x, n1, n2 in zip(d[name], N1, N2)]})
if 'nda' in self:
d.add(nda=[da[n1:n2] for da, n1, n2 in zip(d.nda, N1, N2)])
d.calc_fret(pax=self.pax) # recalculate fret efficiency
return d
def delete_burst_data(self):
"""Erase all the burst data"""
for name in self.burst_fields + self.burst_metadata:
if name in self:
self.delete(name)
for name in ('E_fitter', 'S_fitter'):
if hasattr(self, name):
delattr(self, name)
##
# Methods for high-level data transformation
#
def slice_ph(self, time_s1=0, time_s2=None, s='slice'):
"""Return a new Data object with ph in [`time_s1`,`time_s2`] (seconds)
If ALEX, this method must be called right after
        :func:`fretbursts.loader.alex_apply_period` (with `delete_ph_t=True`)
and before any background estimation or burst search.
"""
if time_s2 is None:
time_s2 = self.time_max
if time_s2 >= self.time_max and time_s1 <= 0:
return self.copy()
assert time_s1 < self.time_max
t1_clk, t2_clk = int(time_s1 / self.clk_p), int(time_s2 / self.clk_p)
masks = [(ph >= t1_clk) * (ph < t2_clk) for ph in self.iter_ph_times()]
new_d = Data(**self)
for name in self.ph_fields:
if name in self:
new_d[name] = [a[mask] for a, mask in zip(self[name], masks)]
setattr(new_d, name, new_d[name])
new_d.delete_burst_data()
# Shift timestamps to start from 0 to avoid problems with BG calc
for ich in range(self.nch):
ph_i = new_d.get_ph_times(ich)
ph_i -= t1_clk
new_d.s.append(s)
# Delete eventual cached properties
for attr in ['_time_min', '_time_max']:
if hasattr(new_d, attr):
delattr(new_d, attr)
return new_d
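    # Usage sketch (assuming `d` is a Data object before background
    # estimation and burst search): keep only the first 300 seconds.
    #
    #     d300 = d.slice_ph(time_s1=0, time_s2=300)
    #
    # Background and burst search are then computed on `d300`.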
def collapse(self, update_gamma=True, skip_ch=None):
"""Returns an object with 1-spot data joining the multi-spot data.
Arguments:
skip_ch (tuple of ints): list of channels to skip.
If None, keep all channels.
update_gamma (bool): if True, recompute gamma as mean of the
per-channel gamma. If False, do not update gamma.
If True, gamma becomes a single value and the update has the
side effect of recomputing E and S values, discarding
previous per-channel corrections. If False, gamma is not
updated (it stays with multi-spot values) and E and S are
not recomputed.
Note:
When using `update_gamma=False`, burst selections on the
collapsed `Data` object should be done with
`computefret=False`, otherwise any attempt to use multi-spot
gamma for single-spot data will raise an error.
"""
dc = Data(**self)
mch_bursts = self.mburst
if skip_ch is not None:
mch_bursts = [bursts for i, bursts in enumerate(mch_bursts)
if i not in skip_ch]
bursts = bslib.Bursts.merge(mch_bursts, sort=False)
# Sort by start times, and when equal by stop times
indexsort = np.lexsort((bursts.stop, bursts.start))
dc.add(mburst=[bursts[indexsort]])
ich_burst = [i * np.ones(nb) for i, nb in enumerate(self.num_bursts)]
dc.add(ich_burst=np.hstack(ich_burst)[indexsort])
for name in self.burst_fields:
            if name in self and name != 'mburst':
# Concatenate arrays along axis = 0
value = [np.concatenate(self[name])[indexsort]]
dc.add(**{name: value})
dc.add(nch=1)
dc.add(_chi_ch=1.)
# NOTE: Updating gamma has the side effect of recomputing E
# (and S if ALEX). We need to update gamma because, in general,
# gamma can be an array with a value for each ch.
# However, the per-channel gamma correction is lost once both
# gamma and chi_ch are made scalar.
if update_gamma:
dc._update_gamma(np.mean(self.get_gamma_array()))
return dc
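    # Usage sketch (multi-spot data only; `d8` is a hypothetical 8-spot
    # Data object): join all spots except the first into a 1-spot object.
    #
    #     d1 = d8.collapse(skip_ch=(0,))
    #
    # The added attribute `ich_burst` records the spot of origin of each burst.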
##
# Utility methods
#
def get_params(self):
"""Returns a plain dict containing only parameters and no arrays.
This can be used as a summary of data analysis parameters.
        Additional keys `name` and `Name` are added with values
        from `.name` and `.Name()`.
"""
p_names = ['fname', 'clk_p', 'nch', 'ph_sel', 'L', 'm', 'F', 'P',
'_leakage', '_dir_ex', '_gamma', 'bg_time_s',
'T', 'rate_th',
'bg_corrected', 'leakage_corrected', 'dir_ex_corrected',
'dithering', '_chi_ch', 's', 'ALEX']
p_dict = dict(self)
        for name in list(p_dict.keys()):
if name not in p_names:
p_dict.pop(name)
p_dict.update(name=self.name, Name=self.Name(), bg_mean=self.bg_mean,
nperiods=self.nperiods)
return p_dict
def expand(self, ich=0, alex_naa=False, width=False):
"""Return per-burst D and A sizes (nd, na) and their background counts.
This method returns for each bursts the corrected signal counts and
background counts in donor and acceptor channels. Optionally, the
burst width is also returned.
Arguments:
ich (int): channel for the bursts (can be not 0 only in multi-spot)
alex_naa (bool): if True and self.ALEX, returns burst sizes and
background also for acceptor photons during accept. excitation
width (bool): whether return the burst duration (in seconds).
Returns:
List of arrays: nd, na, donor bg, acceptor bg.
If `alex_naa` is True returns: nd, na, naa, bg_d, bg_a, bg_aa.
If `width` is True returns the bursts duration (in sec.) as last
element.
"""
period = self.bp[ich]
w = self.mburst[ich].width * self.clk_p
bg_a = self.bg[Ph_sel(Dex='Aem')][ich][period] * w
bg_d = self.bg[Ph_sel(Dex='Dem')][ich][period] * w
res = [self.nd[ich], self.na[ich]]
if self.alternated and alex_naa:
bg_aa = self.bg[Ph_sel(Aex='Aem')][ich][period] * w
res.extend([self.naa[ich], bg_d, bg_a, bg_aa])
else:
res.extend([bg_d, bg_a])
if width:
res.append(w)
return res
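    # Usage sketch (assuming `d` contains bursts from a non-ALEX measurement):
    #
    #     nd, na, bg_d, bg_a, width = d.expand(width=True)
    #
    # For ALEX data with alex_naa=True the returned list becomes
    # [nd, na, naa, bg_d, bg_a, bg_aa] (plus the width when width=True).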
def burst_data_ich(self, ich):
"""Return a dict of burst data for channel `ich`."""
bursts = {}
bursts['size_raw'] = self.mburst[ich].counts
bursts['t_start'] = self.mburst[ich].start * self.clk_p
bursts['t_stop'] = self.mburst[ich].stop * self.clk_p
bursts['i_start'] = self.mburst[ich].istart
bursts['i_stop'] = self.mburst[ich].istop
period = bursts['bg_period'] = self.bp[ich]
width = self.mburst[ich].width * self.clk_p
bursts['width_ms'] = width * 1e3
bursts['bg_ad'] = self.bg[Ph_sel(Dex='Aem')][ich][period] * width
bursts['bg_dd'] = self.bg[Ph_sel(Dex='Dem')][ich][period] * width
if self.alternated:
bursts['bg_aa'] = self.bg[Ph_sel(Aex='Aem')][ich][period] * width
bursts['bg_da'] = self.bg[Ph_sel(Aex='Dem')][ich][period] * width
burst_fields = self.burst_fields[:]
burst_fields.remove('mburst')
burst_fields.remove('bp')
for field in burst_fields:
if field in self:
bursts[field] = self[field][ich]
return bursts
@property
def time_max(self):
"""The last recorded time in seconds."""
if not hasattr(self, '_time_max'):
self._time_max = self._time_reduce(last=True, func=max)
return self._time_max
@property
def time_min(self):
"""The first recorded time in seconds."""
if not hasattr(self, '_time_min'):
self._time_min = self._time_reduce(last=False, func=min)
return self._time_min
def _time_reduce(self, last=True, func=max):
"""Return first or last timestamp per-ch, reduced with `func`.
"""
idx = -1 if last else 0
# Get either ph_times_m or ph_times_t
ph_times = None
for ph_times_name in ['ph_times_m', 'ph_times_t']:
try:
ph_times = self[ph_times_name]
except KeyError:
pass
else:
break
if ph_times is not None:
# This works with both numpy arrays and pytables arrays
time = func(t[idx] for t in ph_times if t.shape[0] > 0)
elif 'mburst' in self:
if last:
time = func(bursts[idx].stop for bursts in self.mburst)
else:
time = func(bursts[idx].start for bursts in self.mburst)
else:
raise ValueError("No timestamps or bursts found.")
return time * self.clk_p
def ph_in_bursts_mask_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return mask of all photons inside bursts for channel `ich`.
Returns
Boolean array for photons in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
bursts_mask = ph_in_bursts_mask(self.ph_data_sizes[ich],
self.mburst[ich])
if self._is_allph(ph_sel):
return bursts_mask
else:
ph_sel_mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
return ph_sel_mask * bursts_mask
def ph_in_bursts_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return timestamps of photons inside bursts for channel `ich`.
Returns
Array of photon timestamps in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
ph_all = self.get_ph_times(ich=ich)
bursts_mask = self.ph_in_bursts_mask_ich(ich, ph_sel)
return ph_all[bursts_mask]
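    # Usage sketch (assuming bursts have already been searched on `d`):
    # timestamps of DexDem photons falling inside any burst of channel 0.
    #
    #     ph_dd_in_bursts = d.ph_in_bursts_ich(0, ph_sel=Ph_sel(Dex='Dem'))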
##
# Background analysis methods
#
def _obsolete_bg_attr(self, attrname, ph_sel):
print('The Data.%s attribute is deprecated. Please use '
'Data.bg(%s) instead.' % (attrname, repr(ph_sel)))
bg_attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa')
bg_mean_attrs = ('rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
assert attrname in bg_attrs or attrname in bg_mean_attrs
if attrname in bg_attrs:
bg_field = 'bg'
elif attrname in bg_mean_attrs:
bg_field = 'bg_mean'
try:
value = getattr(self, bg_field)[ph_sel]
except AttributeError as e:
# This only happens when trying to access 'bg' because
# 'bg_mean' raises RuntimeError when missing.
msg = 'No attribute `%s` found. Please compute background first.'
raise_from(RuntimeError(msg % bg_field), e)
return value
@property
def rate_m(self):
return self._obsolete_bg_attr('rate_m', Ph_sel('all'))
@property
def rate_dd(self):
return self._obsolete_bg_attr('rate_dd', Ph_sel(Dex='Dem'))
@property
def rate_ad(self):
return self._obsolete_bg_attr('rate_ad', Ph_sel(Dex='Aem'))
@property
def rate_da(self):
return self._obsolete_bg_attr('rate_da', Ph_sel(Aex='Dem'))
@property
def rate_aa(self):
return self._obsolete_bg_attr('rate_aa', Ph_sel(Aex='Aem'))
@property
def bg_dd(self):
return self._obsolete_bg_attr('bg_dd', Ph_sel(Dex='Dem'))
@property
def bg_ad(self):
return self._obsolete_bg_attr('bg_ad', Ph_sel(Dex='Aem'))
@property
def bg_da(self):
return self._obsolete_bg_attr('bg_da', Ph_sel(Aex='Dem'))
@property
def bg_aa(self):
return self._obsolete_bg_attr('bg_aa', Ph_sel(Aex='Aem'))
def calc_bg_cache(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True,
recompute=False):
"""Compute time-dependent background rates for all the channels.
This version is the cached version of :meth:`calc_bg`.
This method tries to load the background data from a cache file.
If a saved background data is not found, it computes
the background and stores it to disk.
The arguments are the same as :meth:`calc_bg` with the only addition
of `recompute` (bool) to force a background recomputation even if
a cached version is found.
        For more details on the other arguments see :meth:`calc_bg`.
"""
bg_cache.calc_bg_cache(self, fun, time_s=time_s,
tail_min_us=tail_min_us, F_bg=F_bg,
error_metrics=error_metrics, fit_allph=fit_allph,
recompute=recompute)
def _get_auto_bg_th_arrays(self, F_bg=2, tail_min_us0=250):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
Th_us = {}
for ph_sel in self.ph_streams:
th_us = np.zeros(self.nch)
for ich, ph in enumerate(self.iter_ph_times(ph_sel=ph_sel)):
if ph.size > 0:
bg_rate, _ = bg.exp_fit(ph, tail_min_us=tail_min_us0)
th_us[ich] = 1e6 * F_bg / bg_rate
Th_us[ph_sel] = th_us
# Save the input used to generate Th_us
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
return Th_us
def _get_bg_th_arrays(self, tail_min_us, nperiods):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
        are 1-D arrays of size nperiods (one threshold per background period).
"""
n_streams = len(self.ph_streams)
if np.size(tail_min_us) == 1:
tail_min_us = np.repeat(tail_min_us, n_streams)
elif np.size(tail_min_us) == n_streams:
tail_min_us = np.asarray(tail_min_us)
elif np.size(tail_min_us) != n_streams:
raise ValueError('Wrong tail_min_us length (%d).' %
len(tail_min_us))
th_us = {}
for i, key in enumerate(self.ph_streams):
th_us[key] = np.ones(nperiods) * tail_min_us[i]
# Save the input used to generate Th_us
self.add(bg_th_us_user=tail_min_us)
return th_us
def _clean_bg_data(self):
"""Remove background fields specific of only one fit type.
Computing background with manual or 'auto' threshold results in
different sets of attributes being saved. This method removes these
attributes and should be called before recomputing the background
to avoid having old stale attributes of a previous background fit.
"""
# Attributes specific of manual or 'auto' bg fit
field_list = ['bg_auto_th_us0', 'bg_auto_F_bg', 'bg_th_us_user']
for field in field_list:
if field in self:
self.delete(field)
if hasattr(self, '_bg_mean'):
delattr(self, '_bg_mean')
def _get_num_periods(self, time_s):
"""Return the number of periods using `time_s` as period duration.
"""
duration = self.time_max - self.time_min
        # Take the ceil to have at least 1 period
nperiods = np.ceil(duration / time_s)
# Discard last period if negligibly small to avoid problems with
# background fit with very few photons.
if nperiods > 1:
last_period = self.time_max - time_s * (nperiods - 1)
# Discard last period if smaller than 3% of the bg period
if last_period < time_s * 0.03:
nperiods -= 1
return int(nperiods)
def calc_bg(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True):
"""Compute time-dependent background rates for all the channels.
Compute background rates for donor, acceptor and both detectors.
The rates are computed every `time_s` seconds, allowing to
track possible variations during the measurement.
Arguments:
fun (function): function for background estimation (example
`bg.exp_fit`)
time_s (float, seconds): compute background each time_s seconds
tail_min_us (float, tuple or string): min threshold in us for
photon waiting times to use in background estimation.
                If float, the same threshold is used for all the photon
                streams and all the channels.
                If a tuple, it must have one value per photon stream (3
                values for non-alternated data: 'all', DD, AD; 5 values
                for ALEX/PAX data: 'all', DD, AD, DA, AA), the same values
                being used for all the channels.
                If 'auto', the threshold is computed for each stream ('all',
                DD, AD, DA, AA) and for each channel as `F_bg / rate_ml0`.
`rate_ml0` is an initial estimation of the rate performed using
:func:`bg.exp_fit` and a fixed threshold (default 250us).
            F_bg (float): when `tail_min_us` is 'auto', the factor used to
                compute the threshold from the initial background estimation
                (threshold_us = 1e6 * F_bg / initial_bg_rate).
error_metrics (string): Specifies the error metric to use.
See :func:`fretbursts.background.exp_fit` for more details.
            fit_allph (bool): if True (default) the background for the
                all-photons stream is fitted. If False it is computed as the sum of
backgrounds in all the other streams.
The background estimation functions are defined in the module
`background` (conventionally imported as `bg`).
Example:
Compute background with `bg.exp_fit` (inter-photon delays MLE
tail fitting), every 30s, with automatic tail-threshold::
                d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
Returns:
None, all the results are saved in the object itself.
"""
pprint(" - Calculating BG rates ... ")
self._clean_bg_data()
kwargs = dict(clk_p=self.clk_p, error_metrics=error_metrics)
nperiods = self._get_num_periods(time_s)
streams_noall = [s for s in self.ph_streams if s != Ph_sel('all')]
bg_auto_th = tail_min_us == 'auto'
if bg_auto_th:
tail_min_us0 = 250
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
auto_th_kwargs = dict(clk_p=self.clk_p, tail_min_us=tail_min_us0)
th_us = {}
for key in self.ph_streams:
th_us[key] = np.zeros(nperiods)
else:
th_us = self._get_bg_th_arrays(tail_min_us, nperiods)
Lim, Ph_p = [], []
BG, BG_err = [], []
Th_us = []
for ich, ph_ch in enumerate(self.iter_ph_times()):
masks = {sel: self.get_ph_mask(ich, ph_sel=sel)
for sel in self.ph_streams}
bins = ((np.arange(nperiods + 1) * time_s + self.time_min) /
self.clk_p)
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
bg = {sel: np.zeros(nperiods) for sel in self.ph_streams}
bg_err = {sel: np.zeros(nperiods) for sel in self.ph_streams}
i1 = 0
for ip in range(nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1 - 1]))
ph_i = ph_ch[i0:i1]
if fit_allph:
sel = Ph_sel('all')
if bg_auto_th:
_bg, _ = fun(ph_i, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i, tail_min_us=th_us[sel][ip], **kwargs)
for sel in streams_noall:
# This supports cases of D-only or A-only timestamps
# where self.A_em[ich] is a bool and not a bool-array
# In this case, the mask of either DexDem or DexAem is
# slice(None) (all-elements selection).
if isinstance(masks[sel], slice):
if masks[sel] == slice(None):
bg[sel][ip] = bg[Ph_sel('all')][ip]
bg_err[sel][ip] = bg_err[Ph_sel('all')][ip]
continue
else:
ph_i_sel = ph_i[masks[sel][i0:i1]]
if ph_i_sel.size > 0:
if bg_auto_th:
_bg, _ = fun(ph_i_sel, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i_sel, tail_min_us=th_us[sel][ip], **kwargs)
if not fit_allph:
bg[Ph_sel('all')] += sum(bg[s] for s in streams_noall)
bg_err[Ph_sel('all')] += sum(bg_err[s] for s in streams_noall)
Lim.append(lim)
Ph_p.append(ph_p)
BG.append(bg)
BG_err.append(bg_err)
Th_us.append(th_us)
# Make Dict Of Lists (DOL) from Lists of Dicts
BG_dol, BG_err_dol, Th_us_dol = {}, {}, {}
for sel in self.ph_streams:
BG_dol[sel] = [bg_ch[sel] for bg_ch in BG]
BG_err_dol[sel] = [err_ch[sel] for err_ch in BG_err]
Th_us_dol[sel] = [th_ch[sel] for th_ch in Th_us]
self.add(bg=BG_dol, bg_err=BG_err_dol, bg_th_us=Th_us_dol,
Lim=Lim, Ph_p=Ph_p,
bg_fun=fun, bg_fun_name=fun.__name__,
bg_time_s=time_s, bg_ph_sel=Ph_sel('all'),
                 bg_auto_th=bg_auto_th,  # bool, True when using the auto-threshold
)
pprint("[DONE]\n")
@property
def nperiods(self):
return len(self.bg[Ph_sel('all')][0])
@property
def bg_mean(self):
if 'bg' not in self:
raise RuntimeError('No background found, compute it first.')
if not hasattr(self, '_bg_mean'):
self._bg_mean = {k: [bg_ch.mean() for bg_ch in bg_ph_sel]
for k, bg_ph_sel in self.bg.items()}
return self._bg_mean
def recompute_bg_lim_ph_p(self, ph_sel, mute=False):
"""Recompute self.Lim and selp.Ph_p relative to ph selection `ph_sel`
`ph_sel` is a Ph_sel object selecting the timestamps in which self.Lim
and self.Ph_p are being computed.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if self.bg_ph_sel == ph_sel:
return
pprint(" - Recomputing background limits for %s ... " %
str(ph_sel), mute)
bg_time_clk = self.bg_time_s / self.clk_p
Lim, Ph_p = [], []
for ph_ch, lim in zip(self.iter_ph_times(ph_sel), self.Lim):
bins = np.arange(self.nperiods + 1) * bg_time_clk
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
i1 = 0
for ip in range(self.nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1-1]))
Lim.append(lim)
Ph_p.append(ph_p)
self.add(Lim=Lim, Ph_p=Ph_p, bg_ph_sel=ph_sel)
pprint("[DONE]\n", mute)
##
# Burst analysis methods
#
def _calc_burst_period(self):
"""Compute for each burst the "background period" `bp`.
Background periods are the time intervals on which the BG is computed.
"""
P = []
for b, lim in zip(self.mburst, self.Lim):
p = zeros(b.num_bursts, dtype=np.int16)
if b.num_bursts > 0:
istart = b.istart
for i, (l0, l1) in enumerate(lim):
p[(istart >= l0) * (istart <= l1)] = i
P.append(p)
self.add(bp=P)
def _param_as_mch_array(self, par):
"""Regardless of `par` size, return an arrays with size == nch.
if `par` is scalar the arrays repeats the calar multiple times
if `par is a list/array must be of length `nch`.
"""
assert size(par) == 1 or size(par) == self.nch
return np.repeat(par, self.nch) if size(par) == 1 else np.asarray(par)
def bg_from(self, ph_sel):
"""Return the background rates for the specified photon selection.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if ph_sel in self.ph_streams:
return self.bg[ph_sel]
elif ph_sel == Ph_sel(Dex='DAem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Aex='DAem'):
sel = Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Aex='Dem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
sel = Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
sel = (Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem'))
bg = [b1 + b2 + b3 for b1, b2, b3 in
zip(self.bg[sel[0]], self.bg[sel[1]], self.bg[sel[2]])]
else:
raise NotImplementedError('Photon selection %s not implemented.' %
str(ph_sel))
return bg
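    # Illustrative usage sketch (assumes `d` is a Data instance whose
    # background has already been estimated, e.g. via `calc_bg`):
    #
    #     bg_dd = d.bg_from(Ph_sel(Dex='Dem'))  # DexDem background rates
    #     bg_dd[0]  # rates for channel 0, one value per background period
    #
    # Composite selections such as Ph_sel(Dex='DAem') are returned as the
    # sum of the corresponding single-stream rates, as implemented above.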
def _calc_T(self, m, P, F=1., ph_sel=Ph_sel('all'), c=-1):
"""If P is None use F, otherwise uses both P *and* F (F defaults to 1).
When P is None, compute the time lag T for burst search according to::
T = (m - 1 - c) / (F * bg_rate)
"""
# Regardless of F and P sizes, FF and PP are arrays with size == nch
FF = self._param_as_mch_array(F)
PP = self._param_as_mch_array(P)
if P is None:
# NOTE: the following lambda ignores Pi
find_T = lambda m, Fi, Pi, bg: (m - 1 - c) / (bg * Fi)
else:
if F != 1:
print("WARNING: BS prob. th. with modified BG rate (F=%.1f)"
% F)
find_T = lambda m, Fi, Pi, bg: find_optimal_T_bga(bg*Fi, m, 1-Pi)
TT, T, rate_th = [], [], []
bg_bs = self.bg_from(ph_sel)
for bg_ch, F_ch, P_ch in zip(bg_bs, FF, PP):
# All "T" are in seconds
Tch = find_T(m, F_ch, P_ch, bg_ch)
TT.append(Tch)
T.append(Tch.mean())
rate_th.append(np.mean(m / Tch))
self.add(TT=TT, T=T, bg_bs=bg_bs, FF=FF, PP=PP, F=F, P=P,
rate_th=rate_th)
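    # Worked example of the time-lag formula above (illustrative numbers):
    # with m=10, c=-1, F=6 and a background rate of 2000 cps,
    #     T = (m - 1 - c) / (F * bg_rate) = (10 - 1 + 1) / (6 * 2000) ~ 0.83 ms
    # i.e. m=10 photons must arrive within ~0.83 ms to trigger a burst.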
def _burst_search_rate(self, m, L, min_rate_cps, c=-1, ph_sel=Ph_sel('all'),
compact=False, index_allph=True, verbose=True,
pure_python=False):
"""Compute burst search using a fixed minimum photon rate.
The burst starts when, for `m` consecutive photons::
(m - 1 - c) / (t[last] - t[first]) >= min_rate_cps
Arguments:
min_rate_cps (float or array): minimum photon rate for burst start.
                If an array, it contains one value per channel.
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
Min_rate_cps = self._param_as_mch_array(min_rate_cps)
mburst = []
T_clk = (m - 1 - c) / Min_rate_cps / self.clk_p
for ich, t_clk in enumerate(T_clk):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
label = '%s CH%d' % (ph_sel, ich + 1) if verbose else None
burstarray = bsearch(ph_bs, L, m, t_clk, label=label, verbose=verbose)
if burstarray.size > 1:
bursts = bslib.Bursts(burstarray)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
mburst.append(bursts)
self.add(mburst=mburst, rate_th=Min_rate_cps, T=T_clk * self.clk_p)
if ph_sel != Ph_sel('all') and index_allph:
self._fix_mburst_from(ph_sel=ph_sel)
def _burst_search_TT(self, m, L, ph_sel=Ph_sel('all'), verbose=True,
compact=False, index_allph=True, pure_python=False,
mute=False):
"""Compute burst search with params `m`, `L` on ph selection `ph_sel`
Requires the list of arrays `self.TT` with the max time-thresholds in
the different burst periods for each channel (use `._calc_T()`).
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
self.recompute_bg_lim_ph_p(ph_sel=ph_sel, mute=mute)
MBurst = []
label = ''
for ich, T in enumerate(self.TT):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
burstarray_ch_list = []
Tck = T / self.clk_p
for ip, (l0, l1) in enumerate(self.Lim[ich]):
if verbose:
label = '%s CH%d-%d' % (ph_sel, ich + 1, ip)
burstarray = bsearch(ph_bs, L, m, Tck[ip], slice_=(l0, l1 + 1),
label=label, verbose=verbose)
if burstarray.size > 1:
burstarray_ch_list.append(burstarray)
if len(burstarray_ch_list) > 0:
data = np.vstack(burstarray_ch_list)
bursts = bslib.Bursts(data)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
MBurst.append(bursts)
self.add(mburst=MBurst)
if ph_sel != Ph_sel('all') and index_allph:
# Convert the burst data to be relative to ph_times_m.
# Convert both Lim/Ph_p and mburst, as they are both needed
# to compute `.bp`.
self.recompute_bg_lim_ph_p(ph_sel=Ph_sel('all'), mute=mute)
self._fix_mburst_from(ph_sel=ph_sel, mute=mute)
def _fix_mburst_from(self, ph_sel, mute=False):
"""Convert burst data from any ph_sel to 'all' timestamps selection.
"""
assert isinstance(ph_sel, Ph_sel) and not self._is_allph(ph_sel)
pprint(' - Fixing burst data to refer to ph_times_m ... ', mute)
for bursts, mask in zip(self.mburst,
self.iter_ph_masks(ph_sel=ph_sel)):
bursts.recompute_index_expand(mask, out=bursts)
pprint('[DONE]\n', mute)
def burst_search(self, L=None, m=10, F=6., P=None, min_rate_cps=None,
ph_sel=Ph_sel('all'), compact=False, index_allph=True,
c=-1, computefret=True, max_rate=False, dither=False,
pure_python=False, verbose=False, mute=False, pax=False):
"""Performs a burst search with specified parameters.
This method performs a sliding-window burst search without
binning the timestamps. The burst starts when the rate of `m`
photons is above a minimum rate, and stops when the rate falls below
the threshold. The result of the burst search is stored in the
`mburst` attribute (a list of Bursts objects, one per channel)
containing start/stop times and indexes. By default, after burst
        search, this method computes donor and acceptor counts, applies
burst corrections (background, leakage, etc...) and computes
E (and S in case of ALEX). You can skip these steps by passing
`computefret=False`.
The minimum rate can be explicitly specified with the `min_rate_cps`
argument, or computed as a function of the background rate with the
`F` argument.
Parameters:
m (int): number of consecutive photons used to compute the
photon rate. Typical values 5-20. Default 10.
L (int or None): minimum number of photons in burst. If None
(default) L = m is used.
F (float): defines how many times higher than the background rate
is the minimum rate used for burst search
(`min rate = F * bg. rate`), assuming that `P = None` (default).
Typical values are 3-9. Default 6.
P (float): threshold for burst detection expressed as a
                probability that a detected burst is not due to a Poisson
background. If not None, `P` overrides `F`. Note that the
background process is experimentally super-Poisson so this
probability is not physically very meaningful. Using this
argument is discouraged.
min_rate_cps (float or list/array): minimum rate in cps for burst
                start. If not None, it takes precedence over `P` and `F`.
                If non-scalar, it contains one rate per multispot channel.
Typical values range from 20e3 to 100e3.
ph_sel (Ph_sel object): defines the "photon selection" (or stream)
to be used for burst search. Default: all photons.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
index_allph (bool): if True (default), the indexes of burst start
and stop (`istart`, `istop`) are relative to the full
timestamp array. If False, the indexes are relative to
timestamps selected by the `ph_sel` argument.
c (float): correction factor used in the rate vs time-lags relation.
`c` affects the computation of the burst-search parameter `T`.
When `F` is not None, `T = (m - 1 - c) / (F * bg_rate)`.
When using `min_rate_cps`, `T = (m - 1 - c) / min_rate_cps`.
computefret (bool): if True (default) compute donor and acceptor
counts, apply corrections (background, leakage, direct
excitation) and compute E (and S). If False, skip all these
steps and stop just after the initial burst search.
max_rate (bool): if True compute the max photon rate inside each
burst using the same `m` used for burst search. If False
(default) skip this step.
dither (bool): if True applies dithering corrections to burst
counts. Default False. See :meth:`Data.dither`.
pure_python (bool): if True, uses the pure python functions even
when optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Note:
when using `P` or `F` the background rates are needed, so
`.calc_bg()` must be called before the burst search.
Example:
d.burst_search(m=10, F=6)
Returns:
None, all the results are saved in the `Data` object.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if compact:
self._assert_compact(ph_sel)
pprint(" - Performing burst search (verbose=%s) ..." % verbose, mute)
# Erase any previous burst data
self.delete_burst_data()
if L is None:
L = m
if min_rate_cps is not None:
# Saves rate_th in self
self._burst_search_rate(m=m, L=L, min_rate_cps=min_rate_cps, c=c,
ph_sel=ph_sel, compact=compact,
index_allph=index_allph,
verbose=verbose, pure_python=pure_python)
else:
# Compute TT, saves P and F in self
self._calc_T(m=m, P=P, F=F, ph_sel=ph_sel, c=c)
# Use TT and compute mburst
self._burst_search_TT(L=L, m=m, ph_sel=ph_sel, compact=compact,
index_allph=index_allph, verbose=verbose,
pure_python=pure_python, mute=mute)
pprint("[DONE]\n", mute)
pprint(" - Calculating burst periods ...", mute)
self._calc_burst_period() # writes bp
pprint("[DONE]\n", mute)
# (P, F) or rate_th are saved in _calc_T() or _burst_search_rate()
self.add(m=m, L=L, ph_sel=ph_sel)
# The correction flags are both set here and in calc_ph_num() so that
# they are always consistent. Case 1: we perform only burst search
# (with no call to calc_ph_num). Case 2: we re-call calc_ph_num()
# without doing a new burst search
self.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
self._burst_search_postprocess(
computefret=computefret, max_rate=max_rate, dither=dither,
pure_python=pure_python, mute=mute, pax=pax)
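    # Illustrative calls (a minimal sketch; assumes `d` is a Data instance
    # and, when using F or P, that the background has been computed):
    #
    #     d.burst_search(m=10, F=6)                           # rate >= 6 x background
    #     d.burst_search(m=10, min_rate_cps=50e3)             # fixed 50 kcps threshold
    #     d.burst_search(m=10, F=6, ph_sel=Ph_sel(Dex='Dem')) # DexDem photons only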
def _burst_search_postprocess(self, computefret, max_rate, dither,
pure_python, mute, pax):
if computefret:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
self.calc_fret(count_ph=True, corrections=True, dither=dither,
mute=mute, pure_python=pure_python, pax=pax)
pprint(" [DONE Counting D/A]\n", mute)
if max_rate:
pprint(" - Computing max rates in burst ...", mute)
self.calc_max_rate(m=self.m)
pprint("[DONE]\n", mute)
def calc_ph_num(self, alex_all=False, pure_python=False):
"""Computes number of D, A (and AA) photons in each burst.
Arguments:
alex_all (bool): if True and self.ALEX is True, computes also the
donor channel photons during acceptor excitation (`nda`)
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
Returns:
            Saves `nd`, `na`, `nt` (and possibly `naa`, `nda`) in self.
Returns None.
"""
mch_count_ph_in_bursts = _get_mch_count_ph_in_bursts_func(pure_python)
if not self.alternated:
nt = [b.counts.astype(float) if b.num_bursts > 0 else np.array([])
for b in self.mburst]
A_em = [self.get_A_em(ich) for ich in range(self.nch)]
if isinstance(A_em[0], slice):
# This is to support the case of A-only or D-only data
n0 = [np.zeros(mb.num_bursts) for mb in self.mburst]
if A_em[0] == slice(None):
nd, na = n0, nt # A-only case
elif A_em[0] == slice(0):
nd, na = nt, n0 # D-only case
else:
# This is the usual case with photons in both D and A channels
na = mch_count_ph_in_bursts(self.mburst, A_em)
nd = [t - a for t, a in zip(nt, na)]
assert (nt[0] == na[0] + nd[0]).all()
else:
# The "new style" would be:
#Mask = [m for m in self.iter_ph_masks(Ph_sel(Dex='Dem'))]
Mask = [d_em * d_ex for d_em, d_ex in zip(self.D_em, self.D_ex)]
nd = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * d_ex for a_em, d_ex in zip(self.A_em, self.D_ex)]
na = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * a_ex for a_em, a_ex in zip(self.A_em, self.A_ex)]
naa = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(naa=naa)
if alex_all or 'PAX' in self.meas_type:
Mask = [d_em * a_ex for d_em, a_ex in zip(self.D_em, self.A_ex)]
nda = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(nda=nda)
if self.ALEX:
nt = [d + a + aa for d, a, aa in zip(nd, na, naa)]
assert (nt[0] == na[0] + nd[0] + naa[0]).all()
elif 'PAX' in self.meas_type:
nt = [d + a + da + aa for d, a, da, aa in zip(nd, na, nda, naa)]
assert (nt[0] == na[0] + nd[0] + nda[0] + naa[0]).all()
# This is a copy of na which will never be corrected
# (except for background). It is used to compute the
# equivalent of naa for PAX:
# naa~ = naa - nar
# where naa~ is the A emission due to direct excitation
# by A laser during D+A-excitation,
# nar is the uncorrected A-channel signal during D-excitation,
# and naa is the A-channel signal during D+A excitation.
nar = [a.copy() for a in na]
self.add(nar=nar)
self.add(nd=nd, na=na, nt=nt,
bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
def fuse_bursts(self, ms=0, process=True, mute=False):
"""Return a new :class:`Data` object with nearby bursts fused together.
Arguments:
            ms (float): fuse all bursts separated by less than `ms` milliseconds.
If < 0 no burst is fused. Note that with ms = 0, overlapping
bursts are fused.
process (bool): if True (default), reprocess the burst data in
the new object applying corrections and computing FRET.
mute (bool): if True suppress any printed output.
"""
if ms < 0:
return self
mburst = mch_fuse_bursts(self.mburst, ms=ms, clk_p=self.clk_p)
new_d = Data(**self)
for k in ['E', 'S', 'nd', 'na', 'naa', 'nda', 'nar', 'nt', 'lsb', 'bp']:
if k in new_d:
new_d.delete(k)
new_d.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
new_d.add(mburst=mburst, fuse=ms)
if 'bg' in new_d:
new_d._calc_burst_period()
if process:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
new_d.calc_fret(count_ph=True, corrections=True,
dither=self.dithering, mute=mute, pax=self.pax)
pprint(" [DONE Counting D/A and FRET]\n", mute)
return new_d
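    # Illustrative usage sketch (assumes `d` already contains burst-search
    # results in `mburst`):
    #
    #     d_fused = d.fuse_bursts(ms=0)    # fuse only overlapping bursts
    #     d_fused = d.fuse_bursts(ms=1.5)  # also fuse bursts closer than 1.5 ms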
##
# Burst selection and filtering
#
def select_bursts(self, filter_fun, negate=False, computefret=True,
args=None, **kwargs):
"""Return an object with bursts filtered according to `filter_fun`.
This is the main method to select bursts according to different
criteria. The selection rule is defined by the selection function
        `filter_fun`. FRETBursts provides several predefined selection
        functions; see :ref:`burst_selection`. New selection
functions can be defined and passed to this method to implement
arbitrary selection rules.
Arguments:
            filter_fun (function): function used for burst selection
            negate (boolean): If True, negates (i.e. takes the complement of)
                the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
            of the new Data() point to the same arrays as the original
            Data(). Conversely, all the burst data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
"""
Masks, str_sel = self.select_bursts_mask(filter_fun, negate=negate,
return_str=True, args=args,
**kwargs)
d_sel = self.select_bursts_mask_apply(Masks, computefret=computefret,
str_sel=str_sel)
return d_sel
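    # Illustrative usage sketch (assumes burst search and FRET computation
    # have been performed; `select_bursts.E` is one of the predefined
    # selection functions mentioned above):
    #
    #     ds = d.select_bursts(select_bursts.E, E1=0.2, E2=0.8)
    #     ds_out = d.select_bursts(select_bursts.E, E1=0.2, E2=0.8, negate=True)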
def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
args=None, **kwargs):
"""Returns mask arrays to select bursts according to `filter_fun`.
The function `filter_fun` is called to compute the mask arrays for
each channel.
This method is useful when you want to apply a selection from one
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
            filter_fun (function): function used for burst selection
            negate (boolean): If True, negates (i.e. takes the complement of)
                the selection returned by `filter_fun`. Default `False`.
            return_str: if True, also return a string describing the
                selection, which can be added to the measurement name.
                If False, return only the list of bool arrays.
                Default False.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
            A list of boolean arrays (one per channel) that define the burst
            selection. If `return_str` is True, returns a 2-tuple: the list
            of boolean arrays and a string describing the selection.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask_apply`
"""
# Create the list of bool masks for the bursts selection
if args is None:
args = tuple()
M = [filter_fun(self, i, *args, **kwargs) for i in range(self.nch)]
# Make sure the selection function has the right return signature
msg = 'The second argument returned by `%s` must be a string.'
assert np.all([isinstance(m[1], str) for m in M]), msg % filter_fun
# Make sure all boolean masks have the right size
msg = ("The size of boolean masks returned by `%s` needs to match "
"the number of bursts.")
assert np.all([m[0].size == n for m, n in zip(M, self.num_bursts)]), (
msg % filter_fun)
        Masks = [~m[0] if negate else m[0] for m in M]
str_sel = M[0][1]
if return_str:
return Masks, str_sel
else:
return Masks
def select_bursts_mask_apply(self, masks, computefret=True, str_sel=''):
"""Returns a new Data object with bursts selected according to `masks`.
        This method selects bursts using a list of boolean arrays as input.
Since the user needs to create the boolean arrays first, this method
is useful when experimenting with new selection criteria that don't
have a dedicated selection function. Usually, however, it is easier
to select bursts through :meth:`Data.select_bursts` (using a
selection function).
Arguments:
masks (list of arrays): each element in this list is a boolean
array that selects bursts in a channel.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
            of the new Data() point to the same arrays as the original
            Data(). Conversely, all the burst data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
See also:
            :meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask`
"""
# Attributes of ds point to the same objects of self
ds = Data(**self)
        ## Copy the per-burst fields that must be filtered
used_fields = [field for field in Data.burst_fields if field in self]
for name in used_fields:
# Recreate the current attribute as a new list to avoid modifying
# the old list that is also in the original object.
# The list is initialized with empty arrays because this is the
# valid value when a ch has no bursts.
empty = bslib.Bursts.empty() if name == 'mburst' else np.array([])
ds.add(**{name: [empty] * self.nch})
# Assign the new data
for ich, mask in enumerate(masks):
if self[name][ich].size == 0:
continue # -> no bursts in ch
# Note that boolean masking implies numpy array copy
# On the contrary slicing only makes a new view of the array
ds[name][ich] = self[name][ich][mask]
# Recompute E and S
if computefret:
ds.calc_fret(count_ph=False, pax=self.pax)
# Add the annotation about the filter function
ds.s = list(self.s + [str_sel]) # using append would modify also self
return ds
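    # Illustrative sketch of the two-step selection (compute the masks, then
    # apply them), equivalent to a single `select_bursts` call:
    #
    #     masks, str_sel = d.select_bursts_mask(select_bursts.E, E1=0.2, E2=0.8,
    #                                           return_str=True)
    #     ds = d.select_bursts_mask_apply(masks, str_sel=str_sel)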
##
# Burst corrections
#
def background_correction(self, relax_nt=False, mute=False):
"""Apply background correction to burst sizes (nd, na,...)
"""
if self.bg_corrected:
return -1
pprint(" - Applying background correction.\n", mute)
self.add(bg_corrected=True)
for ich, bursts in enumerate(self.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
period = self.bp[ich]
nd, na, bg_d, bg_a, width = self.expand(ich, width=True)
nd -= bg_d
na -= bg_a
if 'nar' in self:
# Apply background correction to PAX field nar
self.nar[ich][:] = na
if relax_nt:
# This does not guarantee that nt = nd + na
self.nt[ich] -= self.bg_from(Ph_sel('all'))[ich][period] * width
else:
self.nt[ich] = nd + na
if self.alternated:
bg_aa = self.bg_from(Ph_sel(Aex='Aem'))
self.naa[ich] -= bg_aa[ich][period] * width
if 'nda' in self:
bg_da = self.bg_from(Ph_sel(Aex='Dem'))
self.nda[ich] -= bg_da[ich][period] * width
self.nt[ich] += self.naa[ich]
if 'PAX' in self.meas_type:
self.nt[ich] += self.nda[ich]
def leakage_correction(self, mute=False):
"""Apply leakage correction to burst sizes (nd, na,...)
"""
if self.leakage_corrected:
return -1
elif self.leakage != 0:
pprint(" - Applying leakage correction.\n", mute)
Lk = self.get_leakage_array()
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
self.na[i] -= self.nd[i] * Lk[i]
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(leakage_corrected=True)
def direct_excitation_correction(self, mute=False):
"""Apply direct excitation correction to bursts (ALEX-only).
The applied correction is: na -= naa*dir_ex
"""
if self.dir_ex_corrected:
return -1
elif self.dir_ex != 0:
pprint(" - Applying direct excitation correction.\n", mute)
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
naa = self.naa[i]
if 'PAX' in self.meas_type:
naa = naa - self.nar[i] # do not modify inplace
self.na[i] -= naa * self.dir_ex
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(dir_ex_corrected=True)
def dither(self, lsb=2, mute=False):
"""Add dithering (uniform random noise) to burst counts (nd, na,...).
The dithering amplitude is the range -0.5*lsb .. 0.5*lsb.
"""
if self.dithering:
return -1
pprint(" - Applying burst-size dithering.\n", mute)
self.add(dithering=True)
for nd, na in zip(self.nd, self.na):
nd += lsb * (np.random.rand(nd.size) - 0.5)
na += lsb * (np.random.rand(na.size) - 0.5)
if self.alternated:
for naa in self.naa:
naa += lsb * (np.random.rand(naa.size) - 0.5)
if 'nda' in self:
for nda in self.nda:
nda += lsb * (np.random.rand(nda.size) - 0.5)
self.add(lsb=lsb)
def calc_chi_ch(self, E):
"""Calculate the gamma correction prefactor factor `chi_ch` (array).
Computes `chi_ch`, a channel-dependent prefactor for gamma used
to correct dispersion of E across channels.
Returns:
array of `chi_ch` correction factors (one per spot).
To apply the correction assign the returned array to `Data.chi_ch`.
Upon assignment E values for all bursts will be corrected.
"""
chi_ch = (1 / E.mean() - 1) / (1 / E - 1)
return chi_ch
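    # Illustrative usage sketch (assumes a per-channel E estimate such as
    # `d.E_fit`, obtained from one of the fit_E_* methods below):
    #
    #     d.chi_ch = d.calc_chi_ch(d.E_fit)  # assignment recomputes E with chi_ch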
def corrections(self, mute=False):
"""Apply corrections on burst-counts: nd, na, nda, naa.
The corrections are: background, leakage (or bleed-through) and
direct excitation (dir_ex).
"""
self.background_correction(mute=mute)
self.leakage_correction(mute=mute)
if self.alternated:
self.direct_excitation_correction(mute=mute)
def _update_corrections(self):
"""Recompute corrections whose flag is True.
Checks the flags .bg_corrected, .leakage_corrected, .dir_ex_corrected,
.dithering and recomputes the correction if the corresponding flag
is True (i.e. if the correction was already applied).
Note that this method is not used for gamma and beta corrections
because these do not affect the `nd`, `na` and `naa` quantities but
are only applied when computing E, S and corrected size.
        Unlike :meth:`corrections`, this allows recomputing corrections
        that have already been applied.
"""
if 'mburst' not in self:
return # no burst search performed yet
old_bg_corrected = self.bg_corrected
old_leakage_corrected = self.leakage_corrected
old_dir_ex_corrected = self.dir_ex_corrected
old_dithering = self.dithering
self.calc_ph_num() # recompute uncorrected na, nd, nda, naa
if old_bg_corrected:
self.background_correction()
if old_leakage_corrected:
self.leakage_correction()
if old_dir_ex_corrected:
self.direct_excitation_correction()
if old_dithering:
self.dither(self.lsb)
# Recompute E and S with no corrections (because already applied)
self.calc_fret(count_ph=False, corrections=False, pax=self.pax)
@property
def leakage(self):
"""Spectral leakage (bleed-through) of D emission in the A channel.
"""
return self._leakage
@leakage.setter
def leakage(self, leakage):
self._update_leakage(leakage)
def _update_leakage(self, leakage):
"""Apply/update leakage (or bleed-through) correction.
"""
assert (np.size(leakage) == 1) or (np.size(leakage) == self.nch)
self.add(_leakage=np.asfarray(leakage), leakage_corrected=True)
self._update_corrections()
@property
def dir_ex(self):
"""Direct excitation correction factor."""
return self._dir_ex
@dir_ex.setter
def dir_ex(self, value):
self._update_dir_ex(value)
def _update_dir_ex(self, dir_ex):
"""Apply/update direct excitation correction with value `dir_ex`.
"""
assert np.size(dir_ex) == 1
self.add(_dir_ex=float(dir_ex), dir_ex_corrected=True)
self._update_corrections()
@property
def beta(self):
"""Beta factor used to correct S (compensates Dex and Aex unbalance).
"""
return self._beta
@beta.setter
def beta(self, value):
self._update_beta(value)
def _update_beta(self, beta):
"""Change the `beta` value and recompute E and S."""
assert np.size(beta) == 1
self.add(_beta=float(beta))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def chi_ch(self):
"""Per-channel relative gamma factor."""
return self._chi_ch
@chi_ch.setter
def chi_ch(self, value):
self._update_chi_ch(value)
def _update_chi_ch(self, chi_ch):
"""Change the `chi_ch` value and recompute E and S."""
msg = 'chi_ch is a per-channel correction and must have size == nch.'
assert np.size(chi_ch) == self.nch, ValueError(msg)
self.add(_chi_ch=np.asfarray(chi_ch))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def gamma(self):
"""Gamma correction factor (compensates DexDem and DexAem unbalance).
"""
return self._gamma
@gamma.setter
def gamma(self, value):
self._update_gamma(value)
def _update_gamma(self, gamma):
"""Change the `gamma` value and recompute E and S."""
assert (np.size(gamma) == 1) or (np.size(gamma) == self.nch)
self.add(_gamma=np.asfarray(gamma))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
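    # Illustrative sketch of setting the correction coefficients (the values
    # are placeholders, normally obtained from calibration measurements):
    #
    #     d.leakage = 0.1   # leakage of D emission into the A channel
    #     d.dir_ex = 0.05   # direct excitation coefficient (ALEX only)
    #     d.gamma = 0.95    # gamma factor
    #     d.beta = 0.8      # beta factor (used for S, ALEX only)
    #
    # Each assignment triggers the corresponding _update_* method above,
    # which updates the burst data and recomputes E (and S).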
def get_gamma_array(self):
"""Get the array of gamma factors, one per ch.
It always returns an array of gamma factors regardless of
whether `self.gamma` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
gamma = self.gamma
G = np.repeat(gamma, self.nch) if np.size(gamma) == 1 else gamma
G *= self.chi_ch
return G
def get_leakage_array(self):
"""Get the array of leakage coefficients, one per ch.
It always returns an array of leakage coefficients regardless of
whether `self.leakage` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
leakage = self.leakage
Lk = np.r_[[leakage] * self.nch] if np.size(leakage) == 1 else leakage
Lk *= self.chi_ch
return Lk
##
# Methods to compute burst quantities: FRET, S, SBR, max_rate, etc ...
#
def calc_sbr(self, ph_sel=Ph_sel('all'), gamma=1.):
"""Return Signal-to-Background Ratio (SBR) for each burst.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection
for which to compute the sbr. Changes the photons used for
burst size and the corresponding background rate. Valid values
here are Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem').
See :mod:`fretbursts.ph_sel` for details.
gamma (float): gamma value used to compute corrected burst size
in the case `ph_sel` is Ph_sel('all'). Ignored otherwise.
Returns:
A list of arrays (one per channel) with one value per burst.
The list is also saved in `sbr` attribute.
"""
ph_sel = self._fix_ph_sel(ph_sel)
sbr = []
for ich, mb in enumerate(self.mburst):
if mb.num_bursts == 0:
sbr.append(np.array([]))
continue # if no bursts skip this ch
nd, na, bg_d, bg_a = self.expand(ich)
nt = self.burst_sizes_ich(ich=ich, gamma=gamma)
signal = {Ph_sel('all'): nt,
Ph_sel(Dex='Dem'): nd, Ph_sel(Dex='Aem'): na}
background = {Ph_sel('all'): bg_d + bg_a,
Ph_sel(Dex='Dem'): bg_d, Ph_sel(Dex='Aem'): bg_a}
sbr.append(signal[ph_sel] / background[ph_sel])
self.add(sbr=sbr)
return sbr
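    # Illustrative usage sketch (run after burst search):
    #
    #     sbr_all = d.calc_sbr()                         # SBR from all photons
    #     sbr_dd = d.calc_sbr(ph_sel=Ph_sel(Dex='Dem'))  # SBR from DexDem photons
    #     sbr_all[0]  # one SBR value per burst in channel 0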
def calc_burst_ph_func(self, func, func_kw, ph_sel=Ph_sel('all'),
compact=False, ich=0):
"""Evaluate a scalar function from photons in each burst.
        This method allows calling an arbitrary function on the photon
        timestamps of each burst. For example, if `func` is `np.mean` it
        computes the mean time in each burst.
Arguments:
func (callable): function that takes as first argument an array of
timestamps for one burst.
            func_kw (dict): additional keyword arguments to be passed to `func`.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
Returns:
            A list of arrays (one element per channel). Each array's size is
            equal to the number of bursts in the corresponding channel.
"""
if compact:
self._assert_compact(ph_sel)
kwargs = dict(func=func, func_kw=func_kw, compact=compact)
if self.alternated:
kwargs.update(alex_period=self.alex_period)
if compact:
kwargs.update(excitation_width=self._excitation_width(ph_sel))
results_mch = [burst_ph_stats(ph, bursts, mask=mask, **kwargs)
for ph, mask, bursts in
zip(self.iter_ph_times(),
self.iter_ph_masks(ph_sel=ph_sel),
self.mburst)]
return results_mch
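    # Illustrative usage sketch: the mean timestamp (in clock units) of the
    # photons in each burst, using np.mean as the per-burst function:
    #
    #     mean_times = d.calc_burst_ph_func(func=np.mean, func_kw={})
    #     mean_times[0]  # one value per burst in channel 0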
def calc_max_rate(self, m, ph_sel=Ph_sel('all'), compact=False,
c=phrates.default_c):
"""Compute the max m-photon rate reached in each burst.
Arguments:
m (int): number of timestamps to use to compute the rate.
As for burst search, typical values are 5-20.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
c (float): this parameter is used in the definition of the
                rate estimator which is `(m - 1 - c) / (t[last] - t[first])`.
For more details see :func:`.phtools.phrates.mtuple_rates`.
"""
ph_sel = self._fix_ph_sel(ph_sel)
Max_Rate = self.calc_burst_ph_func(func=phrates.mtuple_rates_max,
func_kw=dict(m=m, c=c),
ph_sel=ph_sel, compact=compact)
Max_Rate = [mr / self.clk_p - bg[bp] for bp, bg, mr in
zip(self.bp, self.bg_from(ph_sel), Max_Rate)]
params = dict(m=m, ph_sel=ph_sel, compact=compact)
self.add(max_rate=Max_Rate, max_rate_params=params)
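    # Illustrative usage sketch (typically called with the same `m` used
    # for burst search):
    #
    #     d.calc_max_rate(m=10)
    #     d.max_rate[0]  # background-corrected peak rate per burst, channel 0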
def calc_fret(self, count_ph=False, corrections=True, dither=False,
mute=False, pure_python=False, pax=False):
"""Compute FRET (and stoichiometry if ALEX) for each burst.
        This is a high-level function that can be run after burst search.
By default, it will count Donor and Acceptor photons, perform
corrections (background, leakage), and compute gamma-corrected
FRET efficiencies (and stoichiometry if ALEX).
Arguments:
            count_ph (bool): if True, calls :meth:`calc_ph_num` to
                count Donor and Acceptor photons in each burst
corrections (bool): if True (default), applies background and
bleed-through correction to burst data
dither (bool): whether to apply dithering to burst size.
Default False.
mute (bool): whether to mute all the printed output. Default False.
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Returns:
None, all the results are saved in the object.
"""
if count_ph:
self.calc_ph_num(pure_python=pure_python, alex_all=True)
if dither:
self.dither(mute=mute)
if corrections:
self.corrections(mute=mute)
self._calculate_fret_eff(pax=pax)
if self.alternated:
self._calculate_stoich(pax=pax)
#self._calc_alex_hist()
for attr in ('ES_binwidth', 'ES_hist', 'E_fitter', 'S_fitter'):
# E_fitter and S_fitter are only attributes
# so we cannot use the membership syntax (attr in self)
if hasattr(self, attr):
self.delete(attr, warning=False)
def _aex_fraction(self):
"""Proportion of Aex period versus Dex + Aex."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return ((A_ON[1] - A_ON[0]) /
(A_ON[1] - A_ON[0] + D_ON[1] - D_ON[0]))
def _aex_dex_ratio(self):
"""Ratio of Aex and Dex period durations."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return (A_ON[1] - A_ON[0]) / (D_ON[1] - D_ON[0])
def _calculate_fret_eff(self, pax=False):
"""Compute FRET efficiency (`E`) for each burst."""
G = self.get_gamma_array()
if not pax:
E = [na / (g * nd + na) for nd, na, g in zip(self.nd, self.na, G)]
else:
alpha = 1 - self._aex_fraction()
E = [(na / alpha) / (g * (nd + nda) + (na / alpha))
for nd, na, nda, g in zip(self.nd, self.na, self.nda, G)]
self.add(E=E, pax=pax)
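    # Worked example of the non-PAX formula above (illustrative numbers):
    # with nd=60, na=40 and gamma g=1,
    #     E = na / (g * nd + na) = 40 / (60 + 40) = 0.4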
def _calculate_stoich(self, pax=False):
"""Compute "stoichiometry" (the `S` parameter) for each burst."""
G = self.get_gamma_array()
naa = self.naa
if 'PAX' in self.meas_type:
naa = [self._get_naa_ich(i) for i in range(self.nch)]
if not pax:
S = [(g * d + a) / (g * d + a + aa / self.beta)
for d, a, aa, g in zip(self.nd, self.na, naa, G)]
else:
# This is a PAX-enhanced formula which uses information
# from both alternation periods in order to compute S
alpha = 1 - self._aex_fraction()
S = [(g * (d + da) + a / alpha) /
(g * (d + da) + a / alpha + aa / (alpha * self.beta))
for d, a, da, aa, g in
zip(self.nd, self.na, self.nda, naa, G)]
self.add(S=S)
def _calc_alex_hist(self, binwidth=0.05):
"""Compute the ALEX histogram with given bin width `bin_step`"""
if 'ES_binwidth' in self and self.ES_binwidth == binwidth:
return
ES_hist_tot = [ES_histog(E, S, binwidth) for E, S in
zip(self.E, self.S)]
E_bins, S_bins = ES_hist_tot[0][1], ES_hist_tot[0][2]
ES_hist = [h[0] for h in ES_hist_tot]
E_ax = E_bins[:-1] + 0.5 * binwidth
S_ax = S_bins[:-1] + 0.5 * binwidth
self.add(ES_hist=ES_hist, E_bins=E_bins, S_bins=S_bins,
E_ax=E_ax, S_ax=S_ax, ES_binwidth=binwidth)
##
# Methods for measurement info
#
def status(self, add="", noname=False):
"""Return a string with burst search, corrections and selection info.
"""
name = "" if noname else self.name
s = name
if 'L' in self: # burst search has been done
if 'rate_th' in self:
s += " BS_%s L%d m%d MR%d" % (self.ph_sel, self.L, self.m,
np.mean(self.rate_th) * 1e-3)
else:
P_str = '' if self.P is None else ' P%s' % self.P
s += " BS_%s L%d m%d F%.1f%s" % \
(self.ph_sel, self.L, self.m, np.mean(self.F), P_str)
s += " G%.3f" % np.mean(self.gamma)
if 'bg_fun' in self: s += " BG%s" % self.bg_fun.__name__[:-4]
if 'bg_time_s' in self: s += "-%ds" % self.bg_time_s
if 'fuse' in self: s += " Fuse%.1fms" % self.fuse
if 'bg_corrected' in self and self.bg_corrected:
s += " bg"
if 'leakage_corrected' in self and self.leakage_corrected:
s += " Lk%.3f" % np.mean(self.leakage*100)
if 'dir_ex_corrected' in self and self.dir_ex_corrected:
s += " dir%.1f" % (self.dir_ex*100)
if 'dithering' in self and self.dithering:
s += " Dith%d" % self.lsb
if 's' in self: s += ' '.join(self.s)
return s + add
@property
def name(self):
"""Measurement name: last subfolder + file name with no extension."""
if not hasattr(self, '_name'):
basename = str(os.path.splitext(os.path.basename(self.fname))[0])
name = basename
last_dir = str(os.path.basename(os.path.dirname(self.fname)))
if len(last_dir) > 0:
name = '_'.join([last_dir, basename])
self.add(_name=name)
return self._name
@name.setter
def name(self, value):
self.add(_name=value)
def Name(self, add=""):
"""Return short filename + status information."""
n = self.status(add=add)
return n
def __repr__(self):
return self.status()
def stats(self, string=False):
"""Print common statistics (BG rates, #bursts, mean size, ...)"""
s = print_burst_stats(self)
if string:
return s
else:
print(s)
##
# FRET fitting methods
#
def fit_E_m(self, E1=-1, E2=2, weights='size', gamma=1.):
"""Fit E in each channel with the mean using bursts in [E1,E2] range.
Note:
            These two fits are equivalent (but the first is much faster)::
fit_E_m(weights='size')
fit_E_minimize(kind='E_size', weights='sqrt')
However `fit_E_minimize()` does not provide a model curve.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, 2)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
# Compute weighted mean
fit_res[ich, 0] = np.dot(w, E[mask])/w.sum()
# Compute weighted variance
fit_res[ich, 1] = np.sqrt(
np.dot(w, (E[mask] - fit_res[ich, 0])**2)/w.sum())
fit_model_F[ich] = mask.sum()/mask.size
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
self.add(fit_E_res=fit_res, fit_E_name='Moments',
E_fit=fit_res[:, 0], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_poiss(self, E1=-1, E2=2, method=1, **kwargs):
"""ML fit for E modeling size ~ Poisson, using bursts in [E1,E2] range.
"""
assert method in [1, 2, 3]
fit_fun = {1: fret_fit.fit_E_poisson_na, 2: fret_fit.fit_E_poisson_nt,
3: fret_fit.fit_E_poisson_nd}
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = zeros(self.nch)
for ich, mask in zip(range(self.nch), Mask):
nd, na, bg_d, bg_a = self.expand(ich)
bg_x = bg_d if method == 3 else bg_a
fit_res[ich] = fit_fun[method](nd[mask], na[mask],
bg_x[mask], **kwargs)
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Poisson',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_binom(self, E1=-1, E2=2, **kwargs):
"""ML fit for E modeling na ~ Binomial, using bursts in [E1,E2] range.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fret_fit.fit_E_binom(_d[mask], _a[mask], **kwargs)
for _d, _a, mask in zip(self.nd, self.na, Mask)])
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Binomial',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_minimize(self, kind='slope', E1=-1, E2=2, **kwargs):
"""Fit E using method `kind` ('slope' or 'E_size') and bursts in [E1,E2]
If `kind` is 'slope' the fit function is fret_fit.fit_E_slope()
If `kind` is 'E_size' the fit function is fret_fit.fit_E_E_size()
Additional arguments in `kwargs` are passed to the fit function.
"""
assert kind in ['slope', 'E_size']
# Build a dictionary fun_d so we'll call the function fun_d[kind]
fun_d = dict(slope=fret_fit.fit_E_slope,
E_size=fret_fit.fit_E_E_size)
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fun_d[kind](nd[mask], na[mask], **kwargs)
for nd, na, mask in
zip(self.nd, self.na, Mask)])
fit_name = dict(slope='Linear slope fit', E_size='E_size fit')
self.add(fit_E_res=fit_res, fit_E_name=fit_name[kind],
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_two_gauss_EM(self, fit_func=two_gaussian_fit_EM,
weights='size', gamma=1., **kwargs):
"""Fit the E population to a Gaussian mixture model using EM method.
Additional arguments in `kwargs` are passed to the fit_func().
"""
fit_res = zeros((self.nch, 5))
for ich, (nd, na, E) in enumerate(zip(self.nd, self.na, self.E)):
w = fret_fit.get_weights(nd, na, weights=weights, gamma=gamma)
fit_res[ich, :] = fit_func(E, weights=w, **kwargs)
self.add(fit_E_res=fit_res, fit_E_name=fit_func.__name__,
E_fit=fit_res[:, 2], fit_E_curve=True,
fit_E_model=two_gauss_mix_pdf,
fit_E_model_F=np.repeat(1, self.nch))
return self.E_fit
def fit_E_generic(self, E1=-1, E2=2, fit_fun=two_gaussian_fit_hist,
weights=None, gamma=1., **fit_kwargs):
"""Fit E in each channel with `fit_fun` using burst in [E1,E2] range.
All the fitting functions are defined in
:mod:`fretbursts.fit.gaussian_fitting`.
Parameters:
            weights (string or None): specifies the type of weights.
                If not None, `weights` will be passed to
`fret_fit.get_weights()`. `weights` can be not-None only when
using fit functions that accept weights (the ones ending in
`_hist` or `_EM`)
gamma (float): passed to `fret_fit.get_weights()` to compute
weights
All the additional arguments are passed to `fit_fun`. For example `p0`
or `mu_fix` can be passed (see `fit.gaussian_fitting` for details).
Note:
Use this method for CDF/PDF or hist fitting.
For EM fitting use :meth:`fit_E_two_gauss_EM()`.
"""
if fit_fun.__name__.startswith("gaussian_fit"):
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
if 'mu0' not in fit_kwargs: fit_kwargs.update(mu0=0.5)
if 'sigma0' not in fit_kwargs: fit_kwargs.update(sigma0=0.3)
iE, nparam = 0, 2
elif fit_fun.__name__ == "two_gaussian_fit_hist_min_ab":
fit_model = two_gauss_mix_ab
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.5, 0.6, 0.1, 0.5])
iE, nparam = 3, 6
elif fit_fun.__name__.startswith("two_gaussian_fit"):
fit_model = two_gauss_mix_pdf
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.6, 0.1, 0.5])
iE, nparam = 2, 5
else:
raise ValueError("Fitting function not recognized.")
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, nparam)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
if '_hist' in fit_fun.__name__ or '_EM' in fit_fun.__name__:
if weights is None:
w = None
else:
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
fit_res[ich, :] = fit_fun(E[mask], weights=w, **fit_kwargs)
else:
# Non-histogram fits (PDF/CDF) do not support weights
fit_res[ich, :] = fit_fun(E[mask], **fit_kwargs)
fit_model_F[ich] = mask.sum()/mask.size
# Save enough info to generate a fit plot (see hist_fret in burst_plot)
self.add(fit_E_res=fit_res, fit_E_name=fit_fun.__name__,
E_fit=fit_res[:, iE], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F, fit_E_weights=weights,
fit_E_gamma=gamma, fit_E_kwargs=fit_kwargs)
return self.E_fit
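    # Illustrative usage sketch (a two-Gaussian histogram fit on the full E
    # range, weighting each burst by its size):
    #
    #     E_fit = d.fit_E_generic(E1=-0.1, E2=1.1,
    #                             fit_fun=two_gaussian_fit_hist,
    #                             weights='size')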
def fit_from(self, D):
"""Copy fit results from another Data() variable.
        Now that the fit methods accept E1, E2 parameters this is probably useless.
"""
# NOTE Are 'fit_guess' and 'fit_fix' still used ?
fit_data = ['fit_E_res', 'fit_E_name', 'E_fit', 'fit_E_curve',
                    'fit_E_E1', 'fit_E_E2', 'fit_E_model',
'fit_E_model_F', 'fit_guess', 'fit_fix']
for name in fit_data:
if name in D:
self[name] = D[name]
setattr(self, name, self[name])
# Deal with the normalization to the number of bursts
self.add(fit_model_F=r_[[old_E.size/new_E.size \
for old_E, new_E in zip(D.E, self.E)]])
def fit_E_calc_variance(self, weights='sqrt', dist='DeltaE',
E_fit=None, E1=-1, E2=2):
"""Compute several versions of WEIGHTED std.dev. of the E estimator.
`weights` are multiplied *BEFORE* squaring the distance/error
`dist` can be 'DeltaE' or 'SlopeEuclid'
Note:
This method is still experimental
"""
assert dist in ['DeltaE', 'SlopeEuclid']
if E_fit is None:
E_fit = self.E_fit
E1 = self.fit_E_E1 if 'fit_E_E1' in self else -1
E2 = self.fit_E_E2 if 'fit_E_E2' in self else 2
else:
# If E_fit is not None the specified E1,E2 range is used
if E1 < 0 and E2 > 1:
pprint('WARN: E1 < 0 and E2 > 1 (wide range of E eff.)\n')
if size(E_fit) == 1 and self.nch > 0:
E_fit = np.repeat(E_fit, self.nch)
assert size(E_fit) == self.nch
E_sel = [Ei[(Ei > E1)*(Ei < E2)] for Ei in self.E]
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
E_var, E_var_bu, E_var_ph = \
zeros(self.nch), zeros(self.nch), zeros(self.nch)
for i, (Ech, nt, mask) in enumerate(zip(E_sel, self.nt, Mask)):
nt_s = nt[mask]
nd_s, na_s = self.nd[i][mask], self.na[i][mask]
w = fret_fit.get_weights(nd_s, na_s, weights=weights)
info_ph = nt_s.sum()
info_bu = nt_s.size
if dist == 'DeltaE':
distances = (Ech - E_fit[i])
elif dist == 'SlopeEuclid':
distances = fret_fit.get_dist_euclid(nd_s, na_s, E_fit[i])
residuals = distances * w
var = np.mean(residuals**2)
var_bu = np.mean(residuals**2)/info_bu
var_ph = np.mean(residuals**2)/info_ph
#lvar = np.mean(log(residuals**2))
#lvar_bu = np.mean(log(residuals**2)) - log(info_bu)
#lvar_ph = np.mean(log(residuals**2)) - log(info_ph)
E_var[i], E_var_bu[i], E_var_ph[i] = var, var_bu, var_ph
            assert (~np.isnan(E_var[i])).all() # check there is NO NaN
self.add(E_var=E_var, E_var_bu=E_var_bu, E_var_ph=E_var_ph)
return E_var
|
gpl-2.0
| -7,185,521,470,680,014,000
| 41.513032
| 82
| 0.562813
| false
| 3.492336
| false
| false
| false
|
sl2017/campos
|
campos_jobber_final/models/campos_jobber_accom_group.py
|
1
|
1182
|
# -*- coding: utf-8 -*-
# Copyright 2017 Stein & Gabelgaard ApS
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models, _
class CamposJobberAccomGroup(models.Model):
_name = 'campos.jobber.accom.group'
_description = 'Campos Jobber Accom Group' # TODO
name = fields.Char(required=True)
code = fields.Char(required=True)
owner_id = fields.Many2one('campos.event.participant', 'Owner')
accom_participant_ids = fields.One2many('campos.jobber.accomodation', 'accom_group_id', string='Participants')
number_participants = fields.Integer('# participants', compute='_compute_number_participants')
subcamp_id = fields.Many2one('campos.subcamp', 'Sub Camp')
_sql_constraints = [
('code_uniq', 'unique(code)', 'Code already in use. Choose another'),
('name_uniq', 'unique(name)', 'Name already in use. Choose another'),
]
@api.depends('accom_participant_ids')
@api.multi
def _compute_number_participants(self):
for cjag in self:
cjag.number_participants = len(cjag.accom_participant_ids)
|
agpl-3.0
| 1,359,394,841,940,174,300
| 38.433333
| 114
| 0.64467
| false
| 3.507418
| false
| false
| false
|
OpenTouch/python-facette
|
src/facette/v1/groupentry.py
|
1
|
1278
|
# Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.utils import *
import json
GROUP_ENTRY_ORIGIN = "origin"
GROUP_ENTRY_PATTERN = "pattern"
class GroupEntry:
def __init__(self, js=""):
self.entry = {}
self.origin = facette_to_json(GROUP_ENTRY_ORIGIN, js, self.entry)
self.pattern = facette_to_json(GROUP_ENTRY_PATTERN, js, self.entry)
def set(self, origin=None, pattern=None):
        self.origin = facette_set(origin, GROUP_ENTRY_ORIGIN, self.entry)
        self.pattern = facette_set(pattern, GROUP_ENTRY_PATTERN, self.entry)
def __str__(self):
return json.dumps(self.entry)
def __repr__(self):
return str(self)
|
apache-2.0
| -6,053,528,661,753,423,000
| 34.5
| 78
| 0.682316
| false
| 3.6
| false
| false
| false
|
macarthur-lab/xbrowse
|
xbrowse_server/api/views.py
|
1
|
67273
|
import datetime
import csv
import json
import logging
import sys
import traceback
from collections import defaultdict
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from settings import LOGIN_URL
from seqr.utils.gene_utils import get_queried_genes
from xbrowse.analysis_modules.combine_mendelian_families import get_variants_by_family_for_gene
from xbrowse_server.analysis.diagnostic_search import get_gene_diangostic_info
from xbrowse_server.base.model_utils import update_xbrowse_model, get_or_create_xbrowse_model, delete_xbrowse_model, \
create_xbrowse_model
from xbrowse_server.base.models import Project, Family, FamilySearchFlag, VariantNote, ProjectTag, VariantTag, GeneNote, \
AnalysedBy, VariantFunctionalData
from seqr.models import Individual as SeqrIndividual, MatchmakerResult
from xbrowse_server.api.utils import get_project_and_family_for_user, get_project_and_cohort_for_user, \
add_extra_info_to_variants_project, add_notes_to_genes, get_variant_notes, get_variant_tags, get_variant_functional_data
from xbrowse.variant_search.family import get_variants_with_inheritance_mode
from xbrowse_server.api import utils as api_utils
from xbrowse_server.api import forms as api_forms
from xbrowse_server.mall import get_reference, get_datastore, get_mall
from xbrowse_server.search_cache import utils as cache_utils
from xbrowse_server.decorators import log_request
from xbrowse_server.server_utils import JSONResponse
import utils
from xbrowse.variant_search import cohort as cohort_search
from xbrowse import Variant
from xbrowse.analysis_modules.mendelian_variant_search import MendelianVariantSearchSpec
from xbrowse.core import displays as xbrowse_displays
from xbrowse_server import server_utils
from . import basicauth
from xbrowse_server import user_controls
from django.utils import timezone
from xbrowse_server.phenotips.reporting_utilities import phenotype_entry_metric_for_individual
from xbrowse_server.base.models import ANALYSIS_STATUS_CHOICES
from xbrowse_server.matchmaker.utilities import get_all_clinical_data_for_family
from xbrowse_server.matchmaker.utilities import is_a_valid_patient_structure
from xbrowse_server.matchmaker.utilities import generate_slack_notification_for_seqr_match
from xbrowse_server.matchmaker.utilities import gather_all_annotated_genes_in_seqr
from xbrowse_server.matchmaker.utilities import find_projects_with_families_in_matchbox
from xbrowse_server.matchmaker.utilities import find_families_of_this_project_in_matchbox
from xbrowse_server.matchmaker.utilities import extract_hpo_id_list_from_mme_patient_struct
import requests
from django.contrib.admin.views.decorators import staff_member_required
logger = logging.getLogger()
@csrf_exempt
@basicauth.logged_in_or_basicauth()
@log_request('projects_api')
def projects(request):
"""
List the projects that this user has access to
"""
user_projects = user_controls.get_projects_for_user(request.user)
project_ids = [p.project_id for p in user_projects]
response_format = request.GET.get('format', 'json')
if response_format == 'json':
return JSONResponse({'projects': project_ids})
elif response_format == 'tsv':
return HttpResponse('\n'.join(project_ids))
else:
raise Exception("Invalid format")
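# Illustrative response shapes for the two formats above (the project ids
# are placeholders):
#   format=json -> {"projects": ["project_a", "project_b"]}
#   format=tsv  -> plain text, one project id per line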
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_api')
def mendelian_variant_search(request):
# TODO: how about we move project getter into the form, and just test for authX here?
# esp because error should be described in json, not just 404
request_dict = request.GET or request.POST
project, family = get_project_and_family_for_user(request.user, request_dict)
form = api_forms.MendelianVariantSearchForm(request_dict)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
try:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
except Exception as e:
traceback.print_exc()
return JSONResponse({
'is_error': True,
'error': str(e.args[0]) if e.args else str(e)
})
hashable_search_params = search_spec.toJSON()
hashable_search_params['family_id'] = family.family_id
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, hashable_search_params, list_of_variants)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request_dict.get('return_type', 'json')
if return_type == 'json':
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
elif return_type == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids, genes_to_return=search_spec.variant_filter.genes)
writer.writerow(fields)
return response
else:
return HttpResponse("Return type not implemented")
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def mendelian_variant_search_spec(request):
project, family = get_project_and_family_for_user(request.user, request.GET)
search_hash = request.GET.get('search_hash')
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
for variant in variants:
variant.set_extra('family_id', family.family_id)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request.GET.get('return_type')
if return_type == 'json' or not return_type:
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec_dict,
})
elif request.GET.get('return_type') == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids)
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('get_cohort_variants')
def cohort_variant_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortVariantSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = cohort.cohort_id
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), list_of_variants)
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_variant_search_spec_api')
def cohort_variant_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
# TODO: use form
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec.toJSON(),
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search')
def cohort_gene_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
sys.stderr.write("cohort_gene_search %s %s: starting ... \n" % (project.project_id, cohort.cohort_id))
form = api_forms.CohortGeneSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.cohort_id = cohort.cohort_id
sys.stderr.write("cohort_gene_search %s %s: search spec: %s \n" % (project.project_id, cohort.cohort_id, str(search_spec.toJSON())))
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
sys.stderr.write("cohort_gene_search %s %s: get %s genes \n" % (project.project_id, cohort.cohort_id, len(genes)))
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
sys.stderr.write("cohort_gene_search %s %s: done adding extra info \n" % (project.project_id, cohort.cohort_id))
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_spec')
def cohort_gene_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
search_spec, genes = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
if genes is None:
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_variants')
def cohort_gene_search_variants(request):
error = None
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortGeneSearchVariantsForm(request.GET)
if form.is_valid():
gene_id = form.cleaned_data['gene_id']
inheritance_mode = form.cleaned_data['inheritance_mode']
variant_filter = form.cleaned_data['variant_filter']
quality_filter = form.cleaned_data['quality_filter']
else:
error = server_utils.form_error_string(form)
if not error:
indivs_with_inheritance, gene_variation = cohort_search.get_individuals_with_inheritance_in_gene(
get_datastore(project),
get_reference(),
cohort.xcohort(),
inheritance_mode,
gene_id,
variant_filter=variant_filter,
quality_filter=quality_filter
)
relevant_variants = gene_variation.get_relevant_variants_for_indiv_ids(cohort.indiv_id_list())
api_utils.add_extra_info_to_variants_project(get_reference(), project, relevant_variants, add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variants': [v.toJSON() for v in relevant_variants],
'gene_info': get_reference().get_gene(gene_id),
}
return JSONResponse(ret)
else:
ret = {
'is_error': True,
'error': error
}
return JSONResponse(ret)
@login_required
@log_request('gene_info')
def gene_info(request, gene_id):
gene = get_reference().get_gene(gene_id)
gene['expression'] = get_reference().get_tissue_expression_display_values(gene_id)
add_notes_to_genes([gene], request.user)
ret = {
'gene': gene,
'is_error': False,
'found_gene': gene is not None,
}
return JSONResponse(ret)
@login_required
@log_request('family_variant_annotation')
def family_variant_annotation(request):
# TODO: this view not like the others - refactor to forms
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt']:
if request.GET.get(key) is None:
error = "%s is requred", key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_view(request.user):
raise PermissionDenied
if not error:
variant = get_datastore(project).get_single_variant(
family.project.project_id,
family.family_id,
int(request.GET['xpos']),
request.GET['ref'],
request.GET['alt']
)
if not variant:
error = "Variant does not exist"
if not error:
ret = {
'variant': variant.toJSON(),
'is_error': False,
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
@log_request('add_flag')
def add_family_search_flag(request):
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt', 'note', 'flag_type', 'flag_inheritance_mode']:
        if request.GET.get(key, None) is None:
            error = "%s is required" % key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_edit(request.user):
raise PermissionDenied
if not error:
xpos = int(request.GET['xpos'])
ref=request.GET.get('ref')
alt=request.GET['alt']
note=request.GET.get('note')
flag_type=request.GET.get('flag_type')
flag_inheritance_mode=request.GET.get('flag_inheritance_mode')
# todo: more validation - is variant valid?
flag = FamilySearchFlag(user=request.user,
family=family,
xpos=int(request.GET['xpos']),
ref=ref,
alt=alt,
note=note,
flag_type=flag_type,
suggested_inheritance=flag_inheritance_mode,
date_saved=timezone.now(),
)
if not error:
flag.save()
variant = get_datastore(project).get_single_variant(family.project.project_id, family.family_id,
xpos, ref, alt )
api_utils.add_extra_info_to_variants_project(get_reference(), project, [variant], add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variant': variant.toJSON(),
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
# @csrf_exempt
@log_request('add_analysed_by')
def add_family_analysed_by(request, data=None):
if not data:
data = request.GET
family_id = data.get('family_id')
project_id = data.get('project_id')
if not (family_id and project_id):
        return HttpResponseBadRequest('family_id and project_id are required')
try:
family = Family.objects.get(project__project_id=project_id, family_id=family_id)
except ObjectDoesNotExist:
raise Http404('No family matches the given query')
if not family.project.can_edit(request.user):
raise PermissionDenied
analysed_by = create_xbrowse_model(AnalysedBy, user=request.user, family=family, date_saved=timezone.now())
return JSONResponse({
'is_error': False,
'analysed_by': analysed_by.toJSON(),
})
@login_required
@log_request('delete_variant_note')
def delete_variant_note(request, note_id):
ret = {
'is_error': False,
}
notes = VariantNote.objects.filter(id=note_id)
if not notes:
ret['is_error'] = True
ret['error'] = 'note id %s not found' % note_id
else:
note = list(notes)[0]
if not note.project.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse(ret)
@login_required
@log_request('add_or_edit_variant_note')
def add_or_edit_variant_note(request):
"""Add a variant note"""
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantNoteForm(project, request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if 'note_id' in form.cleaned_data and form.cleaned_data['note_id']:
event_type = "edit_variant_note"
notes = VariantNote.objects.filter(
id=form.cleaned_data['note_id'],
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not notes:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
note = notes[0]
update_xbrowse_model(
note,
user=request.user,
note=form.cleaned_data['note_text'],
submit_to_clinvar=form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
else:
event_type = "add_variant_note"
create_xbrowse_model(
VariantNote,
user=request.user,
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
note=form.cleaned_data['note_text'],
submit_to_clinvar = form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
notes = get_variant_notes(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'note': form.cleaned_data['note_text'],
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'notes': notes,
})
@login_required
@log_request('add_or_edit_variant_tags')
def add_or_edit_variant_tags(request):
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantTagsForm(project, request.GET)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
variant_tags_to_delete = {
variant_tag.id: variant_tag for variant_tag in VariantTag.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'])
}
project_tag_events = {}
for project_tag in form.cleaned_data['project_tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantTag,
project_tag=project_tag,
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not created:
# this tag already exists so just keep it (eg. remove it from the set of tags that will be deleted)
del variant_tags_to_delete[tag.id]
continue
# this a new tag, so update who saved it and when
project_tag_events[project_tag] = "add_variant_tag"
update_xbrowse_model(
tag,
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
for variant_tag in variant_tags_to_delete.values():
project_tag_events[variant_tag.project_tag] = "delete_variant_tag"
delete_xbrowse_model(variant_tag)
# Get tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
tags = get_variant_tags(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag.tag,
'title': project_tag.title,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'tags': tags,
})
@login_required
@csrf_exempt
@log_request('add_or_edit_functional_data')
def add_or_edit_functional_data(request):
request_data = json.loads(request.body)
project, family = get_project_and_family_for_user(request.user, request_data)
form = api_forms.VariantFunctionalDataForm(request_data)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
project_tag_events = {}
tag_ids = set()
for tag_data in form.cleaned_data['tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantFunctionalData,
functional_data_tag=tag_data['tag'],
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
tag_ids.add(tag.id)
if created:
project_tag_events[tag_data['tag']] = "add_variant_functional_data"
elif tag.metadata != tag_data.get('metadata'):
project_tag_events[tag_data['tag']] = "edit_variant_functional_data"
else:
continue
# this a new/changed tag, so update who saved it and when
update_xbrowse_model(
tag,
metadata=tag_data.get('metadata'),
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
variant_tags_to_delete = VariantFunctionalData.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
).exclude(id__in=tag_ids)
for variant_tag in variant_tags_to_delete:
project_tag_events[variant_tag.functional_data_tag] = "delete_variant_functional_data"
delete_xbrowse_model(variant_tag)
# get the tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
functional_data = get_variant_functional_data(project=project, family_id=request_data.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'functional_data': functional_data,
})
@login_required
@log_request('delete_gene_note')
def delete_gene_note(request, note_id):
try:
note = GeneNote.objects.get(id=note_id)
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % note_id
})
if not note.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse({
'is_error': False,
})
@login_required
@log_request('add_or_edit_gene_note')
def add_or_edit_gene_note(request):
"""Add a gene note"""
form = api_forms.GeneNoteForm(request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if form.cleaned_data.get('note_id'):
event_type = "edit_gene_note"
try:
note = GeneNote.objects.get(id=form.cleaned_data['note_id'])
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
if not note.can_edit(request.user):
raise PermissionDenied
update_xbrowse_model(
note,
note=form.cleaned_data['note_text'],
user=request.user,
date_saved=timezone.now(),
)
else:
event_type = "add_variant_note"
note = create_xbrowse_model(
GeneNote,
user=request.user,
gene_id=form.cleaned_data['gene_id'],
note=form.cleaned_data['note_text'],
date_saved=timezone.now(),
)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'note': form.cleaned_data['note_text'],
'gene_id':form.cleaned_data['gene_id'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'note': note.toJSON(request.user),
})
def gene_autocomplete(request):
query = request.GET.get('q', '')
gene_items = get_queried_genes(query, 20)
genes = [{
'value': item['gene_id'],
'label': item['gene_symbol'],
} for item in gene_items]
return JSONResponse(genes)
@login_required
@log_request('variant_info')
def variant_info(request):
pass
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_api')
def combine_mendelian_families(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CombineMendelianFamiliesForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_group_id = family_group.slug
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def combine_mendelian_families_spec(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
search_hash = request.GET.get('search_hash')
search_spec, genes = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec_obj = MendelianVariantSearchSpec.fromJSON(search_spec)
if request.GET.get('return_type') != 'csv' or not request.GET.get('group_by_variants'):
if genes is None:
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
if request.GET.get('return_type') != 'csv':
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
else:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="family_group_results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
writer.writerow(["gene", "# families", "family list", "chrom", "start", "end"])
for gene in genes:
family_id_list = [family_id for (project_id, family_id) in gene["family_id_list"]]
writer.writerow(map(str, [gene["gene_name"], len(family_id_list), " ".join(family_id_list), gene["chr"], gene["start"], gene["end"], ""]))
return response
else:
# download results grouped by variant
indiv_id_list = []
for family in family_group.get_families():
indiv_id_list.extend(family.indiv_ids_with_variant_data())
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
headers = ['genes','chr','pos','ref','alt','worst_annotation' ]
headers.extend(project.get_reference_population_slugs())
headers.extend([ 'polyphen','sift','muttaster','fathmm'])
for indiv_id in indiv_id_list:
headers.append(indiv_id)
headers.append(indiv_id+'_gq')
headers.append(indiv_id+'_dp')
writer.writerow(headers)
mall = get_mall(project)
variant_key_to_individual_id_to_variant = defaultdict(dict)
variant_key_to_variant = {}
for family in family_group.get_families():
for variant in get_variants_with_inheritance_mode(
mall,
family.xfamily(),
search_spec_obj.inheritance_mode,
search_spec_obj.variant_filter,
search_spec_obj.quality_filter,
user=request.user):
if len(variant.coding_gene_ids) == 0:
continue
variant_key = (variant.xpos, variant.ref, variant.alt)
variant_key_to_variant[variant_key] = variant
for indiv_id in family.indiv_ids_with_variant_data():
variant_key_to_individual_id_to_variant[variant_key][indiv_id] = variant
for variant_key in sorted(variant_key_to_individual_id_to_variant.keys()):
variant = variant_key_to_variant[variant_key]
individual_id_to_variant = variant_key_to_individual_id_to_variant[variant_key]
genes = [mall.reference.get_gene_symbol(gene_id) for gene_id in variant.coding_gene_ids]
fields = []
fields.append(','.join(genes))
fields.extend([
variant.chr,
str(variant.pos),
variant.ref,
variant.alt,
variant.annotation.get('vep_group', '.'),
])
for ref_population_slug in project.get_reference_population_slugs():
fields.append(variant.annotation['freqs'][ref_population_slug])
for field_key in ['polyphen', 'sift', 'muttaster', 'fathmm']:
fields.append(variant.annotation.get(field_key, ""))
for indiv_id in indiv_id_list:
variant = individual_id_to_variant.get(indiv_id)
genotype = None
if variant is not None:
genotype = variant.get_genotype(indiv_id)
if genotype is None:
fields.extend(['.', '.', '.'])
else:
fields.append("/".join(genotype.alleles) if genotype.alleles else "./.")
#fields[-1] += " %s (%s)" % (indiv_id, genotype.num_alt)
fields.append(str(genotype.gq) if genotype.gq is not None else '.')
fields.append(genotype.extras['dp'] if genotype.extras.get('dp') is not None else '.')
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_variants_api')
def combine_mendelian_families_variants(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
form = api_forms.CombineMendelianFamiliesVariantsForm(request.GET)
if form.is_valid():
variants_grouped = get_variants_by_family_for_gene(
get_mall(project),
[f.xfamily() for f in form.cleaned_data['families']],
form.cleaned_data['inheritance_mode'],
form.cleaned_data['gene_id'],
variant_filter=form.cleaned_data['variant_filter'],
quality_filter=form.cleaned_data['quality_filter'],
user=request.user,
)
variants_by_family = []
for family in form.cleaned_data['families']:
variants = variants_grouped[(family.project.project_id, family.family_id)]
add_extra_info_to_variants_project(get_reference(), family.project, variants, add_family_tags=True, add_populations=True)
variants_by_family.append({
'project_id': family.project.project_id,
'family_id': family.family_id,
'family_name': str(family),
'variants': [v.toJSON() for v in variants],
})
return JSONResponse({
'is_error': False,
'variants_by_family': variants_by_family,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('diagnostic_search')
def diagnostic_search(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.DiagnosticSearchForm(family, request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
gene_list = form.cleaned_data['gene_list']
diagnostic_info_list = []
for gene_id in gene_list.gene_id_list():
diagnostic_info = get_gene_diangostic_info(family, gene_id, search_spec.variant_filter)
add_extra_info_to_variants_project(get_reference(), project, diagnostic_info._variants, add_family_tags=True, add_populations=True)
diagnostic_info_list.append(diagnostic_info)
return JSONResponse({
'is_error': False,
'gene_diagnostic_info_list': [d.toJSON() for d in diagnostic_info_list],
'gene_list_info': gene_list.toJSON(details=True),
'data_summary': family.get_data_summary(),
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
def family_gene_lookup(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
gene_id = request.GET.get('gene_id')
if not get_reference().is_valid_gene_id(gene_id):
return JSONResponse({
'is_error': True,
'error': 'Invalid gene',
})
family_gene_data = get_gene_diangostic_info(family, gene_id)
add_extra_info_to_variants_project(get_reference(), project, family_gene_data._variants, add_family_tags=True,
add_populations=True)
return JSONResponse({
'is_error': False,
'family_gene_data': family_gene_data.toJSON(),
'data_summary': family.get_data_summary(),
'gene': get_reference().get_gene(gene_id),
})
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_individuals_phenotypes(request,project_id):
"""
Export all HPO terms entered for this project individuals. A direct proxy
from PhenoTips API
Args:
project_id
Returns:
A JSON string of HPO terms entered
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
    result = {}
for individual in project.get_individuals():
ui_display_name = individual.indiv_id
ext_id=individual.phenotips_id
result[ui_display_name] = phenotype_entry_metric_for_individual(project_id, ext_id)['raw']
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_family_statuses(request,project_id):
"""
Exports the status of all families in this project
Args:
Project ID
Returns:
All statuses of families
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
    status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
result={}
for family in project.get_families():
fam_details =family.toJSON()
result[fam_details['family_id']] = status_description_map.get(family.analysis_status, 'unknown')
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_variants(request,project_id):
"""
Export all variants associated to this project
Args:
Project id
Returns:
A JSON object of variant information
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
variants=[]
project_tags = ProjectTag.objects.filter(project__project_id=project_id)
for project_tag in project_tags:
variant_tags = VariantTag.objects.filter(project_tag=project_tag)
for variant_tag in variant_tags:
variant = get_datastore(project).get_single_variant(
project.project_id,
variant_tag.family.family_id if variant_tag.family else '',
variant_tag.xpos,
variant_tag.ref,
variant_tag.alt,
)
variant_json = variant.toJSON() if variant is not None else {'xpos': variant_tag.xpos, 'ref': variant_tag.ref, 'alt': variant_tag.alt}
family_status = ''
if variant_tag.family:
family_status = status_description_map.get(variant_tag.family.analysis_status, 'unknown')
variants.append({"variant":variant_json,
"tag":project_tag.tag,
"description":project_tag.title,
"family":variant_tag.family.toJSON(),
"family_status":family_status})
return JSONResponse(variants)
@login_required
@log_request('matchmaker_individual_add')
def get_submission_candidates(request,project_id,family_id,indiv_id):
"""
Gathers submission candidate individuals from this family
Args:
individual_id: an individual ID
project_id: project this individual belongs to
Returns:
Status code
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
id_map,affected_patient = get_all_clinical_data_for_family(project_id,family_id,indiv_id)
return JSONResponse({
"submission_candidate":affected_patient,
"id_map":id_map
})
@csrf_exempt
@login_required
@log_request('matchmaker_individual_add')
def add_individual(request):
"""
Adds given individual to the local database
Args:
submission information of a single patient is expected in the POST data
Returns:
Submission status information
"""
affected_patient = json.loads(request.POST.get("patient_data", "wasn't able to parse patient_data in POST!"))
seqr_id = request.POST.get("localId", "wasn't able to parse Id (as seqr knows it) in POST!")
project_id = request.POST.get("projectId", "wasn't able to parse project Id in POST!")
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=seqr_id, family__project=project.seqr_project)
submission = json.dumps({'patient':affected_patient})
validity_check=is_a_valid_patient_structure(affected_patient)
if not validity_check['status']:
return JSONResponse({
'http_result':{"message":validity_check['reason'] + ", the patient was not submitted to matchmaker"},
'status_code':400,
})
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
result = requests.post(url=settings.MME_ADD_INDIVIDUAL_URL,
headers=headers,
data=submission)
#if successfully submitted to MME, persist info
if result.status_code==200 or result.status_code==409:
individual.mme_submitted_data = {'patient':affected_patient}
individual.mme_submitted_date = datetime.datetime.now()
individual.mme_deleted_date = None
individual.mme_deleted_by = None
individual.save()
#update the contact information store if any updates were made
updated_contact_name = affected_patient['contact']['name']
updated_contact_href = affected_patient['contact']['href']
try:
project = Project.objects.get(project_id=project_id)
update_xbrowse_model(
project,
mme_primary_data_owner=updated_contact_name,
mme_contact_url=updated_contact_href,
)
except ObjectDoesNotExist:
logger.error("ERROR: couldn't update the contact name and href of MME submission: ", updated_contact_name, updated_contact_href)
#seqr_project.save()
if result.status_code==401:
return JSONResponse({
'http_result':{"message":"sorry, authorization failed, I wasn't able to insert that individual"},
'status_code':result.status_code,
})
return JSONResponse({
'http_result':result.json(),
'status_code':result.status_code,
})
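# Illustrative sketch (not part of the original module): the POST payload shape that
# add_individual() above expects -- a JSON-encoded MME patient under "patient_data" plus
# the seqr individual id ("localId") and the project id ("projectId"). The patient below
# is a minimal, made-up example and is not guaranteed to pass is_a_valid_patient_structure.
def _example_add_individual_payload():
    patient = {
        'id': 'hypothetical-matchbox-id',
        'contact': {'name': 'Jane Doe', 'href': 'mailto:jane.doe@example.org'},
        'features': [{'id': 'HP:0001250', 'label': 'Seizures', 'observed': 'yes'}],
        'genomicFeatures': [{'gene': {'id': 'ENSG00000123456'}}],
    }
    return {
        'patient_data': json.dumps(patient),  # json is already imported by this module
        'localId': 'IND-001',                 # the individual id as seqr knows it
        'projectId': 'my_project',            # the seqr project id
    }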
@csrf_exempt
@login_required
@log_request('matchmaker_individual_delete')
def delete_individual(request,project_id, indiv_id):
"""
Deletes a given individual from the local database
Args:
Project ID of project
Individual ID of a single patient to delete
Returns:
Delete confirmation
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
#find the latest ID that was used in submission which might defer from seqr ID
matchbox_id=indiv_id
if individual.mme_submitted_date:
if individual.mme_deleted_date:
return JSONResponse({"status_code":402,"message":"that individual has already been deleted"})
else:
matchbox_id = individual.mme_submitted_data['patient']['id']
logger.info("using matchbox ID: %s" % (matchbox_id))
payload = {"id":matchbox_id}
result = requests.delete(url=settings.MME_DELETE_INDIVIDUAL_URL,
headers=headers,
data=json.dumps(payload))
#if successfully deleted from matchbox/MME, persist that detail
if result.status_code == 200:
deleted_date = datetime.datetime.now()
individual.mme_deleted_date = deleted_date
individual.mme_deleted_by = request.user
individual.save()
return JSONResponse({"status_code":result.status_code,"message":result.text, 'deletion_date':str(deleted_date)})
else:
return JSONResponse({"status_code":404,"message":result.text})
return JSONResponse({"status_code":result.status_code,"message":result.text})
@login_required
@log_request('matchmaker_family_submissions')
def get_family_submissions(request,project_id,family_id):
"""
Gets the last 4 submissions for this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
family = get_object_or_404(Family, project=project, family_id=family_id)
family_submissions=[]
family_members_submitted=[]
for individual in family.individual_set.filter(seqr_individual__mme_submitted_date__isnull=False):
family_submissions.append({'submitted_data': individual.seqr_individual.mme_submitted_data,
'hpo_details': extract_hpo_id_list_from_mme_patient_struct(individual.seqr_individual.mme_submitted_data),
'seqr_id': individual.indiv_id,
'family_id': family_id,
'project_id': project_id,
'insertion_date': individual.seqr_individual.mme_submitted_date.strftime("%b %d %Y %H:%M:%S"),
'deletion': individual.seqr_individual.mme_deleted_date,
})
family_members_submitted.append(individual.indiv_id)
#TODO: figure out when more than 1 indi for a family. For now returning a list. Eventually
#this must be the latest submission for every indiv in a family
return JSONResponse({
"family_submissions":family_submissions,
"family_members_submitted":family_members_submitted
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_internally_and_externally(request,project_id,indiv_id):
"""
Looks for matches for the given individual. Expects a single patient (MME spec) in the POST
data field under key "patient_data"
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
#first look in the local MME database
internal_result = requests.post(url=settings.MME_LOCAL_MATCH_URL,
headers=headers,
data=patient_data
)
ids={}
for internal_res in internal_result.json().get('results',[]):
ids[internal_res['patient']['id']] = internal_res
extract_hpo_id_list_from_mme_patient_struct(internal_res,hpo_map)
results['local_results']={"result":internal_result.json(),
"status_code":internal_result.status_code
}
#then externally (unless turned off)
if settings.SEARCH_IN_EXTERNAL_MME_NODES:
extnl_result = requests.post(url=settings.MME_EXTERNAL_MATCH_URL,
headers=headers,
data=patient_data
)
results['external_results']={"result":extnl_result.json(),
"status_code":str(extnl_result.status_code)
}
for ext_res in extnl_result.json().get('results',[]):
extract_hpo_id_list_from_mme_patient_struct(ext_res,hpo_map)
ids[ext_res['patient']['id']] = ext_res
saved_results = {
result.result_data['patient']['id']: result for result in MatchmakerResult.objects.filter(individual=individual)
}
result_analysis_state={}
for id in ids.keys():
persisted_result_det = saved_results.get(id)
if not persisted_result_det:
persisted_result_det = MatchmakerResult.objects.create(
individual=individual,
result_data=ids[id],
last_modified_by=request.user,
)
result_analysis_state[id] = {
"id_of_indiv_searched_with":indiv_id,
"content_of_indiv_searched_with":json.loads(patient_data),
"content_of_result":ids[id],
"result_id":id,
"we_contacted_host":persisted_result_det.we_contacted,
"host_contacted_us":persisted_result_det.host_contacted,
"seen_on":str(persisted_result_det.created_date),
"deemed_irrelevant":persisted_result_det.deemed_irrelevant,
"comments":persisted_result_det.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":persisted_result_det.flag_for_analysis,
"username_of_last_event_initiator":persisted_result_det.last_modified_by.username,
}
#post to slack
if settings.SLACK_TOKEN is not None:
generate_slack_notification_for_seqr_match(results,project_id,indiv_id)
return JSONResponse({
"match_results":results,
"result_analysis_state":result_analysis_state,
"hpo_map":hpo_map
})
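# Illustrative sketch (not part of the original module): the response shape returned by
# match_internally_and_externally() above. The keys mirror the JSONResponse built in the
# view; the concrete values here are placeholders for illustration only.
_EXAMPLE_MATCH_RESPONSE_SHAPE = {
    'match_results': {
        'local_results': {'result': {'results': []}, 'status_code': 200},
        'external_results': {'result': {'results': []}, 'status_code': '200'},  # external status is stringified by the view
    },
    'result_analysis_state': {
        # keyed by matched patient id; one bookkeeping dict per result, persisted via MatchmakerResult
    },
    'hpo_map': {
        # one entry per HPO id seen in the query patient and in the returned matches
    },
}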
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_in_open_mme_sources(request,project_id,indiv_id):
"""
    Match in other MME data sources that are open and not token protected (ex: Monarch)
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
#these open sites require no token
headers={
'X-Auth-Token': '',
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
open_sites = {'Monarch Initiative':'https://mme.monarchinitiative.org/match'} #todo: put into settings
for open_site_name, open_site_url in open_sites.iteritems():
results_back = requests.post(url=open_site_url,
headers=headers,
data=patient_data)
ids={}
for res in results_back.json().get('results',[]):
ids[res['patient']['id']] = res
extract_hpo_id_list_from_mme_patient_struct(res,hpo_map)
results[open_site_name]={"result":results_back.json(),
"status_code":results_back.status_code
}
return JSONResponse({
"match_results":results,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('get_project_individuals')
def get_project_individuals(request,project_id):
"""
Get a list of individuals with their family IDs of this project
Args:
project_id
Returns:
map of individuals and their family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@login_required
@csrf_exempt
@log_request('get_family_individuals')
def get_family_individuals(request,project_id,family_id):
"""
    Get a list of individuals belonging to this family ID
Args:
project_id
family_id
Returns:
map of individuals in this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
if indiv.to_dict()['family_id'] == family_id:
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_id_details')
def get_matchbox_id_details(request,matchbox_id):
"""
Gets information of this matchbox_id
"""
match_individuals = SeqrIndividual.objects.filter(mme_submitted_data__patient__id=matchbox_id)
records = []
for individual in match_individuals:
record = {
'seqr_id':individual.individual_id,
'family_id':individual.family.family_id,
'project_id':individual.family.project.deprecated_project_id,
'insertion_date':str(individual.mme_submitted_date)}
        genomicFeatures = []
        for g_feature in individual.mme_submitted_data['patient']['genomicFeatures']:
            genomicFeatures.append({'gene_id': g_feature['gene']['id'],
                                    'variant_start': g_feature['variant']['start'],
                                    'variant_end': g_feature['variant']['end']})
        record['submitted_genomic_features'] = genomicFeatures
features = []
for feature in individual.mme_submitted_data['patient']['features']:
id = feature['id']
label = ''
if feature.has_key('label'):
label = feature['label']
            features.append({'id': id,
                             'label': label})
record['submitted_features'] = features
records.append(record)
return JSONResponse({
'submission_records':records
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics(request):
"""
Gets matchbox metrics
"""
mme_headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
r = requests.get(url=settings.MME_MATCHBOX_METRICS_URL,
headers=mme_headers)
if r.status_code==200:
matchbox_metrics = r.json()['metrics']
genes_in_matchbox=matchbox_metrics['geneCounts'].keys()
seqr__gene_info = gather_all_annotated_genes_in_seqr()
seqr_metrics={"genes_in_seqr":len(seqr__gene_info),
"genes_found_in_matchbox":0}
unique_genes=[]
for gene_ids,proj in seqr__gene_info.iteritems():
if gene_ids[0] in genes_in_matchbox:
unique_genes.append(gene_ids[0])
seqr_metrics['genes_found_in_matchbox'] = len(set(unique_genes))
seqr_metrics["submission_info"]=find_projects_with_families_in_matchbox()
return JSONResponse({"from_matchbox":r.json(),
"from_seqr":seqr_metrics})
else:
resp = HttpResponse('{"message":"error contacting matchbox to gain metrics", "status":' + r.status_code + '}',status=r.status_code)
resp.status_code=r.status_code
return resp
@login_required
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics_for_project(request,project_id):
"""
Gets matchbox submission metrics for project (accessible to non-staff)
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
try:
return JSONResponse({"families":find_families_of_this_project_in_matchbox(project_id)})
except:
raise
@login_required
@csrf_exempt
@log_request('update_match_comment')
def update_match_comment(request,project_id,match_id,indiv_id):
"""
Update a comment made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
parse_json_error_mesg="wasn't able to parse POST!"
comment = request.POST.get("comment",parse_json_error_mesg)
if comment == parse_json_error_mesg:
return HttpResponse('{"message":"' + parse_json_error_mesg +'"}',status=500)
persisted_result_dets = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id)
if persisted_result_dets.count()>0:
for persisted_result_det in persisted_result_dets:
persisted_result_det.comments=comment.strip()
persisted_result_det.last_modified_by=request.user
persisted_result_det.save()
resp = HttpResponse('{"message":"OK"}',status=200)
return resp
    else:
        return HttpResponse('{"message":"error updating database"}', status=500)
@staff_member_required(login_url=LOGIN_URL)
@csrf_exempt
@log_request('get_current_match_state_of_all_results')
def get_current_match_state_of_all_results(request):
"""
gets the current state of all matches in this project
"""
return HttpResponse('{"message":"error unimplemented MME endpoint"}',status=500)
@login_required
@csrf_exempt
@log_request('get_current_match_state')
def get_current_match_state(request,project_id,match_id,indiv_id):
"""
gets the current state of this matched pair
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
try:
result_model = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
persisted_result_det = {
"id_of_indiv_searched_with":indiv_id,
"content_of_result":result_model.result_data,
"result_id":result_model.result_data['patient']['id'],
"we_contacted_host":result_model.we_contacted,
"host_contacted_us":result_model.host_contacted,
"seen_on":str(result_model.created_date),
"deemed_irrelevant":result_model.deemed_irrelevant,
"comments":result_model.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":result_model.flag_for_analysis,
"username_of_last_event_initiator":result_model.last_modified_by.username,
}
except Exception as e:
print e
return HttpResponse('{"message":"error talking to database"}',status=500)
return JSONResponse(persisted_result_det)
@login_required
@csrf_exempt
@log_request('match_state_update')
def match_state_update(request,project_id,match_id,indiv_id):
"""
Update a state change made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
state_type = request.POST.get('state_type', None)
state = request.POST.get('state',None)
if state_type is None or state is None:
return HttpResponse('{"message":"error parsing POST"}',status=500)
persisted_result_det = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
try:
if state_type == 'flag_for_analysis':
persisted_result_det.flag_for_analysis=False
if state == "true":
persisted_result_det.flag_for_analysis=True
if state_type == 'deemed_irrelevant':
persisted_result_det.deemed_irrelevant=False
if state == "true":
persisted_result_det.deemed_irrelevant=True
if state_type == 'we_contacted_host':
persisted_result_det.we_contacted=False
if state == "true":
persisted_result_det.we_contacted=True
if state_type == 'host_contacted_us':
persisted_result_det.host_contacted=False
if state == "true":
persisted_result_det.host_contacted=True
persisted_result_det.last_modified_by=request.user
persisted_result_det.save()
    except:
        return HttpResponse('{"message":"error updating database"}', status=500)
return HttpResponse('{"message":"successfully updated database"}',status=200)
|
agpl-3.0
| -8,759,921,877,234,000,000
| 36.645775
| 170
| 0.615225
| false
| 3.801808
| false
| false
| false
|
dvklopfenstein/PrincetonAlgorithms
|
py/AlgsSedgewickWayne/Topological.py
|
1
|
2002
|
"""Compute topological ordering(w DFS) of a DAG or edge-weighted DAG. Runs in O(E + V) time."""
# TBD Finish Python port
from AlgsSedgewickWayne.DirectedCycle import DirectedCycle
from AlgsSedgewickWayne.DepthFirstOrder import DepthFirstOrder
from AlgsSedgewickWayne.EdgeWeightedDigraph import EdgeWeightedDigraph
from AlgsSedgewickWayne.EdgeWeightedDirectedCycle import EdgeWeightedDirectedCycle
class Topological(object):
    """Determines if a digraph has a topological order and, if so, finds that order."""

    def __init__(self, G):  # G is a Digraph or EdgeWeightedDigraph; O(V + E) worst case
        # Pick the cycle finder that matches the graph type.
        if isinstance(G, EdgeWeightedDigraph):
            finder = EdgeWeightedDirectedCycle(G)
        else:
            finder = DirectedCycle(G)
        self._order = None           # topological order (None if G has a directed cycle)
        self._rank = [-1] * G.V()    # rank[v] = position of vertex v in topological order
        if not finder.hasCycle():
            dfs = DepthFirstOrder(G)
            self._order = dfs.reversePost()  # reverse postorder of a DAG is a topological order
            for i, v in enumerate(self._order):
                self._rank[v] = i

    def order(self):
        """Returns a topological order if the digraph has one, None otherwise; O(V)."""
        return self._order

    def hasOrder(self):
        """Does the digraph have a topological order? O(1)."""
        return self._order is not None

    def rank(self, v):
        """Returns the rank of vertex v in the topological order; -1 if the digraph is not a DAG."""
        self._validateVertex(v)
        if self.hasOrder():
            return self._rank[v]
        return -1

    def _validateVertex(self, v):
        """Raise an exception unless 0 <= v < V."""
        V = len(self._rank)
        if v < 0 or v >= V:
            raise Exception("vertex {} is not between 0 and {}".format(v, V - 1))
# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.
# Copyright 2002-2019, DV Klopfenstein, Python port
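# Illustrative usage sketch (not part of the original file). It assumes the package's
# Digraph class mirrors the Java API (a V-vertex constructor plus addEdge), which may
# differ in this port; adjust to the actual Digraph interface as needed.
def _example_topological_order():
    from AlgsSedgewickWayne.Digraph import Digraph  # assumed module path within this package
    dag = Digraph(4)          # 4 vertices, no edges yet
    dag.addEdge(0, 1)
    dag.addEdge(1, 2)
    dag.addEdge(2, 3)
    topo = Topological(dag)
    if topo.hasOrder():
        return list(topo.order())  # expected: [0, 1, 2, 3]
    return None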
|
gpl-2.0
| -1,320,169,917,173,574,100
| 39.04
| 95
| 0.695305
| false
| 3.398981
| false
| false
| false
|
WizeCommerce/medusa
|
setup.py
|
1
|
1292
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "thrift_medusa",
version = "0.0.1",
author = "Samir Faci",
author_email = "",
description = ("Language agnostic tool for packaging of thrift based services and artifacts"),
license = "Apache Software License",
url = "https://github.com/WizeCommerce/medusa",
packages=['thrift_medusa', 'tests'],
#packages = find_packages(exclude="test"),
package_data = {'': ['*.yaml']},
long_description=read('README.md'),
install_requires=['lxml','paramiko','argparse','pyyaml','jinja2'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
#entry_points = { 'console_scripts': ['medusa = thrift_medusa:main', 'samir = thrift_medusa.thrift_medusa:main'] },
#scripts = ['./publishClients.py'],
test_suite='tests',
zip_safe = True
)
|
apache-2.0
| -8,982,372,392,790,578,000
| 37
| 119
| 0.647059
| false
| 3.578947
| false
| false
| false
|
open-austin/influence-texas
|
src/influencetx/legislators/migrations/0001_initial.py
|
1
|
1680
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-17 17:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Legislator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('name', models.CharField(max_length=45)),
('first_name', models.CharField(blank=True, max_length=20)),
('last_name', models.CharField(blank=True, max_length=20)),
('party', models.CharField(choices=[('D', 'Democratic'), ('I', 'Independent'), ('R', 'Republican'), ('U', 'Unknown')], max_length=1)),
('chamber', models.CharField(choices=[('House', 'House'), ('Senate', 'Senate')], max_length=6)),
('district', models.IntegerField()),
('openstates_updated_at', models.DateTimeField()),
('url', models.URLField(blank=True)),
('photo_url', models.URLField(blank=True)),
],
),
migrations.CreateModel(
name='LegislatorIdMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('tpj_filer_id', models.IntegerField(db_index=True)),
],
),
]
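# Illustrative sketch (not part of the generated migration): the model definition that the
# CreateModel('Legislator') operation above corresponds to, written out for readability.
# Field names, types and choices are copied from the operation; the real class lives in
# the app's models.py, so it is shown here only as a comment.
#
#   class Legislator(models.Model):
#       openstates_leg_id = models.CharField(db_index=True, max_length=48)
#       name = models.CharField(max_length=45)
#       first_name = models.CharField(blank=True, max_length=20)
#       last_name = models.CharField(blank=True, max_length=20)
#       party = models.CharField(max_length=1, choices=[('D', 'Democratic'), ('I', 'Independent'),
#                                                       ('R', 'Republican'), ('U', 'Unknown')])
#       chamber = models.CharField(max_length=6, choices=[('House', 'House'), ('Senate', 'Senate')])
#       district = models.IntegerField()
#       openstates_updated_at = models.DateTimeField()
#       url = models.URLField(blank=True)
#       photo_url = models.URLField(blank=True)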
|
gpl-2.0
| -3,630,584,201,326,298,000
| 41
| 150
| 0.554167
| false
| 4.009547
| false
| false
| false
|
raphaelrpl/portal
|
backend/appengine/routes/questions/rest.py
|
1
|
4013
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from time import sleep
from gaebusiness.business import CommandExecutionException
from permission_app.permission_facade import main_user_form
from tekton.gae.middleware.json_middleware import JsonResponse
from question_app import question_facade
from gaepermission.decorator import login_required
from gaecookie.decorator import no_csrf
from question_app.question_model import CategoryQuestion, Question
from category_app.category_model import Category
@login_required
@no_csrf
def index():
cmd = question_facade.list_questions_cmd()
question_list = cmd()
question_form = question_facade.question_form()
def localize_user(model):
dct = question_form.fill_with_model(model)
user = main_user_form().fill_with_model(model.user.get())
dct['user'] = user
return dct
question_dcts = [localize_user(m) for m in question_list]
return JsonResponse(question_dcts)
@login_required
def new(_resp, _logged_user, **question_properties):
if _logged_user is None:
_resp.status_code = 400
return JsonResponse({"name": "Login required!"})
quest = question_properties.get('question', {})
if not quest:
_resp.status_code = 400
return JsonResponse({"name": "Required Field"})
question = Question(**quest)
question.user = _logged_user.key
try:
question.put()
except CommandExecutionException:
_resp.status_code = 400
if not question.name:
return JsonResponse({"name": "Required field"})
return JsonResponse({"name": "Put a valid post"})
for c in question_properties.get("categorys", {}):
cat = Category.query(Category.name == c).fetch()
if cat:
category = CategoryQuestion(origin=cat[0], destination=question)
category.put()
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
sleep(0.5)
return JsonResponse(data)
@login_required
def edit(_resp, _logged_user, **question_properties):
question_id = question_properties.get('id')
# key = ndb.Key('Question', int(question_id))
question = Question.get_by_id(int(question_id))
    if int(_logged_user.key.id()) != int(question_properties.get('user', {}).get('id', 0)) and question_id is not None:
        _resp.status_code = 400
        return JsonResponse({"name": "This post doesn't belong to you!"})
if question is None:
_resp.status_code = 400
return JsonResponse({"name": "Invalid post"})
question.name = question_properties.get('name')
try:
question.put()
except:
_resp.status_code = 400
return JsonResponse({"name": "Put a valid question"})
user_form = main_user_form()
form = question_facade.question_form()
question_dct = form.fill_with_model(question)
question_dct['user'] = user_form.fill_with_model(question.user.get())
return JsonResponse(question_dct)
# cmd = question_facade.update_question_cmd(question_id, **question_properties)
# return _save_or_update_json_response(_logged_user, cmd, _resp)
@login_required
def delete(_resp, id):
cmd = question_facade.delete_question_cmd(id)
try:
question = cmd()
# DeleteCategoryQuestion(destination=question).execute()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_dct = question_facade.question_form().fill_with_model(question)
return JsonResponse(question_dct)
def _save_or_update_json_response(_logged_user, cmd, _resp):
try:
question = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
return JsonResponse(data)
|
mit
| -3,836,463,681,952,738,300
| 33.299145
| 115
| 0.673561
| false
| 3.821905
| false
| false
| false
|
mozillazg/bustard
|
tests/httpbin/core.py
|
1
|
21325
|
# -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import random
import time
import uuid
from bustard.app import Bustard
from bustard.http import (
Response, Headers, jsonify as bustard_jsonify, redirect
)
from bustard.utils import json_dumps_default
from werkzeug.datastructures import WWWAuthenticate
from werkzeug.http import http_date
from werkzeug.serving import run_simple
from six.moves import range as xrange
from . import filters
from .helpers import (
get_headers, status_code, get_dict, get_request_range,
check_basic_auth, check_digest_auth, secure_cookie,
H, ROBOT_TXT, ANGRY_ASCII
)
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
def jsonify(*args, **kwargs):
response = bustard_jsonify(*args, **kwargs)
if not response.data.endswith(b'\n'):
response.data += b'\n'
return response
# Prevent WSGI from correcting the casing of the Location header
# BaseResponse.autocorrect_location_header = False
# Find the correct template folder when running from a different location
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
app = Bustard(__name__, template_dir=tmpl_dir)
render_template = app.render_template
url_for = app.url_for
# -----------
# Middlewares
# -----------
@app.after_request
def set_cors_headers(request, response):
response.headers['Access-Control-Allow-Origin'] = (
request.headers.get('Origin', '*')
)
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
# Both of these headers are only used for the "preflight request"
# http://www.w3.org/TR/cors/#access-control-allow-methods-response-header
response.headers['Access-Control-Allow-Methods'] = (
'GET, POST, PUT, DELETE, PATCH, OPTIONS'
)
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
response.headers['Access-Control-Allow-Headers'] = (
request.headers['Access-Control-Request-Headers']
)
return response
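# Client-side sketch of the CORS preflight request this middleware answers.
# Illustration only: the URL and custom header below are assumptions, and the
# 'requests' package is assumed to be installed; it is not used elsewhere here.
def _example_cors_preflight():
    import requests
    resp = requests.options(
        'http://localhost:5000/get',
        headers={
            'Origin': 'http://example.com',
            'Access-Control-Request-Method': 'GET',
            'Access-Control-Request-Headers': 'X-Custom-Header',
        },
    )
    # The middleware echoes the Origin and the requested headers back.
    return resp.headers.get('Access-Control-Allow-Origin')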
# ------
# Routes
# ------
@app.route('/')
def view_landing_page(request):
"""Generates Landing Page."""
tracking_enabled = 'HTTPBIN_TRACKING' in os.environ
return render_template('index.html', request=request,
tracking_enabled=tracking_enabled)
@app.route('/html')
def view_html_page(request):
"""Simple Html Page"""
return render_template('moby.html')
@app.route('/robots.txt')
def view_robots_page(request):
"""Simple Html Page"""
response = Response()
response.content = ROBOT_TXT
response.content_type = 'text/plain'
return response
@app.route('/deny')
def view_deny_page(request):
"""Simple Html Page"""
response = Response()
response.content = ANGRY_ASCII
response.content_type = 'text/plain'
return response
# return "YOU SHOULDN'T BE HERE"
@app.route('/ip')
def view_origin(request):
"""Returns Origin IP."""
return jsonify(origin=request.headers.get('X-Forwarded-For',
request.remote_addr))
@app.route('/headers')
def view_headers(request):
"""Returns HTTP HEADERS."""
return jsonify(get_dict(request, 'headers'))
@app.route('/user-agent')
def view_user_agent(request):
"""Returns User-Agent."""
headers = get_headers(request)
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET', 'OPTIONS'))
def view_get(request):
"""Returns GET Data."""
return jsonify(get_dict(request, 'url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post(request):
"""Returns POST Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put(request):
"""Returns PUT Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch(request):
"""Returns PATCH Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete(request):
"""Returns DELETE Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content(request):
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, gzipped=True))
@app.route('/deflate')
@filters.deflate
def view_deflate_encoded_content(request):
"""Returns Deflate-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, deflated=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
absolute = request.args.get('absolute', 'false').lower() == 'true'
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=absolute))
if absolute:
return _redirect(request, 'absolute', n, True)
else:
return _redirect(request, 'relative', n, False)
def _redirect(request, kind, n, external):
return redirect(url_for('{0}_redirect_n_times'.format(kind),
n=n - 1, _external=external, _request=request))
@app.route('/redirect-to')
def redirect_to(request):
"""302 Redirects to the given URL."""
args = CaseInsensitiveDict(request.args.items())
# We need to build the response manually and convert to UTF-8 to prevent
# werkzeug from "fixing" the URL. This endpoint should set the Location
# header to the exact string supplied.
response = Response('')
response.status_code = 302
response.headers['Location'] = args['url'].encode('utf-8')
return response
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
response = Response('')
response.status_code = 302
if n == 1:
response.headers['Location'] = url_for('view_get')
return response
response.headers['Location'] = app.url_for(
'relative_redirect_n_times', n=n - 1
)
return response
@app.route('/absolute-redirect/<int:n>')
def absolute_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=True))
return _redirect(request, 'absolute', n, True)
@app.route('/stream/<int:n>')
def stream_n_messages(request, n):
"""Stream n JSON messages"""
n = int(n)
response = get_dict(request, 'url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
'Content-Type': 'application/json',
})
@app.route('/status/<codes>',
methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def view_status_code(request, codes):
"""Return status code or random status code if more than one are given"""
if ',' not in codes:
code = int(codes)
return status_code(code)
choices = []
for choice in codes.split(','):
if ':' not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
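# Sketch of the weighted spec accepted above: "200:3,500:1" expresses a 3:1
# preference for 200 over 500. This mirrors the parsing in view_status_code;
# the actual weighted_choice helper lives in .utils (values are illustrative).
def _example_parse_status_spec(codes='200:3,500:1'):
    choices = []
    for choice in codes.split(','):
        code, _, weight = choice.partition(':')
        choices.append((int(code), float(weight or 1)))
    return choices  # [(200, 3.0), (500, 1.0)]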
@app.route('/response-headers')
def response_headers(request):
"""Returns a set of response headers from the query string """
headers = Headers(request.args.to_dict())
response = jsonify(headers)
# Re-serialize until the reported Content-Length stops changing, so the
# echoed headers include an accurate Content-Length for the final body.
while True:
content_len_shown = response.headers['Content-Length']
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.to_list():
response.headers.add(key, value)
if response.headers['Content-Length'] == content_len_shown:
break
return response
@app.route('/cookies')
def view_cookies(request, hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/forms/post')
def view_forms_post(request):
"""Simple HTML form."""
return render_template('forms-post.html')
@app.route('/cookies/set/<name>/<value>')
def set_cookie(request, name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/set')
def set_cookies(request):
"""Sets cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/delete')
def delete_cookies(request):
"""Deletes cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(request, qop=None, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Digest auth"""
if qop not in ('auth', 'auth-int'):
qop = None
if 'Authorization' not in request.headers or \
not check_digest_auth(user, passwd) or \
'Cookie' not in request.headers:
response = app.make_response('')
response.status_code = 401
# RFC2616 Section4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request, 'remote_addr', u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]))
opaque = H(os.urandom(10))
auth = WWWAuthenticate('digest')
auth.set_digest('me@kennethreitz.com', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ))
response.headers['WWW-Authenticate'] = auth.to_header()
response.headers['Set-Cookie'] = 'fake=fake_value'
return response
return jsonify(authenticated=True, user=user)
@app.route('/delay/<delay>')
def delay_response(request, delay):
"""Returns a delayed response"""
delay = min(float(delay), 10)
time.sleep(delay)
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files'))
@app.route('/drip')
def drip(request):
"""Drips data over a duration after an optional initial delay."""
args = CaseInsensitiveDict(request.args.items())
duration = float(args.get('duration', 2))
numbytes = int(args.get('numbytes', 10))
code = int(args.get('code', 200))
pause = duration / numbytes
delay = float(args.get('delay', 0))
if delay > 0:
time.sleep(delay)
def generate_bytes():
for i in xrange(numbytes):
yield u'*'.encode('utf-8')
time.sleep(pause)
response = Response(generate_bytes(), headers={
'Content-Type': 'application/octet-stream',
'Content-Length': str(numbytes),
})
response.status_code = code
return response
@app.route('/base64/<value>')
def decode_base64(request, value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8') # base64 expects binary string as input
return base64.urlsafe_b64decode(encoded).decode('utf-8')
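# Worked example for the endpoint above. The input string is only an
# illustration: base64.urlsafe_b64encode(b'HTTPBIN is awesome') is
# b'SFRUUEJJTiBpcyBhd2Vzb21l', so GET /base64/SFRUUEJJTiBpcyBhd2Vzb21l
# answers with 'HTTPBIN is awesome'.
def _example_base64_roundtrip():
    encoded = base64.urlsafe_b64encode(b'HTTPBIN is awesome')
    assert encoded == b'SFRUUEJJTiBpcyBhd2Vzb21l'
    return base64.urlsafe_b64decode(encoded).decode('utf-8')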
@app.route('/cache', methods=('GET',))
def cache(request):
"""Returns a 304 if an If-Modified-Since header or
If-None-Match is present. Returns the same as a GET otherwise.
"""
is_conditional = (
request.headers.get('If-Modified-Since') or
request.headers.get('If-None-Match')
)
if is_conditional is None:
response = view_get(request)
response.headers['Last-Modified'] = http_date()
response.headers['ETag'] = uuid.uuid4().hex
return response
else:
return status_code(304)
@app.route('/cache/<int:value>')
def cache_control(request, value):
"""Sets a Cache-Control header."""
value = int(value)
response = view_get(request)
response.headers['Cache-Control'] = 'public, max-age={0}'.format(value)
return response
@app.route('/encoding/utf8')
def encoding(request):
return render_template('UTF-8-demo.txt')
@app.route('/bytes/<int:n>')
def random_bytes(request, n):
"""Returns n random bytes generated with given seed."""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
response = Response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = 'application/octet-stream'
return response
@app.route('/stream-bytes/<int:n>')
def stream_random_bytes(request, n):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield(bytes(chunks))
chunks = bytearray()
if chunks:
yield(bytes(chunks))
headers = {'Content-Type': 'application/octet-stream'}
return Response(generate_bytes(), headers=headers)
@app.route('/range/<int:numbytes>')
def range_request(request, numbytes):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
numbytes = int(numbytes)
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes'
})
response.status_code = 404
response.content = 'number of bytes must be in the range (0, 10240]'
return response
params = CaseInsensitiveDict(request.args.items())
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
duration = float(params.get('duration', 0))
pause_per_byte = duration / numbytes
request_headers = get_headers(request)
first_byte_pos, last_byte_pos = get_request_range(request_headers,
numbytes)
if (
first_byte_pos > last_byte_pos or
first_byte_pos not in xrange(0, numbytes) or
last_byte_pos not in xrange(0, numbytes)
):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': 'bytes */%d' % numbytes
})
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord('a') + (i % 26))
if len(chunks) == chunk_size:
yield(bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield(bytes(chunks))
content_range = 'bytes %d-%d/%d' % (first_byte_pos, last_byte_pos,
numbytes)
response_headers = {
'Content-Type': 'application/octet-stream',
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': content_range}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response
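# Client-side sketch of a partial request against the endpoint above. The URL
# and byte range are arbitrary assumptions, and the 'requests' package is
# assumed to be installed; it is not used elsewhere in this module.
def _example_range_request():
    import requests
    resp = requests.get('http://localhost:5000/range/1024',
                        headers={'Range': 'bytes=0-9'})
    # Expect a 206 with body 'abcdefghij' and Content-Range 'bytes 0-9/1024'.
    return resp.status_code, resp.content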
@app.route('/links/<int:n>/<int:offset>')
def link_page(request, n, offset):
"""Generate a page containing n links to other pages which do the same."""
n = int(n)
offset = int(offset)
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ['<html><head><title>Links</title></head><body>']
for i in xrange(n):
if i == offset:
html.append('{0} '.format(i))
else:
html.append(link.format(url_for('link_page', n=n, offset=i), i))
html.append('</body></html>')
return ''.join(html)
@app.route('/links/<int:n>')
def links(request, n):
"""Redirect to first links page."""
n = int(n)
return redirect(url_for('link_page', n=n, offset=0))
@app.route('/image')
def image(request):
"""Returns a simple image of the type suggest by the Accept header."""
headers = get_headers(request)
if 'accept' not in headers:
return image_png(request) # Default media type to png
accept = headers['accept'].lower()
if 'image/webp' in accept:
return image_webp(request)
elif 'image/svg+xml' in accept:
return image_svg(request)
elif 'image/jpeg' in accept:
return image_jpeg(request)
elif 'image/png' in accept or 'image/*' in accept:
return image_png(request)
else:
return status_code(406) # Unsupported media type
@app.route('/image/png')
def image_png(request):
data = resource('images/pig_icon.png')
return Response(data, headers={'Content-Type': 'image/png'})
@app.route('/image/jpeg')
def image_jpeg(request):
data = resource('images/jackal.jpg')
return Response(data, headers={'Content-Type': 'image/jpeg'})
@app.route('/image/webp')
def image_webp(request):
data = resource('images/wolf_1.webp')
return Response(data, headers={'Content-Type': 'image/webp'})
@app.route('/image/svg')
def image_svg(request):
data = resource('images/svg_logo.svg')
return Response(data, headers={'Content-Type': 'image/svg+xml'})
def resource(filename):
path = os.path.join(
tmpl_dir,
filename)
with open(path, 'rb') as fobj:
return fobj.read()
@app.route('/xml')
def xml(request):
response = Response(render_template('sample.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
if __name__ == '__main__':
run_simple('0.0.0.0', 5000, app, use_reloader=True, use_debugger=True)
|
mit
| 1,247,459,229,769,562,400
| 27.245033
| 81
| 0.608488
| false
| 3.708696
| false
| false
| false
|
SurfasJones/icecream-info
|
icecream/lib/python2.7/site-packages/sphinx/search/__init__.py
|
1
|
11415
|
# -*- coding: utf-8 -*-
"""
sphinx.search
~~~~~~~~~~~~~
Create a full-text search index for offline search.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import with_statement
import re
import cPickle as pickle
from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
from sphinx.util import jsdump, rpartition
class SearchLanguage(object):
"""
This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
of this class.
You should override `lang` class property too (e.g. 'en', 'fr' and so on).
.. attribute:: stopwords
This is a set of stop words of the target language. Default `stopwords`
is empty. This set is used when building the index and is embedded in the JS.
.. attribute:: js_stemmer_code
The stemmer class for the JavaScript version. This class' name should be
``Stemmer`` and it must have a ``stemWord`` method. This string is
embedded as-is in searchtools.js.
This class is used to preprocess the search words that readers type into
the Sphinx HTML search box, before the index is searched. The default
implementation does nothing.
"""
lang = None
stopwords = set()
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
*/
var Stemmer = function() {
this.stemWord = function(w) {
return w;
}
}
"""
_word_re = re.compile(r'\w+(?u)')
def __init__(self, options):
self.options = options
self.init(options)
def init(self, options):
"""
Initialize the class with the options the user has given.
"""
def split(self, input):
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
languages.
"""
return self._word_re.findall(input)
def stem(self, word):
"""
This method implements stemming algorithm of the Python version.
Default implementation does nothing. You should implement this if the
language has any stemming rules.
This class is used to preprocess search words before registering them in
the search index. The stemming of the Python version and the JS version
(given in the js_stemmer_code attribute) must be compatible.
"""
return word
def word_filter(self, word):
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
"""
# Skip very short words starting with a Hiragana character, and words
# starting with a Latin-1 character that are too short, are stopwords,
# or consist only of digits.
return not (((len(word) < 3) and (12353 < ord(word[0]) < 12436)) or
(ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
word.isdigit())))
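# A minimal sketch of a language subclass following the contract described in
# the SearchLanguage docstring above. The language code, stopword list and
# lowercasing "stemmer" are illustrative assumptions, not part of Sphinx.
class ExampleSearchDutch(SearchLanguage):
    lang = 'nl'
    stopwords = set(['de', 'het', 'een'])
    def stem(self, word):
        # A real implementation would apply language-specific stemming rules
        # matching the JavaScript stemmer given in js_stemmer_code.
        return word.lower()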
from sphinx.search import en, ja
languages = {
'en': en.SearchEnglish,
'ja': ja.SearchJapanese,
}
class _JavaScriptIndex(object):
"""
The search index as javascript file that calls a function
on the documentation search object to register the index.
"""
PREFIX = 'Search.setIndex('
SUFFIX = ')'
def dumps(self, data):
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
data = s[len(self.PREFIX):-len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not \
s.endswith(self.SUFFIX):
raise ValueError('invalid data')
return jsdump.loads(data)
def dump(self, data, f):
f.write(self.dumps(data))
def load(self, f):
return self.loads(f.read())
js_index = _JavaScriptIndex()
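# Round-trip sketch for the serializer above. The payload is an arbitrary
# illustration of a frozen index fragment; simple structures like this are
# expected to survive a dumps/loads round trip.
def _example_js_index_roundtrip():
    payload = {'filenames': ['index'], 'terms': {'sphinx': 0}}
    text = js_index.dumps(payload)  # 'Search.setIndex({...})'
    return js_index.loads(text) == payload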
class WordCollector(NodeVisitor):
"""
A special visitor that collects words for the `IndexBuilder`.
"""
def __init__(self, document, lang):
NodeVisitor.__init__(self, document)
self.found_words = []
self.found_title_words = []
self.lang = lang
def dispatch_visit(self, node):
if node.__class__ is comment:
raise SkipNode
if node.__class__ is raw:
# Some people might put content in raw HTML that should be searched,
# so we just amateurishly strip HTML tags and index the remaining
# content
nodetext = re.sub(r'(?is)<style.*?</style>', '', node.astext())
nodetext = re.sub(r'(?is)<script.*?</script>', '', nodetext)
nodetext = re.sub(r'<[^<]+?>', '', nodetext)
self.found_words.extend(self.lang.split(nodetext))
raise SkipNode
if node.__class__ is Text:
self.found_words.extend(self.lang.split(node.astext()))
elif node.__class__ is title:
self.found_title_words.extend(self.lang.split(node.astext()))
class IndexBuilder(object):
"""
Helper class that creates a searchindex based on the doctrees
passed to the `feed` method.
"""
formats = {
'jsdump': jsdump,
'pickle': pickle
}
def __init__(self, env, lang, options, scoring):
self.env = env
# filename -> title
self._titles = {}
# stemmed word -> set(filenames)
self._mapping = {}
# stemmed words in titles -> set(filenames)
self._title_mapping = {}
# word -> stemmed word
self._stem_cache = {}
# objtype -> index
self._objtypes = {}
# objtype index -> (domain, type, objname (localized))
self._objnames = {}
# add language-specific SearchLanguage instance
self.lang = languages[lang](options)
if scoring:
with open(scoring, 'rb') as fp:
self.js_scorer_code = fp.read().decode('utf-8')
else:
self.js_scorer_code = u''
def load(self, stream, format):
"""Reconstruct from frozen data."""
if isinstance(format, basestring):
format = self.formats[format]
frozen = format.load(stream)
# if an old index is present, we treat it as not existing.
if not isinstance(frozen, dict) or \
frozen.get('envversion') != self.env.version:
raise ValueError('old format')
index2fn = frozen['filenames']
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
rv = {}
for k, v in mapping.iteritems():
if isinstance(v, int):
rv[k] = set([index2fn[v]])
else:
rv[k] = set(index2fn[i] for i in v)
return rv
self._mapping = load_terms(frozen['terms'])
self._title_mapping = load_terms(frozen['titleterms'])
# no need to load keywords/objtypes
def dump(self, stream, format):
"""Dump the frozen index to a stream."""
if isinstance(format, basestring):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
rv = {}
otypes = self._objtypes
onames = self._objnames
for domainname, domain in self.env.domains.iteritems():
for fullname, dispname, type, docname, anchor, prio in \
domain.get_objects():
# XXX use dispname?
if docname not in fn2index:
continue
if prio < 0:
continue
prefix, name = rpartition(fullname, '.')
pdict = rv.setdefault(prefix, {})
try:
typeindex = otypes[domainname, type]
except KeyError:
typeindex = len(otypes)
otypes[domainname, type] = typeindex
otype = domain.object_types.get(type)
if otype:
# use unicode() to fire translation proxies
onames[typeindex] = (domainname, type,
unicode(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
shortanchor = anchor
pdict[name] = (fn2index[docname], typeindex, prio, shortanchor)
return rv
def get_terms(self, fn2index):
rvs = {}, {}
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.iteritems():
if len(v) == 1:
fn, = v
if fn in fn2index:
rv[k] = fn2index[fn]
else:
rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
return rvs
def freeze(self):
"""Create a usable data structure for serializing."""
filenames = self._titles.keys()
titles = self._titles.values()
fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index)
objects = self.get_objects(fn2index) # populates _objtypes
objtypes = dict((v, k[0] + ':' + k[1])
for (k, v) in self._objtypes.iteritems())
objnames = self._objnames
return dict(filenames=filenames, titles=titles, terms=terms,
objects=objects, objtypes=objtypes, objnames=objnames,
titleterms=title_terms, envversion=self.env.version)
def prune(self, filenames):
"""Remove data for all filenames not in the list."""
new_titles = {}
for filename in filenames:
if filename in self._titles:
new_titles[filename] = self._titles[filename]
self._titles = new_titles
for wordnames in self._mapping.itervalues():
wordnames.intersection_update(filenames)
for wordnames in self._title_mapping.itervalues():
wordnames.intersection_update(filenames)
def feed(self, filename, title, doctree):
"""Feed a doctree to the index."""
self._titles[filename] = title
visitor = WordCollector(doctree, self.lang)
doctree.walk(visitor)
# memoize self.lang.stem
def stem(word):
try:
return self._stem_cache[word]
except KeyError:
self._stem_cache[word] = self.lang.stem(word)
return self._stem_cache[word]
_filter = self.lang.word_filter
for word in visitor.found_title_words:
word = stem(word)
if _filter(word):
self._title_mapping.setdefault(word, set()).add(filename)
for word in visitor.found_words:
word = stem(word)
if word not in self._title_mapping and _filter(word):
self._mapping.setdefault(word, set()).add(filename)
def context_for_searchtool(self):
return dict(
search_language_stemming_code = self.lang.js_stemmer_code,
search_language_stop_words =
jsdump.dumps(sorted(self.lang.stopwords)),
search_scorer_tool = self.js_scorer_code,
)
|
mit
| -8,059,802,016,210,315,000
| 33.279279
| 80
| 0.565484
| false
| 4.215288
| false
| false
| false
|
ilya-epifanov/ansible
|
lib/ansible/plugins/strategies/linear.py
|
1
|
14293
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
lowest_cur_block = len(iterator._blocks)
display.debug("counting tasks in each state of execution")
for (k, v) in host_tasks.iteritems():
if v is None:
continue
(s, t) = v
if t is None:
continue
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
display.debug("done counting tasks in each state of execution")
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
if host_state_task is None:
continue
(s, t) = host_state_task
if t is None:
continue
if s.run_state == cur_state and s.cur_block == cur_block:
# the task t was peeked earlier (peek=True), so call the iterator again
# here purely to advance the host's state; the return value is unused
new_t = iterator.get_next_task_for_host(host)
rvals.append((host, t))
else:
rvals.append((host, noop_task))
display.debug("done advancing hosts to next task")
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
# iterate over each task, while there is one left to run
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
self._display.debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
for (host, task) in host_tasks:
if not task:
continue
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
self._execute_meta(task, play_context, iterator)
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
self._display.debug("done getting variables")
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
display.debug("done copying, going to template now")
try:
task.name = unicode(templar.template(task.name, fail_on_undefined=False))
display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name
callback_sent = True
display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
# if we're bypassing the host loop, break out now
if run_once:
break
# go to next host/task group
if skip_rest:
continue
self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
self._display.debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError, e:
return False
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
self._display.debug("results queue empty")
except (IOError, EOFError), e:
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
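# Toy illustration of the lock-step scheduling implemented by
# _get_next_task_lockstep above, independent of the ansible objects: hosts
# and tasks here are plain strings, purely as an assumed example.
def _lockstep_demo():
    queues = {'web1': ['setup', 'deploy'], 'db1': ['setup']}
    noop = 'noop'
    rounds = []
    for step in range(max(len(q) for q in queues.values())):
        rounds.append([(host, queue[step] if step < len(queue) else noop)
                       for host, queue in sorted(queues.items())])
    # every host appears in every round; hosts that ran out get the noop task
    return rounds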
|
gpl-3.0
| -5,798,462,310,246,295,000
| 45.405844
| 179
| 0.547331
| false
| 4.777072
| false
| false
| false
|
Huyuwei/tvm
|
topi/python/topi/image/resize.py
|
1
|
7184
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from .. import tag
def resize(data, size, layout="NCHW", method="bilinear", align_corners=True, out_dtype=None):
"""Perform resize operation on the data.
Parameters
----------
inputs : tvm.Tensor
inputs is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
size: Tuple
Output resolution scale to
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
align_corners: Boolean, optional
To preserve the values at the corner pixels.
method: {"bilinear", "nearest_neighbor", "bicubic"}
Method to be used for resizing.
out_dtype: string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : tvm.Tensor
4-D with shape [batch, channel, in_height*scale, in_width*scale]
or [batch, in_height*scale, in_width*scale, channel]
or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
"""
method = method.lower()
if layout == 'NHWC':
in_n, in_h, in_w, in_c = data.shape
output_shape = [in_n, size[0], size[1], in_c]
elif layout == 'NCHW':
in_n, in_c, in_h, in_w = data.shape
output_shape = [in_n, in_c, size[0], size[1]]
# Otherwise layout must be NCHWxc
else:
in_n, in_c, in_h, in_w, in_cc = data.shape
output_shape = [in_n, in_c, size[0], size[1], in_cc]
if align_corners:
y_ratio = (in_h - 1).astype('float') / (size[0] - 1)
x_ratio = (in_w - 1).astype('float') / (size[1] - 1)
else:
y_ratio = (in_h).astype('float') / (size[0])
x_ratio = (in_w).astype('float') / (size[1])
def _get_pixel(n, c, y, x, cc):
y = tvm.max(tvm.min(y, in_h - 1), 0)
x = tvm.max(tvm.min(x, in_w - 1), 0)
if layout == 'NHWC':
return data(n, y, x, c).astype('float')
if layout == 'NCHW':
return data(n, c, y, x).astype('float')
# else must be NCHWxc
return data(n, c, y, x, cc).astype('float')
def _get_indices(*indices):
if layout == 'NHWC':
n, y, x, c = indices
cc = None
elif layout == 'NCHW':
n, c, y, x = indices
cc = None
else:
n, c, y, x, cc = indices
return n, c, y, x, cc
def _cast_output(value):
if out_dtype:
dtype = out_dtype
else:
dtype = data.dtype
return value.astype(dtype)
# Nearest neighbor computation
def _nearest_neighbor(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
if align_corners:
yint = tvm.round(in_y).astype('int32')
xint = tvm.round(in_x).astype('int32')
else:
# Add epsilon to floor to prevent gpu rounding errors.
epsilon = 1e-5
yint = tvm.floor(in_y + epsilon).astype('int32')
xint = tvm.floor(in_x + epsilon).astype('int32')
return _cast_output(_get_pixel(n, c, yint, xint, cc))
# Bilinear helper functions and computation.
def _lerp(A, B, t):
return A * (1.0 - t) + B * t
def _bilinear(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
p00 = _get_pixel(n, c, yint, xint, cc)
p10 = _get_pixel(n, c, yint, xint + 1, cc)
p01 = _get_pixel(n, c, yint + 1, xint, cc)
p11 = _get_pixel(n, c, yint + 1, xint + 1, cc)
col0 = _lerp(p00, p10, xfract)
col1 = _lerp(p01, p11, xfract)
value = _lerp(col0, col1, yfract)
return _cast_output(value)
# Bicubic helper function and computation.
def _cubic_kernel(A, B, C, D, t):
a = -A / 2.0 + (3.0*B) / 2.0 - (3.0*C) / 2.0 + D / 2.0
b = A - (5.0*B) / 2.0 + 2.0*C - D / 2.0
c = -A / 2.0 + C / 2.0
d = B
return a*t*t*t + b*t*t + c*t + d
def _bicubic(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
# 1st row
p00 = _get_pixel(n, c, yint - 1, xint - 1, cc)
p10 = _get_pixel(n, c, yint - 1, xint + 0, cc)
p20 = _get_pixel(n, c, yint - 1, xint + 1, cc)
p30 = _get_pixel(n, c, yint - 1, xint + 2, cc)
# 2nd row
p01 = _get_pixel(n, c, yint + 0, xint - 1, cc)
p11 = _get_pixel(n, c, yint + 0, xint + 0, cc)
p21 = _get_pixel(n, c, yint + 0, xint + 1, cc)
p31 = _get_pixel(n, c, yint + 0, xint + 2, cc)
# 3rd row
p02 = _get_pixel(n, c, yint + 1, xint - 1, cc)
p12 = _get_pixel(n, c, yint + 1, xint + 0, cc)
p22 = _get_pixel(n, c, yint + 1, xint + 1, cc)
p32 = _get_pixel(n, c, yint + 1, xint + 2, cc)
# 4th row
p03 = _get_pixel(n, c, yint + 2, xint - 1, cc)
p13 = _get_pixel(n, c, yint + 2, xint + 0, cc)
p23 = _get_pixel(n, c, yint + 2, xint + 1, cc)
p33 = _get_pixel(n, c, yint + 2, xint + 2, cc)
# Interpolate bicubically
col0 = _cubic_kernel(p00, p10, p20, p30, xfract)
col1 = _cubic_kernel(p01, p11, p21, p31, xfract)
col2 = _cubic_kernel(p02, p12, p22, p32, xfract)
col3 = _cubic_kernel(p03, p13, p23, p33, xfract)
value = _cubic_kernel(col0, col1, col2, col3, yfract)
return _cast_output(value)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "bilinear":
compute_func = _bilinear
elif method == "bicubic":
compute_func = _bicubic
else:
raise ValueError('%s method is not supported.' % method)
return tvm.compute(output_shape, compute_func, name='resize', tag=tag.INJECTIVE)
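# A minimal usage sketch for resize. It assumes the pre-0.7 tvm namespace used
# throughout this file (tvm.placeholder / tvm.create_schedule at top level);
# the input shape and output size are arbitrary illustrations.
def _example_bilinear_upscale():
    data = tvm.placeholder((1, 3, 32, 32), name="data")
    out = resize(data, (64, 64), layout="NCHW", method="bilinear")
    s = tvm.create_schedule(out.op)
    return s, out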
|
apache-2.0
| 8,464,358,940,680,116,000
| 32.886792
| 96
| 0.549415
| false
| 2.980913
| false
| false
| false
|
ictofnwi/coach
|
dashboard/views.py
|
1
|
19639
|
import random
import re
import json
import pytz
import dateutil.parser
from datetime import datetime, timedelta
from pprint import pformat
from hashlib import md5
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from django.conf import settings
from django.template import RequestContext, loader
from django.db.models import Q
from models import Activity, Recommendation, LogEvent, GroupAssignment
from recommendation import recommend
from tincan_api import TinCan
from helpers import *
# Fetch TinCan credentials from settings
USERNAME = settings.TINCAN['username']
PASSWORD = settings.TINCAN['password']
ENDPOINT = settings.TINCAN['endpoint']
# Reference to TinCan verbs
COMPLETED = TinCan.VERBS['completed']['id']
PROGRESSED = TinCan.VERBS['progressed']['id']
# Reference to TinCan activity types
ASSESSMENT = TinCan.ACTIVITY_TYPES['assessment']
MEDIA = TinCan.ACTIVITY_TYPES['media']
QUESTION = TinCan.ACTIVITY_TYPES['question']
# Reference to progress URI in result/extension
PROGRESS_T = "http://uva.nl/coach/progress"
# Default barcode height
BARCODE_HEIGHT = 35
## Decorators
def identity_required(func):
def inner(request, *args, **kwargs):
# Fetch email from GET parameters if present and store it in the session.
paramlist = request.GET.get('paramlist', None)
email = request.GET.get('email', None)
param_hash = request.GET.get('hash', None)
if paramlist is not None:
hash_contents = []
for param in paramlist.split(","):
if param == "pw":
hash_contents.append(settings.AUTHENTICATION_SECRET)
else:
hash_contents.append(request.GET.get(param, ""))
hash_string = md5(",".join(hash_contents)).hexdigest().upper()
if hash_string == param_hash and email is not None and email != "":
request.session['user'] = "mailto:%s" % (email, )
# Fetch user from session
user = request.session.get('user', None)
# If no user is specified, show information on how to login
if user is None:
return render(request, 'dashboard/loginfirst.html', {})
else:
return func(request, *args, **kwargs)
return inner
def check_group(func):
"""Decorator to check the group for A/B testing.
Users in group A see the dashboard and users in group B do not.
Users that are not yet in a group will be assigned one, so that the group
sizes differ by at most 1. If both groups are the same size, the group is
assigned pseudorandomly.
"""
def inner(request, *args, **kwargs):
# Fetch user from session
user = request.session.get('user', None)
# Case 1: Existing user
try:
assignment = GroupAssignment.objects.get(user=user)
if assignment.group == 'A':
return func(request, *args, **kwargs)
else:
return HttpResponse()
# Case 2: New user
except ObjectDoesNotExist:
# Case 2a: First half of new pair,
# randomly pick A or B for this user.
if GroupAssignment.objects.count() % 2 == 0:
group = random.choice(['A', 'B'])
if group == 'A':
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
else:
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
# Case 2b: Second half of new pair,
# choose the group that was not previously chosen.
else:
try:
last_group = GroupAssignment.objects.order_by('-id')[0].group
except:
last_group = random.choice(['A', 'B'])
if last_group == 'A':
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
else:
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
return inner
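# Pure-python sketch of the balancing rule documented in check_group above.
# No database is involved: 'assignments' stands in for the stored
# GroupAssignment rows (an assumed in-memory list of 'A'/'B' strings).
def _example_balanced_assignment(assignments):
    if len(assignments) % 2 == 0:
        group = random.choice(['A', 'B'])              # first half of a new pair
    else:
        group = 'B' if assignments[-1] == 'A' else 'A'  # complete the pair
    assignments.append(group)
    return group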
## Bootstrap
def bootstrap(request):
width = request.GET.get('width',0)
template = loader.get_template('dashboard/bootstrap.js')
return HttpResponse(
template.render(RequestContext(
request,
{ 'host': request.get_host(), 'width': width }
)),
content_type="application/javascript"
)
def bootstrap_recommend(request, milestones):
width = request.GET.get('width',0)
max_recs = int(request.GET.get('max', False))
return render(request, 'dashboard/bootstrap_recommend.js',
{'milestones': milestones,
'max_recs': max_recs,
'width': width,
'host': request.get_host()})
## Debug interface
def log(request):
logs = LogEvent.objects.order_by('-timestamp')[:100]
data = request.GET.get('data',"0") == "1"
return render(request, 'dashboard/log.html',
{ 'logs': logs, 'data': data, 'host': request.get_host()})
## Interface
@identity_required
@check_group
def barcode(request, default_width=170):
"""Return an svg representing progress of an individual vs the group."""
# Fetch user from session
user = request.session.get('user', None)
width = int(request.GET.get('width', default_width))
data = {'width': width, 'height': BARCODE_HEIGHT}
# Add values
markers = {}
activities = Activity.objects.filter(type=ASSESSMENT)
for activity in activities:
if activity.user in markers:
markers[activity.user] += min(80, activity.value)
else:
markers[activity.user] = min(80, activity.value)
if user in markers:
data['user'] = markers[user]
del markers[user]
else:
data['user'] = 0
data['people'] = markers.values()
# Normalise
if len(markers) > 0:
maximum = max(max(data['people']), data['user'])
data['user'] /= maximum
data['user'] *= width
data['user'] = int(data['user'])
for i in range(len(data['people'])):
data['people'][i] /= maximum
data['people'][i] *= width
data['people'][i] = int(data['people'][i])
else:
# If no other person has been active, the user is assumed to be in the
# lead, regardless of whether the user has done anything at all.
data['user'] = width
return render(request, 'dashboard/barcode.svg', data)
@identity_required
@check_group
def index(request):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the dashboard
width = request.GET.get("width",300);
activities = Activity.objects.filter(user=user).order_by('time')
statements = map(lambda x: x._dict(), activities)
statements = aggregate_statements(statements)
for statement in statements:
statement['activity'] = fix_url(statement['activity'], request)
statements = split_statements(statements)
assignments = statements['assignments']
assignments.sort(key = lambda x: x['time'], reverse=True)
exercises = statements['exercises']
exercises.sort(key = lambda x: x['value'])
video = statements['video']
video.sort(key = lambda x: x['time'], reverse=True)
template = loader.get_template('dashboard/index.html')
context = RequestContext(request, {
'width': width,
'barcode_height': BARCODE_HEIGHT,
'assignments': assignments,
'exercises': exercises,
'video': video,
'host': request.get_host()
})
response = HttpResponse(template.render(context))
response['Access-Control-Allow-Origin'] = "*"
event = LogEvent(type='D', user=user, data="{}")
event.save()
return response
@identity_required
@check_group
def get_recommendations(request, milestones, max_recommendations=False):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the recommendations dashboard
width = request.GET.get("width", 300);
# Get maximum recommendations to be showed
max_recommendations = int(request.GET.get('max', max_recommendations))
# Fetch activities that can be perceived as seen by the user
seen = Activity.objects.filter(
Q(verb=COMPLETED) | Q(verb=PROGRESSED),
value__gte=30,
user=user
)
# Further filter that list to narrow it down to activities that can be
# perceived as being done by the user.
done = seen.filter(value__gte=80)
# Preprocess the seen and done sets to be used later
seen = set(map(lambda x: hash(x.activity), seen))
done = set(map(lambda x: x.activity, done))
# Init dict containing final recommendations
recommendations = {}
# For every milestone we want to make recommendations for:
for milestone in milestones.split(','):
# Alas this is necessary on some servers
milestone = re.sub(r'http(s?):/([^/])',r'http\1://\2',milestone)
# Make sure the milestone is not already passed
if milestone not in done:
# Fetch list of rules from the context of this milestone.
# Rules contain antecedent => consequent associations with a
# certain amount of confidence and support. The antecedent is
# stored as a hash of the activities in the antecedent. The
# consequent is the activity that is recommended if you did the
# activities in the antecedent. At the moment only the trail
# recommendation algorithm is used, which has antecedents of only
# one activity. If this were different, the antecedent hash check
# would have to include creating powersets of a certain length.
rules = Recommendation.objects.filter(milestone=milestone)
# For each recommendation rule
for rule in rules:
# If the LHS applies and the RHS is not already done
if rule.antecedent_hash in seen and \
rule.consequent not in done:
# If the consequent was already recommended earlier
if rule.consequent in recommendations:
# Fetch earlier recommendation
earlier_rule = recommendations[rule.consequent]
# Recover the original total by which the support was
# divided in order to get the confidence of the
# earlier recommendation
earlier_total = earlier_rule['support']
earlier_total /= float(earlier_rule['confidence'])
total = earlier_total + rule.support/rule.confidence
# Calculate combined values
support = earlier_rule['support'] + rule.support
confidence = support / float(total)
score = f_score(confidence, support, beta=1.5)
# Update the earlier recommendation to combine both
earlier_rule['support'] = support
earlier_rule['confidence'] = confidence
earlier_rule['score'] = score
# If the consequent is recommended for the first time
else:
# Calculate F-score
score = f_score(rule.confidence, rule.support, beta=1.5)
# Store recommendation for this consequent
recommendations[rule.consequent] = {
'milestone': milestone,
'url': rule.consequent,
'id': rand_id(),
'name': rule.name,
'desc': rule.description,
'm_name': rule.m_name,
'confidence': rule.confidence,
'support': rule.support,
'score': score
}
# Convert to a list of recommendations.
# The lookup per consequent is no longer necessary
recommendations = recommendations.values()
# If recommendations were found
if len(recommendations) > 0:
# Normalise score
max_score = max(map(lambda x: x['score'], recommendations))
for recommendation in recommendations:
recommendation['score'] /= max_score
# Sort the recommendations using their f-scores
recommendations.sort(key = lambda x: x['score'], reverse=True)
# Cap the number of recommendations if applicable.
if max_recommendations:
recommendations = recommendations[:max_recommendations]
# Log Recommendations viewed
data = json.dumps({
"recs": map(lambda x: x['url'], recommendations),
"path": request.path,
"milestone_n": len(milestones.split(',')),
"milestones": milestones})
event = LogEvent(type='V', user=user, data=data)
event.save()
# Render the result
return render(request, 'dashboard/recommend.html',
{'recommendations': recommendations,
'context': event.id,
'width' : width,
'host': request.get_host()})
else:
return HttpResponse()
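# Sketch of how two rules that recommend the same activity are merged above:
# supports add up, and the combined confidence is the total support divided by
# the sum of each rule's original total (support / confidence). The numbers
# are arbitrary; f_score comes from the helpers import at the top.
def _example_merge_rules():
    r1 = {'support': 6, 'confidence': 0.6}   # original total = 10
    r2 = {'support': 4, 'confidence': 0.8}   # original total = 5
    total = (r1['support'] / float(r1['confidence']) +
             r2['support'] / float(r2['confidence']))
    support = r1['support'] + r2['support']
    confidence = support / float(total)      # 10 / 15, roughly 0.67
    return support, confidence, f_score(confidence, support, beta=1.5)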
## Background processes
def cache_activities(request):
"""Create a cache of the Learning Record Store by getting all items since
the most recent one in the cache.
"""
# Dynamic interval retrieval settings
INTERVAL = timedelta(days=1)
EPOCH = datetime(2013, 9, 3, 0, 0, 0, 0, pytz.utc)
# Set aggregate to True if events concerning the same activity-person
# should be aggregated into one row. This has an impact on recommendations.
aggregate = False
# Find most recent date
try:
# Selecting the the datetime of the latest stored item minus a margin
# of 6 hours. The margin is there to be slightly more resilient to
# variation (read: mistakes) in timezone handling and also to cope with
# the situation that an event was stored later than it occurred. The
# latter situation is one of the use cases of the Experience API.
# TODO: The 6 hour margin is arbitrary and a hack.
# We should find a better solution for this.
t1 = Activity.objects.latest('time').time - timedelta(hours=6)
except:
t1 = EPOCH
# Get new data
tincan = TinCan(USERNAME, PASSWORD, ENDPOINT)
statements = tincan.dynamicIntervalStatementRetrieval(t1, INTERVAL)
created_statement_count = 0
for statement in statements:
statement_type = statement['object']['definition']['type']
user = statement['actor']['mbox']
activity = statement['object']['id']
verb = statement['verb']['id']
name = statement['object']['definition']['name']['en-US']
description = statement['object']['definition']['description']['en-US']
time = dateutil.parser.parse(statement['timestamp'])
try:
raw_score = statement['result']['score']['raw']
min_score = statement['result']['score']['min']
max_score = statement['result']['score']['max']
value = 100 * (raw_score - min_score) / max_score
except KeyError:
try:
value = 100 * float(statement['result']['extensions'][PROGRESS_T])
except KeyError:
# If no information is given about the end result then assume a
# perfect score was acquired when the activity was completed,
# and no score otherwise.
if verb == COMPLETED:
value = 100
else:
value = 0
if aggregate:
a, created = Activity.objects.get_or_create(user=user,
activity=activity)
# Don't overwrite completed except with other completed events
# and only overwrite with a more recent timestamp
if created or (time > a.time and
(verb == COMPLETED or a.verb != COMPLETED)):
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
else:
a, created = Activity.objects.get_or_create(user=user,
verb=verb,
activity=activity,
time=time)
if created:
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
data = json.dumps({'t1': t1.isoformat(), 'created': created_statement_count})
event = LogEvent(type='C', user='all', data=data)
event.save()
return HttpResponse()
def generate_recommendations(request):
minsup = int(request.GET.get('minsup', 2))
minconf = int(request.GET.get('minconf', .3))
gamma = int(request.GET.get('gamma', .8))
# Mine recommendations
recommendations, names = recommend(
minsup=minsup,
minconf=minconf,
gamma=gamma
)
# Add recommendations to database
Recommendation.objects.all().delete()
for recommendation in recommendations:
model = Recommendation(
antecedent_hash = hash(recommendation['antecedent']),
confidence = recommendation['confidence'],
support = recommendation['support'],
milestone = recommendation['milestone'],
m_name = names[recommendation['milestone']][0],
name = names[recommendation['consequent']][0],
consequent = recommendation['consequent'],
description = names[recommendation['consequent']][1])
model.save()
event = LogEvent(type='G', user='all', data=json.dumps(recommendations))
event.save()
return HttpResponse(pformat(recommendations))
@identity_required
def track(request, defaulttarget='index.html'):
"""Track user clicks so that we may be able to improve recommendation
relevance in the future.
"""
# Fetch user from session
user = request.session.get('user', None)
# Fetch target URL from GET parameters
target = request.GET.get('target', defaulttarget)
# Fetch context log id from GET paramaters
context = request.GET.get('context', None)
if context is not None:
try:
context = LogEvent.objects.get(pk=int(context))
except LogEvent.DoesNotExist:
context = None
event = LogEvent(type='T', user=user, data=target, context=context)
event.save()
return redirect(fix_url(target, request))
|
agpl-3.0
| -7,548,557,729,014,595,000
| 38.594758
| 82
| 0.585671
| false
| 4.473576
| false
| false
| false
|
mikacousin/olc
|
src/ascii_load.py
|
1
|
25915
|
"""ASCII file: Load functions"""
import array
from olc.channel_time import ChannelTime
from olc.cue import Cue
from olc.define import MAX_CHANNELS, NB_UNIVERSES, App
from olc.device import Device, Parameter, Template
from olc.group import Group
from olc.independent import Independent
from olc.master import Master
from olc.sequence import Sequence
from olc.step import Step
def get_time(string):
"""String format : [[hours:]minutes:]seconds[.tenths]
Return time in seconds
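Examples (illustrative):
>>> get_time("90")
90.0
>>> get_time("1:30")
90.0
>>> get_time("1:00:02.5")
3602.5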
"""
if ":" in string:
tsplit = string.split(":")
if len(tsplit) == 2:
time = int(tsplit[0]) * 60 + float(tsplit[1])
elif len(tsplit) == 3:
time = int(tsplit[0]) * 3600 + int(tsplit[1]) * 60 + float(tsplit[2])
else:
print("Time format Error")
time = 0
else:
time = float(string)
return time
class AsciiParser:
"""Parse ASCII files"""
def __init__(self):
self.default_time = App().settings.get_double("default-time")
def parse(self, readlines):
"""Parse stream"""
flag_seq = False
in_cue = False
flag_patch = False
flag_master = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_parameter = False
type_seq = "MainPlayback"
playback = False
txt = False
t_in = False
t_out = False
d_in = False
d_out = False
wait = False
channels = False
mem = False
channel_time = {}
template = None
devices = {}
parameters = {}
console = ""
item = ""
for line in readlines:
# Strip carriage returns and newlines
line = line.replace("\r", "")
line = line.replace("\n", "")
# Marker for end of file
if line[:7].upper() == "ENDDATA":
break
# Console type
if line[:7].upper() == "CONSOLE":
console = line[8:]
# Clear all
if line[:9].upper() == "CLEAR ALL":
del App().memories[:]
del App().chasers[:]
del App().groups[:]
del App().masters[:]
for page in range(2):
for i in range(20):
App().masters.append(Master(page + 1, i + 1, 0, 0))
App().patch.patch_empty()
App().sequence.__init__(1, text="Main Playback")
del App().sequence.steps[1:]
App().independents.__init__()
# Sequence
if line[:9].upper() == "$SEQUENCE":
p = line[10:].split(" ")
if int(p[0]) < 2 and not playback:
playback = True
type_seq = "MainPlayback"
else:
type_seq = "Chaser"
index_seq = int(p[0])
App().chasers.append(Sequence(index_seq, type_seq=type_seq))
del App().chasers[-1].steps[1:]
flag_seq = True
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
# Chasers
if flag_seq and type_seq == "Chaser":
if line[:4].upper() == "TEXT":
App().chasers[-1].text = line[5:]
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
p = line[5:].split(" ")
seq = p[0]
mem = float(p[1])
if in_cue:
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
cue = Cue(seq, mem, channels, text=txt)
step = Step(
seq,
cue,
time_in=t_in,
time_out=t_out,
delay_out=d_out,
delay_in=d_in,
wait=wait,
text=txt,
)
App().chasers[-1].add_step(step)
in_cue = False
t_out = False
t_in = False
channels = False
# Main Playback
if flag_seq and type_seq == "MainPlayback":
if line[:1] == "!":
flag_seq = False
if line[:3].upper() == "CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[4:])
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[5:])
if in_cue:
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT" and not txt:
txt = line[7:]
if line[:12].upper() == "$$PRESETTEXT":
txt = line[13:]
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:6].upper() == "$$WAIT":
time = line[7:].split(" ")[0]
wait = get_time(time)
if line[:11].upper() == "$$PARTTIME ":
p = line[11:]
d = p.split(" ")[0]
if d == ".":
d = 0
delay = float(d)
time_str = p.split(" ")[1]
time = get_time(time_str)
if line[:14].upper() == "$$PARTTIMECHAN":
p = line[15:].split(" ")
# We could have several channels
for chan in p:
if chan.isdigit():
channel_time[int(chan)] = ChannelTime(delay, time)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
# Ignore channels greater than MAX_CHANNELS
if channel < MAX_CHANNELS:
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line[:5].upper() == "$$AL ":
items = line[5:].split(" ")
channel = int(items[0])
if line[:4].upper() == "$$A ":
items = line[4:].split(" ")
channel = int(items[0])
param_number = int(items[1])
value = int(items[2])
if channel < MAX_CHANNELS:
device_number = abs(App().patch.channels[channel - 1][0][0])
device = App().patch.devices[device_number]
param = device.template.parameters.get(param_number)
high_byte = param.offset.get("High Byte")
low_byte = param.offset.get("Low Byte")
parameters[param_number] = {
"high byte": high_byte,
"low byte": low_byte,
"value": value,
}
devices[channel] = parameters
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
if not d_in:
d_in = 0.0
if not d_out:
d_out = 0.0
# Create Cue
cue = Cue(0, mem, channels, text=txt, devices=devices)
# Add cue to the list
App().memories.append(cue)
# Create Step
step = Step(
1,
cue,
time_in=t_in,
time_out=t_out,
delay_in=d_in,
delay_out=d_out,
wait=wait,
channel_time=channel_time,
text=txt,
)
# Add Step to the Sequence
App().sequence.add_step(step)
in_cue = False
txt = False
t_out = False
t_in = False
wait = False
mem = False
channels = False
channel_time = {}
devices = {}
parameters = {}
# Dimmers Patch
if line[:11].upper() == "CLEAR PATCH":
flag_seq = False
flag_patch = True
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
App().patch.patch_empty() # Empty patch
App().window.channels_view.flowbox.invalidate_filter()
if flag_patch and line[:1] == "!":
flag_patch = False
if line[:7].upper() == "PATCH 1":
for p in line[8:].split(" "):
q = p.split("<")
if q[0]:
r = q[1].split("@")
channel = int(q[0])
output = int(r[0])
univ = int((output - 1) / 512)
level = int(r[1])
if univ < NB_UNIVERSES:
if channel < MAX_CHANNELS:
out = output - (512 * univ)
App().patch.add_output(channel, out, univ, level)
App().window.channels_view.flowbox.invalidate_filter()
else:
print("More than", MAX_CHANNELS, "channels")
else:
print("More than", NB_UNIVERSES, "universes")
# Parameter Definitions
if line[:9].upper() == "$PARAMDEF":
item = line[10:].split(" ")
number = int(item[0])
group = int(item[1])
# The parameter name may contain spaces, so rebuild it from the remaining items.
name = ""
for i in range(2, len(item)):
name += item[i] + " "
name = name[:-1]
App().parameters[number] = [group, name]
# Device Template
if flag_template:
if line[:1] == "!":
flag_template = False
if line[:14].upper() == "$$MANUFACTURER":
template.manufacturer = line[15:]
if line[:11].upper() == "$$MODELNAME":
template.model_name = line[12:]
if line[:10].upper() == "$$MODENAME":
template.mode_name = line[11:]
if line[:10].upper() == "$$COLORCAL":
pass
if line[:11].upper() == "$$FOOTPRINT":
template.footprint = int(line[12:])
if line[:11].upper() == "$$PARAMETER":
item = line[12:].split(" ")
param_number = int(item[0])
# param_type = int(item[1])
# param_xfade = int(item[2])
parameter = Parameter(param_number)
flag_parameter = True
if flag_parameter:
if line[:8].upper() == "$$OFFSET":
item = line[9:].split(" ")
parameter.offset = {
"High Byte": int(item[0]),
"Low Byte": int(item[1]),
"Step": int(item[2]),
}
if line[:9].upper() == "$$DEFAULT":
parameter.default = int(line[10:])
if line[:11].upper() == "$$HIGHLIGHT":
parameter.highlight = int(line[12:])
if line[:7].upper() == "$$TABLE":
item = line[8:].split(" ")
start = int(item[0])
stop = int(item[1])
flags = int(item[2])
range_name = ""
for i in range(3, len(item)):
range_name += item[i] + " "
range_name = range_name[:-1]
parameter.table.append([start, stop, flags, range_name])
if line[:8].upper() == "$$RANGE ":
item = line[8:].split(" ")
percent = int(item[2]) == 1
parameter.range = {
"Minimum": int(item[0]),
"Maximum": int(item[1]),
"Percent": percent,
}
if line[:12].upper() == "$$RANGEGROUP":
pass
if line == "":
template.parameters[parameter.number] = parameter
flag_parameter = False
if line[:9].upper() == "$TEMPLATE":
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = True
name = line[10:]
template = Template(name)
App().templates.append(template)
# Devices
if line[:8].upper() == "$DEVICE ":
item = line[8:].split(" ")
channel = int(item[0])
output = int(item[1])
universe = int((output - 1) / 512)
output = output - (512 * universe)
template = ""
for i in range(6, len(item)):
template += item[i] + " "
template = template[:-1]
if channel < MAX_CHANNELS and universe < NB_UNIVERSES:
device = Device(channel, output, universe, template)
App().patch.add_device(device)
# Presets not in sequence
if line[:5].upper() == "GROUP" and console == "CONGO":
# On Congo, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = True
flag_template = False
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[6:])
if line[:7].upper() == "$PRESET" and (console in ("DLIGHT", "VLC")):
# On DLight, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_template = False
flag_preset = True
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[8:])
if flag_preset:
if line[:1] == "!":
flag_preset = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
# Find Preset's position
found = False
i = 0
for i, _ in enumerate(App().memories):
if App().memories[i].memory > preset_nb:
found = True
break
if not found:
# Preset is at the end
i += 1
if not txt:
txt = ""
# Create Preset
cue = Cue(0, preset_nb, channels, text=txt)
# Add preset to the list
App().memories.insert(i, cue)
flag_preset = False
txt = ""
# Groups
if line[:5].upper() == "GROUP" and console != "CONGO":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[6:])
if line[:6].upper() == "$GROUP":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[7:])
if flag_group:
if line[:1] == "!":
flag_group = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
if not txt:
txt = ""
# Don't create a group that already exists
group_exist = False
for grp in App().groups:
if group_nb == grp.index:
group_exist = True
if not group_exist:
App().groups.append(Group(group_nb, channels, txt))
flag_group = False
txt = ""
# Masters
if flag_master:
if line[:1] == "!":
flag_master = False
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if (line == "" or line[:13].upper() == "$MASTPAGEITEM") and int(
item[1]
) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], channels
)
flag_master = False
if line[:13].upper() == "$MASTPAGEITEM":
item = line[14:].split(" ")
# DLight uses Type "2" for Groups
if console == "DLIGHT" and item[2] == "2":
item[2] = "13"
if item[2] == "2":
flag_seq = False
flag_patch = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_master = True
channels = array.array("B", [0] * MAX_CHANNELS)
# Only 20 Masters per page
elif int(item[1]) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], item[3]
)
# Independents
if line[:16].upper() == "$SPECIALFUNCTION":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_group = False
flag_template = False
flag_inde = True
channels = array.array("B", [0] * MAX_CHANNELS)
text = ""
items = line[17:].split(" ")
number = int(items[0])
# Parameters not implemented:
# ftype = items[1] # 0: inclusive, 1: Inhibit, 2: Exclusive
# button_mode = items[2] # 0: Momentary, 1: Toggling
if flag_inde:
if line[:1] == "!":
flag_inde = False
if line[:4].upper() == "TEXT":
text = line[5:]
if line[:6].upper() == "$$TEXT" and not text:
text = line[7:]
if line[:4].upper() == "CHAN":
chan_list = line[5:].split(" ")
for channel in chan_list:
item = channel.split("/")
if item[0]:
chan = int(item[0])
level = int(item[1][1:], 16)
if chan <= MAX_CHANNELS:
channels[chan - 1] = level
if line == "":
inde = Independent(number, text=text, levels=channels)
App().independents.update(inde)
flag_inde = False
# MIDI mapping
if line[:10].upper() == "$$MIDINOTE":
item = line[11:].split(" ")
App().midi.midi_notes.update({item[0]: [int(item[1]), int(item[2])]})
if line[:8].upper() == "$$MIDICC":
item = line[9:].split(" ")
App().midi.midi_cc.update({item[0]: [int(item[1]), int(item[2])]})
|
gpl-3.0
| -7,586,853,367,817,942,000
| 41.001621
| 88
| 0.353849
| false
| 4.790203
| false
| false
| false
|
mementum/backtrader
|
samples/vctest/vctest.py
|
1
|
15011
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
# The above could be sent to an independent module
import backtrader as bt
from backtrader.utils import flushfile # win32 quick stdout flushing
from backtrader.utils.py3 import string_types
class TestStrategy(bt.Strategy):
params = dict(
smaperiod=5,
trade=False,
stake=10,
exectype=bt.Order.Market,
stopafter=0,
valid=None,
cancel=0,
donotsell=False,
price=None,
pstoplimit=None,
)
def __init__(self):
# To control operation entries
self.orderid = list()
self.order = None
self.counttostop = 0
self.datastatus = 0
# Create SMA on 2nd data
self.sma = bt.indicators.MovAv.SMA(self.data, period=self.p.smaperiod)
print('--------------------------------------------------')
print('Strategy Created')
print('--------------------------------------------------')
def notify_data(self, data, status, *args, **kwargs):
print('*' * 5, 'DATA NOTIF:', data._getstatusname(status), *args)
if status == data.LIVE:
self.counttostop = self.p.stopafter
self.datastatus = 1
def notify_store(self, msg, *args, **kwargs):
print('*' * 5, 'STORE NOTIF:', msg)
def notify_order(self, order):
if order.status in [order.Completed, order.Cancelled, order.Rejected]:
self.order = None
print('-' * 50, 'ORDER BEGIN', datetime.datetime.now())
print(order)
print('-' * 50, 'ORDER END')
def notify_trade(self, trade):
print('-' * 50, 'TRADE BEGIN', datetime.datetime.now())
print(trade)
print('-' * 50, 'TRADE END')
def prenext(self):
self.next(frompre=True)
def next(self, frompre=False):
txt = list()
txt.append('%04d' % len(self))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('%s' % self.data.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data.open[0]))
txt.append('{}'.format(self.data.high[0]))
txt.append('{}'.format(self.data.low[0]))
txt.append('{}'.format(self.data.close[0]))
txt.append('{}'.format(self.data.volume[0]))
txt.append('{}'.format(self.data.openinterest[0]))
txt.append('{}'.format(self.sma[0]))
print(', '.join(txt))
if len(self.datas) > 1:
txt = list()
txt.append('%04d' % len(self))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('%s' % self.data1.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data1.open[0]))
txt.append('{}'.format(self.data1.high[0]))
txt.append('{}'.format(self.data1.low[0]))
txt.append('{}'.format(self.data1.close[0]))
txt.append('{}'.format(self.data1.volume[0]))
txt.append('{}'.format(self.data1.openinterest[0]))
txt.append('{}'.format(float('NaN')))
print(', '.join(txt))
if self.counttostop: # stop after x live lines
self.counttostop -= 1
if not self.counttostop:
self.env.runstop()
return
if not self.p.trade:
return
# if True and len(self.orderid) < 1:
if self.datastatus and not self.position and len(self.orderid) < 1:
self.order = self.buy(size=self.p.stake,
exectype=self.p.exectype,
price=self.p.price,
plimit=self.p.pstoplimit,
valid=self.p.valid)
self.orderid.append(self.order)
elif self.position.size > 0 and not self.p.donotsell:
if self.order is None:
size = self.p.stake // 2
if not size:
size = self.position.size # use the remaining
self.order = self.sell(size=size, exectype=bt.Order.Market)
elif self.order is not None and self.p.cancel:
if self.datastatus > self.p.cancel:
self.cancel(self.order)
if self.datastatus:
self.datastatus += 1
def start(self):
header = ['Datetime', 'Open', 'High', 'Low', 'Close', 'Volume',
'OpenInterest', 'SMA']
print(', '.join(header))
self.done = False
def runstrategy():
args = parse_args()
# Create a cerebro
cerebro = bt.Cerebro()
storekwargs = dict()
if not args.nostore:
vcstore = bt.stores.VCStore(**storekwargs)
if args.broker:
brokerargs = dict(account=args.account, **storekwargs)
if not args.nostore:
broker = vcstore.getbroker(**brokerargs)
else:
broker = bt.brokers.VCBroker(**brokerargs)
cerebro.setbroker(broker)
timeframe = bt.TimeFrame.TFrame(args.timeframe)
if args.resample or args.replay:
datatf = bt.TimeFrame.Ticks
datacomp = 1
else:
datatf = timeframe
datacomp = args.compression
fromdate = None
if args.fromdate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.fromdate))
fromdate = datetime.datetime.strptime(args.fromdate, dtformat)
todate = None
if args.todate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.todate))
todate = datetime.datetime.strptime(args.todate, dtformat)
VCDataFactory = vcstore.getdata if not args.nostore else bt.feeds.VCData
datakwargs = dict(
timeframe=datatf, compression=datacomp,
fromdate=fromdate, todate=todate,
historical=args.historical,
qcheck=args.qcheck,
tz=args.timezone
)
if args.nostore and not args.broker: # neither store nor broker
datakwargs.update(storekwargs) # pass the store args over the data
data0 = VCDataFactory(dataname=args.data0, tradename=args.tradename,
**datakwargs)
data1 = None
if args.data1 is not None:
data1 = VCDataFactory(dataname=args.data1, **datakwargs)
rekwargs = dict(
timeframe=timeframe, compression=args.compression,
bar2edge=not args.no_bar2edge,
adjbartime=not args.no_adjbartime,
rightedge=not args.no_rightedge,
)
if args.replay:
cerebro.replaydata(data0, **rekwargs)
if data1 is not None:
cerebro.replaydata(data1, **rekwargs)
elif args.resample:
cerebro.resampledata(data0, **rekwargs)
if data1 is not None:
cerebro.resampledata(data1, **rekwargs)
else:
cerebro.adddata(data0)
if data1 is not None:
cerebro.adddata(data1)
if args.valid is None:
valid = None
else:
try:
valid = float(args.valid)
except ValueError:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.valid))
valid = datetime.datetime.strptime(args.valid, dtformat)
else:
valid = datetime.timedelta(seconds=valid)
# Add the strategy
cerebro.addstrategy(TestStrategy,
smaperiod=args.smaperiod,
trade=args.trade,
exectype=bt.Order.ExecType(args.exectype),
stake=args.stake,
stopafter=args.stopafter,
valid=valid,
cancel=args.cancel,
donotsell=args.donotsell,
price=args.price,
pstoplimit=args.pstoplimit)
# Live data ... avoid long data accumulation by switching to "exactbars"
cerebro.run(exactbars=args.exactbars)
if args.plot and args.exactbars < 1: # plot if possible
cerebro.plot()
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Test Visual Chart 6 integration')
parser.add_argument('--exactbars', default=1, type=int,
required=False, action='store',
help='exactbars level, use 0/-1/-2 to enable plotting')
parser.add_argument('--plot',
required=False, action='store_true',
help='Plot if possible')
parser.add_argument('--stopafter', default=0, type=int,
required=False, action='store',
help='Stop after x lines of LIVE data')
parser.add_argument('--nostore',
required=False, action='store_true',
help='Do not Use the store pattern')
parser.add_argument('--qcheck', default=0.5, type=float,
required=False, action='store',
help=('Timeout for periodic '
'notification/resampling/replaying check'))
parser.add_argument('--no-timeoffset',
required=False, action='store_true',
help=('Do not Use TWS/System time offset for non '
'timestamped prices and to align resampling'))
parser.add_argument('--data0', default=None,
required=True, action='store',
help='data 0 into the system')
parser.add_argument('--tradename', default=None,
required=False, action='store',
help='Actual Trading Name of the asset')
parser.add_argument('--data1', default=None,
required=False, action='store',
help='data 1 into the system')
parser.add_argument('--timezone', default=None,
required=False, action='store',
help='timezone to get time output into (pytz names)')
parser.add_argument('--historical',
required=False, action='store_true',
help='do only historical download')
parser.add_argument('--fromdate',
required=False, action='store',
help=('Starting date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--todate',
required=False, action='store',
help=('End date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--smaperiod', default=5, type=int,
required=False, action='store',
help='Period to apply to the Simple Moving Average')
pgroup = parser.add_mutually_exclusive_group(required=False)
pgroup.add_argument('--replay',
required=False, action='store_true',
help='replay to chosen timeframe')
pgroup.add_argument('--resample',
required=False, action='store_true',
help='resample to chosen timeframe')
parser.add_argument('--timeframe', default=bt.TimeFrame.Names[0],
choices=bt.TimeFrame.Names,
required=False, action='store',
help='TimeFrame for Resample/Replay')
parser.add_argument('--compression', default=1, type=int,
required=False, action='store',
help='Compression for Resample/Replay')
parser.add_argument('--no-bar2edge',
required=False, action='store_true',
help='no bar2edge for resample/replay')
parser.add_argument('--no-adjbartime',
required=False, action='store_true',
help='no adjbartime for resample/replay')
parser.add_argument('--no-rightedge',
required=False, action='store_true',
help='no rightedge for resample/replay')
parser.add_argument('--broker',
required=False, action='store_true',
help='Use VisualChart as broker')
parser.add_argument('--account', default=None,
required=False, action='store',
help='Choose broker account (else first)')
parser.add_argument('--trade',
required=False, action='store_true',
help='Do Sample Buy/Sell operations')
parser.add_argument('--donotsell',
required=False, action='store_true',
help='Do not sell after a buy')
parser.add_argument('--exectype', default=bt.Order.ExecTypes[0],
choices=bt.Order.ExecTypes,
required=False, action='store',
help='Execution to Use when opening position')
parser.add_argument('--price', default=None, type=float,
required=False, action='store',
help='Price in Limit orders or Stop Trigger Price')
parser.add_argument('--pstoplimit', default=None, type=float,
required=False, action='store',
help='Price for the limit in StopLimit')
parser.add_argument('--stake', default=10, type=int,
required=False, action='store',
help='Stake to use in buy operations')
parser.add_argument('--valid', default=None,
required=False, action='store',
help='Seconds or YYYY-MM-DD')
parser.add_argument('--cancel', default=0, type=int,
required=False, action='store',
help=('Cancel a buy order after n bars in operation,'
' to be combined with orders like Limit'))
return parser.parse_args()
if __name__ == '__main__':
runstrategy()
|
gpl-3.0
| 3,710,281,282,891,024,000
| 35.612195
| 79
| 0.541136
| false
| 4.155869
| false
| false
| false
|
vivisect/synapse
|
synapse/lib/trigger.py
|
1
|
1918
|
import logging
import synapse.lib.cache as s_cache
logger = logging.getLogger(__name__)
class Triggers:
def __init__(self):
self._trig_list = []
self._trig_match = s_cache.MatchCache()
self._trig_byname = s_cache.Cache(onmiss=self._onTrigNameMiss)
def clear(self):
'''
Clear all previously registered triggers
'''
self._trig_list = []
self._trig_byname.clear()
def add(self, func, perm):
'''
Add a new callback to the triggers.
Args:
func (function): The function to call
perm (str,dict): The permission tufo
Returns:
(None)
'''
self._trig_list.append((perm, func))
self._trig_byname.clear()
def _onTrigNameMiss(self, name):
retn = []
for perm, func in self._trig_list:
if self._trig_match.match(name, perm[0]):
retn.append((perm, func))
return retn
def _cmpperm(self, perm, must):
for prop, match in must[1].items():
valu = perm[1].get(prop)
if valu is None:
return False
if not self._trig_match.match(valu, match):
return False
return True
def trigger(self, perm, *args, **kwargs):
'''
Fire any matching trigger functions for the given perm.
Args:
perm ((str,dict)): The perm tufo to trigger
*args (list): args list to use calling the trigger function
**kwargs (dict): kwargs dict to use calling the trigger function
Returns:
(None)
'''
for must, func in self._trig_byname.get(perm[0]):
if self._cmpperm(perm, must):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception(e)
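# Minimal usage sketch (illustrative only; the perm tufo shapes below are
# assumptions based on the docstrings above, not taken from synapse itself):
if __name__ == '__main__':  # pragma: no cover
    trig = Triggers()
    trig.add(lambda node: logger.info('trigger fired: %r', node),
             ('node:add', {'form': 'inet:ipv4'}))
    trig.trigger(('node:add', {'form': 'inet:ipv4'}), {'iden': 'demo'})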
|
apache-2.0
| -5,746,460,234,072,582,000
| 25.638889
| 79
| 0.519291
| false
| 4.063559
| false
| false
| false
|
orionzhou/robin
|
utils/counter.py
|
1
|
6732
|
"""
Counter class for py2.6 back compat.
<http://code.activestate.com/recipes/576611/>
"""
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def report(self, sep=", ", percentage=False):
total = sum(self.values())
items = []
for k, v in sorted(self.items(), key=lambda x: -x[-1]):
item = "{0}:{1}".format(k, v)
if percentage:
item += " ({0:.1f}%)".format(v * 100. / total)
items.append(item)
return sep.join(items)
if __name__ == '__main__':
import doctest
print(doctest.testmod())
|
gpl-2.0
| -6,222,579,013,777,309,000
| 32
| 85
| 0.531491
| false
| 4.210131
| false
| false
| false
|
wjakob/layerlab
|
recipes/utils/materials.py
|
1
|
6279
|
# Complex-valued IOR curves for a few metals
from scipy import interpolate
lambda_gold = [298.75705, 302.400421, 306.133759, 309.960449, 313.884003, 317.908142,
322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482,
349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625,
381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579,
420.285339, 427.531647, 435.032196, 442.800629, 450.851562, 459.200653,
467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769,
527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818,
604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964,
708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157,
855.063293, 885.601257]
eta_gold = [1.795+1.920375j, 1.812+1.92j, 1.822625+1.918875j, 1.83+1.916j,
1.837125+1.911375j, 1.84+1.904j, 1.83425+1.891375j,
1.824+1.878j, 1.812+1.86825j, 1.798+1.86j, 1.782+1.85175j,
1.766+1.846j, 1.7525+1.84525j, 1.74+1.848j, 1.727625+1.852375j,
1.716+1.862j, 1.705875+1.883j, 1.696+1.906j, 1.68475+1.9225j,
1.674+1.936j, 1.666+1.94775j, 1.658+1.956j, 1.64725+1.959375j,
1.636+1.958j, 1.628+1.951375j, 1.616+1.94j, 1.59625+1.9245j,
1.562+1.904j, 1.502125+1.875875j, 1.426+1.846j,
1.345875+1.814625j, 1.242+1.796j, 1.08675+1.797375j,
0.916+1.84j, 0.7545+1.9565j, 0.608+2.12j, 0.49175+2.32625j,
0.402+2.54j, 0.3455+2.730625j, 0.306+2.88j, 0.267625+2.940625j,
0.236+2.97j, 0.212375+3.015j, 0.194+3.06j, 0.17775+3.07j,
0.166+3.15j, 0.161+3.445812j, 0.16+3.8j, 0.160875+4.087687j,
0.164+4.357j, 0.1695+4.610188j, 0.176+4.86j,
0.181375+5.125813j, 0.188+5.39j, 0.198125+5.63125j, 0.21+5.88j]
lambda_aluminium = [298.75705, 302.400421, 306.133759, 309.960449, 313.884003,
317.908142, 322.036835, 326.274139, 330.624481, 335.092377, 339.682678,
344.400482, 349.251221, 354.240509, 359.37442, 364.659332, 370.10202,
375.709625, 381.489777, 387.450562, 393.600555, 399.948975, 406.505493,
413.280579, 420.285339, 427.531647, 435.032196, 442.800629, 450.851562,
459.200653, 467.864838, 476.862213, 486.212463, 495.936707, 506.057861,
516.600769, 527.592224, 539.061646, 551.040771, 563.564453, 576.670593,
590.400818, 604.800842, 619.920898, 635.816284, 652.548279, 670.184753,
688.800964, 708.481018, 729.318665, 751.41925, 774.901123, 799.897949,
826.561157, 855.063293, 885.601257]
eta_aluminium = [(0.273375+3.59375j), (0.28+3.64j), (0.286813+3.689375j),
(0.294+3.74j), (0.301875+3.789375j), (0.31+3.84j),
(0.317875+3.894375j), (0.326+3.95j), (0.33475+4.005j), (0.344+4.06j),
(0.353813+4.11375j), (0.364+4.17j), (0.374375+4.23375j), (0.385+4.3j),
(0.39575+4.365j), (0.407+4.43j), (0.419125+4.49375j), (0.432+4.56j),
(0.445688+4.63375j), (0.46+4.71j), (0.474688+4.784375j), (0.49+4.86j),
(0.506188+4.938125j), (0.523+5.02j), (0.540063+5.10875j), (0.558+5.2j),
(0.577313+5.29j), (0.598+5.38j), (0.620313+5.48j), (0.644+5.58j),
(0.668625+5.69j), (0.695+5.8j), (0.72375+5.915j), (0.755+6.03j),
(0.789+6.15j), (0.826+6.28j), (0.867+6.42j), (0.912+6.55j),
(0.963+6.7j), (1.02+6.85j), (1.08+7j), (1.15+7.15j), (1.22+7.31j),
(1.3+7.48j), (1.39+7.65j), (1.49+7.82j), (1.6+8.01j), (1.74+8.21j),
(1.91+8.39j), (2.14+8.57j), (2.41+8.62j), (2.63+8.6j), (2.8+8.45j),
(2.74+8.31j), (2.58+8.21j), (2.24+8.21j)]
lambda_copper = [302.400421, 306.133759, 309.960449, 313.884003, 317.908142,
322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482,
349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625,
381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579,
420.285339, 427.531647, 435.032196, 442.800629, 450.851562, 459.200653,
467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769,
527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818,
604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964,
708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157,
855.063293, 885.601257]
eta_copper = [(1.38+1.687j), (1.358438+1.703313j), (1.34+1.72j),
(1.329063+1.744563j), (1.325+1.77j), (1.3325+1.791625j), (1.34+1.81j),
(1.334375+1.822125j), (1.325+1.834j), (1.317812+1.85175j),
(1.31+1.872j), (1.300313+1.89425j), (1.29+1.916j),
(1.281563+1.931688j), (1.27+1.95j), (1.249062+1.972438j),
(1.225+2.015j), (1.2+2.121562j), (1.18+2.21j), (1.174375+2.177188j),
(1.175+2.13j), (1.1775+2.160063j), (1.18+2.21j), (1.178125+2.249938j),
(1.175+2.289j), (1.172812+2.326j), (1.17+2.362j), (1.165312+2.397625j),
(1.16+2.433j), (1.155312+2.469187j), (1.15+2.504j),
(1.142812+2.535875j), (1.135+2.564j), (1.131562+2.589625j),
(1.12+2.605j), (1.092437+2.595562j), (1.04+2.583j), (0.950375+2.5765j),
(0.826+2.599j), (0.645875+2.678062j), (0.468+2.809j),
(0.35125+3.01075j), (0.272+3.24j), (0.230813+3.458187j), (0.214+3.67j),
(0.20925+3.863125j), (0.213+4.05j), (0.21625+4.239563j), (0.223+4.43j),
(0.2365+4.619563j), (0.25+4.817j), (0.254188+5.034125j), (0.26+5.26j),
(0.28+5.485625j), (0.3+5.717j)]
lambda_chrome = [300.194, 307.643005, 316.276001, 323.708008, 333.279999,
341.542999, 351.217987, 362.514984, 372.312012, 385.031006, 396.10202,
409.175018, 424.58902, 438.09201, 455.80899, 471.406982, 490.040009,
512.314026, 532.102966, 558.468018, 582.06604, 610.739014, 700.452026,
815.65802, 826.53302, 849.17804, 860.971985, 885.570984]
eta_chrome = [(0.98+2.67j), (1.02+2.76j), (1.06+2.85j), (1.12+2.95j),
(1.18+3.04j), (1.26+3.12j), (1.33+3.18j), (1.39+3.24j), (1.43+3.31j),
(1.44+3.4j), (1.48+3.54j), (1.54+3.71j), (1.65+3.89j), (1.8+4.06j),
(1.99+4.22j), (2.22+4.36j), (2.49+4.44j), (2.75+4.46j), (2.98+4.45j),
(3.18+4.41j), (3.34+4.38j), (3.48+4.36j), (3.84+4.37j), (4.23+4.34j),
(4.27+4.33j), (4.31+4.32j), (4.33+4.32j), (4.38+4.31j)]
gold = interpolate.interp1d(lambda_gold, eta_gold, kind='cubic')
copper = interpolate.interp1d(lambda_copper, eta_copper, kind='cubic')
aluminium = interpolate.interp1d(lambda_aluminium, eta_aluminium, kind='cubic')
chrome = interpolate.interp1d(lambda_chrome, eta_chrome, kind='cubic')
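# Example (illustrative): sample the complex IOR of a metal at a wavelength in
# nanometers, which is how the tables above are keyed.
if __name__ == '__main__':
    print('eta of gold at 500 nm: %s' % gold(500.0))
    print('eta of copper at 633 nm: %s' % copper(633.0))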
|
bsd-2-clause
| 1,225,152,362,439,384,800
| 60.558824
| 85
| 0.637363
| false
| 1.763764
| false
| false
| false
|
angus-ai/angus-jumpingsumo
|
wrapper.py
|
1
|
3347
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import threading
import time
import angus
WIDTH = 640
def img_generator(file_path):
with open(file_path, "rb") as f:
buff = ""
for chunk in f:
buff += chunk
s = buff.find('\xff\xd8')
e = buff.find('\xff\xd9')
if s != -1 and e != -1:
jpg = buff[s:e + 2]
buff = buff[e + 2:]
yield jpg
def command(img, service):
file_path = '/tmp/imgtmp.jpg'
with open(file_path, 'wb') as f:
f.write(img)
job = service.process({'image': open(file_path, 'rb')})
result = job.result['faces']
if len(result) > 0 and result[0]['roi_confidence'] > 0.5:
roi = result[0]['roi']
x = roi[0]
w = roi[2]
cmd_angle = (x + w * 0.5) - WIDTH / 2
print w
if abs(cmd_angle) > WIDTH / 8:
if cmd_angle > 0:
return "Right"
else:
return "Left"
elif w > 100:
return "Back"
elif w < 80:
return "Forw"
return None
def command_loop(singleton, sub, service):
img = singleton[0]
if img is None:
return
cmd = command(img, service)
if cmd == "Right":
sub.stdin.write("u")
sub.stdin.flush()
elif cmd == "Left":
sub.stdin.write("y")
sub.stdin.flush()
elif cmd == "Back":
sub.stdin.write("i")
sub.stdin.flush()
elif cmd == "Forw":
sub.stdin.write("o")
sub.stdin.flush()
def loop(singleton, sub, service):
while True:
command_loop(singleton, sub, service)
# print "Loop"
time.sleep(1)
def launch(input_path, sub, service):
singleton = [None]
count = 0
thread = threading.Thread(target=loop, args=(singleton, sub, service))
thread.daemon = True
thread.start()
for img in img_generator(input_path):
singleton[0] = img
count += 1
if count > 600:
break
sub.stdin.write("q")
sub.stdin.flush()
def main():
os.environ[
'LD_LIBRARY_PATH'] = "../ARSDKBuildUtils/Targets/Unix/Install/lib"
sub = subprocess.Popen(
["./JumpingSumoInterface"],
stdin=subprocess.PIPE,
stdout=None,
stderr=subprocess.STDOUT)
time.sleep(2)
conn = angus.connect()
service = conn.services.get_service('face_detection', 1)
launch("./video_fifo", sub, service)
if __name__ == "__main__":
main()
|
apache-2.0
| -9,205,598,472,601,172,000
| 24.356061
| 74
| 0.58052
| false
| 3.649945
| false
| false
| false
|
nhatbui/pysuite
|
pookeeper/pookeeper/pookeeper.py
|
1
|
7385
|
import os
from collections import defaultdict, OrderedDict
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
class ZooKeeper(LineReceiver):
def __init__(self, connection_addr, znodes, ephem_nodes):
self.address = connection_addr
self.znodes = znodes
self.ephem_nodes = ephem_nodes
def connectionMade(self):
self.sendLine("true:Connected")
def connectionLost(self):
# Delete all ephemeral nodes associated
# with this connection/address.
for node in self.ephem_nodes[self.address]:
self.delete_node(node)
del self.ephem_nodes[self.address]
def delete_node(self, node):
# Delete node from parent's children listing
parent, child_name = os.path.split(node)
del self.znodes[parent]['children'][child_name]
# Delete node and all its children :(
stack = [node]
while len(stack):
curr_node = stack.pop()
stack.extend(self.znodes[curr_node]['children'].keys())
# Notify watchers
self.notify_watchers(curr_node)
del self.znodes[curr_node]
def notify_watchers(self, node):
# Notify watchers
while len(self.znodes[node]['watchers']):
watcher = self.znodes[node]['watchers'].pop()
watcher.sendLine('true:WATCHER_NOTICE:DELETED:{}'.format(node))
def lineReceived(self, msg):
# Check command
idx = msg.find(':')
if idx == -1:
self.sendLine('false:bad message')
return
cmd = msg[:idx]
if cmd == 'CREATE':
self.handle_CREATENODE(msg[(idx+1):])
elif cmd == 'ECREATE':
self.handle_CREATEEPHEMERALNODE(msg[(idx+1):])
elif cmd == 'DELETE':
self.handle_DELETENODE(msg[(idx+1):])
elif cmd == 'EXISTS':
self.handle_EXISTSNODE(msg[(idx+1):])
elif cmd == 'GET':
self.handle_GET(msg[(idx+1):])
elif cmd == 'SET':
self.handle_SET(msg[(idx+1):])
elif cmd == 'CHILDREN':
self.handle_GETCHILDREN(msg[(idx+1):])
elif cmd == 'WATCH':
self.handle_WATCH(msg[(idx+1):])
else:
self.sendLine('false:unknown command')
def handle_CREATENODE(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false')
# Check path up to node exists
p, _ = os.path.split(node)
if p not in self.znodes:
self.sendLine('false')
return
# Check if node already exists
if node in self.znodes:
self.sendLine('false:node already exists')
return
parent, child = os.path.split(node)
self.znodes[node] = { 'parent': parent, 'children': {}, 'watchers': []}
self.znodes[parent]['children'][child] = True
self.sendLine('true:CREATED:{}'.format(node))
def handle_CREATEEPHEMERALNODE(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false:bad node name')
# Check path up to node exists
p, _ = os.path.split(node)
if p not in self.znodes:
self.sendLine('false:path up to node does not exist')
else:
parent, child = os.path.split(node)
self.znodes[node] = { 'parent': parent, 'children': {}, 'watchers': []}
self.znodes[parent]['children'][child] = True
# Add as ephemeral node
self.ephem_nodes[self.address].append(node)
self.sendLine('true:CREATED_ENODE:{}'.format(node))
def handle_DELETENODE(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false')
# Check that node exists
if node in self.znodes:
# Delete node from parent's children listing
parent, child_name = os.path.split(node)
del self.znodes[parent]['children'][child_name]
# Delete node and all its children :(
stack = [node]
while len(stack):
curr_node = stack.pop()
stack.extend(self.znodes[curr_node]['children'].keys())
# Notify watchers
while len(self.znodes[curr_node]['watchers']):
watcher = self.znodes[curr_node]['watchers'].pop()
watcher.sendLine('true:WATCHER_NOTICE:DELETED:{}'.format(curr_node))
del self.znodes[curr_node]
self.sendLine('true:DELETED:{}'.format(node))
else:
self.sendLine('false:NOT DELETED:{}'.format(node))
def handle_EXISTSNODE(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false')
# Check that node exists
if node in self.znodes:
self.sendLine('true')
else:
self.sendLine('false')
def handle_GET(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false')
# Check that node exists
if node in self.znodes:
# Nodes are created without a 'data' key, so default to an empty payload.
self.sendLine(self.znodes[node].get('data', ''))
else:
self.sendLine('false')
def handle_SET(self, msg):
idx = msg.find(':')
if idx == -1:
self.sendLine('false')
node = msg[:idx]
data = msg[(idx+1):]
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false')
# Check that node exists
if node in self.znodes:
self.znodes[node]['data'] = data
# Notify watchers
while len(self.znodes[node]['watchers']):
watcher = self.znodes[node]['watchers'].pop()
watcher.sendLine('true:WATCHER_NOTICE:CHANGED:{}'.format(node))
self.sendLine('true:SET:{}'.format(node))
else:
self.sendLine('false')
def handle_GETCHILDREN(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false')
# Check that node exists
if node in self.znodes:
self.sendLine(','.join(self.znodes[node]['children'].keys()))
else:
self.sendLine('false')
def handle_WATCH(self, node):
# Check if znode path starts with a slash
if node[0] != '/':
self.sendLine('false:WATCHING:improper naming:{}'.format(node))
# Check that node exists
if node in self.znodes:
self.znodes[node]['watchers'].append(self)
self.sendLine('true:WATCHING:{}'.format(node))
else:
self.sendLine('false:WATCHING:node does not exist:{}'.format(node))
class ZooKeeperFactory(Factory):
def __init__(self):
self.znodes = {'/': { 'parent': None, 'children': OrderedDict(), 'watchers': [] } }
self.ephem_nodes = defaultdict(list)
def buildProtocol(self, addr):
return ZooKeeper(addr, self.znodes, self.ephem_nodes)
if __name__ == '__main__':
reactor.listenTCP(8123, ZooKeeperFactory())
print('Starting on port 8123')
reactor.run()
|
mit
| -7,960,860,430,897,966,000
| 29.899582
| 91
| 0.55545
| false
| 3.870545
| false
| false
| false
|
FluidityStokes/fluidity
|
tests/mms_tracer_P1dg_cdg_diff_steady_3d_cjc_inhNmnnbc/cdg3d.py
|
1
|
1504
|
import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array,max,abs
meshtemplate='''
Point(1) = {0.0,0.0,0,0.1};
Extrude {1,0,0} {
Point{1}; Layers{<layers>};
}
Extrude {0,1,0} {
Line{1}; Layers{<layers>};
}
Extrude {0,0,1} {
Surface{5}; Layers{<layers>};
}
//Z-normal surface, z=0
Physical Surface(28) = {5};
//Z-normal surface, z=1
Physical Surface(29) = {27};
//Y-normal surface, y=0
Physical Surface(30) = {14};
//Y-normal surface, y=1
Physical Surface(31) = {22};
//X-normal surface, x=0
Physical Surface(32) = {26};
//X-normal surface, x=1
Physical Surface(33) = {18};
Physical Volume(34) = {1};
'''
def generate_meshfile(name,layers):
geo = meshtemplate.replace('<layers>',str(layers))
open(name+".geo",'w').write(geo)
os.system("gmsh -3 "+name+".geo")
os.system("../../bin/gmsh2triangle "+name+".msh")
def run_test(layers, binary):
'''run_test(layers, binary)
Run a single test of the channel problem. Layers is the number of mesh
points in the cross-channel direction. The mesh is unstructured and
isotropic. binary is a string containing the fluidity command to run.
The return value is the error in u and p at the end of the simulation.'''
generate_meshfile("channel",layers)
os.system(binary+" channel_viscous.flml")
s=stat_parser("channel-flow-dg.stat")
return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
|
lgpl-2.1
| -6,174,876,977,776,289,000
| 25.857143
| 77
| 0.664229
| false
| 2.94902
| false
| false
| false
|
hzlf/openbroadcast
|
website/apps/__rework_in_progress/importer/api.py
|
1
|
7486
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib.auth.models import User
from django.db.models import Count
import json
from tastypie import fields
from tastypie.authentication import *
from tastypie.authorization import *
from tastypie.resources import ModelResource, Resource, ALL, ALL_WITH_RELATIONS
from tastypie.cache import SimpleCache
from tastypie.utils import trailing_slash
from tastypie.exceptions import ImmediateHttpResponse
from django.http import HttpResponse
from importer.models import Import, ImportFile
from alibrary.api import MediaResource
# file = request.FILES[u'files[]']
class ImportFileResource(ModelResource):
import_session = fields.ForeignKey('importer.api.ImportResource', 'import_session', null=True, full=False)
media = fields.ForeignKey('alibrary.api.MediaResource', 'media', null=True, full=True)
class Meta:
queryset = ImportFile.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
resource_name = 'importfile'
# excludes = ['type','results_musicbrainz']
excludes = ['type',]
authentication = Authentication()
authorization = Authorization()
always_return_data = True
filtering = {
'import_session': ALL_WITH_RELATIONS,
'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
}
def dehydrate(self, bundle):
bundle.data['status'] = bundle.obj.get_status_display().lower();
# offload json parsing to the backend
# TODO: remove in js, enable here
"""
bundle.data['import_tag'] = json.loads(bundle.data['import_tag'])
bundle.data['results_acoustid'] = json.loads(bundle.data['results_acoustid'])
bundle.data['results_musicbrainz'] = json.loads(bundle.data['results_musicbrainz'])
bundle.data['results_discogs'] = json.loads(bundle.data['results_discogs'])
bundle.data['results_tag'] = json.loads(bundle.data['results_tag'])
"""
return bundle
def obj_update(self, bundle, request, **kwargs):
#import time
#time.sleep(3)
return super(ImportFileResource, self).obj_update(bundle, request, **kwargs)
def obj_create(self, bundle, request, **kwargs):
"""
Little switch to play with jquery fileupload
"""
try:
#import_id = request.GET['import_session']
import_id = request.GET.get('import_session', None)
uuid_key = request.GET.get('uuid_key', None)
print "####################################"
print request.FILES[u'files[]']
if import_id:
imp = Import.objects.get(pk=import_id)
bundle.data['import_session'] = imp
elif uuid_key:
imp, created = Import.objects.get_or_create(uuid_key=uuid_key, user=request.user)
bundle.data['import_session'] = imp
else:
bundle.data['import_session'] = None
bundle.data['file'] = request.FILES[u'files[]']
except Exception, e:
print e
return super(ImportFileResource, self).obj_create(bundle, request, **kwargs)
class ImportResource(ModelResource):
files = fields.ToManyField('importer.api.ImportFileResource', 'files', full=True, null=True)
class Meta:
queryset = Import.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
#list_allowed_methods = ['get',]
#detail_allowed_methods = ['get',]
resource_name = 'import'
excludes = ['updated',]
include_absolute_url = True
authentication = Authentication()
authorization = Authorization()
always_return_data = True
filtering = {
#'channel': ALL_WITH_RELATIONS,
'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
}
def save_related(self, obj):
return True
# additional methods
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/import-all%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('import_all'), name="importer_api_import_all"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/apply-to-all%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('apply_to_all'), name="importer_api_apply_to_all"),
]
def import_all(self, request, **kwargs):
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
import_session = Import.objects.get(**self.remove_api_resource_names(kwargs))
import_files = import_session.files.filter(status=2)
# first do a batch update
import_files.update(status=6)
# save again to trigger post-save actions
for import_file in import_files:
import_file.status = 6
import_file.save()
bundle = self.build_bundle(obj=import_session, request=request)
bundle = self.full_dehydrate(bundle)
self.log_throttled_access(request)
return self.create_response(request, bundle)
"""
Mass-apply the import tag from one file to all other files in the import session.
"""
def apply_to_all(self, request, **kwargs):
self.method_check(request, allowed=['post'])
self.is_authenticated(request)
self.throttle_check(request)
import_session = Import.objects.get(**self.remove_api_resource_names(kwargs))
item_id = request.POST.get('item_id', None)
ct = request.POST.get('ct', None)
print 'item_id: %s' % item_id
print 'ct: %s' % ct
if not (ct and item_id):
raise ImmediateHttpResponse(response=HttpResponse(status=410))
import_files = import_session.files.filter(status__in=(2,4))
source = import_files.filter(pk=item_id)
# exclude current one
import_files = import_files.exclude(pk=item_id)
try:
source = source[0]
print source
# print source.import_tag
except:
source = None
if source:
sit = source.import_tag
for import_file in import_files:
dit = import_file.import_tag
if ct == 'artist':
map = ('artist', 'alibrary_artist_id', 'mb_artist_id', 'force_artist')
if ct == 'release':
map = ('release', 'alibrary_release_id', 'mb_release_id', 'force_release')
for key in map:
src = sit.get(key, None)
if src:
dit[key] = src
else:
dit.pop(key, None)
import_file.import_tag = dit
import_file.save()
bundle = self.build_bundle(obj=import_session, request=request)
bundle = self.full_dehydrate(bundle)
self.log_throttled_access(request)
return self.create_response(request, bundle)
|
gpl-3.0
| 2,424,500,771,755,327,000
| 31.837719
| 190
| 0.56786
| false
| 4.050866
| false
| false
| false
|
crempp/mdweb
|
mdweb/SiteMapView.py
|
1
|
2696
|
"""MDWeb SiteMap View Object."""
import datetime
import logging
import numbers
import os
import pytz
import time
from flask import (
current_app as app,
make_response,
render_template_string,
url_for,
)
from flask.views import View
#: Template string to use for the sitemap generation
# (is there a better place to put this?, not in the theme)
# pylint: disable=C0301
SITEMAP_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
{% for page in pages -%}
<url>
<loc>{{page.loc|safe}}</loc>
<lastmod>{{page.lastmod|safe}}</lastmod>
{%- if page.changefreq %}
<changefreq>{{page.changefreq|safe}}</changefreq>
{%- endif %}
{%- if page.priority %}
<priority>{{page.priority|safe}}</priority>
{%- endif %}
</url>
{%- endfor %}
</urlset>
"""
class SiteMapView(View):
"""Sitemap View Object."""
sitemap_cache = None
def dispatch_request(self):
"""Flask dispatch method."""
if self.sitemap_cache is None:
self.sitemap_cache = self.generate_sitemap()
response = make_response(self.sitemap_cache)
response.headers["Content-Type"] = "application/xml"
return response
@classmethod
def generate_sitemap(cls):
"""Generate sitemap.xml. Makes a list of urls and date modified."""
logging.info("Generating sitemap...")
start = time.time()
pages = []
index_url = url_for('index', _external=True)
for url, page in app.navigation.get_page_dict().items():
if page.meta_inf.published:
mtime = os.path.getmtime(page.page_path)
if isinstance(mtime, numbers.Real):
mtime = datetime.datetime.fromtimestamp(mtime)
mtime.replace(tzinfo=pytz.UTC)
# lastmod = mtime.strftime('%Y-%m-%dT%H:%M:%S%z')
lastmod = mtime.strftime('%Y-%m-%d')
pages.append({
'loc': "%s%s" % (index_url, url),
'lastmod': lastmod,
'changefreq': page.meta_inf.sitemap_changefreq,
'priority': page.meta_inf.sitemap_priority,
})
sitemap_xml = render_template_string(SITEMAP_TEMPLATE, pages=pages)
end = time.time()
logging.info("completed sitemap generation in %s seconds",
(end - start))
return sitemap_xml
|
mit
| -4,282,121,319,264,830,000
| 30.717647
| 124
| 0.585682
| false
| 3.845934
| false
| false
| false
|
armyofevilrobots/reticulatus
|
reticulatus/gui/reticulate_main.py
|
1
|
13375
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'reticulate_main.ui'
#
# Created: Thu Oct 25 21:48:45 2012
# by: pyside-uic 0.2.13 running on PySide 1.1.0
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_main_window(object):
def setupUi(self, main_window):
main_window.setObjectName("main_window")
main_window.resize(925, 633)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(main_window.sizePolicy().hasHeightForWidth())
main_window.setSizePolicy(sizePolicy)
main_window.setMinimumSize(QtCore.QSize(512, 384))
main_window.setAutoFillBackground(False)
self.centralwidget = QtGui.QWidget(main_window)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.object_tabs = QtGui.QTabWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.object_tabs.sizePolicy().hasHeightForWidth())
self.object_tabs.setSizePolicy(sizePolicy)
self.object_tabs.setObjectName("object_tabs")
self.object_3d = QtGui.QWidget()
self.object_3d.setCursor(QtCore.Qt.CrossCursor)
self.object_3d.setLayoutDirection(QtCore.Qt.RightToLeft)
self.object_3d.setObjectName("object_3d")
self.object_3d_layout = QtGui.QHBoxLayout(self.object_3d)
self.object_3d_layout.setObjectName("object_3d_layout")
self.frame = QtGui.QFrame(self.object_3d)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setMaximumSize(QtCore.QSize(50, 16777215))
self.frame.setLayoutDirection(QtCore.Qt.RightToLeft)
self.frame.setFrameShape(QtGui.QFrame.NoFrame)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setLineWidth(0)
self.frame.setObjectName("frame")
self.slider_container_layout = QtGui.QVBoxLayout(self.frame)
self.slider_container_layout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.slider_container_layout.setObjectName("slider_container_layout")
self.layer_slider = QtGui.QSlider(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layer_slider.sizePolicy().hasHeightForWidth())
self.layer_slider.setSizePolicy(sizePolicy)
self.layer_slider.setMaximumSize(QtCore.QSize(50, 16777215))
self.layer_slider.setMinimum(0)
self.layer_slider.setMaximum(9999)
self.layer_slider.setProperty("value", 0)
self.layer_slider.setOrientation(QtCore.Qt.Vertical)
self.layer_slider.setInvertedAppearance(False)
self.layer_slider.setObjectName("layer_slider")
self.slider_container_layout.addWidget(self.layer_slider)
self.layer_lcd = QtGui.QLCDNumber(self.frame)
self.layer_lcd.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.layer_lcd.setFont(font)
self.layer_lcd.setNumDigits(4)
self.layer_lcd.setObjectName("layer_lcd")
self.slider_container_layout.addWidget(self.layer_lcd)
self.object_3d_layout.addWidget(self.frame)
self.object_tabs.addTab(self.object_3d, "")
self.gcode = QtGui.QWidget()
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gcode.sizePolicy().hasHeightForWidth())
self.gcode.setSizePolicy(sizePolicy)
self.gcode.setObjectName("gcode")
self.gcode_hlayout = QtGui.QHBoxLayout(self.gcode)
self.gcode_hlayout.setObjectName("gcode_hlayout")
self.gcode_editor = QtGui.QTextEdit(self.gcode)
self.gcode_editor.setObjectName("gcode_editor")
self.gcode_hlayout.addWidget(self.gcode_editor)
self.object_tabs.addTab(self.gcode, "")
self.horizontalLayout.addWidget(self.object_tabs)
main_window.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(main_window)
self.menubar.setGeometry(QtCore.QRect(0, 0, 925, 23))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menu_edit = QtGui.QMenu(self.menubar)
self.menu_edit.setObjectName("menu_edit")
self.menu_Settings = QtGui.QMenu(self.menubar)
self.menu_Settings.setObjectName("menu_Settings")
self.menu_Help = QtGui.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menuActions = QtGui.QMenu(self.menubar)
self.menuActions.setObjectName("menuActions")
self.menu_Windows = QtGui.QMenu(self.menubar)
self.menu_Windows.setObjectName("menu_Windows")
main_window.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(main_window)
self.statusbar.setEnabled(True)
self.statusbar.setSizeGripEnabled(True)
self.statusbar.setObjectName("statusbar")
main_window.setStatusBar(self.statusbar)
self.layers_dock = QtGui.QDockWidget(main_window)
self.layers_dock.setMinimumSize(QtCore.QSize(120, 160))
self.layers_dock.setMaximumSize(QtCore.QSize(1024, 1024))
self.layers_dock.setObjectName("layers_dock")
self.dock_contents = QtGui.QWidget()
self.dock_contents.setObjectName("dock_contents")
self.verticalLayout = QtGui.QVBoxLayout(self.dock_contents)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtGui.QLabel(self.dock_contents)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.layer_list_widget = QtGui.QListWidget(self.dock_contents)
self.layer_list_widget.setObjectName("layer_list_widget")
self.verticalLayout.addWidget(self.layer_list_widget)
self.layers_dock.setWidget(self.dock_contents)
main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.layers_dock)
self.tools_dock = QtGui.QDockWidget(main_window)
self.tools_dock.setMinimumSize(QtCore.QSize(120, 160))
self.tools_dock.setObjectName("tools_dock")
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.tools_dock.setWidget(self.dockWidgetContents)
main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.tools_dock)
self.action_file = QtGui.QAction(main_window)
self.action_file.setObjectName("action_file")
self.action_new = QtGui.QAction(main_window)
self.action_new.setObjectName("action_new")
self.action_open = QtGui.QAction(main_window)
self.action_open.setObjectName("action_open")
self.action_save = QtGui.QAction(main_window)
self.action_save.setObjectName("action_save")
self.action_quit = QtGui.QAction(main_window)
self.action_quit.setObjectName("action_quit")
self.action_print_settings = QtGui.QAction(main_window)
self.action_print_settings.setObjectName("action_print_settings")
self.action_slice_settings = QtGui.QAction(main_window)
self.action_slice_settings.setObjectName("action_slice_settings")
self.action_help = QtGui.QAction(main_window)
self.action_help.setObjectName("action_help")
self.action_about = QtGui.QAction(main_window)
self.action_about.setObjectName("action_about")
self.action_display_settings = QtGui.QAction(main_window)
self.action_display_settings.setObjectName("action_display_settings")
self.action_slice = QtGui.QAction(main_window)
self.action_slice.setObjectName("action_slice")
self.action_Layers = QtGui.QAction(main_window)
self.action_Layers.setObjectName("action_Layers")
self.action_Toolbox = QtGui.QAction(main_window)
self.action_Toolbox.setObjectName("action_Toolbox")
self.menuFile.addAction(self.action_new)
self.menuFile.addAction(self.action_open)
self.menuFile.addAction(self.action_save)
self.menuFile.addSeparator()
self.menuFile.addAction(self.action_quit)
self.menu_Settings.addAction(self.action_print_settings)
self.menu_Settings.addAction(self.action_slice_settings)
self.menu_Settings.addAction(self.action_display_settings)
self.menu_Help.addAction(self.action_help)
self.menu_Help.addSeparator()
self.menu_Help.addAction(self.action_about)
self.menuActions.addAction(self.action_slice)
self.menu_Windows.addAction(self.action_Layers)
self.menu_Windows.addAction(self.action_Toolbox)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menu_edit.menuAction())
self.menubar.addAction(self.menuActions.menuAction())
self.menubar.addAction(self.menu_Settings.menuAction())
self.menubar.addAction(self.menu_Windows.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.retranslateUi(main_window)
self.object_tabs.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(main_window)
def retranslateUi(self, main_window):
main_window.setWindowTitle(QtGui.QApplication.translate("main_window", "Reticulatus", None, QtGui.QApplication.UnicodeUTF8))
self.layer_slider.setToolTip(QtGui.QApplication.translate("main_window", "Layer clip plane", None, QtGui.QApplication.UnicodeUTF8))
self.object_tabs.setTabText(self.object_tabs.indexOf(self.object_3d), QtGui.QApplication.translate("main_window", "3D Object", None, QtGui.QApplication.UnicodeUTF8))
self.object_tabs.setTabText(self.object_tabs.indexOf(self.gcode), QtGui.QApplication.translate("main_window", "GCode", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("main_window", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menu_edit.setTitle(QtGui.QApplication.translate("main_window", "&Edit", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Settings.setTitle(QtGui.QApplication.translate("main_window", "&Settings", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Help.setTitle(QtGui.QApplication.translate("main_window", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.menuActions.setTitle(QtGui.QApplication.translate("main_window", "&Actions", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Windows.setTitle(QtGui.QApplication.translate("main_window", "&Windows", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("main_window", "Layers", None, QtGui.QApplication.UnicodeUTF8))
self.action_file.setText(QtGui.QApplication.translate("main_window", "&file", None, QtGui.QApplication.UnicodeUTF8))
self.action_new.setText(QtGui.QApplication.translate("main_window", "&New", None, QtGui.QApplication.UnicodeUTF8))
self.action_open.setText(QtGui.QApplication.translate("main_window", "&Open", None, QtGui.QApplication.UnicodeUTF8))
self.action_save.setText(QtGui.QApplication.translate("main_window", "&Save", None, QtGui.QApplication.UnicodeUTF8))
self.action_quit.setText(QtGui.QApplication.translate("main_window", "&Quit", None, QtGui.QApplication.UnicodeUTF8))
self.action_print_settings.setText(QtGui.QApplication.translate("main_window", "&Printer", None, QtGui.QApplication.UnicodeUTF8))
self.action_slice_settings.setText(QtGui.QApplication.translate("main_window", "S&licing", None, QtGui.QApplication.UnicodeUTF8))
self.action_help.setText(QtGui.QApplication.translate("main_window", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.action_about.setText(QtGui.QApplication.translate("main_window", "&About", None, QtGui.QApplication.UnicodeUTF8))
self.action_display_settings.setText(QtGui.QApplication.translate("main_window", "&Display", None, QtGui.QApplication.UnicodeUTF8))
self.action_slice.setText(QtGui.QApplication.translate("main_window", "&Slice", None, QtGui.QApplication.UnicodeUTF8))
self.action_Layers.setText(QtGui.QApplication.translate("main_window", "&Layers", None, QtGui.QApplication.UnicodeUTF8))
self.action_Toolbox.setText(QtGui.QApplication.translate("main_window", "&Toolbox", None, QtGui.QApplication.UnicodeUTF8))
|
gpl-3.0
| -5,630,036,228,393,934,000
| 61.209302
| 173
| 0.717458
| false
| 3.835675
| false
| false
| false
|
mitodl/micromasters
|
cms/migrations/0025_infolinks.py
|
1
|
1226
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 22:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0024_programtabpage'),
]
operations = [
migrations.CreateModel(
name='InfoLinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('url', models.URLField(blank=True, help_text='A url for an external page. There will be a link to this url from the program page.', null=True)),
('title_url', models.TextField(blank=True, help_text='The text for the link to an external homepage.')),
('program_page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='info_links', to='cms.ProgramPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
bsd-3-clause
| -5,478,774,938,762,284,000
| 38.548387
| 161
| 0.604405
| false
| 4.127946
| false
| false
| false
|
vpelletier/neoppod
|
neo/master/backup_app.py
|
1
|
16200
|
#
# Copyright (C) 2012-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random, weakref
from bisect import bisect
from collections import defaultdict
from neo.lib import logging
from neo.lib.bootstrap import BootstrapManager
from neo.lib.exception import PrimaryFailure
from neo.lib.handler import EventHandler
from neo.lib.node import NodeManager
from neo.lib.protocol import CellStates, ClusterStates, \
NodeStates, NodeTypes, Packets, uuid_str, INVALID_TID, ZERO_TID
from neo.lib.util import add64, dump
from .app import StateChangedException
from .pt import PartitionTable
from .handlers.backup import BackupHandler
"""
Backup algorithm
This implementation relies on normal storage replication.
Storage nodes that are specialised for backup are not in the same NEO cluster,
but are managed by another master in a different cluster.
When the cluster is in BACKINGUP state, its master acts like a client to the
master of the main cluster. It gets notified of new data thanks to invalidation,
and notifies in turn its storage nodes what/when to replicate.
Storages stay in UP_TO_DATE state, even if partitions are synchronized up to
different tids. Storage nodes remember they are in such state and when
switching into RUNNING state, the cluster cuts the DB at the "backup TID", which
is the last TID for which we have all data. This TID can't be guessed from
'trans' and 'obj' tables, like it is done in normal mode, so:
- The master must even notify storages of transactions that don't modify their
partitions: see Replicate packets without any source.
- 'backup_tid' properties exist in many places, on the master and the storages,
so that the DB can be made consistent again at any moment, without losing
any (or little) data.
Out of backup storage nodes assigned to a partition, one is chosen as primary
for that partition. It means only this node will fetch data from the upstream
cluster, to minimize bandwidth between clusters. Other replicas will
synchronize from the primary node.
There is no UUID conflict between the 2 clusters:
- Storage nodes connect anonymously to upstream.
- Master node receives a new UUID from upstream master and uses it only when
communicating with it.
"""
class BackupApplication(object):
pt = None
def __init__(self, app, name, master_addresses):
self.app = weakref.proxy(app)
self.name = name
self.nm = NodeManager()
for master_address in master_addresses:
self.nm.createMaster(address=master_address)
em = property(lambda self: self.app.em)
ssl = property(lambda self: self.app.ssl)
def close(self):
self.nm.close()
del self.__dict__
def log(self):
self.nm.log()
if self.pt is not None:
self.pt.log()
def provideService(self):
logging.info('provide backup')
poll = self.em.poll
app = self.app
pt = app.pt
while True:
app.changeClusterState(ClusterStates.STARTING_BACKUP)
bootstrap = BootstrapManager(self, self.name, NodeTypes.CLIENT)
# {offset -> node}
self.primary_partition_dict = {}
# [[tid]]
self.tid_list = tuple([] for _ in xrange(pt.getPartitions()))
try:
while True:
for node in pt.getNodeSet(readable=True):
if not app.isStorageReady(node.getUUID()):
break
else:
break
poll(1)
node, conn, uuid, num_partitions, num_replicas = \
bootstrap.getPrimaryConnection()
try:
app.changeClusterState(ClusterStates.BACKINGUP)
del bootstrap, node
if num_partitions != pt.getPartitions():
raise RuntimeError("inconsistent number of partitions")
self.pt = PartitionTable(num_partitions, num_replicas)
conn.setHandler(BackupHandler(self))
conn.ask(Packets.AskNodeInformation())
conn.ask(Packets.AskPartitionTable())
conn.ask(Packets.AskLastTransaction())
# debug variable to log how big 'tid_list' can be.
self.debug_tid_count = 0
while True:
poll(1)
except PrimaryFailure, msg:
logging.error('upstream master is down: %s', msg)
finally:
app.backup_tid = pt.getBackupTid()
try:
conn.close()
except PrimaryFailure:
pass
try:
del self.pt
except AttributeError:
pass
except StateChangedException, e:
if e.args[0] != ClusterStates.STOPPING_BACKUP:
raise
app.changeClusterState(*e.args)
tid = app.backup_tid
# Wait for non-primary partitions to catch up,
# so that all UP_TO_DATE cells are really UP_TO_DATE.
# XXX: Another possibility could be to outdate such cells, and
# they would be quickly updated at the beginning of the
# RUNNING phase. This may simplify code.
# Any unfinished replication from upstream will be truncated.
while pt.getBackupTid(min) < tid:
poll(1)
last_tid = app.getLastTransaction()
handler = EventHandler(app)
if tid < last_tid:
assert tid != ZERO_TID
logging.warning("Truncating at %s (last_tid was %s)",
dump(app.backup_tid), dump(last_tid))
else:
# We will do a dummy truncation, just to leave backup mode,
# so it's fine to start automatically if there's any
# missing storage.
# XXX: Consider using another method to leave backup mode,
# at least when there's nothing to truncate. Because
# in case of StoppedOperation during VERIFYING state,
# this flag will be wrongly set to False.
app._startup_allowed = True
# If any error happened before reaching this line, we'd go back
# to backup mode, which is the right mode to recover.
del app.backup_tid
# Now back to RECOVERY...
return tid
finally:
del self.primary_partition_dict, self.tid_list
pt.clearReplicating()
def nodeLost(self, node):
getCellList = self.app.pt.getCellList
trigger_set = set()
for offset, primary_node in self.primary_partition_dict.items():
if primary_node is not node:
continue
cell_list = getCellList(offset, readable=True)
cell = max(cell_list, key=lambda cell: cell.backup_tid)
tid = cell.backup_tid
self.primary_partition_dict[offset] = primary_node = cell.getNode()
p = Packets.Replicate(tid, '', {offset: primary_node.getAddress()})
for cell in cell_list:
cell.replicating = tid
if cell.backup_tid < tid:
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(cell.getUUID()), offset, dump(tid),
uuid_str(primary_node.getUUID()))
cell.getNode().getConnection().notify(p)
trigger_set.add(primary_node)
for node in trigger_set:
self.triggerBackup(node)
def invalidatePartitions(self, tid, partition_set):
app = self.app
prev_tid = app.getLastTransaction()
app.setLastTransaction(tid)
pt = app.pt
trigger_set = set()
untouched_dict = defaultdict(dict)
for offset in xrange(pt.getPartitions()):
try:
last_max_tid = self.tid_list[offset][-1]
except IndexError:
last_max_tid = prev_tid
if offset in partition_set:
self.tid_list[offset].append(tid)
node_list = []
for cell in pt.getCellList(offset, readable=True):
node = cell.getNode()
assert node.isConnected(), node
if cell.backup_tid == prev_tid:
                        # Given 4 TIDs t0,t1,t2,t3: if a cell is only
# modified by t0 & t3 and has all data for t0, 4 values
# are possible for its 'backup_tid' until it replicates
# up to t3: t0, t1, t2 or t3 - 1
# Choosing the smallest one (t0) is easier to implement
# but when leaving backup mode, we would always lose
# data if the last full transaction does not modify
# all partitions. t1 is wrong for the same reason.
# So we have chosen the highest one (t3 - 1).
# t2 should also work but maybe harder to implement.
cell.backup_tid = add64(tid, -1)
logging.debug(
"partition %u: updating backup_tid of %r to %s",
offset, cell, dump(cell.backup_tid))
else:
assert cell.backup_tid < last_max_tid, (
cell.backup_tid, last_max_tid, prev_tid, tid)
if app.isStorageReady(node.getUUID()):
node_list.append(node)
assert node_list
trigger_set.update(node_list)
# Make sure we have a primary storage for this partition.
if offset not in self.primary_partition_dict:
self.primary_partition_dict[offset] = \
random.choice(node_list)
else:
# Partition not touched, so increase 'backup_tid' of all
# "up-to-date" replicas, without having to replicate.
for cell in pt.getCellList(offset, readable=True):
if last_max_tid <= cell.backup_tid:
cell.backup_tid = tid
untouched_dict[cell.getNode()][offset] = None
elif last_max_tid <= cell.replicating:
# Same for 'replicating' to avoid useless orders.
logging.debug("silently update replicating order"
" of %s for partition %u, up to %s",
uuid_str(cell.getUUID()), offset, dump(tid))
cell.replicating = tid
for node, untouched_dict in untouched_dict.iteritems():
if app.isStorageReady(node.getUUID()):
node.notify(Packets.Replicate(tid, '', untouched_dict))
for node in trigger_set:
self.triggerBackup(node)
count = sum(map(len, self.tid_list))
if self.debug_tid_count < count:
logging.debug("Maximum number of tracked tids: %u", count)
self.debug_tid_count = count
def triggerBackup(self, node):
tid_list = self.tid_list
tid = self.app.getLastTransaction()
replicate_list = []
for offset, cell in self.app.pt.iterNodeCell(node):
max_tid = tid_list[offset]
if max_tid and self.primary_partition_dict[offset] is node and \
max(cell.backup_tid, cell.replicating) < max_tid[-1]:
cell.replicating = tid
replicate_list.append(offset)
if not replicate_list:
return
getCellList = self.pt.getCellList
source_dict = {}
address_set = set()
for offset in replicate_list:
cell_list = getCellList(offset, readable=True)
random.shuffle(cell_list)
assert cell_list, offset
for cell in cell_list:
addr = cell.getAddress()
if addr in address_set:
break
else:
address_set.add(addr)
source_dict[offset] = addr
logging.debug("ask %s to replicate partition %u up to %s from %r",
uuid_str(node.getUUID()), offset, dump(tid), addr)
node.getConnection().notify(Packets.Replicate(
tid, self.name, source_dict))
def notifyReplicationDone(self, node, offset, tid):
app = self.app
cell = app.pt.getCell(offset, node.getUUID())
tid_list = self.tid_list[offset]
if tid_list: # may be empty if the cell is out-of-date
# or if we're not fully initialized
if tid < tid_list[0]:
cell.replicating = tid
else:
try:
tid = add64(tid_list[bisect(tid_list, tid)], -1)
except IndexError:
last_tid = app.getLastTransaction()
if tid < last_tid:
tid = last_tid
node.notify(Packets.Replicate(tid, '', {offset: None}))
logging.debug("partition %u: updating backup_tid of %r to %s",
offset, cell, dump(tid))
cell.backup_tid = tid
# Forget tids we won't need anymore.
cell_list = app.pt.getCellList(offset, readable=True)
del tid_list[:bisect(tid_list, min(x.backup_tid for x in cell_list))]
primary_node = self.primary_partition_dict.get(offset)
primary = primary_node is node
result = None if primary else app.pt.setUpToDate(node, offset)
assert cell.isReadable()
if result: # was out-of-date
if primary_node is not None:
max_tid, = [x.backup_tid for x in cell_list
if x.getNode() is primary_node]
if tid < max_tid:
cell.replicating = max_tid
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(node.getUUID()), offset, dump(max_tid),
uuid_str(primary_node.getUUID()))
node.notify(Packets.Replicate(max_tid, '',
{offset: primary_node.getAddress()}))
else:
if app.getClusterState() == ClusterStates.BACKINGUP:
self.triggerBackup(node)
if primary:
# Notify secondary storages that they can replicate from
# primary ones, even if they are already replicating.
p = Packets.Replicate(tid, '', {offset: node.getAddress()})
for cell in cell_list:
if max(cell.backup_tid, cell.replicating) < tid:
cell.replicating = tid
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(cell.getUUID()), offset,
dump(tid), uuid_str(node.getUUID()))
cell.getNode().notify(p)
return result
|
gpl-2.0
| -8,403,937,226,814,069,000
| 45.685879
| 80
| 0.555
| false
| 4.473902
| false
| false
| false
|
redhat-openstack/glance
|
glance/cmd/registry.py
|
1
|
2664
|
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Reference implementation server for Glance Registry
"""
import eventlet
import os
import sys
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
import osprofiler.notifier
import osprofiler.web
from glance.common import config
from glance.common import wsgi
from glance import notifier
from glance.openstack.common import log
from glance.openstack.common import systemd
CONF = cfg.CONF
CONF.import_group("profiler", "glance.common.wsgi")
def main():
try:
config.parse_args()
wsgi.set_eventlet_hub()
log.setup('glance')
if cfg.CONF.profiler.enabled:
_notifier = osprofiler.notifier.create("Messaging",
notifier.messaging, {},
notifier.get_transport(),
"glance", "registry",
cfg.CONF.bind_host)
osprofiler.notifier.set(_notifier)
else:
osprofiler.web.disable()
server = wsgi.Server()
server.start(config.load_paste_app('glance-registry'),
default_port=9191)
systemd.notify_once()
server.wait()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
|
apache-2.0
| -6,918,632,415,636,315,000
| 32.3
| 78
| 0.626502
| false
| 4.188679
| false
| false
| false
|
adusca/treeherder
|
treeherder/perf/models.py
|
1
|
2417
|
from django.core.validators import MinLengthValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from treeherder.model.models import (MachinePlatform,
OptionCollection,
Repository)
SIGNATURE_HASH_LENGTH = 40L
@python_2_unicode_compatible
class PerformanceFramework(models.Model):
name = models.SlugField(max_length=255L, unique=True)
class Meta:
db_table = 'performance_framework'
def __str__(self):
return self.name
@python_2_unicode_compatible
class PerformanceSignature(models.Model):
signature_hash = models.CharField(max_length=SIGNATURE_HASH_LENGTH,
validators=[
MinLengthValidator(SIGNATURE_HASH_LENGTH)
],
unique=True,
db_index=True)
framework = models.ForeignKey(PerformanceFramework)
platform = models.ForeignKey(MachinePlatform)
option_collection = models.ForeignKey(OptionCollection)
suite = models.CharField(max_length=80L)
test = models.CharField(max_length=80L, blank=True)
# extra properties to distinguish the test (that don't fit into
# option collection for whatever reason)
extra_properties = JSONField(max_length=1024)
class Meta:
db_table = 'performance_signature'
def __str__(self):
return self.signature_hash
@python_2_unicode_compatible
class PerformanceDatum(models.Model):
repository = models.ForeignKey(Repository)
job_id = models.PositiveIntegerField(db_index=True)
result_set_id = models.PositiveIntegerField(db_index=True)
signature = models.ForeignKey(PerformanceSignature)
value = models.FloatField()
push_timestamp = models.DateTimeField(db_index=True)
class Meta:
db_table = 'performance_datum'
index_together = [('repository', 'signature', 'push_timestamp'),
('repository', 'job_id'),
('repository', 'result_set_id')]
unique_together = ('repository', 'job_id', 'result_set_id',
'signature', 'push_timestamp')
def __str__(self):
return "{} {}".format(self.value, self.push_timestamp)
|
mpl-2.0
| -5,983,352,198,108,781,000
| 33.042254
| 83
| 0.620604
| false
| 4.621415
| false
| false
| false
|
lovetox/gajim
|
src/common/crypto.py
|
1
|
4823
|
# common crypto functions (mostly specific to XEP-0116, but useful elsewhere)
# -*- coding:utf-8 -*-
## src/common/crypto.py
##
## Copyright (C) 2007 Brendan Taylor <whateley AT gmail.com>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import sys
import os
import math
from hashlib import sha256 as SHA256
# convert a large integer to a big-endian bitstring
def encode_mpi(n):
if n >= 256:
return encode_mpi(n // 256) + bytes([n % 256])
else:
return bytes([n])
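# Illustrative example: encode_mpi(1000) == b'\x03\xe8',
# since 1000 == 3 * 256 + 232.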
# convert a large integer to a big-endian bitstring, padded with \x00s to
# a multiple of 16 bytes
def encode_mpi_with_padding(n):
return pad_to_multiple(encode_mpi(n), 16, '\x00', True)
# pad 'string' to a multiple of 'multiple_of' with 'char'.
# pad on the left if 'left', otherwise pad on the right.
def pad_to_multiple(string, multiple_of, char, left):
mod = len(string) % multiple_of
if mod == 0:
return string
else:
padding = (multiple_of - mod) * char
if left:
return padding + string
else:
return string + padding
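# Illustrative example: pad_to_multiple('abcde', 4, '0', True) == '000abcde'
# (three pad characters bring the length up to the next multiple of 4).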
# convert a big-endian bitstring to an integer
def decode_mpi(s):
if len(s) == 0:
return 0
else:
return 256 * decode_mpi(s[:-1]) + s[-1]
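# Illustrative example: decode_mpi(b'\x03\xe8') == 1000,
# the inverse of encode_mpi(1000) above.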
def sha256(string):
sh = SHA256()
sh.update(string)
return sh.digest()
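# Illustrative example: sha256(b'').hex() starts with 'e3b0c442',
# the well-known SHA-256 digest of the empty byte string.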
base28_chr = "acdefghikmopqruvwxy123456789"
def sas_28x5(m_a, form_b):
sha = sha256(m_a + form_b + b'Short Authentication String')
lsb24 = decode_mpi(sha[-3:])
return base28(lsb24)
def base28(n):
if n >= 28:
return base28(n // 28) + base28_chr[n % 28]
else:
return base28_chr[n]
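# Illustrative example: base28(0) == 'a' and base28(29) == 'cc'
# (29 == 1 * 28 + 1, and index 1 in base28_chr is 'c').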
def add_entropy_sources_OpenSSL():
    # Other possibly variable data. These are very low quality sources of
# entropy, but some of them are installation dependent and can be hard
# to guess for the attacker.
# Data available on all platforms Unix, Windows
sources = [sys.argv, sys.builtin_module_names,
sys.copyright, sys.getfilesystemencoding(), sys.hexversion,
sys.modules, sys.path, sys.version, sys.api_version,
os.environ, os.getcwd(), os.getpid()]
for s in sources:
OpenSSL.rand.add(str(s).encode('utf-8'), 1)
# On Windows add the current contents of the screen to the PRNG state.
# if os.name == 'nt':
# OpenSSL.rand.screen()
# The /proc filesystem on POSIX systems contains many random variables:
# memory statistics, interrupt counts, network packet counts
if os.name == 'posix':
dirs = ['/proc', '/proc/net', '/proc/self']
for d in dirs:
if os.access(d, os.R_OK):
for filename in os.listdir(d):
OpenSSL.rand.add(filename.encode('utf-8'), 0)
try:
with open(d + os.sep + filename, "r") as fp:
                            # Limit the amount of read bytes, in case a memory
# file was opened
OpenSSL.rand.add(str(fp.read(5000)).encode('utf-8'),
1)
except:
# Ignore all read and access errors
pass
PYOPENSSL_PRNG_PRESENT = False
try:
import OpenSSL.rand
PYOPENSSL_PRNG_PRESENT = True
except ImportError:
# PyOpenSSL PRNG not available
pass
def random_bytes(bytes_):
if PYOPENSSL_PRNG_PRESENT:
OpenSSL.rand.add(os.urandom(bytes_), bytes_)
return OpenSSL.rand.bytes(bytes_)
else:
return os.urandom(bytes_)
def generate_nonce():
return random_bytes(8)
# generate a random number between 'bottom' and 'top'
def srand(bottom, top):
# minimum number of bytes needed to represent that range
bytes = int(math.ceil(math.log(top - bottom, 256)))
# in retrospect, this is horribly inadequate.
return (decode_mpi(random_bytes(bytes)) % (top - bottom)) + bottom
# a faster version of (base ** exp) % mod
# taken from <http://lists.danga.com/pipermail/yadis/2005-September/001445.html>
def powmod(base, exp, mod):
square = base % mod
result = 1
while exp > 0:
if exp & 1: # exponent is odd
result = (result * square) % mod
square = (square * square) % mod
exp //= 2
return result
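# Illustrative example: powmod(5, 3, 7) == 6, the same value as (5 ** 3) % 7
# but computed without ever building the full power.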
|
gpl-3.0
| 1,936,273,987,310,581,000
| 30.940397
| 94
| 0.6243
| false
| 3.601942
| false
| false
| false
|
python-dirbtuves/it-brandos-egzaminai
|
exams/E2018/pagrindinis/u2/u2.py
|
1
|
1377
|
from itertools import islice
from pathlib import Path
from typing import Dict
def seconds(v: int, m: int, s: int) -> int:
    # This function converts hours, minutes and seconds to seconds.
return v * 3600 + m * 60 + s
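# Illustrative example: seconds(1, 2, 3) == 3723 (1 h, 2 min, 3 s).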
def save_results(path: Path, pabaiga: Dict[str, int]) -> None:
with path.open('w') as f:
        # Sort the skiers by time and name.
for laikas, slidininkas in sorted((v, k) for k, v in pabaiga.items()):
            # Convert the seconds into minutes and seconds.
m, s = divmod(laikas, 60)
print(f'{slidininkas:<20}{m} {s}', file=f)
def main(path: Path) -> None:
startas: Dict[str, int] = {}
pabaiga: Dict[str, int] = {}
with open(path / 'U2.txt') as f:
        # Read the start data.
n = int(next(f))
for eilute in islice(f, n):
slidininkas = eilute[:20]
laikas = map(int, eilute[20:].split())
startas[slidininkas] = seconds(*laikas)
        # Read the finish data.
m = int(next(f))
for eilute in islice(f, m):
slidininkas = eilute[:20]
laikas = map(int, eilute[20:].split())
            # Record how long, in seconds, it took the skier to reach the finish.
pabaiga[slidininkas] = seconds(*laikas) - startas[slidininkas]
save_results(path / 'U2rez.txt', pabaiga)
|
agpl-3.0
| -4,870,201,727,215,256,000
| 33.075
| 78
| 0.590609
| false
| 2.720559
| false
| false
| false
|
Woraufhin/logic
|
formula.py
|
1
|
1112
|
import itertools
import string
from abc import ABCMeta, abstractproperty
import attr
def is_valid_formula(inst, attr, value):
if not isinstance(value, (Formula, str)):
raise ValueError('{} is not a valid formula type.'.format(value))
class Formula(object):
__metaclass__ = ABCMeta
group = {'open': '(', 'close': ')'}
@abstractproperty
def token(self):
pass
@attr.s
class Atomic(Formula):
token = list(itertools.chain.from_iterable(
[string.uppercase, string.lowercase]))
exp = attr.ib(validator=is_valid_formula)
@attr.s
class And(Formula):
token = ['^', '&']
left = attr.ib(validator=is_valid_formula)
right = attr.ib(validator=is_valid_formula)
@attr.s
class Or(Formula):
token = ['|']
left = attr.ib(validator=is_valid_formula)
right = attr.ib(validator=is_valid_formula)
@attr.s
class Imply(Formula):
token = ['>']
left = attr.ib(validator=is_valid_formula)
right = attr.ib(validator=is_valid_formula)
@attr.s
class Not(Formula):
token = ['~']
exp = attr.ib(validator=is_valid_formula)
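# Illustrative usage: And(Atomic('p'), Not(Atomic('q'))) builds an
# object tree for the formula p ^ ~q.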
|
mit
| 1,520,875,086,732,139,300
| 19.592593
| 73
| 0.642986
| false
| 3.379939
| false
| false
| false
|
googleapis/python-dataflow-client
|
google/cloud/dataflow_v1beta3/types/snapshots.py
|
1
|
5677
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.dataflow.v1beta3",
manifest={
"SnapshotState",
"PubsubSnapshotMetadata",
"Snapshot",
"GetSnapshotRequest",
"DeleteSnapshotRequest",
"DeleteSnapshotResponse",
"ListSnapshotsRequest",
"ListSnapshotsResponse",
},
)
class SnapshotState(proto.Enum):
r"""Snapshot state."""
UNKNOWN_SNAPSHOT_STATE = 0
PENDING = 1
RUNNING = 2
READY = 3
FAILED = 4
DELETED = 5
class PubsubSnapshotMetadata(proto.Message):
r"""Represents a Pubsub snapshot.
Attributes:
topic_name (str):
The name of the Pubsub topic.
snapshot_name (str):
The name of the Pubsub snapshot.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
The expire time of the Pubsub snapshot.
"""
topic_name = proto.Field(proto.STRING, number=1,)
snapshot_name = proto.Field(proto.STRING, number=2,)
expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
class Snapshot(proto.Message):
r"""Represents a snapshot of a job.
Attributes:
id (str):
The unique ID of this snapshot.
project_id (str):
The project this snapshot belongs to.
source_job_id (str):
The job this snapshot was created from.
creation_time (google.protobuf.timestamp_pb2.Timestamp):
The time this snapshot was created.
ttl (google.protobuf.duration_pb2.Duration):
The time after which this snapshot will be
automatically deleted.
state (google.cloud.dataflow_v1beta3.types.SnapshotState):
State of the snapshot.
pubsub_metadata (Sequence[google.cloud.dataflow_v1beta3.types.PubsubSnapshotMetadata]):
PubSub snapshot metadata.
description (str):
User specified description of the snapshot.
Maybe empty.
disk_size_bytes (int):
The disk byte size of the snapshot. Only
available for snapshots in READY state.
region (str):
Cloud region where this snapshot lives in,
e.g., "us-central1".
"""
id = proto.Field(proto.STRING, number=1,)
project_id = proto.Field(proto.STRING, number=2,)
source_job_id = proto.Field(proto.STRING, number=3,)
creation_time = proto.Field(
proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
)
ttl = proto.Field(proto.MESSAGE, number=5, message=duration_pb2.Duration,)
state = proto.Field(proto.ENUM, number=6, enum="SnapshotState",)
pubsub_metadata = proto.RepeatedField(
proto.MESSAGE, number=7, message="PubsubSnapshotMetadata",
)
description = proto.Field(proto.STRING, number=8,)
disk_size_bytes = proto.Field(proto.INT64, number=9,)
region = proto.Field(proto.STRING, number=10,)
class GetSnapshotRequest(proto.Message):
r"""Request to get information about a snapshot
Attributes:
project_id (str):
The ID of the Cloud Platform project that the
snapshot belongs to.
snapshot_id (str):
The ID of the snapshot.
location (str):
The location that contains this snapshot.
"""
project_id = proto.Field(proto.STRING, number=1,)
snapshot_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
class DeleteSnapshotRequest(proto.Message):
r"""Request to delete a snapshot.
Attributes:
project_id (str):
The ID of the Cloud Platform project that the
snapshot belongs to.
snapshot_id (str):
The ID of the snapshot.
location (str):
The location that contains this snapshot.
"""
project_id = proto.Field(proto.STRING, number=1,)
snapshot_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
class DeleteSnapshotResponse(proto.Message):
r"""Response from deleting a snapshot. """
class ListSnapshotsRequest(proto.Message):
r"""Request to list snapshots.
Attributes:
project_id (str):
The project ID to list snapshots for.
job_id (str):
If specified, list snapshots created from
this job.
location (str):
The location to list snapshots in.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=3,)
location = proto.Field(proto.STRING, number=2,)
class ListSnapshotsResponse(proto.Message):
r"""List of snapshots.
Attributes:
snapshots (Sequence[google.cloud.dataflow_v1beta3.types.Snapshot]):
Returned snapshots.
"""
snapshots = proto.RepeatedField(proto.MESSAGE, number=1, message="Snapshot",)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| 4,473,963,810,072,339,500
| 31.815029
| 95
| 0.648758
| false
| 4.084173
| false
| false
| false
|
invariantor/ImageSplit-Classification
|
image split and classification/image_split.py
|
1
|
6276
|
import numpy as np
import pylab
import mahotas as mh
import types
# constants
upper_distance = 100 #the start searching
approxWidth = 40
threshold = 300
border = 1
def pre_process(image):
"""
    pre_process returns a black-and-white image, given a color image as input.
"""
T = mh.thresholding.otsu(image)
image1 =image > T
image2 = [[0]* image1.shape[1] for i in range(image1.shape[0])]
for i in range(image1.shape[0]):
for j in range(image1.shape[1]):
if (image1[i][j] != [0,0,0]).any():
image2[i][j] = 1
image2 = np.array(image2, dtype = np.uint8)
return image2
def locate(image):
"""
    Given a screenshot as input, return the position of the matching game,
    as well as the size of the game (num_x, num_y)
    and the size of each grid (size_x, size_y).
"""
image = pre_process(image)
height,width = image.shape
# stop going down when a grid is found
up = upper_distance
while True:
num_white =0
for j in range(width):
num_white+=image[up][j]
if num_white>(approxWidth/2):
break
up +=1
# stop going up when a grid is found
down = height-1
pre_num_white =0 #the number of white pixels in the last step
for j in range(width):
pre_num_white+=image[down][j]
while True:
num_white =0
for j in range(width):
num_white+=image[down][j]
if num_white-pre_num_white>(approxWidth/2):
break
pre_num_white = num_white
down -=1
current_image = image[up:]
"""cut the top part(including the time bar, all sorts of buttons) away
which will interfere with our searching process"""
current_image = np.array(current_image)
c_height,c_width = current_image.shape
# stop going right when a grid is found
left = 0
pre_num_white =0
for i in range(c_height):
pre_num_white+=current_image[i][left]
while True:
num_white =0
for i in range(c_height):
num_white+=current_image[i][left]
if num_white-pre_num_white>(approxWidth/2):
break
pre_num_white = num_white
left +=1
# stop going left when a grid is found
right = c_width-1
pre_num_white =0
for i in range(c_height):
pre_num_white+=current_image[i][right]
while True:
num_white =0
for i in range(c_height):
num_white+=current_image[i][right]
if num_white-pre_num_white>(approxWidth/2):
break
pre_num_white = num_white
right -=1
temp = [0]*(down+1-up)
for i in range(len(temp)):
temp[i] = current_image[i][left:right+1]
current_image = np.array(temp)
height,width = current_image.shape
divd_x = []
for i in range(height):
num_white = sum(current_image[i])
if num_white < approxWidth/2:
divd_x.append(i)
temp_x = [divd_x[i] for i in range(len(divd_x)) if ((i==0) or (i==len(divd_x)-1)) or not (divd_x[i-1]+1==divd_x[i] and divd_x[i+1]-1==divd_x[i])]
# only keep the truly dividing lines, namely those marginal lines.
divd_x =temp_x
divd_y = []
for j in range(width):
num_white = 0
for i in range(height):
num_white += current_image[i][j]
if num_white < approxWidth/2:
divd_y.append(j)
temp_y = [divd_y[i] for i in range(len(divd_y)) if ((i==0) or (i==len(divd_y)-1)) or not (divd_y[i-1]+1==divd_y[i] and divd_y[i+1]-1==divd_y[i])]
# only keep the truly dividing lines, namely those marginal lines.
divd_y = temp_y
#print divd_x
#print divd_y
"""
This part needs further refinement.
"""
if len(divd_x):
size_x = divd_x[0]
num_x = divd_x[-1] / size_x +1
else:
size_x = height - 1
num_x = 1
if len(divd_y):
size_y = divd_y[0]
num_y = divd_y[-1] / size_y +1
else:
size_y = height - 1
num_y = 1
position = (up,down,left,right)
info = (size_x,size_y,num_x,num_y)
return position, info
def split(image,position,info):
"""
Return a 2d matrix label, which labels different kinds of grids using natural numbers.
(By convention, the empty grid is labeled 0)
"""
size_x, size_y, num_x, num_y = info
up, down, left, right = position
T = mh.thresholding.otsu(image)
image = image >T
temp = [0]* (down+1-up)
for i in range(len(temp)):
temp[i] = image[up+i][left:right+1]
temp = np.array(temp)
image = temp
game = [[0]* num_y for j in range(num_x)]
for i in range(num_x):
for j in range(num_y):
grid = [0]* size_x
for k in range(size_x):
grid[k] = image[i*(size_x+1)+k][j*(size_y+1):(j+1)*(size_y+1)-1]
game[i][j] = grid
# using a quite naive method -- calculating the statistical distance between two grids
# improvement is needed here, to speed up the program
black = [[[0]*3]*size_y]*size_x
records = [black]
label = [[0]* num_y for j in range(num_x)]
for i in range(num_x):
for j in range(num_y):
find = False
for index in range(len(records)):
if distance(records[index],game[i][j])< threshold:
label[i][j] = index
find =True
break
if not find:
records.append(game[i][j])
label[i][j] = len(records)-1
return label
def distance(a1,a2):
"""
recursively calculate the distance between a1 and a2
"""
if (type(a1)== np.uint8) or (type(a1) == types.IntType) or (type(a1)==np.bool_):
return abs(int(a1)-int(a2))
if len(a1)!= len(a2):
print "Wrong Format","len(a1)=",len(a1),"len(a2)=",len(a2)
return
dis =0
for i in range(len(a1)):
dis += distance(a1[i],a2[i])
return dis
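# Illustrative example: distance([1, 2], [3, 5]) == 5, i.e. |1 - 3| + |2 - 5|.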
|
mit
| -6,850,167,160,207,502,000
| 28.608491
| 149
| 0.53362
| false
| 3.273865
| false
| false
| false
|
Maselkov/GW2Bot
|
guildwars2/evtc.py
|
1
|
12990
|
import datetime
import aiohttp
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from .exceptions import APIError
from .utils.chat import (embed_list_lines, en_space, magic_space,
zero_width_space)
UTC_TZ = datetime.timezone.utc
BASE_URL = "https://dps.report/"
UPLOAD_URL = BASE_URL + "uploadContent"
JSON_URL = BASE_URL + "getJson"
TOKEN_URL = BASE_URL + "getUserToken"
ALLOWED_FORMATS = (".evtc", ".zevtc", ".zip")
class EvtcMixin:
async def get_dpsreport_usertoken(self, user):
doc = await self.bot.database.get(user, self)
token = doc.get("dpsreport_token")
if not token:
try:
async with self.session.get(TOKEN_URL) as r:
data = await r.json()
token = data["userToken"]
await self.bot.database.set(
user, {"dpsreport_token": token}, self)
return token
except:
return None
async def upload_log(self, file, user):
params = {"json": 1}
token = await self.get_dpsreport_usertoken(user)
if token:
params["userToken"] = token
data = aiohttp.FormData()
data.add_field("file", await file.read(), filename=file.filename)
async with self.session.post(
UPLOAD_URL, data=data, params=params) as r:
resp = await r.json()
error = resp["error"]
if error:
raise APIError(error)
return resp
async def find_duplicate_dps_report(self, doc):
margin_of_error = datetime.timedelta(seconds=10)
doc = await self.db.encounters.find_one({
"boss_id": doc["boss_id"],
"players": {
"$eq": doc["players"]
},
"date": {
"$gte": doc["date"] - margin_of_error,
"$lt": doc["date"] + margin_of_error
},
"start_date": {
"$gte": doc["start_date"] - margin_of_error,
"$lt": doc["start_date"] + margin_of_error
},
})
return True if doc else False
async def upload_embed(self, ctx, result):
if not result["encounter"]["jsonAvailable"]:
return None
async with self.session.get(
JSON_URL, params={"id": result["id"]}) as r:
data = await r.json()
lines = []
targets = data["phases"][0]["targets"]
group_dps = 0
for target in targets:
group_dps += sum(
p["dpsTargets"][target][0]["dps"] for p in data["players"])
def get_graph(percentage):
bar_count = round(percentage / 5)
bars = ""
bars += "▀" * bar_count
bars += "━" * (20 - bar_count)
return bars
def get_dps(player):
bars = ""
dps = player["dps"]
if not group_dps or not dps:
percentage = 0
else:
percentage = round(100 / group_dps * dps)
bars = get_graph(percentage)
bars += f"` **{dps}** DPS | **{percentage}%** of group DPS"
return bars
players = []
for player in data["players"]:
dps = 0
for target in targets:
dps += player["dpsTargets"][target][0]["dps"]
player["dps"] = dps
players.append(player)
players.sort(key=lambda p: p["dps"], reverse=True)
for player in players:
down_count = player["defenses"][0]["downCount"]
prof = self.get_emoji(ctx, player["profession"])
line = f"{prof} **{player['name']}** *({player['account']})*"
if down_count:
line += (f" | {self.get_emoji(ctx, 'downed')}Downed "
f"count: **{down_count}**")
lines.append(line)
dpses = []
charater_name_max_length = 19
for player in players:
line = self.get_emoji(ctx, player["profession"])
align = (charater_name_max_length - len(player["name"])) * " "
line += "`" + player["name"] + align + get_dps(player)
dpses.append(line)
dpses.append(f"> Group DPS: **{group_dps}**")
color = discord.Color.green(
) if data["success"] else discord.Color.red()
minutes, seconds = data["duration"].split()[:2]
minutes = int(minutes[:-1])
seconds = int(seconds[:-1])
duration_time = (minutes * 60) + seconds
duration = f"**{minutes}** minutes, **{seconds}** seconds"
embed = discord.Embed(
title="DPS Report",
description="Encounter duration: " + duration,
url=result["permalink"],
color=color)
boss_lines = []
for target in targets:
target = data["targets"][target]
if data["success"]:
health_left = 0
else:
percent_burned = target["healthPercentBurned"]
health_left = 100 - percent_burned
health_left = round(health_left, 2)
if len(targets) > 1:
boss_lines.append(f"**{target['name']}**")
boss_lines.append(f"Health: **{health_left}%**")
boss_lines.append(get_graph(health_left))
embed.add_field(name="> **BOSS**", value="\n".join(boss_lines))
buff_lines = []
sought_buffs = ["Might", "Fury", "Quickness", "Alacrity"]
buffs = []
for buff in sought_buffs:
for key, value in data["buffMap"].items():
if value["name"] == buff:
buffs.append({
"name": value["name"],
"id": int(key[1:]),
"stacking": value["stacking"]
})
break
separator = 2 * en_space
line = zero_width_space + (en_space * (charater_name_max_length + 6))
for buff in sought_buffs:
line += self.get_emoji(
ctx, buff, fallback=True,
fallback_fmt="{:1.1}") + f"{separator}{2 * en_space}"
buff_lines.append(line)
groups = []
for player in players:
if player["group"] not in groups:
groups.append(player["group"])
if len(groups) > 1:
players.sort(key=lambda p: p["group"])
current_group = None
for player in players:
if "buffUptimes" not in player:
continue
if len(groups) > 1:
if not current_group or player["group"] != current_group:
current_group = player["group"]
buff_lines.append(f"> **GROUP {current_group}**")
line = "`"
line = self.get_emoji(ctx, player["profession"])
align = (3 + charater_name_max_length - len(player["name"])) * " "
line += "`" + player["name"] + align
for buff in buffs:
for buff_uptime in player["buffUptimes"]:
if buff["id"] == buff_uptime["id"]:
uptime = str(buff_uptime["buffData"][0]["uptime"])
break
else:
uptime = "0"
if not buff["stacking"]:
uptime += "%"
line += uptime
line += separator + ((6 - len(uptime)) * magic_space)
line += '`'
buff_lines.append(line)
embed = embed_list_lines(embed, lines, "> **PLAYERS**")
embed = embed_list_lines(embed, dpses, "> **DPS**")
embed = embed_list_lines(embed, buff_lines, "> **BUFFS**")
boss = self.gamedata["bosses"].get(str(result["encounter"]["bossId"]))
date_format = "%Y-%m-%d %H:%M:%S %z"
date = datetime.datetime.strptime(data["timeEnd"] + "00", date_format)
start_date = datetime.datetime.strptime(data["timeStart"] + "00",
date_format)
date = date.astimezone(datetime.timezone.utc)
start_date = start_date.astimezone(datetime.timezone.utc)
doc = {
"boss_id": result["encounter"]["bossId"],
"start_date": start_date,
"date": date,
"players":
sorted([player["account"] for player in data["players"]]),
"permalink": result["permalink"],
"success": data["success"],
"duration": duration_time
}
duplicate = await self.find_duplicate_dps_report(doc)
if not duplicate:
await self.db.encounters.insert_one(doc)
embed.timestamp = date
embed.set_footer(text="Recorded at", icon_url=self.bot.user.avatar_url)
if boss:
embed.set_author(name=data["fightName"], icon_url=boss["icon"])
return embed
@commands.group(case_insensitive=True)
async def evtc(self, ctx):
"""Process an EVTC combat log or enable automatic processing
Simply upload your file and in the "add a comment" field type $evtc,
in other words invoke this command while uploading a file.
Use this command ($evtc) without uploading a file to see other commands
Accepted formats are: .evtc, .zevtc, .zip
It's highly recommended to enable compression in your Arc settings.
        With the setting enabled, log sizes will rarely, if ever, exceed
        the Discord upload limit
"""
if ctx.invoked_subcommand is None and not ctx.message.attachments:
return await ctx.send_help(ctx.command)
for attachment in ctx.message.attachments:
if attachment.filename.endswith(ALLOWED_FORMATS):
break
else:
return await ctx.send_help(ctx.command)
if ctx.guild:
doc = await self.bot.database.get(ctx.channel, self)
settings = doc.get("evtc", {})
enabled = settings.get("enabled")
if not ctx.channel.permissions_for(ctx.me).embed_links:
return await ctx.send(
"I need embed links permission to process logs.")
if enabled:
return
await self.process_evtc(ctx.message)
@commands.cooldown(1, 5, BucketType.guild)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@evtc.command(name="channel")
async def evtc_channel(self, ctx):
"""Sets this channel to be automatically used to process logs"""
doc = await self.bot.database.get(ctx.channel, self)
enabled = not doc.get("evtc.enabled", False)
await self.bot.database.set(ctx.channel, {"evtc.enabled": enabled},
self)
if enabled:
msg = ("Automatic EVTC processing enabled. Simply upload the file "
"wish to be processed in this channel. Accepted "
"formats: `.evtc`, `.zevtc`, `.zip` ")
if not ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send("I won't be able to process logs without Embed "
"Links permission.")
else:
msg = ("Automatic EVTC processing diasbled")
await ctx.send(msg)
async def process_evtc(self, message):
embeds = []
prompt = await message.channel.send("Processing logs... " +
self.get_emoji(message, "loading"))
for attachment in message.attachments:
if attachment.filename.endswith(ALLOWED_FORMATS):
try:
resp = await self.upload_log(attachment, message.author)
embeds.append(await self.upload_embed(message, resp))
except Exception as e:
self.log.exception(
"Exception processing EVTC log ", exc_info=e)
return await prompt.edit(
content="Error processing your log! :x:")
for embed in embeds:
await message.channel.send(embed=embed)
try:
await prompt.delete()
await message.delete()
except discord.HTTPException:
pass
@commands.Cog.listener()
async def on_message(self, message):
if not message.attachments:
return
if not message.guild:
return
for attachment in message.attachments:
if attachment.filename.endswith(ALLOWED_FORMATS):
break
else:
return
doc = await self.bot.database.get(message.channel, self)
settings = doc.get("evtc", {})
enabled = settings.get("enabled")
if not enabled:
return
await self.process_evtc(message)
|
mit
| -8,968,317,482,185,168,000
| 39.702194
| 79
| 0.522027
| false
| 4.15488
| false
| false
| false
|
mit-ll/LO-PHI
|
lophi-automation/lophi_automation/dataconsumers/logudp.py
|
1
|
1294
|
"""
Class to handle logging over UDP
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import socket
import logging
logger = logging.getLogger(__name__)
class LogUDP:
def __init__(self,address,port):
"""
        Initialize our UDP logger
@param address: Address of remote server
@param port: port of listening server
"""
self.address = address
self.port = port
self.SOCK = None
self.connected = False
def _connect(self):
"""
Create our socket
"""
if self.connected:
return True
try:
self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.connected = True
return True
except:
logger.error("Could not open UDP socket")
return False
def append(self, data):
"""
Write raw data to the UDP socket
@param data: Data to be written to the UDP socket
"""
assert self._connect()
try:
self.SOCK.sendto(data,(self.address,self.port))
except:
logger.error("Could not send UDP packet")
|
bsd-3-clause
| -5,603,330,343,461,050,000
| 22.125
| 72
| 0.506955
| false
| 4.957854
| false
| false
| false
|
jprawiharjo/Nerddit
|
Storm/Streaming/Push_to_Cassandra_Bolt.py
|
1
|
3976
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 13:37:20 2016
@author: jprawiharjo
"""
from cassandra.cluster import Cluster
import cassandra
from collections import namedtuple
from pyleus.storm import SimpleBolt
from Streaming.Doc_Processor import DataFrame
import logging
log = logging.getLogger('cassandra_bolt')
# create CassandraCluster
CassandraCluster = Cluster(["ec2-52-27-157-187.us-west-2.compute.amazonaws.com",
"ec2-52-34-178-13.us-west-2.compute.amazonaws.com",
"ec2-52-35-186-215.us-west-2.compute.amazonaws.com",
'ec2-52-10-19-240.us-west-2.compute.amazonaws.com'])
keyspace = 'wikidata'
tablename = "titlelinks"
class Push_to_Cassandra(SimpleBolt):
def initialize(self):
self.session = CassandraCluster.connect(keyspace)
self.session.default_consistency_level = cassandra.ConsistencyLevel.ALL
#self.session.encoder.mapping[tuple] = self.session.encoder.cql_encode_set_collection
queryAddNew1 = "INSERT INTO {} (id, title, linksto) VALUES (?, ?, ?) IF NOT EXISTS".format(tablename)
self.preparedAddNew1 = self.session.prepare(queryAddNew1)
queryAddNew2 = "INSERT INTO {} (id, title, linksto, referredby) VALUES (?, ?, ?, ?) IF NOT EXISTS".format(tablename)
self.preparedAddNew2 = self.session.prepare(queryAddNew2)
queryUpdateReferredbyTitle = "UPDATE {} SET id = ?, linksto = ? WHERE title = ? IF EXISTS".format(tablename)
self.preparedReferredbyTitle = self.session.prepare(queryUpdateReferredbyTitle)
queryUpdateReferredbyOnly = "UPDATE {} SET referredby = referredby + ? WHERE title = ? IF EXISTS".format(tablename)
self.preparedReferredbyOnly = self.session.prepare(queryUpdateReferredbyOnly)
queryAddNewReferredBy = "INSERT INTO {} (title, referredby) VALUES (?, ?) IF NOT EXISTS".format(tablename)
self.preparedAddNewReferredBy = self.session.prepare(queryAddNewReferredBy)
self.bulk_data = []
log.debug("Initialized")
def process_tick(self):
log.debug("Process Tick")
log.debug(len(self.bulk_data))
linkage = {}
for row in self.bulk_data:
if len(row.Links) > 0:
log.debug('Processing Links')
for link in row.Links:
if link in linkage.keys():
linkage[link].add(row.Title)
else:
linkage[link] = set([row.Title])
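        # Illustrative example of the `linkage` mapping built above (titles are
        # hypothetical): if page "A" links to "B" and "C" and page "D" links to
        # "B", then linkage == {"B": {"A", "D"}, "C": {"A"}}.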
for row in self.bulk_data:
log.debug(row.Title)
if row.Title in linkage.keys():
bound1 = self.preparedAddNew2.bind((str(row.Id), str(row.Title), row.Links, linkage[row.Title]))
else:
bound1 = self.preparedAddNew1.bind((str(row.Id), str(row.Title), row.Links))
res = self.session.execute(bound1)
res = res.current_rows[0].applied
#log.debug("Insertion Result = " + str(res))
if not(res):
bound2 = self.preparedReferredbyTitle.bind((str(row.Id), row.Links, str(row.Title)))
self.session.execute_async(bound2)
#Inserting into database
for k,v in linkage.iteritems():
log.debug(k)
log.debug(v)
bound3 = self.preparedReferredbyOnly.bind((v, k))
res = self.session.execute(bound3)
res = res.current_rows[0].applied
if not(res):
bound4 = self.preparedAddNewReferredBy.bind((k, v))
res = self.session.execute_async(bound4)
self.bulk_data = []
def process_tuple(self, tup):
result = DataFrame(*tup.values)
self.bulk_data.append(result)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
filename='/tmp/cassandra_bolt.log',
filemode='a',
)
Push_to_Cassandra().run()
|
gpl-3.0
| 9,044,771,838,797,030,000
| 37.240385
| 124
| 0.608903
| false
| 3.740357
| false
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/placeholder_type.py
|
1
|
1630
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.enums',
marshal='google.ads.googleads.v7',
manifest={
'PlaceholderTypeEnum',
},
)
class PlaceholderTypeEnum(proto.Message):
r"""Container for enum describing possible placeholder types for
a feed mapping.
"""
class PlaceholderType(proto.Enum):
r"""Possible placeholder types for a feed mapping."""
UNSPECIFIED = 0
UNKNOWN = 1
SITELINK = 2
CALL = 3
APP = 4
LOCATION = 5
AFFILIATE_LOCATION = 6
CALLOUT = 7
STRUCTURED_SNIPPET = 8
MESSAGE = 9
PRICE = 10
PROMOTION = 11
AD_CUSTOMIZER = 12
DYNAMIC_EDUCATION = 13
DYNAMIC_FLIGHT = 14
DYNAMIC_CUSTOM = 15
DYNAMIC_HOTEL = 16
DYNAMIC_REAL_ESTATE = 17
DYNAMIC_TRAVEL = 18
DYNAMIC_LOCAL = 19
DYNAMIC_JOB = 20
IMAGE = 21
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| -9,196,301,452,706,389,000
| 27.103448
| 74
| 0.634969
| false
| 3.773148
| false
| false
| false
|
julierthanjulie/PedestrianTracking
|
generate_frames.py
|
1
|
3979
|
"""
This code generates frames from CSV values that can be stitched together using FFMPEG
to animate pedestrian data. This version produces an animation at 4x speed.
"""
print "Importing..."
# Please ensure the following dependencies are installed before use:
import pylab
import numpy as np
import itertools
import sys, getopt
import operator
import collections
drawing_by_frame = []
#
def generate_frames(argv):
# Some default values if nothing is provided in command line arguments.
traces = 'bubble_pop_traces.csv'
background = 'trails_480.png'
# Get command line arguments.
# -f specify a file name. This code expects csv files in the format PedestrianID, X, Y, FrameNum
    # -b specify a background image. Any format available to pylab is acceptable.
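    # Illustrative invocation using the default file names above:
    #   python generate_frames.py -f bubble_pop_traces.csv -b trails_480.png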
try:
opts,args = getopt.getopt(argv, "f:b:")
except getopt.GetoptError:
print "Getopt Error"
exit(2)
for opt, arg in opts:
if opt == "-f":
traces = arg
elif opt == "-b":
background = arg
# Name each frame based on the filename
figure_name = traces.split("/")[-1].split(".")[-2]
# Load up csv file
trace = np.loadtxt(traces, comments=';', delimiter=',')
traces = itertools.groupby(trace, lambda x:x[0])
# These values should match those in pedestrian_tracking.py
w,h=640,360
border=20
# Some values from trail validation
valid = 0
avg_length = 0
num_traces = 0
# Load up background image.
background = pylab.imread(background)
pylab.imshow(background)
for id,t in traces:
pts = np.array(list(t))
invalid = False
# Validate Trails
if (pts[0,1]>border and pts[0,1]<w-border) and (pts[0,2]>border and pts[0,2]<h-border):
invalid = True
if (pts[-1,1]>border and pts[-1,1]<w-border) and (pts[-1,2]>border and pts[-1,2]<h-border):
invalid = True
if len(pts) < 200:
invalid = True
if ((pts[0,2] > h-border) and (pts[0,1] > w/2-75 and pts[0,1] < w/2+75) or (pts[-1,2] > h-border) and (pts[-1,1] > w/2-75 and pts[-1,1] < w/2+75)):
invalid = True
# For all valid trails, prepare them for generating animated trails by frame number
if not invalid:
num_traces += 1
avg_length += len(pts)
# Drawing colour for traces given as RGB
colour = (0,0,1)
for pt in pts:
this_frame = [pt[3], pt[1], pt[2], pt[0]]
drawing_by_frame.append(this_frame)
valid += 1
x = np.clip(pts[:,1],0,w)
y = np.clip(pts[:,2],0,h)
print "Valid Trails: " , valid, " Average Length:" , avg_length/num_traces
drawing_by_frame.sort()
last_frame = drawing_by_frame[-1][0]
current_frame = drawing_by_frame[0][0]
drawing_dict = collections.defaultdict(list)
count = 0
while len(drawing_by_frame) > 0:
#print "Next Frame, " , current_frame
pylab.imshow(background)
while drawing_by_frame[0][0] == current_frame:
list_one = drawing_by_frame.pop(0)
x = drawing_dict[list_one[3]]
x.append([list_one[1], list_one[2]])
drawing_dict[list_one[3]] = x
# Adjust mod value here to adjust frame drawing frequency
# Draw stuff here
if (current_frame % 10 ==0):
print "Percentage Complete: " , (current_frame/last_frame)*100
draw_dict(drawing_dict, w, h, border, figure_name, current_frame, count)
count += 1
pylab.clf()
current_frame = drawing_by_frame[0][0]
def draw_dict(dict, w, h, border, figure_name, frame, count):
for trace in dict:
print trace
pts = dict[trace]
pylab.plot([p[0] for p in pts], [p[1] for p in pts],'-',color=(0,0,1),alpha=0.5, linewidth=2)
pylab.xlim(0,w)
pylab.ylim(h,0)
pylab.axis('off')
pylab.subplots_adjust(0,0,1,1,0,0)
pylab.savefig("Frames/" + figure_name + "_" + str(count).zfill(6) + '.png', dpi=150,bbox_inches='tight', pad_inches=0)
#pylab.savefig("Frames/" + 'frame' + str(int(frame)) + '.png', dpi=150,bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
print "Starting Frame Generation"
generate_frames(sys.argv[1:])
|
mit
| -6,176,049,097,840,916,000
| 22.96988
| 149
| 0.643629
| false
| 2.821986
| false
| false
| false
|
by46/simplekit
|
simplekit/email/__init__.py
|
1
|
4151
|
import httplib
import os.path
import requests
import six
from simplekit import settings
from simplekit.exceptions import MailException
PRIORITY_NORMAL = 0
PRIORITY_LOW = 1
PRIORITY_HIGH = 2
CONTENT_TYPE_HTML = 0
CONTENT_TYPE_TEXT = 1
ENCODING_UTF8 = 0
ENCODING_ASCII = 1
ENCODING_UTF32 = 2
ENCODING_UNICODE = 3
MEDIA_TYPE_GIF = 0
MEDIA_TYPE_JPEG = 1
MEDIA_TYPE_TIFF = 2
MEDIA_TYPE_PDF = 3
MEDIA_TYPE_RTF = 4
MEDIA_TYPE_SOAP = 5
MEDIA_TYPE_ZIP = 6
MEDIA_TYPE_OTHER = 7
MAIL_TYPE_SMTP = 1
MAIL_TYPE_LONDON2 = 0
class SmtpSetting(dict):
def __init__(self, subject_encoding, body_encoding, attachments=None):
kwargs = dict(SubjectEncoding=subject_encoding,
BodyEncoding=body_encoding,
Attachments=attachments)
super(SmtpSetting, self).__init__(**kwargs)
self.__dict__ = self
class MailAttachment(dict):
def __init__(self, filename, file_content, media_type=MEDIA_TYPE_OTHER):
kwargs = dict(FileName=filename,
FileContent=file_content,
MediaType=media_type)
super(MailAttachment, self).__init__(**kwargs)
self.__dict__ = self
class LondonIISetting(dict):
def __init__(self, company_code, country_code, language_code, system_id, template_id, mail_template_variables):
kwargs = dict(CompanyCode=company_code,
CountryCode=country_code,
LanguageCode=language_code,
SystemID=system_id,
TemplateID=template_id,
MailTemplateVariables=mail_template_variables)
super(LondonIISetting, self).__init__(**kwargs)
self.__dict__ = self
class MailTemplateVariable(dict):
def __init__(self, key, value):
kwargs = dict(Key=key, Value=value)
super(MailTemplateVariable, self).__init__(**kwargs)
def send_email_inner(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL,
content_type=CONTENT_TYPE_TEXT,
mail_type=None, smtp_setting=None, london_2_setting=None):
if isinstance(to, (list, tuple)):
to = ';'.join(to)
body = dict(From=sender,
To=to,
CC=cc,
BCC=bcc,
Subject=subject,
Body=body,
Priority=priority,
ContentType=content_type,
MailType=mail_type,
SmtpSetting=smtp_setting,
LondonIISetting=london_2_setting)
response = requests.post(settings.URL_EMAIL, json=body,
headers={'Content-Type': 'Application/json', 'accept': 'application/json'})
if response.status_code != httplib.OK:
del body['SmtpSetting']
raise MailException("Send mail use api {0} status code: {1}\n body : {2}\n response content : {3}".format(
settings.URL_EMAIL, response.status_code, body, response.content))
def send_email(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL,
content_type=CONTENT_TYPE_TEXT,
files=None):
attachments = []
import base64
if files:
for item in files:
if isinstance(item, six.string_types):
filename = os.path.basename(item)
file_content = open(item, 'rb').read()
file_content = base64.b64encode(file_content)
media_type = MEDIA_TYPE_OTHER
attachment = MailAttachment(filename, file_content, media_type)
attachments.append(attachment)
else:
attachments.append(item)
smtp_setting = SmtpSetting(ENCODING_UTF8, ENCODING_UTF8, attachments)
send_email_inner(sender, to, subject, body, cc, bcc, priority, content_type, MAIL_TYPE_SMTP, smtp_setting)
if __name__ == '__main__':
send_email('benjamin.c.yan@newegg.com', 'benjamin.c.yan@newegg.com', '(info) testing', 'testing body',
files=['__init__.py'])
|
mit
| -3,068,504,204,268,324,000
| 33.177966
| 115
| 0.582751
| false
| 3.847081
| false
| false
| false
|
pentestfail/TA-FireEye_TAP
|
bin/input_module_fireeye_tap_incidents.py
|
1
|
4568
|
# encoding = utf-8
import os
import sys
import time
import datetime
import json
def validate_input(helper, definition):
api_env = definition.parameters.get('api_env', None)
instanceid = definition.parameters.get('instance_id', None)
apikey = definition.parameters.get('apikey', None)
api_limit = definition.parameters.get('api_limit', None)
api_timeout = definition.parameters.get('api_timeout', None)
pass
def collect_events(helper, ew):
# Retrieve runtime variables
opt_environment = helper.get_arg('api_env')
opt_instanceid = helper.get_arg('instance_id')
opt_apikey = helper.get_arg('apikey')
opt_limit = helper.get_arg('api_limit')
opt_timeout = float(helper.get_arg('api_timeout'))
# Create checkpoint key
opt_checkpoint = "incidents_" + opt_environment + "_" + opt_instanceid
#Create last status entry for storage as checkpoint
current_status = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
#Check for last query execution data in kvstore & generate if not present
try:
last_status = helper.get_check_point(opt_checkpoint) or time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(0))
helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Last successful checkpoint time: " + str(last_status))
except Exception as e:
helper.log_error("[" + opt_instanceid + "] TAP Incidents - Unable to retrieve last execution checkpoint!")
raise e
# use simple rest call to load the events
header = {}
data = {}
parameter = {}
parameter['limit'] = opt_limit
parameter['sort'] = "-createDate"
parameter['withCount'] = "1"
parameter['includes'] = "revisions._updatedBy"
parameter['query'] = str('{"updateDate":{"$gte":"' + last_status + '"}}')
url = "https://" + opt_environment + ".fireeye.com/tap/id/" + opt_instanceid + "/api/v1/incidents"
method = 'GET'
header['x-mansfield-key'] = opt_apikey
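    # Illustrative request sketch (checkpoint value is hypothetical): with
    # last_status == "2020-01-01T00:00:00Z" the query parameter becomes
    #   {"updateDate":{"$gte":"2020-01-01T00:00:00Z"}}
    # sent as a GET to https://<api_env>.fireeye.com/tap/id/<instance_id>/api/v1/incidents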
try:
# Leverage helper function to send http request
response = helper.send_http_request(url, method, parameters=parameter, payload=None, headers=header, cookies=None, verify=True, cert=None, timeout=opt_timeout, use_proxy=True)
# Return API response code
r_status = response.status_code
# Return API request status_code
        if r_status != 200:
helper.log_error("[" + opt_instanceid + "] Incidents API unsuccessful status_code=" + str(r_status))
response.raise_for_status()
# Return API request as JSON
obj = response.json()
if obj is None:
helper.log_info("[" + opt_instanceid + "] No new incidents retrieved from TAP.")
# Iterate over incidents in array & index
i=0
for incident in obj.get("incidents"):
singleIncident = (obj.get("incidents")[i])
singleIncident['tap_instance'] = opt_instanceid
singleIncident['tap_environment'] = opt_environment
# Rename underscore fields so Splunk will index values
            singleIncident['alert'] = singleIncident['_alert']
singleIncident['updatedBy'] = singleIncident['_updatedBy']
singleIncident['createdBy'] = singleIncident['_createdBy']
singleIncident['assignedTo'] = singleIncident['_assignedTo']
# Remove underscore fieldnames and values
del singleIncident['_alert']
del singleIncident['_updatedBy']
del singleIncident['_createdBy']
del singleIncident['_assignedTo']
event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(singleIncident))
try:
ew.write_event(event)
helper.log_debug("[" + opt_instanceid + "] Added incident:" + str(singleIncident['id']))
except Exception as error:
helper.log_error("[" + opt_instanceid + "] Unable to add incident:" + str(singleIncident['id']))
i = i + 1
#Update last completed execution time
helper.save_check_point(opt_checkpoint, current_status)
helper.log_info("[" + opt_instanceid + "] Incidents collection complete. Records added: " + str(i))
helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Storing checkpoint time: " + current_status)
except Exception as error:
helper.log_error("[" + opt_instanceid + "] TAP Incidents - An unknown error occurred!")
raise error
|
mit
| -2,108,804,593,542,460,400
| 43.794118
| 183
| 0.632443
| false
| 3.910959
| false
| false
| false
|
turtlewit/GSHS_RPG
|
AdventureEngine/CoreEngine/input.py
|
2
|
3088
|
#------------------------------------------------------------------------------#
# Copyright 2016-2017 Golden Sierra Game Development Class #
# This file is part of Verloren (GSHS_RPG). #
# #
# Verloren (GSHS_RPG) is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Verloren (GSHS_RPG) is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Verloren (GSHS_RPG). If not, see <http://www.gnu.org/licenses/>. #
#------------------------------------------------------------------------------#
import sys
import curses
class Input:
#renderer = None
commandHistory = []
command = None
unf_command = ""
cheese = "cheese"
takeTextInput = False
char = None
def Update(self, renderer):
Input.command = None
Input.char = None
if renderer:
currentCharacter = renderer.m_screen.getch()
if currentCharacter != -1:
if currentCharacter != curses.KEY_RESIZE:
Input.char = currentCharacter
if Input.takeTextInput:
if currentCharacter == ord('\n'):
if len(Input.unf_command.split()) > 0:
Input.commandHistory.insert(0,Input.command)
Input.command = Input.unf_command
else:
Input.command = 10
renderer.m_cmd = ""
Input.unf_command = ""
if sys.platform == 'linux' \
or sys.platform == 'linux2' \
or sys.platform == 'linux-armv7l':
if currentCharacter == 127 \
or currentCharacter == curses.KEY_BACKSPACE:
renderer.m_cmd = renderer.m_cmd[:-1]
Input.unf_command = Input.unf_command[:-1]
else:
if currentCharacter == 8:
renderer.m_cmd = renderer.m_cmd[:-1]
Input.unf_command = Input.unf_command[:-1]
if currentCharacter >=32 and currentCharacter <= 126:
if renderer.m_vorCmd:
if len(Input.unf_command) \
< renderer.BUFFER_X \
- len(renderer.m_vorCmd) \
- 1:
renderer.m_cmd += chr(currentCharacter)
Input.unf_command += chr(currentCharacter)
if currentCharacter in [
curses.KEY_UP,
curses.KEY_DOWN,
curses.KEY_LEFT,
curses.KEY_RIGHT,
27
]:
Input.command = currentCharacter
|
gpl-3.0
| 1,061,857,247,540,585,600
| 35.204819
| 80
| 0.510687
| false
| 3.979381
| false
| false
| false
|
TheOriginalBDM/Lazy-Cleaner-9000
|
code/clean_sweep_vision.py
|
1
|
6258
|
#!/usr/bin/env python
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import time
from colormath.color_diff import delta_e_cie2000
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
def nothing(*arg):
pass
def is_allowed_color(cur_int, avg_int, m_val):
b = abs(cur_int[0] - avg_int[0])
g = abs(cur_int[1] - avg_int[1])
r = abs(cur_int[2] - avg_int[2])
if (b > m_val or g > m_val or r > m_val):
return True
else:
return False
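# Illustrative example: is_allowed_color((10, 10, 10), (50, 10, 10), 20)
# returns True because the blue channel differs from the average by 40 > 20.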
def make_gt_val(val, min_val):
if val < min_val:
val = min_val
return val
def make_odd(val):
if val % 2 == 0:
val += 1
return val
def get_avg_bgr(in_img, in_cntrs):
ttlA = 0
sum_roiA_mean = (0, 0, 0)
avg_roiA_mean = (0, 0, 0)
ttlA = len(in_cntrs)
for cnt2 in in_cntrs:
x2, y2, w2, h2 = cv2.boundingRect(cnt2)
        roiA = in_img[y2:y2+h2, x2:x2+w2]
roiA_mean = cv2.mean(roiA)
int_roiA_mean = (int(roiA_mean[0]), int(roiA_mean[1]), int(roiA_mean[2]))
sum_roiA_mean = (int_roiA_mean[0] + sum_roiA_mean[0], int_roiA_mean[1] + sum_roiA_mean[1], int_roiA_mean[2] + sum_roiA_mean[2])
if ttlA > 0:
avg_roiA_mean = (sum_roiA_mean[0]/ttlA, sum_roiA_mean[1]/ttlA, sum_roiA_mean[2]/ttlA)
return avg_roiA_mean
window_nm = 'img_cntrls'
cam_res_w = 640
cam_res_h = 480
cam_fr_rt = 32
cv2.namedWindow(window_nm)
cv2.createTrackbar('blur_size', window_nm, 7 , 21, nothing)
cv2.createTrackbar('canny_min', window_nm, 156, 255, nothing)
cv2.createTrackbar('thresh_min', window_nm, 7 , 255, nothing)
cv2.createTrackbar('min_area', window_nm, 5 , 2000, nothing)
cv2.createTrackbar('max_area', window_nm, 40000 , 90000, nothing)
cv2.createTrackbar('max_delta', window_nm, 20 , 100, nothing)
cv2.createTrackbar('get_avg', window_nm, 0 , 1, nothing)
cv2.createTrackbar('get_mode', window_nm, 0, 7, nothing)
camera = PiCamera()
camera.resolution = (cam_res_w, cam_res_h)
camera.framerate = cam_fr_rt
rawCapture = PiRGBArray(camera, size=(cam_res_w, cam_res_h))
time.sleep(0.2)
avg_roi_mean = (0, 0, 0) #b, g, r
delta_color = 000.0000
for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
#############################################
### GET THE CURRENT FRAME FROM THE CAMERA ###
#############################################
im = frame.array
im_raw = im #keep a copy in case we want to look at it later
####################
### GET SETTINGS ###
####################
s = cv2.getTrackbarPos('get_avg', window_nm)
blur_size = cv2.getTrackbarPos('blur_size',window_nm)
canny_min = cv2.getTrackbarPos('canny_min',window_nm)
thresh_min = cv2.getTrackbarPos('thresh_min',window_nm)
min_area = cv2.getTrackbarPos('min_area',window_nm)
max_area = cv2.getTrackbarPos('max_area',window_nm)
max_delta = cv2.getTrackbarPos('max_delta',window_nm)
mode = cv2.getTrackbarPos('get_mode', window_nm)
############################
### ENSURE CORRECT VALUE ###
############################
blur_size = make_odd(blur_size)
blur_size = make_gt_val(blur_size, 0)
thresh_min = make_odd(thresh_min)
thresh_min = make_gt_val(thresh_min, 0)
########################################################
### START IMAGE PROCESSING TO FIND OBJECTS IN RANGE ###
########################################################
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(imgray, (blur_size, blur_size))
#edged = cv2.Canny(blur, canny_min, 255)
ret3, thresh = cv2.threshold(blur, thresh_min, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
###S = 1 means get an average of the overall RGB picture
if s == 1:
        blur_size = 0
thresh_size = 1
min_area = 0
ovr_avg = get_avg_bgr(im, contours)
avg_roi_mean = ovr_avg
print avg_roi_mean
cv2.setTrackbarPos('get_avg', window_nm, 0)
else:
ttl_area = 0
ttl_cntrs = len(contours)
ttl_color = 0
sum_roi_mean = (0, 0, 0)
for cnt in contours:
a = cv2.contourArea(cnt)
### DO WE HAVE SOMETHING IN THE RIGHT SIZE (NO NEED TO PICK UP CARS) ###
if min_area < a < max_area:
ttl_area += 1
                x, y, w, h = cv2.boundingRect(cnt)
roi = im[y:y+h, x:x+w]
roi_mean = cv2.mean(roi)
int_roi_mean = (int(roi_mean[0]), int(roi_mean[1]), int(roi_mean[2]))
b, g, r = avg_roi_mean
bckgrnd_lab = convert_color(sRGBColor(r, g, b), LabColor)
contColor_lab = convert_color(sRGBColor(roi_mean[2],roi_mean[1], roi_mean[0]), LabColor)
delta_color = round(delta_e_cie2000(bckgrnd_lab, contColor_lab),1)
if delta_color >= max_delta:
# if is_allowed_color(int_roi_mean, avg_roi_mean, max_dev):
                    cv2.rectangle(im, (x, y), (x+w, y+h), int_roi_mean, 2)
ttl_color += 1
strLoc = str(x) + ',' + str(y) + ':' + str(delta_color)
cv2.putText(im, strLoc, (x,y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0,0,0), 1)
strTTL = str(ttl_cntrs) + ' - ' + str(ttl_area) + ' - ' + str(ttl_color)
cv2.putText(im, str(strTTL), (20,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2)
cv2.putText(im, str(avg_roi_mean), (20, cam_res_h - 20) ,cv2.FONT_HERSHEY_PLAIN, 2.0, avg_roi_mean, 2)
if mode == 0:
cv2.imshow('imgview', im_raw)
print 'Raw image view'
elif mode == 1:
cv2.imshow('imgview', imgray)
print 'Grayscale view'
elif mode == 2:
cv2.imshow('imgview', blur)
print 'Blur view'
elif mode == 3:
cv2.imshow('imgview', blur)
print 'Blur view'
elif mode == 4:
cv2.imshow('imgview', thresh)
print 'Threshold view'
else:
cv2.imshow('imgview', im)
print 'Contour overlay on raw view'
ch = cv2.waitKey(5)
rawCapture.truncate(0)
if ch == 27:
break
cv2.destroyAllWindows()
|
gpl-3.0
| 5,175,269,980,451,224,000
| 30.606061
| 135
| 0.563279
| false
| 2.843253
| false
| false
| false
|
raiden-network/raiden
|
raiden/utils/upgrades.py
|
1
|
8374
|
import os
import sqlite3
import sys
from contextlib import closing
from glob import escape, glob
from pathlib import Path
import filelock
import structlog
from raiden.constants import RAIDEN_DB_VERSION
from raiden.storage.sqlite import SQLiteStorage
from raiden.storage.versions import VERSION_RE, filter_db_names, latest_db_file
from raiden.utils.typing import Any, Callable, DatabasePath, List, NamedTuple
class UpgradeRecord(NamedTuple):
from_version: int
function: Callable
UPGRADES_LIST: List[UpgradeRecord] = []
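# Illustrative sketch of how UPGRADES_LIST is meant to be populated (the
# migration name below is hypothetical); each function must accept the keyword
# arguments passed in UpgradeManager._upgrade (storage, old_version,
# current_version, **kwargs) and return the new version:
#   UPGRADES_LIST.append(
#       UpgradeRecord(from_version=25, function=upgrade_v25_to_v26)
#   )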
log = structlog.get_logger(__name__)
def get_file_lock(db_filename: Path) -> filelock.FileLock:
lock_file_name = f"{db_filename}.lock"
return filelock.FileLock(lock_file_name)
def update_version(storage: SQLiteStorage, version: int) -> None:
cursor = storage.conn.cursor()
cursor.execute(
'INSERT OR REPLACE INTO settings(name, value) VALUES("version", ?)', (str(version),)
)
def get_file_version(db_path: Path) -> int:
match = VERSION_RE.match(os.path.basename(db_path))
assert match, f'Database name "{db_path}" does not match our format'
file_version = int(match.group(1))
return file_version
def get_db_version(db_filename: Path) -> int:
"""Return the version value stored in the db"""
msg = f"Path '{db_filename}' expected, but not found"
assert os.path.exists(db_filename), msg
# Perform a query directly through SQL rather than using
# storage.get_version()
# as get_version will return the latest version if it doesn't
# find a record in the database.
conn = sqlite3.connect(str(db_filename), detect_types=sqlite3.PARSE_DECLTYPES)
cursor = conn.cursor()
try:
cursor.execute('SELECT value FROM settings WHERE name="version";')
result = cursor.fetchone()
except sqlite3.OperationalError:
raise RuntimeError("Corrupted database. Database does not the settings table.")
if not result:
raise RuntimeError(
"Corrupted database. Settings table does not contain an entry the db version."
)
return int(result[0])
def _copy(old_db_filename: Path, current_db_filename: Path) -> None:
old_conn = sqlite3.connect(old_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
current_conn = sqlite3.connect(current_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
with closing(old_conn), closing(current_conn):
old_conn.backup(current_conn)
def delete_dbs_with_failed_migrations(valid_db_names: List[Path]) -> None:
for db_path in valid_db_names:
file_version = get_file_version(db_path)
with get_file_lock(db_path):
db_version = get_db_version(db_path)
# The version matches, nothing to do.
if db_version == file_version:
continue
elif db_version > file_version:
raise RuntimeError(
f"Impossible database version. "
f"The database {db_path} has too high a version ({db_version}), "
f"this should never happen."
)
# The version number in the database is smaller then the current
# target, this means that a migration failed to execute and the db
# is partially upgraded.
else:
os.remove(db_path)
class UpgradeManager:
"""Run migrations when a database upgrade is necessary.
Skip the upgrade if either:
- There is no previous DB
- There is a current DB file and the version in settings matches.
Upgrade procedure:
- Delete corrupted databases.
- Copy the old file to the latest version (e.g. copy version v16 as v18).
- In a transaction: Run every migration. Each migration must decide whether
to proceed or not.
"""
def __init__(self, db_filename: DatabasePath, **kwargs: Any) -> None:
base_name = os.path.basename(db_filename)
match = VERSION_RE.match(base_name)
assert match, f'Database name "{base_name}" does not match our format'
self._current_db_filename = Path(db_filename)
self._kwargs = kwargs
def run(self) -> None:
# First clear up any partially upgraded databases.
#
# A database will be partially upgraded if the process receives a
# SIGKILL/SIGINT while executing migrations. NOTE: It's very probable
# the content of the database remains consistent, because the upgrades
# are executed inside a migration, however making a second copy of the
# database does no harm.
escaped_path = escape(str(self._current_db_filename.parent))
paths = glob(f"{escaped_path}/v*_log.db")
valid_db_names = filter_db_names(paths)
delete_dbs_with_failed_migrations(valid_db_names)
# At this point we know every file version and db version match
# (assuming there are no concurrent runs).
paths = glob(f"{escaped_path}/v*_log.db")
valid_db_names = filter_db_names(paths)
latest_db_path = latest_db_file(valid_db_names)
# First run, there is no database file available
if latest_db_path is None:
return
file_version = get_file_version(latest_db_path)
# The latest version matches our target version, nothing to do.
if file_version == RAIDEN_DB_VERSION:
return
if file_version > RAIDEN_DB_VERSION:
raise RuntimeError(
f"Conflicting database versions detected, latest db version is v{file_version}, "
f"Raiden client version is v{RAIDEN_DB_VERSION}."
f"\n\n"
f"Running a downgraded version of Raiden after an upgrade is not supported, "
f"because the transfers done with the new client are not understandable by the "
f"older."
)
if RAIDEN_DB_VERSION >= 27 and file_version <= 26 and file_version > 1:
msg = (
f"Your Raiden database is version {file_version} and there is no compatible "
f"migration to version {RAIDEN_DB_VERSION} available.\n"
"You need to either start a new Raiden node with a different account, or "
"close and settle all channels, and start over with a fresh database.\n\n"
"More information on this topic at "
"https://raiden-network.readthedocs.io/en/latest/other/known-issues.html"
"#database-upgrades\n\n"
"If you are on **mainnet** and affected by this, please create an issue at "
"https://github.com/raiden-network/raiden/issues/new?title=Mainnet%20Migration%20"
f"{file_version}%20{RAIDEN_DB_VERSION}"
)
log.warning(msg)
sys.exit(msg)
self._upgrade(
target_file=self._current_db_filename,
from_file=latest_db_path,
from_version=file_version,
)
def _upgrade(self, target_file: Path, from_file: Path, from_version: int) -> None:
with get_file_lock(from_file), get_file_lock(target_file):
_copy(from_file, target_file)
# Only instantiate `SQLiteStorage` after the copy. Otherwise
# `_copy` will deadlock because only one connection is allowed to
# `target_file`.
with SQLiteStorage(target_file) as storage:
log.debug(f"Upgrading database from v{from_version} to v{RAIDEN_DB_VERSION}")
try:
version_iteration = from_version
with storage.transaction():
for upgrade_record in UPGRADES_LIST:
if upgrade_record.from_version < from_version:
continue
version_iteration = upgrade_record.function(
storage=storage,
old_version=version_iteration,
current_version=RAIDEN_DB_VERSION,
**self._kwargs,
)
update_version(storage, RAIDEN_DB_VERSION)
except BaseException as e:
log.error(f"Failed to upgrade database: {e}")
raise
|
mit
| -4,492,969,264,185,019,000
| 36.891403
| 98
| 0.614999
| false
| 4.143493
| true
| false
| false
|
tymmothy/dds3x25
|
dds3x25/dds.py
|
1
|
12274
|
#!/usr/bin/env python
"""
This is an interface library for Hantek DDS-3X25 arbitrary waveform generator.
Licenced LGPL2+
Copyright (C) 2013 Domas Jokubauskis (domas@jokubauskis.lt)
Copyright (C) 2014 Tymm Twillman (tymmothy@gmail.com)
"""
import struct
import math
import collections
# dds3x25 imports...
from usb_interface import *
from packet import *
def samplepoint_encode(value):
SIGN_BIT = (1 << 11)
encoded = abs(value)
if encoded > DDS.MAX_POINT_VALUE:
msg = "Value {0} is out of range ({1}-{2})".format(value, -DDS.MAX_POINT_VALUE, DDS.MAX_POINT_VALUE)
raise ValueError(msg)
# Note: 0 is negative value
if value > 0:
encoded = (DDS.MAX_POINT_VALUE + 1) - encoded
else:
encoded = encoded | SIGN_BIT
return struct.pack("<H", encoded)
def samplepoint_chunks(data):
"""Cut samplepoint data into 32-point chunks.
If necessary, add padding to the last chunk to make it 64 bytes.
"""
SAMPLEPOINT_CHUNK_SIZE=32
for i in xrange(0, len(data), SAMPLEPOINT_CHUNK_SIZE):
chunkdata = data[i:i+SAMPLEPOINT_CHUNK_SIZE]
chunk = "".join([ samplepoint_encode(x) for x in chunkdata ])
if len(chunk) < SAMPLEPOINT_CHUNK_SIZE * 2:
chunk += "\x91\x1c" * ((SAMPLEPOINT_CHUNK_SIZE - (len(chunk) / 2)))
yield chunk
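# Illustrative example of the chunking above: a 70-point waveform yields three
# 64-byte chunks; the last chunk carries 6 encoded points (12 bytes) plus 26
# padding words of "\x91\x1c" to reach 64 bytes.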
class DDS(object):
# Hantek 3x25 USB Vendor & Product IDs
USB_VID = 0x0483
USB_PID = 0x5721
# Core DAC clock -> 200 MHz
DAC_CLOCK = int(200e6)
# Maximum DAC clock divider
DAC_CLOCK_DIV_MAX = 131070
# Maximum # of sample points
MAX_POINTS = 4095
# Maximum value of a point
MAX_POINT_VALUE = (1 << 11) - 1
NUM_DIGITAL_OUTPUTS = 12
NUM_DIGITAL_INPUTS = 6
def __init__(self, idVendor=USB_VID, idProduct=USB_PID, **kwargs):
"""Initialize a DDS instance and connect to the hardware.
Args:
idVendor (int): 16-bit USB Vendor ID (VID) for the DDS hardware.
idProduct (int): 16-bit USB Product ID (PID) for the DDS hardware.
Kwargs:
See DDS.configure() for the list of kwargs that __init__ understands.
"""
# Set up defaults for instance variables.
self._ext_trigger = None
self._oneshot = False
self._counter_mode = False
self._programmable_output = True
self._digital_output = 0
self._clock_divider = 128
# do not initialize USB device if used for unit testing
if kwargs.get('testing', False):
return
self._in_ep, self._out_ep = dds_usb_open(idVendor, idProduct)
self.configure(**kwargs)
def transfer(self, data):
self._out_ep.write(data)
return self._in_ep.read(self._in_ep.wMaxPacketSize)
def configure(self, **kwargs):
"""Update the 3x25's configuration settings.
Kwargs:
reset_trig (bool): If True, reset the DDS external trigger.
reset_counter (bool): If True, reset the DDS counter.
oneshot (bool): If True, only output one wave (not continuous).
counter_mode (bool): Set true to enable counter mode.
If True, the 3x25 counts pulses.
If False, the 3x25 measures frequency.
programmable_output (bool): Set true to enable programmable digital output.
If True, digital output pins are controlled by setting digital_output.
If False, digital output pins follow the DAC output value.
ext_trigger ([None, 0 or 1]): Configure external trigger mode.
If None, external triggering is disabled.
If 1, external triggering occurs on rising pulse edges.
If 0, external triggering occurs on falling pulse edges.
digital_output (int): 12-bit unsigned value whose bits are written
to the 3x25's digital output pins.
Note: Only used when programmable_output is enabled.
clock_divider (int): Divisor to use for 200Mhz DAC clock to generate
sample output clock.
Must be an even value from 0-131070
"""
reset_trigger = bool(kwargs.get('reset_trig', False))
reset_counter = bool(kwargs.get('reset_counter', False))
oneshot = bool(kwargs.get('oneshot', self._oneshot))
counter_mode = bool(kwargs.get('counter_mode', self._counter_mode))
programmable_output = bool(kwargs.get('programmable_output', self._programmable_output))
ext_trigger = kwargs.get('ext_trigger', self._ext_trigger)
if ext_trigger not in [ None, 0, 1 ]:
raise ValueError("Invalid value for ext_trigger (must be 1, 0 or None)")
digital_output = int(kwargs.get('digital_output', self._digital_output))
clock_divider = int(kwargs.get('clock_divider', self._clock_divider))
if (clock_divider < 1) or (clock_divider > 131070) or (clock_divider > 1 and clock_divider & 1):
msg = "Clock divider ({0}) must be 1 or an even value between 2 and {1}.".format(clock_divider, DDS.DAC_CLOCK_DIV_MAX)
raise ValueError(msg)
self._oneshot = oneshot
self._counter_mode = counter_mode
self._programmable_output = programmable_output
self._ext_trigger = ext_trigger
self._digital_output = digital_output
self._clock_divider = clock_divider
configure_packet = ConfigurePacket(self, reset_trigger=reset_trigger, reset_counter=reset_counter)
response = self.transfer(str(configure_packet))
response = self._parse_configure_packet_response(response)
return response
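    # Illustrative usage sketch (values are hypothetical): on a connected
    # instance, dds.configure(clock_divider=2, oneshot=True) re-sends the
    # configuration packet and returns a dict with the keys
    # 'digital_input', 'frequency', 'ticks' and 'counts'.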
def _parse_configure_packet_response(self, packet):
vals = struct.unpack("<HII", packet)
return {
'digital_input' : vals[0],
'frequency' : vals[1] * 2 if self._counter_mode is False else None,
'ticks' : None if vals[2] == 0xffffffff else vals[2],
'counts' : vals[1] if self._counter_mode is True else None,
}
def set_waveform(self, points, clock_divider=None, shift_points=0):
count = len(points)
if shift_points:
            points = collections.deque(points)
            points.rotate(shift_points)
            # Convert back to a list so samplepoint_chunks() can slice it
            # (deque objects do not support slicing).
            points = list(points)
response = self.transfer(str(PointCountPacket(count, is_start=True)))
assert response[0] == 0xcc
for chunk in samplepoint_chunks(points):
response = self.transfer(chunk)
assert response[0] == 0xcc
response = self.transfer(str(PointCountPacket(count)))
assert response[0] == 0xcc
if clock_divider is not None:
self.configure(clock_divider=clock_divider)
def reset_counter(self):
"""Reset the 3x25 counter state."""
self.configure(reset_counter=True)
def reset_trigger(self):
"""Reset the 3x25 external trigger."""
self.configure(reset_trigger=True)
def digital_write(self, pin, pin_state):
"""Set the output state of a digital output pin.
Args:
pin (int): Number of pin to control.
pin_state (int/bool): If 1/True, pin will be set high.
If 0/False, pin will be set low.
"""
pin_state = 1 if pin_state else 0
digital_output = self._digital_output & ~(1 << pin)
digital_output |= (pin_state << pin)
self.configure(digital_output=digital_output)
def digital_write_port(self, pin_states):
"""Set the output states of all digital output pins.
Args:
pin_states (int): Value comprised of bits to write to
the digital output pins.
"""
        self.configure(digital_output=pin_states)
def digital_read(self, pin):
"""Read the state of a digital input pin.
Args:
pin (int): Input pin # to read.
Returns:
0 if the pin is low, 1 if the pin is high.
"""
digital_in = self.configure()['digital_input']
return 1 if (digital_in & (1 << pin)) else 0
def digital_read_port(self):
"""Read the state of all input pins as one integer value.
Returns:
Integer w/bits set to the states of the input pins.
"""
return self.configure()['digital_input']
def count_in_frequency(self):
"""Get the input frequency at the 3x25's COUNT IN port.
The frequency is only available when the 3x25 is NOT in counter mode.
Returns:
Frequency (in Hz) at the COUNT IN port, or None if in counter mode.
"""
return self.configure()['frequency']
def count_in_counts(self):
"""Get the # of pulses counted at the 3x25's COUNT IN port since last reset.
The count is only available when the 3x25 IS in counter mode.
use .reset_counter() to reset the value to 0.
Returns:
# of pulses counted at the COUNT IN port, or None if not in counter mode.
"""
return self.configure()['counts']
def count_in_ticks(self):
return self.configure()['ticks']
@property
def ext_trigger(self):
return self._ext_trigger
@ext_trigger.setter
def ext_trigger(self, trig):
if trig is not None and trig != 0 and trig != 1:
raise ValueError("Invalid value for external trigger (should be 1, 0 or None)")
self.configure(ext_trigger=trig)
@property
def oneshot_mode(self):
return self._oneshot
@oneshot_mode.setter
def oneshot_mode(self, val):
val = True if val else False
self.configure(oneshot=val)
@property
def counter_mode(self):
return self._counter_mode
@counter_mode.setter
def counter_mode(self, val):
val = True if val else False
self.configure(counter_mode=val)
@property
def programmable_output(self):
return self._programmable_output
@programmable_output.setter
def programmable_output(self, val):
self.configure(programmable_output=val)
@staticmethod
def points_and_div_for_freq(freq):
# Calculate divisor based on using max # of available samples possible.
# -- ceil( DAC_CLOCK / (frequency * MAX_POINTS) )
freq = int(freq)
div = (DDS.DAC_CLOCK + (freq - 1) * DDS.MAX_POINTS) / (freq * DDS.MAX_POINTS)
# Adjust if odd value -- divisor has to be 1 or a multiple of 2
if div > 1 and div & 1:
div += 1
# Calculate # of sample points to use w/this divider to get closest
# to requested frequency
# -- round( DAC_CLOCK / (divider * frequency) )
npoints = (DDS.DAC_CLOCK + (div * freq / 2)) / (div * freq)
# Calculate actual frequency
actual = (DDS.DAC_CLOCK / div) / npoints
return (npoints, div, actual)
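    # Worked example (values follow from the integer arithmetic above):
    # for freq=6000000 Hz this returns (npoints=33, div=1, actual=6060606);
    # for freq=1000 Hz it returns (npoints=4000, div=50, actual=1000).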
def generate_sine(self, freq, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0):
phase = float(phase)
npoints, div, actual = DDS.points_and_div_for_freq(freq)
points = []
for i in range(npoints):
i = float(i)
point = (amplitude * math.sin((2.0 * math.pi * i / npoints) + phase)) + offset
points.append(int(point))
self.set_waveform(points, clock_divider=div, shift_points=shift)
return actual
def generate_square(self, freq, duty_cycle=0.5, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0):
phase = float(phase)
npoints, div, actual = DDS.points_and_div_for_freq(freq)
points = []
for i in range(npoints):
shifted = int(i + (phase * npoints) / (2.0 * math.pi)) % npoints
point = amplitude if shifted < (duty_cycle * npoints) else -amplitude
points.append(int(point + offset))
self.set_waveform(points, clock_divider=div, shift_points=shift)
return actual
if __name__ == "__main__":
import time
freq = 6000000
d = DDS()
# print "Generating square wave @ {0} hz".format(freq)
# d.generate_square(25000000, 0.50)
# time.sleep(10)
print "Generating sine wave @ {0} hz".format(freq)
d.generate_sine(freq)
d.programmable_output=True
d.reset_counter()
d.counter_mode = True
|
lgpl-2.1
| 106,578,187,649,829,000
| 32.535519
| 130
| 0.607952
| false
| 3.744356
| true
| false
| false
|
atzengin/OCC
|
oc-utils/python/modtool/code_generator.py
|
1
|
2298
|
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" A code generator (needed by ModToolAdd) """
from templates import Templates
import Cheetah.Template
from util_functions import str_to_fancyc_comment
from util_functions import str_to_python_comment
from util_functions import strip_default_values
from util_functions import strip_arg_types
from util_functions import strip_arg_types_occ
class GRMTemplate(Cheetah.Template.Template):
""" An extended template class """
def __init__(self, src, searchList):
self.grtypelist = {
'sync': 'sync_block',
'sink': 'sync_block',
'source': 'sync_block',
'decimator': 'sync_decimator',
'interpolator': 'sync_interpolator',
'general': 'block',
'tagged_stream': 'tagged_stream_block',
'hier': 'hier_block2',
'noblock': ''}
searchList['str_to_fancyc_comment'] = str_to_fancyc_comment
searchList['str_to_python_comment'] = str_to_python_comment
searchList['strip_default_values'] = strip_default_values
searchList['strip_arg_types'] = strip_arg_types
searchList['strip_arg_types_occ'] = strip_arg_types_occ
Cheetah.Template.Template.__init__(self, src, searchList=searchList)
self.grblocktype = self.grtypelist[searchList['blocktype']]
def get_template(tpl_id, **kwargs):
""" Return the template given by tpl_id, parsed through Cheetah """
return str(GRMTemplate(Templates[tpl_id], searchList=kwargs))
|
gpl-3.0
| -1,651,171,289,484,127,200
| 41.555556
| 76
| 0.681027
| false
| 3.836394
| false
| false
| false
|
mostaphaRoudsari/Honeybee
|
src/Honeybee_AskMe.py
|
1
|
1992
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to get basic information on Honeybee Objects, whether they are HBSrfs or HBZones.
-
Provided by Honeybee 0.0.66
Args:
_HBObjects: Any valid Honeybee object.
Returns:
readMe!: Information about the Honeybee object. Connect to a panel to visualize.
"""
ghenv.Component.Name = "Honeybee_AskMe"
ghenv.Component.NickName = 'askMe'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "00 | Honeybee"
#compatibleHBVersion = VER 0.0.56\nJUL_24_2017
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import scriptcontext as sc
try:
# call the objects from the lib
hb_hive = sc.sticky["honeybee_Hive"]()
HBObjectsFromHive = hb_hive.visualizeFromHoneybeeHive(_HBObjects)
for HBO in HBObjectsFromHive:
print HBO
except Exception, e:
print "Honeybee has no idea what this object is! Vviiiiiiz!"
pass
|
gpl-3.0
| -3,768,216,618,678,705,700
| 35.218182
| 100
| 0.744478
| false
| 3.207729
| false
| false
| false
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/euler31-timed.py
|
1
|
1614
|
#runas solve()
#unittest.skip recursive generator
#pythran export solve()
# 01/08/14 modified for benchmarking by Wei Zhang
import sys, time
COINS = [1, 2, 5, 10, 20, 50, 100, 200]
# test
def _sum(iterable):
sum = None
for i in iterable:
if sum is None:
sum = i
else:
sum += i
return sum
def balance(pattern):
return _sum(COINS[x]*pattern[x] for x in range(0, len(pattern)))
def gen(pattern, coinnum, num):
coin = COINS[coinnum]
for p in range(0, num//coin + 1):
newpat = pattern[:coinnum] + (p,)
bal = balance(newpat)
if bal > num:
return
elif bal == num:
yield newpat
elif coinnum < len(COINS)-1:
for pat in gen(newpat, coinnum+1, num):
yield pat
def solve(total):
'''
In England the currency is made up of pound, P, and pence, p, and there are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, P1 (100p) and P2 (200p).
It is possible to make P2 in the following way:
1 P1 + 1 50p + 2 20p + 1 5p + 1 2p + 3 1p
How many different ways can P2 be made using any number of coins?
'''
return _sum(1 for pat in gen((), 0, total))
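# Illustrative check (small case): solve(5) == 4, i.e. 5p can be made as
# 5, 2+2+1, 2+1+1+1 or 1+1+1+1+1.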
def measure():
input = int(sys.argv[1]) # 200
for i in range(3):
solve(input)
print("Start timing...")
start = time.time()
result = solve(input)
print('total number of different ways: ', result)
duration = "%.3f\n" % (time.time() - start)
print("euler31: " + duration)
# warm up
for i in range(2000): # 300
solve(40)
measure()
|
bsd-3-clause
| 7,452,297,059,408,330,000
| 23.830769
| 115
| 0.576208
| false
| 3.189723
| false
| false
| false
|
longde123/MultiversePlatform
|
lib/IPCE/Lib/ctypes.py
|
1
|
5974
|
# Copyright (c) 2006 Seo Sanghyeon
# 2006-06-08 sanxiyn Created
# 2006-06-11 sanxiyn Implemented .value on primitive types
# 2006-11-02 sanxiyn Support for multiple signatures
__all__ = [
'c_int', 'c_float', 'c_double', 'c_char_p', 'c_void_p',
'LibraryLoader', 'CDLL', 'cdll',
'byref', 'sizeof'
]
# --------------------------------------------------------------------
# Dynamic module definition
from System import AppDomain
from System.Reflection import AssemblyName
from System.Reflection.Emit import AssemblyBuilderAccess
def pinvoke_module():
domain = AppDomain.CurrentDomain
name = AssemblyName('pinvoke')
flag = AssemblyBuilderAccess.Run
assembly = domain.DefineDynamicAssembly(name, flag)
module = assembly.DefineDynamicModule('pinvoke')
return module
# --------------------------------------------------------------------
# General interface
class pinvoke_value:
type = None
value = None
def get_type(obj):
if isinstance(obj, pinvoke_value):
return obj.type
else:
return type(obj)
def get_value(obj):
if isinstance(obj, pinvoke_value):
return obj.value
else:
return obj
# --------------------------------------------------------------------
# Primitive types
from System import Single, Double, IntPtr
class pinvoke_primitive(pinvoke_value):
def __init__(self, value=None):
if value is None:
value = self.type()
if not isinstance(value, self.type):
expected = self.type.__name__
given = value.__class__.__name__
msg = "%s expected instead of %s" % (expected, given)
raise TypeError(msg)
self.value = value
def __repr__(self):
clsname = self.__class__.__name__
return "%s(%r)" % (clsname, self.value)
class c_int(pinvoke_primitive):
type = int
class c_float(pinvoke_primitive):
type = Single
class c_double(pinvoke_primitive):
type = Double
class c_char_p(pinvoke_primitive):
type = str
class c_void_p(pinvoke_primitive):
type = IntPtr
# --------------------------------------------------------------------
# Reference
from System import Type
class pinvoke_reference(pinvoke_value):
def __init__(self, obj):
self.obj = obj
self.type = Type.MakeByRefType(obj.type)
self.value = obj.value
def __repr__(self):
return "byref(%r)" % (self.obj,)
def byref(obj):
if not isinstance(obj, pinvoke_value):
raise TypeError("byref() argument must be a ctypes instance")
ref = pinvoke_reference(obj)
return ref
# --------------------------------------------------------------------
# Utility
from System.Runtime.InteropServices import Marshal
def sizeof(obj):
return Marshal.SizeOf(obj.type)
# --------------------------------------------------------------------
# Dynamic P/Invoke
from System import Array
from System.Reflection import CallingConventions, MethodAttributes
from System.Runtime.InteropServices import CallingConvention, CharSet
from IronPython.Runtime.Calls import BuiltinFunction, FunctionType
class pinvoke_method:
pinvoke_attributes = (
MethodAttributes.Public |
MethodAttributes.Static |
MethodAttributes.PinvokeImpl
)
calling_convention = None
return_type = None
def __init__(self, dll, entry):
self.dll = dll
self.entry = entry
self.restype = None
self.argtypes = None
self.func = None
self.signatures = set()
def create(self, restype, argtypes):
dll = self.dll
entry = self.entry
attributes = self.pinvoke_attributes
cc = self.calling_convention
clr_argtypes = Array[Type](argtypes)
module = pinvoke_module()
module.DefinePInvokeMethod(
entry, dll, attributes, CallingConventions.Standard,
restype, clr_argtypes, cc, CharSet.Ansi)
module.CreateGlobalFunctions()
method = module.GetMethod(entry)
self.func = BuiltinFunction.MakeOrAdd(
self.func, entry, method, FunctionType.Function)
self.signatures.add((restype, argtypes))
def __call__(self, *args):
if self.restype:
restype = self.restype.type
else:
restype = self.return_type.type
if self.argtypes:
argtypes = [argtype.type for argtype in self.argtypes]
else:
argtypes = [get_type(arg) for arg in args]
argtypes = tuple(argtypes)
if (restype, argtypes) not in self.signatures:
self.create(restype, argtypes)
args = [get_value(arg) for arg in args]
result = self.func(*args)
return result
# --------------------------------------------------------------------
# Function loader
def is_special_name(name):
return name.startswith('__') and name.endswith('__')
class pinvoke_dll:
method_class = None
def __init__(self, name):
self.name = name
def __repr__(self):
clsname = self.__class__.__name__
return "<%s '%s'>" % (clsname, self.name)
def __getattr__(self, name):
if is_special_name(name):
raise AttributeError(name)
method = self.method_class(self.name, name)
setattr(self, name, method)
return method
class CDLL(pinvoke_dll):
class method_class(pinvoke_method):
calling_convention = CallingConvention.Cdecl
return_type = c_int
# --------------------------------------------------------------------
# Library loader
class LibraryLoader(object):
def __init__(self, dlltype):
self.dlltype = dlltype
def __getattr__(self, name):
if is_special_name(name):
raise AttributeError(name)
dll = self.dlltype(name)
setattr(self, name, dll)
return dll
def LoadLibrary(self, name):
return self.dlltype(name)
cdll = LibraryLoader(CDLL)
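# Illustrative usage sketch, assuming this module runs under IronPython where
# the CLR types above are available (values are hypothetical):
#   x = c_int(7)        # -> c_int(7)
#   ref = byref(x)      # -> byref(c_int(7))
#   sizeof(x)           # Marshal.SizeOf of the underlying CLR integer type
#   libc = cdll.msvcrt  # CDLL wrapper; functions are bound lazily via P/Invoke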
|
mit
| -2,151,948,963,422,639,000
| 25.789238
| 70
| 0.571142
| false
| 4.105842
| false
| false
| false
|
bergolho1337/URI-Online-Judge
|
Basicos/Python/1061/main.py
|
1
|
1292
|
# -*- coding: utf-8 -*-
def converteString (dia, hora):
evento = []
# Parse do dia
num = dia[4:6]
evento.append(int(num))
# Parse da hora
num = hora[0:2]
evento.append(int(num))
# Parse dos minutos
num = hora[5:7]
evento.append(int(num))
# Parse dos segundos
num = hora[10:12]
evento.append(int(num))
return evento
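# Illustrative example (input format inferred from the slicing above, i.e.
# lines like "Dia 5" and "08 : 12 : 23"):
#   converteString("Dia 5", "08 : 12 : 23") -> [5, 8, 12, 23]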
def calculaDuracao (inicio, fim):
inicio_seg = (inicio[0]*86400)+(inicio[1]*3600)+(inicio[2]*60)+(inicio[3])
fim_seg = (fim[0]*86400)+(fim[1]*3600)+(fim[2]*60)+(fim[3])
duracao_seg = fim_seg - inicio_seg
dias = duracao_seg / 86400
duracao_seg = duracao_seg - (dias*86400)
horas = duracao_seg / 3600
duracao_seg = duracao_seg - (horas*3600)
minutos = duracao_seg / 60
duracao_seg = duracao_seg - (minutos*60)
segundos = duracao_seg
return dias, horas, minutos, segundos
dia_inicio = raw_input()
hora_inicio = raw_input()
dia_fim = raw_input()
hora_fim = raw_input()
evento_inicio = converteString(dia_inicio,hora_inicio)
evento_fim = converteString(dia_fim,hora_fim)
dias, horas, minutos, segundos = calculaDuracao(evento_inicio,evento_fim)
print("%d dia(s)" % dias)
print("%d hora(s)" % horas)
print("%d minuto(s)" % minutos)
print("%d segundo(s)" % segundos)
|
gpl-2.0
| -1,258,978,114,268,866,600
| 25.387755
| 78
| 0.629257
| false
| 2.327928
| false
| false
| false
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/fontTools/varLib/plot.py
|
5
|
4153
|
"""Visualize DesignSpaceDocument and resulting VariationModel."""
from fontTools.varLib.models import VariationModel, supportScalar
from fontTools.designspaceLib import DesignSpaceDocument
from matplotlib import pyplot
from mpl_toolkits.mplot3d import axes3d
from itertools import cycle
import math
import logging
import sys
log = logging.getLogger(__name__)
def stops(support, count=10):
a,b,c = support
return [a + (b - a) * i / count for i in range(count)] + \
[b + (c - b) * i / count for i in range(count)] + \
[c]
def _plotLocationsDots(locations, axes, subplot, **kwargs):
for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
if len(axes) == 1:
subplot.plot(
[loc.get(axes[0], 0)],
[1.],
'o',
color=color,
**kwargs
)
elif len(axes) == 2:
subplot.plot(
[loc.get(axes[0], 0)],
[loc.get(axes[1], 0)],
[1.],
'o',
color=color,
**kwargs
)
else:
raise AssertionError(len(axes))
def plotLocations(locations, fig, names=None, **kwargs):
n = len(locations)
cols = math.ceil(n**.5)
rows = math.ceil(n / cols)
if names is None:
names = [None] * len(locations)
model = VariationModel(locations)
names = [names[model.reverseMapping[i]] for i in range(len(names))]
axes = sorted(locations[0].keys())
if len(axes) == 1:
_plotLocations2D(
model, axes[0], fig, cols, rows, names=names, **kwargs
)
elif len(axes) == 2:
_plotLocations3D(
model, axes, fig, cols, rows, names=names, **kwargs
)
else:
raise ValueError("Only 1 or 2 axes are supported")
def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):
subplot = fig.add_subplot(111)
for i, (support, color, name) in enumerate(
zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
):
if name is not None:
subplot.set_title(name)
subplot.set_xlabel(axis)
pyplot.xlim(-1.,+1.)
Xs = support.get(axis, (-1.,0.,+1.))
X, Y = [], []
for x in stops(Xs):
y = supportScalar({axis:x}, support)
X.append(x)
Y.append(y)
subplot.plot(X, Y, color=color, **kwargs)
_plotLocationsDots(model.locations, [axis], subplot)
def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):
ax1, ax2 = axes
axis3D = fig.add_subplot(111, projection='3d')
for i, (support, color, name) in enumerate(
zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
):
if name is not None:
axis3D.set_title(name)
axis3D.set_xlabel(ax1)
axis3D.set_ylabel(ax2)
pyplot.xlim(-1.,+1.)
pyplot.ylim(-1.,+1.)
Xs = support.get(ax1, (-1.,0.,+1.))
Ys = support.get(ax2, (-1.,0.,+1.))
for x in stops(Xs):
X, Y, Z = [], [], []
for y in Ys:
z = supportScalar({ax1:x, ax2:y}, support)
X.append(x)
Y.append(y)
Z.append(z)
axis3D.plot(X, Y, Z, color=color, **kwargs)
for y in stops(Ys):
X, Y, Z = [], [], []
for x in Xs:
z = supportScalar({ax1:x, ax2:y}, support)
X.append(x)
Y.append(y)
Z.append(z)
axis3D.plot(X, Y, Z, color=color, **kwargs)
_plotLocationsDots(model.locations, [ax1, ax2], axis3D)
def plotDocument(doc, fig, **kwargs):
doc.normalize()
locations = [s.location for s in doc.sources]
names = [s.name for s in doc.sources]
plotLocations(locations, fig, names, **kwargs)
def main(args=None):
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
# configure the library logger (for >= WARNING)
configLogger()
# comment this out to enable debug messages from logger
# log.setLevel(logging.DEBUG)
if len(args) < 1:
print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
print(" or")
print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
sys.exit(1)
fig = pyplot.figure()
fig.set_tight_layout(True)
if len(args) == 1 and args[0].endswith('.designspace'):
doc = DesignSpaceDocument()
doc.read(args[0])
plotDocument(doc, fig)
else:
axes = [chr(c) for c in range(ord('A'), ord('Z')+1)]
locs = [dict(zip(axes, (float(v) for v in s.split(',')))) for s in args]
plotLocations(locs, fig)
pyplot.show()
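# Added usage sketch (not part of the original module): plot the supports of
# two masters on a single, made-up 'wght' axis. Requires matplotlib; call
# manually.
def _demo_plot_locations():
    fig = pyplot.figure()
    plotLocations([{'wght': 0.0}, {'wght': 1.0}], fig, names=['Regular', 'Bold'])
    pyplot.show()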
if __name__ == '__main__':
import sys
sys.exit(main())
|
apache-2.0
| -5,397,906,847,771,724,000
| 23.868263
| 80
| 0.642427
| false
| 2.69151
| false
| false
| false
|
inonit/django-chemtrails
|
tests/testapp/migrations/0005_guild.py
|
1
|
1061
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 13:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('testapp', '0004_book_view_book_permission'),
]
operations = [
migrations.CreateModel(
name='Guild',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='guild_contacts', to='testapp.Author')),
('members', models.ManyToManyField(related_name='guild_set', to='testapp.Author', verbose_name='members')),
],
),
migrations.AddField(
model_name='author',
name='guilds',
field=models.ManyToManyField(blank=True, to='testapp.Guild'),
),
]
|
mit
| 6,104,085,431,111,180,000
| 34.366667
| 144
| 0.600377
| false
| 4.144531
| false
| false
| false
|
alfred82santa/tarrabme2
|
src/orgs/models.py
|
1
|
1856
|
from django.db import models
from common.models import CommonModel, AbstractContact, AbstractAddress
from django.contrib.auth.models import Group
from imagekit.models import ProcessedImageField, ImageSpecField
from imagekit.processors import ResizeToFill
class Organization(CommonModel):
name = models.CharField(max_length=100, unique=True)
commercial_name = models.CharField(max_length=150, unique=True)
prefix = models.CharField(max_length=6, unique=True)
active = models.BooleanField('active', default=True)
logo = ProcessedImageField(
upload_to="logos",
processors=[ResizeToFill(400, 400)],
)
logo_thumbnail = ImageSpecField(source='logo',
processors=[ResizeToFill(50, 50)],)
def logo_thumbnail_img(self):
return '<img src="%s"/>' % self.logo_thumbnail.url
logo_thumbnail_img.allow_tags = True
logo_thumbnail_img.short_description = ''
class Meta:
pass
def __unicode__(self):
return self.name
class Contact(AbstractContact):
organization = models.ForeignKey(Organization, blank=False,
null=False, related_name="contacts_list"
)
class BillingAccount(AbstractAddress):
fiscal_number = models.CharField(max_length=126, unique=True)
payment_method = models.CharField(max_length=126, unique=True)
payment_data = models.CharField(max_length=126, unique=True)
organization = models.ForeignKey(Organization, blank=False,
null=False, related_name="contacts"
)
class OrganizationRole(Group):
organization = models.ForeignKey(Organization, blank=False,
null=False, related_name="roles"
)
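# Added sketch (not part of the original file): the logo_thumbnail_img helper
# is typically surfaced in the Django admin changelist. A registration along
# these lines would normally live in orgs/admin.py, not in this module:
#
#   from django.contrib import admin
#
#   class OrganizationAdmin(admin.ModelAdmin):
#       list_display = ('name', 'prefix', 'active', 'logo_thumbnail_img')
#
#   admin.site.register(Organization, OrganizationAdmin)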
|
gpl-3.0
| -9,086,944,784,570,360,000
| 34.692308
| 77
| 0.634698
| false
| 4.450839
| false
| false
| false
|
anneline/Bika-LIMS
|
bika/lims/utils/__init__.py
|
1
|
13899
|
from time import time
from AccessControl import ModuleSecurityInfo, allow_module
from bika.lims import logger
from bika.lims.browser import BrowserView
from DateTime import DateTime
from email import Encoders
from email.MIMEBase import MIMEBase
from plone.memoize import ram
from plone.registry.interfaces import IRegistry
from Products.Archetypes.public import DisplayList
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from zope.component import queryUtility
from zope.i18n import translate
from zope.i18n.locales import locales
import App
import Globals
import os
import re
import urllib2
ModuleSecurityInfo('email.Utils').declarePublic('formataddr')
allow_module('csv')
def to_utf8(text):
if text is None:
text = ''
return safe_unicode(text).encode('utf-8')
def to_unicode(text):
if text is None:
text = ''
return safe_unicode(text)
def t(i18n_msg):
"""Safely translate and convert to UTF8, any zope i18n msgid returned from
a bikaMessageFactory _
"""
return to_utf8(translate(i18n_msg))
# Wrapper for PortalTransport's sendmail - don't know why there sendmail
# method is marked private
ModuleSecurityInfo('Products.bika.utils').declarePublic('sendmail')
# Protected( Publish, 'sendmail')
def sendmail(portal, from_addr, to_addrs, msg):
mailspool = portal.portal_mailspool
mailspool.sendmail(from_addr, to_addrs, msg)
class js_log(BrowserView):
def __call__(self, message):
"""Javascript sends a string for us to place into the log.
"""
self.logger.info(message)
class js_err(BrowserView):
def __call__(self, message):
"""Javascript sends a string for us to place into the error log
"""
        self.logger.error(message)
ModuleSecurityInfo('Products.bika.utils').declarePublic('printfile')
def printfile(portal, from_addr, to_addrs, msg):
""" set the path, then the cmd 'lpr filepath'
temp_path = 'C:/Zope2/Products/Bika/version.txt'
os.system('lpr "%s"' %temp_path)
"""
pass
def _cache_key_getUsers(method, context, roles=[], allow_empty=True):
key = time() // (60 * 60), roles, allow_empty
return key
@ram.cache(_cache_key_getUsers)
def getUsers(context, roles, allow_empty=True):
""" Present a DisplayList containing users in the specified
list of roles
"""
mtool = getToolByName(context, 'portal_membership')
pairs = allow_empty and [['', '']] or []
users = mtool.searchForMembers(roles=roles)
for user in users:
uid = user.getId()
fullname = user.getProperty('fullname')
if not fullname:
fullname = uid
pairs.append((uid, fullname))
pairs.sort(lambda x, y: cmp(x[1], y[1]))
return DisplayList(pairs)
def isActive(obj):
""" Check if obj is inactive or cancelled.
"""
wf = getToolByName(obj, 'portal_workflow')
if (hasattr(obj, 'inactive_state') and obj.inactive_state == 'inactive') or \
wf.getInfoFor(obj, 'inactive_state', 'active') == 'inactive':
return False
    if (hasattr(obj, 'cancellation_state') and obj.cancellation_state == 'cancelled') or \
wf.getInfoFor(obj, 'cancellation_state', 'active') == 'cancelled':
return False
return True
def formatDateQuery(context, date_id):
""" Obtain and reformat the from and to dates
into a date query construct
"""
from_date = context.REQUEST.get('%s_fromdate' % date_id, None)
if from_date:
from_date = from_date + ' 00:00'
to_date = context.REQUEST.get('%s_todate' % date_id, None)
if to_date:
to_date = to_date + ' 23:59'
date_query = {}
if from_date and to_date:
date_query = {'query': [from_date, to_date],
'range': 'min:max'}
elif from_date or to_date:
date_query = {'query': from_date or to_date,
'range': from_date and 'min' or 'max'}
return date_query
def formatDateParms(context, date_id):
""" Obtain and reformat the from and to dates
into a printable date parameter construct
"""
from_date = context.REQUEST.get('%s_fromdate' % date_id, None)
to_date = context.REQUEST.get('%s_todate' % date_id, None)
date_parms = {}
if from_date and to_date:
date_parms = 'from %s to %s' % (from_date, to_date)
elif from_date:
date_parms = 'from %s' % (from_date)
elif to_date:
date_parms = 'to %s' % (to_date)
return date_parms
def formatDuration(context, totminutes):
""" Format a time period in a usable manner: eg. 3h24m
"""
mins = totminutes % 60
hours = (totminutes - mins) / 60
if mins:
mins_str = '%sm' % mins
else:
mins_str = ''
if hours:
hours_str = '%sh' % hours
else:
hours_str = ''
return '%s%s' % (hours_str, mins_str)
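# Added note (not part of the original module): formatDuration(None, 204)
# yields '3h24m' (204 minutes = 3 hours + 24 minutes); the context argument
# is accepted but unused.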
def formatDecimalMark(value, decimalmark='.'):
""" Dummy method to replace decimal mark from an input string.
Assumes that 'value' uses '.' as decimal mark and ',' as
thousand mark.
"""
rawval = value
if decimalmark == ',':
rawval = rawval.replace('.', '[comma]')
rawval = rawval.replace(',', '.')
rawval = rawval.replace('[comma]', ',')
return rawval
# encode_header function copied from roundup's rfc2822 package.
hqre = re.compile(r'^[A-z0-9!"#$%%&\'()*+,-./:;<=>?@\[\]^_`{|}~ ]+$')
ModuleSecurityInfo('Products.bika.utils').declarePublic('encode_header')
def encode_header(header, charset='utf-8'):
""" Will encode in quoted-printable encoding only if header
contains non latin characters
"""
# Return empty headers unchanged
if not header:
return header
# return plain header if it does not contain non-ascii characters
if hqre.match(header):
return header
quoted = ''
# max_encoded = 76 - len(charset) - 7
for c in header:
# Space may be represented as _ instead of =20 for readability
if c == ' ':
quoted += '_'
# These characters can be included verbatim
elif hqre.match(c):
quoted += c
# Otherwise, replace with hex value like =E2
else:
quoted += "=%02X" % ord(c)
return '=?%s?q?%s?=' % (charset, quoted)
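# Added note (not part of the original module): under Python 2, the UTF-8
# byte string 'Gr\xc3\xbc\xc3\x9fe' is encoded by encode_header() as
# '=?utf-8?q?Gr=C3=BC=C3=9Fe?='; plain ASCII headers are returned unchanged.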
def zero_fill(matchobj):
return matchobj.group().zfill(8)
num_sort_regex = re.compile('\d+')
ModuleSecurityInfo('Products.bika.utils').declarePublic('sortable_title')
def sortable_title(portal, title):
"""Convert title to sortable title
"""
if not title:
return ''
def_charset = portal.plone_utils.getSiteEncoding()
sortabletitle = title.lower().strip()
# Replace numbers with zero filled numbers
sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle)
# Truncate to prevent bloat
for charset in [def_charset, 'latin-1', 'utf-8']:
try:
sortabletitle = safe_unicode(sortabletitle, charset)[:30]
sortabletitle = sortabletitle.encode(def_charset or 'utf-8')
break
except UnicodeError:
pass
except TypeError:
# If we get a TypeError if we already have a unicode string
sortabletitle = sortabletitle[:30]
break
return sortabletitle
def logged_in_client(context, member=None):
if not member:
membership_tool = getToolByName(context, 'portal_membership')
member = membership_tool.getAuthenticatedMember()
client = None
groups_tool = context.portal_groups
member_groups = [groups_tool.getGroupById(group.id).getGroupName()
for group in groups_tool.getGroupsByUserId(member.id)]
if 'Clients' in member_groups:
for obj in context.clients.objectValues("Client"):
if member.id in obj.users_with_local_role('Owner'):
client = obj
return client
def changeWorkflowState(content, wf_id, state_id, acquire_permissions=False,
portal_workflow=None, **kw):
"""Change the workflow state of an object
@param content: Content obj which state will be changed
@param state_id: name of the state to put on content
@param acquire_permissions: True->All permissions unchecked and on riles and
acquired
False->Applies new state security map
@param portal_workflow: Provide workflow tool (optimisation) if known
@param kw: change the values of same name of the state mapping
@return: None
"""
if portal_workflow is None:
portal_workflow = getToolByName(content, 'portal_workflow')
# Might raise IndexError if no workflow is associated to this type
found_wf = 0
for wf_def in portal_workflow.getWorkflowsFor(content):
if wf_id == wf_def.getId():
found_wf = 1
break
if not found_wf:
logger.error("%s: Cannot find workflow id %s" % (content, wf_id))
wf_state = {
'action': None,
'actor': None,
'comments': "Setting state to %s" % state_id,
'review_state': state_id,
'time': DateTime(),
}
# Updating wf_state from keyword args
for k in kw.keys():
# Remove unknown items
if k not in wf_state:
del kw[k]
if 'review_state' in kw:
del kw['review_state']
wf_state.update(kw)
portal_workflow.setStatusOf(wf_id, content, wf_state)
if acquire_permissions:
# Acquire all permissions
for permission in content.possible_permissions():
content.manage_permission(permission, acquire=1)
else:
# Setting new state permissions
wf_def.updateRoleMappingsFor(content)
# Map changes to the catalogs
content.reindexObject(idxs=['allowedRolesAndUsers', 'review_state'])
return
def tmpID():
import binascii
return binascii.hexlify(os.urandom(16))
def isnumber(s):
try:
float(s)
return True
except ValueError:
return False
def createPdf(htmlreport, outfile=None, css=None):
debug_mode = App.config.getConfiguration().debug_mode
# XXX css must be a local file - urllib fails under robotframework tests.
css_def = ''
if css:
if css.startswith("http://") or css.startswith("https://"):
# Download css file in temp dir
u = urllib2.urlopen(css)
_cssfile = Globals.INSTANCE_HOME + '/var/' + tmpID() + '.css'
localFile = open(_cssfile, 'w')
localFile.write(u.read())
localFile.close()
else:
_cssfile = css
cssfile = open(_cssfile, 'r')
css_def = cssfile.read()
if not outfile:
outfile = Globals.INSTANCE_HOME + "/var/" + tmpID() + ".pdf"
from weasyprint import HTML, CSS
import os
if css:
HTML(string=htmlreport, encoding='utf-8').write_pdf(outfile,
stylesheets=[CSS(string=css_def)])
else:
HTML(string=htmlreport, encoding='utf-8').write_pdf(outfile)
if debug_mode:
htmlfilepath = Globals.INSTANCE_HOME + "/var/" + tmpID() + ".html"
htmlfile = open(htmlfilepath, 'w')
htmlfile.write(htmlreport)
htmlfile.close()
    return open(outfile, 'r').read()
def attachPdf(mimemultipart, pdfreport, filename=None):
    part = MIMEBase('application', 'pdf')
part.add_header('Content-Disposition',
'attachment; filename="%s.pdf"' % (filename or tmpID()))
part.set_payload(pdfreport)
Encoders.encode_base64(part)
mimemultipart.attach(part)
def get_invoice_item_description(obj):
if obj.portal_type == 'AnalysisRequest':
sample = obj.getSample()
samplepoint = sample.getSamplePoint()
samplepoint = samplepoint and samplepoint.Title() or ''
sampletype = sample.getSampleType()
sampletype = sampletype and sampletype.Title() or ''
description = sampletype + ' ' + samplepoint
elif obj.portal_type == 'SupplyOrder':
products = obj.folderlistingFolderContents()
products = [o.getProduct().Title() for o in products]
description = ', '.join(products)
return description
def currency_format(context, locale):
locale = locales.getLocale(locale)
currency = context.bika_setup.getCurrency()
symbol = locale.numbers.currencies[currency].symbol
def format(val):
return '%s %0.2f' % (symbol, val)
return format
def getHiddenAttributesForClass(classname):
try:
registry = queryUtility(IRegistry)
hiddenattributes = registry.get('bika.lims.hiddenattributes', ())
if hiddenattributes is not None:
for alist in hiddenattributes:
if alist[0] == classname:
return alist[1:]
except:
logger.warning(
            'Problem accessing optionally hidden attributes in registry')
return []
def isAttributeHidden(classname, fieldname):
try:
registry = queryUtility(IRegistry)
hiddenattributes = registry.get('bika.lims.hiddenattributes', ())
if hiddenattributes is not None:
for alist in hiddenattributes:
if alist[0] == classname:
return fieldname in alist[1:]
except:
logger.warning(
            'Problem accessing optionally hidden attributes in registry')
return False
def dicts_to_dict(dictionaries, key_subfieldname):
"""Convert a list of dictionaries into a dictionary of dictionaries.
key_subfieldname must exist in each Record's subfields and have a value,
which will be used as the key for the new dictionary. If a key is duplicated,
the earlier value will be overwritten.
"""
result = {}
for d in dictionaries:
result[d[key_subfieldname]] = d
return result
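# Added note (not part of the original module); the record shape is illustrative:
#   dicts_to_dict([{'keyword': 'Ca', 'value': 1}, {'keyword': 'Mg', 'value': 2}], 'keyword')
#   -> {'Ca': {'keyword': 'Ca', 'value': 1}, 'Mg': {'keyword': 'Mg', 'value': 2}}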
|
agpl-3.0
| -5,184,133,056,224,156,000
| 29.818182
| 86
| 0.630189
| false
| 3.817358
| false
| false
| false
|
zmarvel/slowboy
|
slowboy/util.py
|
1
|
1753
|
import abc
from collections import namedtuple
Op = namedtuple('Op', ['function', 'cycles', 'description'])
class ClockListener(metaclass=abc.ABCMeta):
@abc.abstractmethod
def notify(self, clock: int, cycles: int):
"""Notify the listener that the clock has advanced.
:param clock: The new value of the CPU clock.
:param cycles: The number of cycles that have passed since the last
notification."""
pass
def uint8toBCD(uint8):
"""Convert an 8-bit unsigned integer to binary-coded decimal."""
d1 = uint8 // 10
d0 = uint8 % 10
return (d1 << 4) | d0
def sub_s8(x, y):
"""Subtract two 8-bit integers stored in two's complement."""
return (x + twoscompl8(y)) & 0xff
def sub_s16(x, y):
"""Subtract two 16-bit integers stored in two's complement."""
return (x + twoscompl16(y)) & 0xffff
def add_s8(x, y):
"""Add two 8-bit integers stored in two's complement."""
return (x + y) & 0xff
def add_s16(x, y):
"""Add two 16-bit integers stored in two's complement."""
return (x + y) & 0xffff
def twoscompl8(x):
"""Returns the reciprocal of 8-bit x in two's complement."""
return ((x ^ 0xff) + 1) & 0xff
def twoscompl16(x):
"""Returns the reciprocal of 16-bit x in two's complement."""
return ((x ^ 0xffff) + 1) & 0xffff
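# Added notes (not part of the original module):
#   uint8toBCD(42) -> 0x42 (decimal 66): tens nibble 4, ones nibble 2.
#   twoscompl8(0x20) -> 0xe0, so sub_s8(0x10, 0x20) -> 0xf0, i.e. -16 read as
#   an 8-bit two's-complement value.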
def hexdump(bytes, line_len, start=0):
line = []
j = 0
for b in bytes:
s = '{:02x}'.format(b)
if j % line_len == 0 and j > 0:
yield '{:04x}: {}'.format(start+j-line_len, ' '.join(line))
line = []
j += 1
line.append(s)
    # the final (possibly partial) line starts len(line) entries back from j
    yield '{:04x}: {}'.format(start + j - len(line), ' '.join(line))
def print_lines(it):
for line in it:
print(line)
|
mit
| 8,089,310,398,789,706,000
| 24.405797
| 75
| 0.590416
| false
| 3.240296
| false
| false
| false
|
koreiklein/fantasia
|
ui/render/gl/distances.py
|
1
|
1104
|
# Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com>
# Constants for gl rendering of basic are collected here.
from ui.render.gl import colors
epsilon = 0.0001
divider_spacing = 15.0
notThickness = 22.0
notShiftThickness = notThickness + 21.0
# Amount by which to shift the value contained inside a Not.
notShiftOffset = [notShiftThickness + 5, notShiftThickness, 0.0]
quantifier_variables_spacing = 100.0
variable_binding_spacing = 20.0
quantifier_before_divider_spacing = 10.0
quantifier_after_divider_spacing = 55.0
infixSpacing = 88.0
applySpacing = 16.0
productVariableHorizontalSpacing = 0.0
productVariableBorder = 10.0
symbolBackgroundBorderWidth = 30.0
variableBackgroundBorderWidth = 30.0
holdsSpacing = 60.0
iffSpacing = 35.0
exponential_border_width = 40.0
min_unit_divider_length = 100.0
min_intersect_divider_length = 250.0
unit_width = 20.0
quantifier_divider_width = 20.0
conjunctiveDividerWidth = 20.0
def capLengthOfDividerByLength(length):
return min(35.0, length / 7.0)
inject_spacing = 8.0
before_dot_spacing = 8.0
after_dot_spacing = 8.0
dotWidth = 15.0
|
gpl-2.0
| -6,752,107,023,383,802,000
| 19.444444
| 64
| 0.764493
| false
| 2.860104
| false
| false
| false
|
shakfu/start-vm
|
default/bin/normalize.py
|
1
|
1259
|
#!/usr/bin/env python
import hashlib
import os
import sys
from datetime import datetime
HASH = hashlib.md5(str(datetime.now()).encode('utf-8')).hexdigest()
def normalize(path, file_func=None, dir_func=None):
''' recursive normalization of directory and file names
applies the following changes to directory and filenames:
- lowercasing
- converts spaces to '-'
'''
norm_func = lambda x: x.lower().replace(' ', '-')
if not file_func:
file_func = norm_func
if not dir_func:
dir_func = norm_func
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
f = os.path.join(root, name)
print(file_func(f))
for name in dirs:
d = os.path.join(root, name)
#print(dir_func(d))
def norm_func(path):
entry = os.path.basename(path)
parent = os.path.dirname(path)
entry_norm = entry.lower().replace(' ', '-')
p = os.path.join(parent, entry_norm)+HASH
os.rename(path, p)
    new = p[:-len(HASH)]  # drop the temporary hash suffix (str.strip removes characters, not a suffix)
os.rename(p, new)
return new
def norm_path(path=None):
if not path:
path = sys.argv[1]
normalize(path, norm_func)
#normalize(path, None, norm_func)
if __name__ == '__main__':
norm_path()
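# Example invocation (added; the path below is illustrative):
#   python normalize.py "/tmp/My Photos"
# Files such as "/tmp/My Photos/Holiday Pic.JPG" are renamed in place to
# "/tmp/My Photos/holiday-pic.jpg"; directory names themselves are left
# untouched because the dir_func branch above is commented out.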
|
mit
| -1,232,118,386,050,333,700
| 23.211538
| 65
| 0.597299
| false
| 3.449315
| false
| false
| false
|
luci/luci-py
|
appengine/components/components/auth/change_log_test.py
|
2
|
45674
|
#!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import sys
import unittest
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components import utils
from components.auth import change_log
from components.auth import model
from components.auth.proto import realms_pb2
from components.auth.proto import security_config_pb2
from test_support import test_case
class MakeInitialSnapshotTest(test_case.TestCase):
"""Tests for ensure_initial_snapshot function."""
def test_works(self):
# Initial entities. Don't call 'record_revision' to imitate "old"
# application without history related code.
@ndb.transactional
def make_auth_db():
model.AuthGlobalConfig(key=model.root_key()).put()
model.AuthIPWhitelistAssignments(
key=model.ip_whitelist_assignments_key()).put()
model.AuthGroup(key=model.group_key('A group')).put()
model.AuthIPWhitelist(key=model.ip_whitelist_key('A whitelist')).put()
model.replicate_auth_db()
make_auth_db()
# Bump auth_db once more to avoid hitting trivial case of "processing first
# revision ever".
auth_db_rev = ndb.transaction(model.replicate_auth_db)
self.assertEqual(2, auth_db_rev)
# Now do the work.
change_log.ensure_initial_snapshot(auth_db_rev)
# Generated new AuthDB rev with updated entities.
self.assertEqual(3, model.get_auth_db_revision())
# Check all *History entities exist now.
p = model.historical_revision_key(3)
self.assertIsNotNone(
ndb.Key('AuthGlobalConfigHistory', 'root', parent=p).get())
self.assertIsNotNone(
ndb.Key(
'AuthIPWhitelistAssignmentsHistory', 'default', parent=p).get())
self.assertIsNotNone(ndb.Key('AuthGroupHistory', 'A group', parent=p).get())
self.assertIsNotNone(
ndb.Key('AuthIPWhitelistHistory', 'A whitelist', parent=p).get())
# Call again, should be noop (marker is set).
change_log.ensure_initial_snapshot(3)
self.assertEqual(3, model.get_auth_db_revision())
ident = lambda x: model.Identity.from_bytes('user:' + x)
glob = lambda x: model.IdentityGlob.from_bytes('user:' + x)
def make_group(name, comment, **kwargs):
group = model.AuthGroup(key=model.group_key(name), **kwargs)
group.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment=comment)
group.put()
def make_ip_whitelist(name, comment, **kwargs):
wl = model.AuthIPWhitelist(key=model.ip_whitelist_key(name), **kwargs)
wl.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment=comment)
wl.put()
def security_config(regexps):
msg = security_config_pb2.SecurityConfig(internal_service_regexp=regexps)
return msg.SerializeToString()
class GenerateChangesTest(test_case.TestCase):
"""Tests for generate_changes function."""
def setUp(self):
super(GenerateChangesTest, self).setUp()
self.mock(change_log, 'enqueue_process_change_task', lambda _: None)
self.mock_now(datetime.datetime(2015, 1, 2, 3, 4, 5))
def auth_db_transaction(self, callback):
"""Imitates AuthDB change and subsequent 'process-change' task.
Returns parent entity of entity subgroup with all generated changes.
"""
@ndb.transactional
def run():
callback()
return model.replicate_auth_db()
auth_db_rev = run()
change_log.process_change(auth_db_rev)
return change_log.change_log_revision_key(auth_db_rev)
def grab_all(self, ancestor):
"""Returns dicts with all entities under given ancestor."""
entities = {}
def cb(key):
# Skip AuthDBLogRev itself, it's not interesting.
if key == ancestor:
return
as_str = []
k = key
while k and k != ancestor:
as_str.append('%s:%s' % (k.kind(), k.id()))
k = k.parent()
entities['/'.join(as_str)] = {
prop: val for prop, val in key.get().to_dict().items() if val
}
ndb.Query(ancestor=ancestor).map(cb, keys_only=True)
return entities
def test_works(self):
# Touch all kinds of entities at once. More thorough tests for per-entity
# changes are below.
def touch_all():
make_group(
name='A group',
members=[ident('a@example.com'), ident('b@example.com')],
description='Blah',
comment='New group')
make_ip_whitelist(
name='An IP whitelist',
subnets=['127.0.0.1/32'],
description='Bluh',
comment='New IP whitelist')
a = model.AuthIPWhitelistAssignments(
key=model.ip_whitelist_assignments_key(),
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=ident('a@example.com'),
ip_whitelist='An IP whitelist')
])
a.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='New assignment')
a.put()
c = model.AuthGlobalConfig(
key=model.root_key(),
oauth_client_id='client_id',
oauth_client_secret='client_secret',
oauth_additional_client_ids=['1', '2'])
c.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Config change')
c.put()
r = model.AuthRealmsGlobals(
key=model.realms_globals_key(),
permissions=[realms_pb2.Permission(name='luci.dev.p1')])
r.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='New permission')
r.put()
p = model.AuthProjectRealms(
key=model.project_realms_key('proj1'),
realms=realms_pb2.Realms(api_version=1234),
config_rev='config_rev',
perms_rev='prems_rev')
p.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='New project')
p.put()
changes = self.grab_all(self.auth_db_transaction(touch_all))
self.assertEqual({
'AuthDBChange:AuthGlobalConfig$root!7000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'oauth_client_id': u'client_id',
'oauth_client_secret': u'client_secret',
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGlobalConfig$root!7100': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'oauth_additional_client_ids': [u'1', u'2'],
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_CREATED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'description': u'Blah',
'owners': u'administrators',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1200': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'members': [
model.Identity(kind='user', name='a@example.com'),
model.Identity(kind='user', name='b@example.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelist$An IP whitelist!3000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'New IP whitelist',
'description': u'Bluh',
'target': u'AuthIPWhitelist$An IP whitelist',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelist$An IP whitelist!3200': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'New IP whitelist',
'subnets': [u'127.0.0.1/32'],
'target': u'AuthIPWhitelist$An IP whitelist',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelistAssignments'
'$default$user:a@example.com!5000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
'comment': u'New assignment',
'identity': model.Identity(kind='user', name='a@example.com'),
'ip_whitelist': u'An IP whitelist',
'target': u'AuthIPWhitelistAssignments$default$user:a@example.com',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com')
},
'AuthDBChange:AuthProjectRealms$proj1!10000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED,
'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
'comment': u'New project',
'config_rev_new': u'config_rev',
'perms_rev_new': u'prems_rev',
'target': u'AuthProjectRealms$proj1',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com')
},
'AuthDBChange:AuthRealmsGlobals$globals!9000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED,
'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'],
'comment': u'New permission',
'permissions_added': [u'luci.dev.p1'],
'target': u'AuthRealmsGlobals$globals',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com')
},
}, changes)
def test_groups_diff(self):
def create():
make_group(
name='A group',
members=[ident('a@example.com'), ident('b@example.com')],
globs=[glob('*@example.com'), glob('*@other.com')],
nested=['A', 'B'],
description='Blah',
comment='New group')
changes = self.grab_all(self.auth_db_transaction(create))
self.assertEqual({
'AuthDBChange:AuthGroup$A group!1000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_CREATED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'description': u'Blah',
'owners': u'administrators',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1200': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'members': [
model.Identity(kind='user', name='a@example.com'),
model.Identity(kind='user', name='b@example.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1400': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'globs': [
model.IdentityGlob(kind='user', pattern='*@example.com'),
model.IdentityGlob(kind='user', pattern='*@other.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1600': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'nested': [u'A', u'B'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def modify():
g = model.group_key('A group').get()
g.members = [ident('a@example.com'), ident('c@example.com')]
g.globs = [glob('*@example.com'), glob('*@blah.com')]
g.nested = ['A', 'C']
g.description = 'Another blah'
g.owners = 'another-owners'
g.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Changed')
g.put()
changes = self.grab_all(self.auth_db_transaction(modify))
self.assertEqual({
'AuthDBChange:AuthGroup$A group!1100': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_DESCRIPTION_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'description': u'Another blah',
'old_description': u'Blah',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1150': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_OWNERS_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'old_owners': u'administrators',
'owners': u'another-owners',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1200': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'members': [model.Identity(kind='user', name='c@example.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1300': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'members': [model.Identity(kind='user', name='b@example.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1400': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'globs': [model.IdentityGlob(kind='user', pattern='*@blah.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1500': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'globs': [model.IdentityGlob(kind='user', pattern='*@other.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1600': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'nested': [u'C'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1700': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'nested': [u'B'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def delete():
g = model.group_key('A group').get()
g.record_deletion(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Deleted')
g.key.delete()
changes = self.grab_all(self.auth_db_transaction(delete))
self.assertEqual({
'AuthDBChange:AuthGroup$A group!1300': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'members': [
model.Identity(kind='user', name='a@example.com'),
model.Identity(kind='user', name='c@example.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1500': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'globs': [
model.IdentityGlob(kind='user', pattern='*@example.com'),
model.IdentityGlob(kind='user', pattern='*@blah.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1700': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'nested': [u'A', u'C'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1800': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_DELETED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'old_description': u'Another blah',
'old_owners': u'another-owners',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def test_ip_whitelists_diff(self):
def create():
make_ip_whitelist(
name='A list',
subnets=['127.0.0.1/32', '127.0.0.2/32'],
description='Blah',
comment='New list')
changes = self.grab_all(self.auth_db_transaction(create))
self.assertEqual({
'AuthDBChange:AuthIPWhitelist$A list!3000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'New list',
'description': u'Blah',
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelist$A list!3200': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'New list',
'subnets': [u'127.0.0.1/32', u'127.0.0.2/32'],
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def modify():
l = model.ip_whitelist_key('A list').get()
l.subnets = ['127.0.0.1/32', '127.0.0.3/32']
l.description = 'Another blah'
l.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Changed')
l.put()
changes = self.grab_all(self.auth_db_transaction(modify))
self.assertEqual({
'AuthDBChange:AuthIPWhitelist$A list!3100': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_DESCRIPTION_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'Changed',
'description': u'Another blah',
'old_description': u'Blah',
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelist$A list!3200': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'Changed',
'subnets': [u'127.0.0.3/32'],
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelist$A list!3300': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'Changed',
'subnets': [u'127.0.0.2/32'],
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def delete():
l = model.ip_whitelist_key('A list').get()
l.record_deletion(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Deleted')
l.key.delete()
changes = self.grab_all(self.auth_db_transaction(delete))
self.assertEqual({
'AuthDBChange:AuthIPWhitelist$A list!3300': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'Deleted',
'subnets': [u'127.0.0.1/32', u'127.0.0.3/32'],
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelist$A list!3400': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_IPWL_DELETED,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
'comment': u'Deleted',
'old_description': u'Another blah',
'target': u'AuthIPWhitelist$A list',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def test_ip_wl_assignments_diff(self):
def create():
a = model.AuthIPWhitelistAssignments(
key=model.ip_whitelist_assignments_key(),
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=ident('a@example.com'),
ip_whitelist='An IP whitelist'),
model.AuthIPWhitelistAssignments.Assignment(
identity=ident('b@example.com'),
ip_whitelist='Another IP whitelist'),
])
a.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='New assignment')
a.put()
changes = self.grab_all(self.auth_db_transaction(create))
self.assertEqual({
'AuthDBChange:AuthIPWhitelistAssignments$'
'default$user:a@example.com!5000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
'comment': u'New assignment',
'identity': model.Identity(kind='user', name='a@example.com'),
'ip_whitelist': u'An IP whitelist',
'target': u'AuthIPWhitelistAssignments$default$user:a@example.com',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelistAssignments$'
'default$user:b@example.com!5000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
'comment': u'New assignment',
'identity': model.Identity(kind='user', name='b@example.com'),
'ip_whitelist': u'Another IP whitelist',
'target': u'AuthIPWhitelistAssignments$default$user:b@example.com',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def change():
a = model.ip_whitelist_assignments_key().get()
a.assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=ident('a@example.com'),
ip_whitelist='Another IP whitelist'),
model.AuthIPWhitelistAssignments.Assignment(
identity=ident('c@example.com'),
ip_whitelist='IP whitelist'),
]
a.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='change')
a.put()
changes = self.grab_all(self.auth_db_transaction(change))
self.assertEqual({
'AuthDBChange:AuthIPWhitelistAssignments$'
'default$user:a@example.com!5000': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
'comment': u'change',
'identity': model.Identity(kind='user', name='a@example.com'),
'ip_whitelist': u'Another IP whitelist',
'target': u'AuthIPWhitelistAssignments$default$user:a@example.com',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelistAssignments$'
'default$user:b@example.com!5100': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_UNSET,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
'comment': u'change',
'identity': model.Identity(kind='user', name='b@example.com'),
'ip_whitelist': u'Another IP whitelist',
'target': u'AuthIPWhitelistAssignments$default$user:b@example.com',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthIPWhitelistAssignments$'
'default$user:c@example.com!5000': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
'comment': u'change',
'identity': model.Identity(kind='user', name='c@example.com'),
'ip_whitelist': u'IP whitelist',
'target': u'AuthIPWhitelistAssignments$default$user:c@example.com',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def test_global_config_diff(self):
def create():
c = model.AuthGlobalConfig(
key=model.root_key(),
oauth_client_id='client_id',
oauth_client_secret='client_secret',
oauth_additional_client_ids=['1', '2'])
c.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Config change')
c.put()
changes = self.grab_all(self.auth_db_transaction(create))
self.assertEqual({
'AuthDBChange:AuthGlobalConfig$root!7000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'oauth_client_id': u'client_id',
'oauth_client_secret': u'client_secret',
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGlobalConfig$root!7100': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'oauth_additional_client_ids': [u'1', u'2'],
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def modify():
c = model.root_key().get()
c.oauth_additional_client_ids = ['1', '3']
c.token_server_url = 'https://token-server'
c.security_config = security_config(['hi'])
c.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Config change')
c.put()
changes = self.grab_all(self.auth_db_transaction(modify))
self.assertEqual({
'AuthDBChange:AuthGlobalConfig$root!7100': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'oauth_additional_client_ids': [u'3'],
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGlobalConfig$root!7200': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'oauth_additional_client_ids': [u'2'],
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGlobalConfig$root!7300': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type':
change_log.AuthDBChange.CHANGE_CONF_TOKEN_SERVER_URL_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'target': u'AuthGlobalConfig$root',
'token_server_url_new': u'https://token-server',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGlobalConfig$root!7400': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type':
change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
'comment': u'Config change',
'security_config_new': security_config(['hi']),
'target': u'AuthGlobalConfig$root',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def test_realms_globals_diff(self):
def create():
c = model.AuthRealmsGlobals(
key=model.realms_globals_key(),
permissions=[
realms_pb2.Permission(name='luci.dev.p1'),
realms_pb2.Permission(name='luci.dev.p2'),
realms_pb2.Permission(name='luci.dev.p3'),
])
c.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='New realms config')
c.put()
self.auth_db_transaction(create)
def modify():
ent = model.realms_globals_key().get()
ent.permissions = [
realms_pb2.Permission(name='luci.dev.p1'),
realms_pb2.Permission(name='luci.dev.p3'),
realms_pb2.Permission(name='luci.dev.p4'),
]
ent.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Realms config change')
ent.put()
changes = self.grab_all(self.auth_db_transaction(modify))
self.assertEqual({
'AuthDBChange:AuthRealmsGlobals$globals!9000': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type':
change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED,
'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'],
'comment': u'Realms config change',
'permissions_added': [u'luci.dev.p4'],
'permissions_removed': [u'luci.dev.p2'],
'target': u'AuthRealmsGlobals$globals',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def test_project_realms_diff(self):
# Note: in reality Realms.api_version is fixed. We change it in this test
# since it is the simplest field to change.
def create():
p = model.AuthProjectRealms(
key=model.project_realms_key('proj1'),
realms=realms_pb2.Realms(api_version=123),
config_rev='config_rev1',
perms_rev='perms_rev1')
p.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Created')
p.put()
changes = self.grab_all(self.auth_db_transaction(create))
self.assertEqual({
'AuthDBChange:AuthProjectRealms$proj1!10000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED,
'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
'comment': u'Created',
'config_rev_new': u'config_rev1',
'perms_rev_new': u'perms_rev1',
'target': u'AuthProjectRealms$proj1',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def update(api_version, config_rev, perms_rev):
p = model.project_realms_key('proj1').get()
p.realms = realms_pb2.Realms(api_version=api_version)
p.config_rev = config_rev
p.perms_rev = perms_rev
p.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Updated')
p.put()
# Update everything.
changes = self.grab_all(self.auth_db_transaction(
lambda: update(1234, 'config_rev2', 'perms_rev2')))
self.assertEqual({
'AuthDBChange:AuthProjectRealms$proj1!10100': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED,
'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
'comment': u'Updated',
'config_rev_new': u'config_rev2',
'config_rev_old': u'config_rev1',
'target': u'AuthProjectRealms$proj1',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthProjectRealms$proj1!10200': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type':
change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REEVALUATED,
'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
'comment': u'Updated',
'perms_rev_new': u'perms_rev2',
'perms_rev_old': u'perms_rev1',
'target': u'AuthProjectRealms$proj1',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
# Update realms_pb2.Realms, but do not change revisions.
changes = self.grab_all(self.auth_db_transaction(
lambda: update(12345, 'config_rev2', 'perms_rev2')))
self.assertEqual({
'AuthDBChange:AuthProjectRealms$proj1!10100': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED,
'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
'comment': u'Updated',
'config_rev_new': u'config_rev2',
'config_rev_old': u'config_rev2',
'target': u'AuthProjectRealms$proj1',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
# Update revisions, but don't actually touch realms.
changes = self.grab_all(self.auth_db_transaction(
lambda: update(12345, 'config_rev3', 'perms_rev3')))
self.assertEqual({}, changes)
def delete():
p = model.project_realms_key('proj1').get()
p.record_deletion(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Deleted')
p.key.delete()
changes = self.grab_all(self.auth_db_transaction(delete))
self.assertEqual({
'AuthDBChange:AuthProjectRealms$proj1!10300': {
'app_version': u'v1a',
'auth_db_rev': 5,
'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REMOVED,
'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
'comment': u'Deleted',
'config_rev_old': u'config_rev3',
'perms_rev_old': u'perms_rev3',
'target': u'AuthProjectRealms$proj1',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
class AuthDBChangeTest(test_case.TestCase):
# Test to_jsonish for AuthDBGroupChange and AuthDBIPWhitelistAssignmentChange,
# the rest are trivial.
def test_group_change_to_jsonish(self):
c = change_log.AuthDBGroupChange(
change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
target='AuthGroup$abc',
auth_db_rev=123,
who=ident('a@example.com'),
when=datetime.datetime(2015, 1, 2, 3, 4, 5),
comment='A comment',
app_version='v123',
description='abc',
members=[ident('a@a.com')],
globs=[glob('*@a.com')],
nested=['A'],
owners='abc',
old_owners='def')
self.assertEqual({
'app_version': 'v123',
'auth_db_rev': 123,
'change_type': 'GROUP_MEMBERS_ADDED',
'comment': 'A comment',
'description': 'abc',
'globs': ['user:*@a.com'],
'members': ['user:a@a.com'],
'nested': ['A'],
'old_description': None,
'old_owners': 'def',
'owners': 'abc',
'target': 'AuthGroup$abc',
'when': 1420167845000000,
'who': 'user:a@example.com',
}, c.to_jsonish())
def test_wl_assignment_to_jsonish(self):
c = change_log.AuthDBIPWhitelistAssignmentChange(
change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
target='AuthIPWhitelistAssignments$default',
auth_db_rev=123,
who=ident('a@example.com'),
when=datetime.datetime(2015, 1, 2, 3, 4, 5),
comment='A comment',
app_version='v123',
identity=ident('b@example.com'),
ip_whitelist='whitelist')
self.assertEqual({
'app_version': 'v123',
'auth_db_rev': 123,
'change_type': 'GROUP_MEMBERS_ADDED',
'comment': 'A comment',
'identity': 'user:b@example.com',
'ip_whitelist': 'whitelist',
'target': 'AuthIPWhitelistAssignments$default',
'when': 1420167845000000,
'who': 'user:a@example.com',
}, c.to_jsonish())
def test_security_config_change_to_jsonish(self):
c = change_log.AuthDBConfigChange(
change_type=change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED,
target='AuthGlobalConfig$default',
auth_db_rev=123,
who=ident('a@example.com'),
when=datetime.datetime(2015, 1, 2, 3, 4, 5),
comment='A comment',
app_version='v123',
security_config_old=None,
security_config_new=security_config(['hi']))
self.assertEqual({
'app_version': 'v123',
'auth_db_rev': 123,
'change_type': 'CONF_SECURITY_CONFIG_CHANGED',
'comment': 'A comment',
'oauth_additional_client_ids': [],
'oauth_client_id': None,
'oauth_client_secret': None,
'security_config_new': {'internal_service_regexp': [u'hi']},
'security_config_old': None,
'target': 'AuthGlobalConfig$default',
'token_server_url_new': None,
'token_server_url_old': None,
'when': 1420167845000000,
'who': 'user:a@example.com',
}, c.to_jsonish())
class ChangeLogQueryTest(test_case.TestCase):
# We know that some indexes are required, but the component can't declare them,
# so don't check them.
SKIP_INDEX_YAML_CHECK = True
def test_is_changle_log_indexed(self):
self.assertTrue(change_log.is_changle_log_indexed())
def test_make_change_log_query(self):
def mk_ch(tp, rev, target):
ch = change_log.AuthDBChange(
change_type=getattr(change_log.AuthDBChange, 'CHANGE_%s' % tp),
auth_db_rev=rev,
target=target)
ch.key = change_log.make_change_key(ch)
ch.put()
def key(c):
return '%s/%s' % (c.key.parent().id(), c.key.id())
mk_ch('GROUP_CREATED', 1, 'AuthGroup$abc')
mk_ch('GROUP_MEMBERS_ADDED', 1, 'AuthGroup$abc')
mk_ch('GROUP_CREATED', 1, 'AuthGroup$another')
mk_ch('GROUP_DELETED', 2, 'AuthGroup$abc')
mk_ch('GROUP_MEMBERS_ADDED', 2, 'AuthGroup$another')
# All. Most recent first. Largest event types first.
q = change_log.make_change_log_query()
self.assertEqual([
'2/AuthGroup$another!1200',
'2/AuthGroup$abc!1800',
'1/AuthGroup$another!1000',
'1/AuthGroup$abc!1200',
'1/AuthGroup$abc!1000',
], map(key, q.fetch()))
# Single revision only.
q = change_log.make_change_log_query(auth_db_rev=1)
self.assertEqual([
'1/AuthGroup$another!1000',
'1/AuthGroup$abc!1200',
'1/AuthGroup$abc!1000',
], map(key, q.fetch()))
# Single target only.
q = change_log.make_change_log_query(target='AuthGroup$another')
self.assertEqual([
'2/AuthGroup$another!1200',
'1/AuthGroup$another!1000',
], map(key, q.fetch()))
# Single revision and single target.
q = change_log.make_change_log_query(
auth_db_rev=1, target='AuthGroup$another')
self.assertEqual(['1/AuthGroup$another!1000'], map(key, q.fetch()))
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
apache-2.0
| -6,992,933,365,418,927,000
| 38.238832
| 80
| 0.593073
| false
| 3.291108
| true
| false
| false
|
HydrelioxGitHub/PiDDL
|
ZTPAGE.py
|
1
|
2847
|
# coding: utf-8
from urllib2 import urlopen
import urllib2
import bs4 as BeautifulSoup
class ZTPage:
def __init__(self, url):
self.url = url
self.update()
def update(self):
self.update_content()
self.parse_type()
self.parse_infos()
self.parse_links()
def update_content(self):
req = urllib2.Request(self.url, headers={'User-Agent': "Magic Browser"})
html = urlopen(req).read()
soup = BeautifulSoup.BeautifulSoup(html, "html5lib")
self.content = soup.find('div', class_="maincont")
def parse_type(self):
if "series" in self.url:
self.type = "Show"
if "films" in self.url:
self.type = "Movie"
def parse_links(self):
liste = {}
host = 'error'
html = self.content.find('div', class_="contentl").find_all(["span", "a"])
for elem in html:
if ('span' == elem.name) and (unicode(elem.string) != 'None'):
host = elem.string
liste[host] = {}
if elem.name == 'a':
elem.string = elem.string.replace("Episode", '').replace('Final', '').strip()
episode_number = int(elem.string)
liste[host][episode_number] = elem.attrs['href']
self.links = liste
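# resulting structure (host names and URLs are illustrative):
# {'Uptobox': {1: 'http://host/ep1', 2: 'http://host/ep2'}, 'Uploaded': {...}}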
def parse_infos(self):
# Retrieve Title
title = self.content.find('div', class_="titrearticles").h1.string
if self.type == "Show":
title = title.split("-")
self.title = title[0].strip()
# Retrieve Season for TV Shows
self.season = int(title[1].replace("Saison", "").replace('[Complete]', '').strip())
if self.type == "Movie":
self.title = title.strip()
# Retrieve Language, Format, Codec ...
info = self.content.find('div', class_="corps").div.span.span.b.strong.string
first_part = info.split('|')[0]
second_part = info.split('|')[1]
self.language = first_part.split(' ')[1].strip()
self.currentEpisode = first_part.split(' ')[0].strip()
self.currentEpisode = self.currentEpisode.replace('[', '')
self.currentEpisode = int(self.currentEpisode.split('/')[0])
# Encoding problem: drop the stray mangled character left over from "Qualité"
quality = second_part.replace("Qualit", '').strip()
quality = quality[1:]
# ...
self.quality = quality.strip()
def get_available_hosts(self):
return self.links.keys()
def get_tvshow_link(self, host, episodenumber):
alllinks = self.links[host]
link = alllinks[episodenumber]
return link
def print_report(self):
print self.url
print self.title
print self.season
print self.quality
print self.language
print self.currentEpisode
print self.links
|
gpl-2.0
| 8,344,752,789,403,362,000
| 32.892857
| 95
| 0.556024
| false
| 3.842105
| false
| false
| false
|
darknao/piOClock
|
ssd1351.py
|
1
|
13500
|
#!/bin/env python
# -*- coding: UTF-8 -*-
# ----------------------------------------------------------------------
# ssd1351.py from https://github.com/guyc/py-gaugette
# ported by Jason Porritt,
# and reworked by darknao,
# based on original work by Guy Carpenter for display.py
#
# This library works with
# Adafruit's 128x96 SPI color OLED http://www.adafruit.com/products/1673
#
# The code is based heavily on Adafruit's Arduino library
# https://github.com/adafruit/Adafruit_SSD1351
# written by Limor Fried/Ladyada for Adafruit Industries.
#
# It has the following dependencies:
# wiringpi2 for GPIO
# spidev for SPI
# PIL for easy drawing capabilities
#  numpy for fast RGB888 to RGB565 conversion
# ----------------------------------------------------------------------
# NEED HEAVY CLEANING !
import wiringpi2
import spidev
import time
import sys
from PIL import Image, ImageDraw, ImageFont
import logging
import numpy as np
import tools
class SSD1351:
# SSD1351 Commands
EXTERNAL_VCC = 0x1
SWITCH_CAP_VCC = 0x2
MEMORY_MODE_HORIZ = 0x00
MEMORY_MODE_VERT = 0x01
CMD_SETCOLUMN = 0x15
CMD_SETROW = 0x75
CMD_WRITERAM = 0x5C
CMD_READRAM = 0x5D
CMD_SETREMAP = 0xA0
CMD_STARTLINE = 0xA1
CMD_DISPLAYOFFSET = 0xA2
CMD_DISPLAYALLOFF = 0xA4
CMD_DISPLAYALLON = 0xA5
CMD_NORMALDISPLAY = 0xA6
CMD_INVERTDISPLAY = 0xA7
CMD_FUNCTIONSELECT = 0xAB
CMD_DISPLAYOFF = 0xAE
CMD_DISPLAYON = 0xAF
CMD_PRECHARGE = 0xB1
CMD_DISPLAYENHANCE = 0xB2
CMD_CLOCKDIV = 0xB3
CMD_SETVSL = 0xB4
CMD_SETGPIO = 0xB5
CMD_PRECHARGE2 = 0xB6
CMD_SETGRAY = 0xB8
CMD_USELUT = 0xB9
CMD_PRECHARGELEVEL = 0xBB
CMD_VCOMH = 0xBE
CMD_CONTRASTABC = 0xC1
CMD_CONTRASTMASTER = 0xC7
CMD_MUXRATIO = 0xCA
CMD_COMMANDLOCK = 0xFD
CMD_HORIZSCROLL = 0x96
CMD_STOPSCROLL = 0x9E
CMD_STARTSCROLL = 0x9F
# Device name will be /dev/spidev-{bus}.{device}
# dc_pin is the data/command pin. This line is HIGH for data, LOW for command.
# We will keep d/c low and bump it high only for commands with data
# reset is normally HIGH, and pulled LOW to reset the display
def __init__(self, bus=0, device=0, dc_pin="P9_15", reset_pin="P9_13", rows=128, cols=128):
self.cols = cols
self.rows = rows
self.dc_pin = dc_pin
self.reset_pin = reset_pin
# SPI
self.spi = spidev.SpiDev(bus, device)
self.spi.max_speed_hz = 16000000 # 16Mhz
# GPIO
self.gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_PINS)
self.gpio.pinMode(self.reset_pin, self.gpio.OUTPUT)
self.gpio.digitalWrite(self.reset_pin, self.gpio.HIGH)
self.gpio.pinMode(self.dc_pin, self.gpio.OUTPUT)
self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW)
# Drawing tools
self.im = Image.new("RGB", (cols, rows), 'black')
self.draw = ImageDraw.Draw(self.im)
# logging
self.log = logging.getLogger(self.__class__.__name__)
self.log.setLevel(logging.INFO)
self.contrast = 15
def reset(self):
self.gpio.digitalWrite(self.reset_pin, self.gpio.LOW)
time.sleep(0.010) # 10ms
self.gpio.digitalWrite(self.reset_pin, self.gpio.HIGH)
def command(self, cmd, cmddata=None):
# already low
# self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW)
if type(cmd) == list:
self.spi.writebytes(cmd)
else:
self.spi.writebytes([cmd])
if cmddata is not None:
if type(cmddata) == list:
self.data(cmddata)
else:
self.data([cmddata])
def data(self, bytes):
self.gpio.digitalWrite(self.dc_pin, self.gpio.HIGH)
max_xfer = 1024
start = 0
remaining = len(bytes)
while remaining>0:
count = remaining if remaining <= max_xfer else max_xfer
remaining -= count
self.spi.writebytes(bytes[start:start+count])
start += count
self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW)
def begin(self, vcc_state = SWITCH_CAP_VCC):
time.sleep(0.001) # 1ms
self.reset()
self.command(self.CMD_COMMANDLOCK, 0x12)
self.command(self.CMD_COMMANDLOCK, 0xB1)
self.command(self.CMD_DISPLAYOFF)
self.command(self.CMD_CLOCKDIV, 0xF1)
# support for 128x128 line mode
self.command(self.CMD_MUXRATIO, 127)
self.command(self.CMD_SETREMAP, 0x74)
self.command(self.CMD_SETCOLUMN, [0x00, self.cols-1])
self.command(self.CMD_SETROW, [0x00, self.rows-1])
# TODO Support 96-row display
self.command(self.CMD_STARTLINE, 96)
self.command(self.CMD_DISPLAYOFFSET, 0x00)
self.command(self.CMD_SETGPIO, 0x00)
self.command(self.CMD_FUNCTIONSELECT, 0x01)
self.command(self.CMD_PRECHARGE, 0x32)
self.command(self.CMD_VCOMH, 0x05)
self.command(self.CMD_NORMALDISPLAY)
self.set_contrast(200) # c8 -> 200
self.set_master_contrast(10)
self.command(self.CMD_SETVSL, [0xA0, 0xB5, 0x55])
self.command(self.CMD_PRECHARGE2, 0x01)
self.command(self.CMD_DISPLAYON)
def set_master_contrast(self, level):
# 0 to 15
level &= 0x0F
self.command(self.CMD_CONTRASTMASTER, level)
def set_contrast(self, level):
# 0 to 255
level &= 0xFF
self.command(self.CMD_CONTRASTABC, [level, level, level])
self.contrast = level
def invert_display(self):
self.command(self.CMD_INVERTDISPLAY)
def normal_display(self):
self.command(self.CMD_NORMALDISPLAY)
def scale(self, x, inLow, inHigh, outLow, outHigh):
return ((x - inLow) / float(inHigh) * outHigh) + outLow
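# Note: this remap divides by inHigh rather than (inHigh - inLow); both are
# equivalent here because every caller in this file passes inLow == 0.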
def encode_color(self, color):
red = (color >> 16) & 0xFF
green = (color >> 8) & 0xFF
blue = color & 0xFF
redScaled = int(self.scale(red, 0, 0xFF, 0, 0x1F))
greenScaled = int(self.scale(green, 0, 0xFF, 0, 0x3F))
blueScaled = int(self.scale(blue, 0, 0xFF, 0, 0x1F))
return (((redScaled << 6) | greenScaled) << 5) | blueScaled
def color565(self, r, g, b):
# 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
# r r r r r g g g g g g b b b b b
# r = 31 g = 63 b = 31
redScaled = int(self.scale(r, 0, 0xFF, 0, 0x1F))
greenScaled = int(self.scale(g, 0, 0xFF, 0, 0x3F))
blueScaled = int(self.scale(b, 0, 0xFF, 0, 0x1F))
return (((redScaled << 6) | greenScaled) << 5) | blueScaled
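# Illustrative values: color565(255, 0, 0) -> 0xF800 (pure red) and
# color565(255, 255, 255) -> 0xFFFF (white) in RGB565.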
def goTo(self, x, y):
if x >= self.cols or y >= self.rows:
return
# set x and y coordinate
self.command(self.CMD_SETCOLUMN, [x, self.cols-1])
self.command(self.CMD_SETROW, [y, self.rows-1])
self.command(self.CMD_WRITERAM)
def drawPixel(self, x, y, color):
if x >= self.cols or y >= self.rows:
return
if x < 0 or y < 0:
return
color = self.encode_color(color)
# set location
self.goTo(x, y)
self.data([color >> 8, color & 0xFF])
def clear(self):
"""Clear display buffer"""
self.im = Image.new("RGB", (self.cols, self.rows), 'black')
self.draw = ImageDraw.Draw(self.im)
def text_center(self, string, color, font=None, size=10):
if font is None:
font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size)
text_size = self.draw.textsize(string, font=font)
text_x = max((self.cols-text_size[0])/2, 0)
text_y = max((self.rows-text_size[1])/2, 0)
self.draw_text(text_x, text_y, string, color, font=font, size=size)
return text_x, text_y
def text_center_y(self, text_y, string, color, font=None, size=10):
if font is None:
font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size)
text_size = self.draw.textsize(string, font=font)
text_x = max((self.cols-text_size[0])/2, 0)
self.draw_text(text_x, text_y, string, color, font=font, size=size)
return text_x, text_y
def draw_text(self, x, y, string, color, font=None, size=10):
if font is None:
font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size)
self.draw.text((x, y), string, font=font, fill=color)
return self.draw.textsize(string, font=font)
def fillScreen(self, fillcolor):
self.rawFillRect(0, 0, self.cols, self.rows, fillcolor)
def rawFillRect(self, x, y, w, h, fillcolor):
self.log.debug("fillScreen start")
# Bounds check
if (x >= self.cols) or (y >= self.rows):
return
# Y bounds check
if y+h > self.rows:
h = self.rows - y - 1
# X bounds check
if x+w > self.cols:
w = self.cols - x - 1
self.setDisplay(x, y, x+(w-1), y+(h-1))
color = self.encode_color(fillcolor)
self.data([color >> 8, color & 0xFF] * w*h)
self.log.debug("fillScreen end")
def setDisplay(self, startx, starty, endx, endy):
if startx >= self.cols or starty >= self.rows:
return
# Y bounds check
if endx > self.cols - 1:
endx = self.cols - 1
# X bounds check
if endy > self.rows - 1:
endy = self.rows - 1
# set x and y coordinate
# print "x:%d y:%d endx:%d endy:%d" % (startx, starty, endx, endy)
self.command(self.CMD_SETCOLUMN, [startx, endx])
self.command(self.CMD_SETROW, [starty, endy])
self.command(self.CMD_WRITERAM)
def im2list(self):
"""Convert PIL RGB888 Image to SSD1351 RAM buffer"""
image = np.array(self.im).reshape(-1, 3)
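# 0.121 ~= 31/255 and 0.247 ~= 63/255: scale the 8-bit R/G/B channels down to 5/6/5 bits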
image[:,0] *= 0.121
image[:,1] *= 0.247
image[:,2] *= 0.121
d = np.left_shift(image, [11, 5, 0]).sum(axis=1)
data =np.dstack(((d>>8)&0xff, d&0xff)).flatten()
return data.tolist()
def display(self, x=0, y=0, w=None, h=None):
"""Send display buffer to the device"""
self.log.debug("disp in")
if h is None:
h = self.rows
if w is None:
w = self.cols
x = max(x, 0)
y = max(y, 0)
w = min(w, self.cols)
h = min(h, self.rows)
if w-x < 0:
return
self.log.debug("set display")
self.setDisplay(x, y, w-1, h-1)
self.log.debug("set display end")
data = []
start = y * self.cols + x
end = h * self.cols + w
self.log.debug("get data")
self.data(self.im2list())
self.log.debug("disp out")
@tools.timed
def dump_disp(self):
"""Dump display buffer on screen,
for debugging purposes"""
image = np.array(self.im).reshape(-1, 3)
for r in range(0, self.rows,2):
txt = [None,] * self.cols
start = r*self.cols
end = start + self.cols * 2
line = image[start:end]
for c in range(len(line)):
idx = c % self.cols
if line[c].sum() > 0:
if txt[idx] is None:
txt[idx] = '▀'
elif txt[idx] == '▀':
txt[idx] = '█'
else:
txt[idx] = '▄'
else:
if txt[idx] is None:
txt[idx] = ' '
print ''.join(txt) + '║'
@tools.timed
def dump_disp2(self):
#image = list(self.im.convert("I").getdata())
image = np.array(self.im)
for row, r in enumerate(image):
if row % 2 == 0:
txt = [None,] * self.cols
for idx, c in enumerate(r):
if c.sum() > 0:
if txt[idx] is None:
txt[idx] = '▀'
elif txt[idx] == '▀':
txt[idx] = '█'
else:
txt[idx] = '▄'
else:
if txt[idx] is None:
txt[idx] = ' '
print ''.join(txt) + '║'
if __name__ == '__main__':
import datetime
import time
import ssd1351
import random
from PIL import ImageFont
import psutil
import logging
import os
log = logging.getLogger("clock")
logging.basicConfig(
format='%(asctime)-23s - %(levelname)-7s - %(name)s - %(message)s')
log.setLevel(logging.INFO)
RESET_PIN = 15
DC_PIN = 16
led = ssd1351.SSD1351(reset_pin=15, dc_pin=16, rows=96)
led.begin()
led.fillScreen(0)
color = 0x000000
bands = 10
color_step = 0xFF / bands
color_width = led.cols / 3
for x in range(0, led.rows, led.rows/bands):
led.rawFillRect(0, x, color_width, bands, color&0xff0000)
led.rawFillRect(color_width, x, color_width*2, bands, color&0xff00)
led.rawFillRect(color_width*2, x, color_width*3, bands, color&0xff)
color = (color + (color_step << 16) + (color_step << 8) + (color_step)) & 0xFFFFFF
|
gpl-3.0
| 4,144,339,368,829,440,500
| 32.039216
| 96
| 0.548368
| false
| 3.222568
| false
| false
| false
|
bnx05/pytest-selenium
|
test_parameters.py
|
1
|
2603
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import time
from selenium import webdriver
sample_email_address = "demo@engagespark.com"
sample_password = "Password123"
email_addresses = ["invalid_email", "another_invalid_email@", "not_another_invalid_email@blah"]
passwords = ["weak_password", "generic_password", "bleep_password"]
browser = webdriver.Firefox()
browser.maximize_window()
# this test checks the maxlength attribute of the login and password fields
@pytest.mark.parametrize("field_name, maxlength", [
("login", "75"),
("password", "128"),
])
def test_assert_field_maxlength(field_name, maxlength):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(5)
assert browser.find_element_by_name(field_name).get_attribute("maxlength") == maxlength
# this test asserts the string length of values entered in the login and
# password fields
@pytest.mark.parametrize("field_name, sample_string, string_length", [
("login", sample_email_address, 20),
("password", sample_password, 11),
])
def test_assert_email_and_password_length(field_name, sample_string, string_length):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(5)
browser.find_element_by_name(field_name).click()
browser.find_element_by_name(field_name).send_keys(sample_string)
assert len(browser.find_element_by_name(field_name).get_attribute("value")) == string_length
# this test checks if the login button is enabled after entering different
# combinations of invalid values in the email and password fields
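# (stacking the two parametrize decorators below runs the test once per
# email/password pair: 3 x 3 = 9 cases)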
@pytest.mark.parametrize("email", email_addresses)
@pytest.mark.parametrize("password", passwords)
def test_assert_login_button_enabled(email, password):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(5)
browser.find_element_by_name("login").click()
browser.find_element_by_name("login").send_keys(email)
browser.find_element_by_name("password").click()
browser.find_element_by_name("password").send_keys(password)
assert browser.find_element_by_xpath("//button[contains(text(), 'Login')]").is_enabled()
# this test checks if the values entered into the email field contain '@'
@pytest.mark.parametrize("email", [
"123@abc.org",
"info@engagespark.com",
"blah",
])
def test_assert_valid_email_entry(email):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(5)
browser.find_element_by_name("login").click()
browser.find_element_by_name("login").send_keys(email)
assert "@" in browser.find_element_by_name("login").get_attribute("value")
|
mit
| -6,535,209,611,040,000,000
| 36.724638
| 96
| 0.717633
| false
| 3.443122
| true
| false
| false
|
DonaldTrumpHasTinyHands/tiny_hands_pac
|
documents_gallery/models.py
|
1
|
4091
|
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.wagtaildocs.models import Document
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import TaggedItemBase, Tag
class DocumentsIndexPage(Page):
"""
This is the index page for the Documents Gallery. It contains the links to the Gallery pages.
Each Gallery page displays its documents according to the tags defined on it.
"""
intro = RichTextField(blank=True)
search_fields = Page.search_fields + (
index.SearchField('intro'),
)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def children(self):
return self.get_children().live()
def get_context(self, request):
# Get list of live Gallery pages that are descendants of this page
pages = DocumentsPage.objects.live().descendant_of(self)
# Update template context
context = super(DocumentsIndexPage, self).get_context(request)
context['pages'] = pages
return context
class Meta:
verbose_name = "Documents Index Page"
DocumentsIndexPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full")
]
DocumentsIndexPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"),
ImageChooserPanel('feed_image'),
]
class DocumentsPageTag(TaggedItemBase):
content_object = ParentalKey('documents_gallery.DocumentsPage', related_name='tagged_items')
class DocumentsPage(Page):
"""
This is the Documents page. It takes the tag names you have assigned to your
documents, fetches the matching document objects, and builds your document
gallery from those tags.
"""
tags = ClusterTaggableManager(through=DocumentsPageTag, blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def gallery_index(self):
# Find closest ancestor which is a Gallery index
return self.get_ancestors().type(DocumentsIndexPage).last()
def get_context(self, request):
# Get tags and convert them into list so we can iterate over them
tags = self.tags.values_list('name', flat=True)
# Creating empty Queryset from Wagtail Document model
documents = Document.objects.none()
# Populating the empty documents Queryset with documents of all tags in tags list.
if tags:
len_tags = len(tags)
for i in range(0, len_tags):
doc = Document.objects.filter(tags__name=tags[i])
documents = documents | doc
# Pagination
page = request.GET.get('page')
paginator = Paginator(documents, 25) # Show 25 documents per page
try:
documents = paginator.page(page)
except PageNotAnInteger:
documents = paginator.page(1)
except EmptyPage:
documents = paginator.page(paginator.num_pages)
# Update template context
context = super(DocumentsPage, self).get_context(request)
context['documents'] = documents
return context
class Meta:
verbose_name = "Documents Page"
DocumentsPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('tags'),
]
DocumentsPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"),
ImageChooserPanel('feed_image'),
]
|
mit
| 3,715,756,117,222,630,000
| 29.088235
| 104
| 0.676363
| false
| 4.279289
| false
| false
| false
|
mrocklin/into
|
into/backends/sql_csv.py
|
1
|
2811
|
from ..regex import RegexDispatcher
from ..append import append
from .csv import CSV
import os
import datashape
import sqlalchemy
import subprocess
copy_command = RegexDispatcher('copy_command')
execute_copy = RegexDispatcher('execute_copy')
@copy_command.register('.*sqlite')
def copy_sqlite(dialect, tbl, csv):
abspath = os.path.abspath(csv.path)
tblname = tbl.name
dbpath = str(tbl.bind.url).split('///')[-1]
statement = """
(echo '.mode csv'; echo '.import {abspath} {tblname}';) | sqlite3 {dbpath}
"""
return statement.format(**locals())
@execute_copy.register('sqlite')
def execute_copy_sqlite(dialect, engine, statement):
ps = subprocess.Popen(statement, shell=True, stdout=subprocess.PIPE)
return ps.stdout.read()
@copy_command.register('postgresql')
def copy_postgres(dialect, tbl, csv):
abspath = os.path.abspath(csv.path)
tblname = tbl.name
format_str = 'csv'
delimiter = csv.dialect.get('delimiter', ',')
na_value = ''
quotechar = csv.dialect.get('quotechar', '"')
escapechar = csv.dialect.get('escapechar', '\\')
header = not not csv.has_header
encoding = csv.encoding or 'utf-8'
statement = """
COPY {tblname} FROM '{abspath}'
(FORMAT {format_str},
DELIMITER E'{delimiter}',
NULL '{na_value}',
QUOTE '{quotechar}',
ESCAPE '{escapechar}',
HEADER {header},
ENCODING '{encoding}');"""
return statement.format(**locals())
@copy_command.register('mysql.*')
def copy_mysql(dialect, tbl, csv):
mysql_local = ''
abspath = os.path.abspath(csv.path)
tblname = tbl.name
delimiter = csv.dialect.get('delimiter', ',')
quotechar = csv.dialect.get('quotechar', '"')
escapechar = csv.dialect.get('escapechar', '\\')
lineterminator = csv.dialect.get('lineterminator', r'\n\r')
skiprows = 1 if csv.has_header else 0
encoding = csv.encoding or 'utf-8'
statement = u"""
LOAD DATA {mysql_local} INFILE '{abspath}'
INTO TABLE {tblname}
CHARACTER SET {encoding}
FIELDS
TERMINATED BY '{delimiter}'
ENCLOSED BY '{quotechar}'
ESCAPED BY '{escapechar}'
LINES TERMINATED by '{lineterminator}'
IGNORE {skiprows} LINES;
"""
return statement.format(**locals())
@execute_copy.register('.*', priority=9)
def execute_copy_all(dialect, engine, statement):
conn = engine.raw_connection()
cursor = conn.cursor()
cursor.execute(statement)
conn.commit()
@append.register(sqlalchemy.Table, CSV)
def append_csv_to_sql_table(tbl, csv, **kwargs):
statement = copy_command(tbl.bind.dialect.name, tbl, csv)
execute_copy(tbl.bind.dialect.name, tbl.bind, statement)
return tbl
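# Illustrative usage (table and file names are hypothetical): given a bound
# SQLAlchemy table and a CSV, append() formats the dialect-specific bulk-load
# statement via copy_command() and runs it via execute_copy():
#
#   engine = sqlalchemy.create_engine('postgresql://user@host/db')
#   tbl = sqlalchemy.Table('accounts', sqlalchemy.MetaData(bind=engine), autoload=True)
#   append(tbl, CSV('accounts.csv'))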
|
bsd-3-clause
| -432,463,007,566,880,260
| 27.683673
| 82
| 0.630736
| false
| 3.655397
| false
| false
| false
|
blenderben/lolstatbot
|
lolstatbot.py
|
1
|
22559
|
# League of Legends Statistics Chat Bot
# A chat bot written in Python that provides match statistics right to your Twitch chat.
# 2015 Benjamin Chu - https://github.com/blenderben
import socket # imports module allowing connection to IRC
import threading # imports module allowing timing functions
import requests # imports module allowing requests
import json
import time
import calendar # imports module allowing epoch time
import ConfigParser # imports module allowing reading of .ini files
import os # for relative pathing
import string # for string manipulation
# from routes import API_ROUTES
class API_ROUTES:
# summoner-v1.4 - get summoner id data
summoner_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/by-name/{summonername}?api_key={key}'
# summoner-v1.4 - summoner mastery data
summonermastery_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/{summonerid}/masteries?api_key={key}'
# league-v2.5 - summoner league data
summonerleague_url = 'https://{region}.api.pvp.net/api/lol/{region}/v2.5/league/by-summoner/{summonerid}/entry?api_key={key}'
# lol-static-data-v1.2 - static champion data
championstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/champion/{championid}?champData=all&api_key={key}'
# lol-static-data-v1.2 - static rune data
runestaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/rune/{runeid}?runeData=all&api_key={key}'
# lol-static-data-v1.2 - static mastery data
masterystaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/mastery/{masteryid}?masteryData=all&api_key={key}'
# lol-static-data-v1.2 - static spell data
spellstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/summoner-spell/{spellid}?api_key={key}'
# current-game-v1.0 - current game data
current_url = 'https://{region}.api.pvp.net/observer-mode/rest/consumer/getSpectatorGameInfo/{region_upper}1/{summonerid}?api_key={key}'
# game-v1.3 - historic game data
last_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.3/game/by-summoner/{summonerid}/recent?api_key={key}'
# op.gg
opgg_url = 'http://{region}.op.gg/summoner/userName={summonername}'
opgg_masteries_url = 'http://{region}.op.gg/summoner/mastery/userName={summonername}'
opgg_runes_url = 'http://{region}.op.gg/summoner/rune/userName={summonername}'
opgg_matches_url = 'http://{region}.op.gg/summoner/matches/userName={summonername}'
opgg_leagues_url = 'http://{region}.op.gg/summoner/league/userName={summonername}'
opgg_champions_url = 'http://{region}.op.gg/summoner/champions/userName={summonername}'
# LoLNexus
lolnexus_url = 'http://www.lolnexus.com/{region}/search?name={summonername}&server={region}'
# LoLKing
lolking_url = 'http://www.lolking.net/summoner/{region}/{summonerid}'
# LoLSkill
lolskill_url = 'http://www.lolskill.net/summoner/{region}/{summonername}'
# ====== READ CONFIG ======
Config = ConfigParser.ConfigParser()
Config.read(os.path.dirname(os.path.abspath(__file__)) + '/config.ini')
def ConfigSectionMap(section):
temp_dict = {}
options = Config.options(section)
for option in options:
try:
temp_dict[option] = Config.get(section, option)
if temp_dict[option] == -1:
DebugPrint('skip: %s' % option)
except:
print('exception on %s!' % option)
temp_dict[option] = None
return temp_dict
# ====== CONNECTION INFO ======
# Set variables for connection
botOwner = ConfigSectionMap('settings')['botowner']
nick = ConfigSectionMap('settings')['nick']
channel = '#' + ConfigSectionMap('settings')['channel']
server = ConfigSectionMap('settings')['server']
port = int(ConfigSectionMap('settings')['port'])
password = ConfigSectionMap('settings')['oauth']
# ====== RIOT API PRELIM DATA ======
api_key = ConfigSectionMap('settings')['api']
# Riot API Information
summonerName = ConfigSectionMap('settings')['summonername'].lower()
summonerName = summonerName.replace(" ", "")
region = ConfigSectionMap('settings')['region']
summoner_url = API_ROUTES.summoner_url.format(region=region, summonername=summonerName, key=api_key)
# Initial Data Load // Get Summoner ID and Level
summonerName_dict = requests.get(summoner_url).json()
summonerID = str(summonerName_dict[summonerName]['id'])
summonerLevel = str(summonerName_dict[summonerName]['summonerLevel'])
# ====== RIOT API FUNCTIONS ======
def about(ircname):
return 'Hello ' + ircname + '! I am a League of Legends statistics chat bot. My creator is blenderben [ https://github.com/blenderben/LoLStatBot ].'\
+ ' I am currently assigned to summoner ' + summonerName.upper() + ' [ID:' + getSummonerID() + '].'
def getCommands():
return 'Available commands: ['\
+ ' !about, !summoner, !league, !last, !current, !runes, !mastery, !opgg, !lolnexus, !lolking, !lolskill ]'
def getSummonerInfo():
return summonerName.upper() + ' is summoner level ' + getSummonerLevel() + ', playing in Region: ' + region.upper() + ' // ' + opgg('')
def opgg(details):
if details == 'runes':
return API_ROUTES.opgg_runes_url.format(region=region, summonername=summonerName)
elif details == 'masteries':
return API_ROUTES.opgg_masteries_url.format(region=region, summonername=summonerName)
elif details == 'matches':
return API_ROUTES.opgg_matches_url.format(region=region, summonername=summonerName)
elif details == 'leagues':
return API_ROUTES.opgg_leagues_url.format(region=region, summonername=summonerName)
elif details == 'champions':
return API_ROUTES.opgg_champions_url.format(region=region, summonername=summonerName)
else:
return API_ROUTES.opgg_url.format(region=region, summonername=summonerName)
def lolnexus():
return API_ROUTES.lolnexus_url.format(region=region, summonername=summonerName)
def lolking(details):
if details == 'runes':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#runes'
elif details == 'masteries':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#masteries'
elif details == 'matches':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#matches'
elif details == 'rankedstats':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#ranked-stats'
elif details == 'leagues':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#leagues'
else:
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID)
def lolskill(details):
if details == 'runes':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/runes'
elif details == 'masteries':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/masteries'
elif details == 'matches':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/matches'
elif details == 'stats':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/stats'
elif details == 'champions':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/champions'
else:
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName)
def getTeamColor(teamid):
if teamid == 100:
return 'Blue Team'
elif teamid == 200:
return 'Purple Team'
else:
return 'No Team'
def getWinLoss(win):
if win == True:
return 'WON'
elif win == False:
return 'LOST'
else:
return 'TIED'
def getTimePlayed(time):
if time > 3600:
hours = time / 3600
minutes = time % 3600 / 60
seconds = time % 3600 % 60
if hours > 1:
return str(hours) + ' hours & ' + str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
else:
return str(hours) + ' hour & ' + str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
elif time > 60:
minutes = time / 60
seconds = time % 60
return str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
else:
return str(time) + ' seconds'
def getKDA(kills, deaths, assists):
if deaths < 1:
return 'PERFECT'
else:
kda = (float(kills) + float(assists)) / float(deaths)
kda = round(kda, 2)
return str(kda) + ':1'
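# e.g. getKDA(5, 2, 7) -> '6.0:1' and getKDA(3, 0, 10) -> 'PERFECT'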
def getChampionbyID(championid):
tempDict = requests.get(API_ROUTES.championstaticdata_url.format(region=region, championid=int(championid), key=api_key)).json()
name = tempDict['name'] + " " + tempDict['title']
return name
def getSpellbyID(spellid):
tempDict = requests.get(API_ROUTES.spellstaticdata_url.format(region=region, spellid=int(spellid), key=api_key)).json()
spellName = tempDict['name']
return spellName
# Refresh / Get Summoner ID
def getSummonerID():
global summonerID
try:
tempDict = requests.get(summoner_url).json()
summonerID = str(tempDict[summonerName]['id'])
return summonerID
except:
print 'Riot API Down'
return 1
# Refresh / Get Summoner Level
def getSummonerLevel():
global summonerLevel
tempDict = requests.get(summoner_url).json()
summonerLevel = str(tempDict[summonerName]['summonerLevel'])
return summonerLevel
def getWinRatio(win, loss):
total = float(win) + float(loss)
ratio = win / total
ratioPercent = round(ratio * 100, 1)
return str(ratioPercent) + '%'
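# e.g. getWinRatio(10, 5) -> '66.7%'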
def getStats():
# Placeholder: eventually compute statistics (avg kills, etc.); for now, return the LoLSkill stats page
return lolskill('stats')
def getSummonerMastery():
tempDict = requests.get(API_ROUTES.summonermastery_url.format(region=region, summonerid=summonerID, key=api_key)).json()
i = 0
masteryIDList = []
masteryRank = []
for pages in tempDict[summonerID]['pages']:
if bool(pages.get('current')) == True:
pageName = tempDict[summonerID]['pages'][i]['name']
for mastery in tempDict[summonerID]['pages'][i]['masteries']:
masteryIDList.append(mastery.get('id'))
masteryRank.append(mastery.get('rank'))
else:
i += 1
return getCurrentMastery(masteryIDList, masteryRank) + ' // Mastery Name: ' + pageName
def getLeagueInfo():
try:
tempDict = requests.get(API_ROUTES.summonerleague_url.format(region=region, summonerid=summonerID, key=api_key)).json()
LEAGUE_TIER = string.capwords(tempDict[summonerID][0]['tier'])
LEAGUE_QUEUE = tempDict[summonerID][0]['queue'].replace('_', ' ')
LEAGUE_DIVISION = tempDict[summonerID][0]['entries'][0]['division']
LEAGUE_WINS = tempDict[summonerID][0]['entries'][0]['wins']
LEAGUE_LOSSES = tempDict[summonerID][0]['entries'][0]['losses']
LEAGUE_POINTS = tempDict[summonerID][0]['entries'][0]['leaguePoints']
# LEAGUE_ISVETERAN = tempDict[summonerID][0]['entries'][0]['isHotStreak']
# LEAGUE_ISHOTSTREAK = tempDict[summonerID][0]['entries'][0]['isVeteran']
# LEAGUE_ISFRESHBLOOD = tempDict[summonerID][0]['entries'][0]['isFreshBlood']
# LEAGUE_ISINACTIVE = tempDict[summonerID][0]['entries'][0]['isInactive']
return summonerName.upper() + ' is ' + LEAGUE_TIER + ' ' + LEAGUE_DIVISION + ' in ' + LEAGUE_QUEUE\
+ ' // ' + str(LEAGUE_WINS) + 'W / ' + str(LEAGUE_LOSSES) + 'L (Win Ratio ' + getWinRatio(LEAGUE_WINS, LEAGUE_LOSSES) + ')'\
+ ' // LP: ' + str(LEAGUE_POINTS)\
+ ' // ' + lolking('leagues')
except:
return 'Summoner ' + summonerName.upper() + ' has not played any Ranked Solo 5x5 matches'\
+ ' // ' + lolking('leagues')
# Get Current Match Stats
def getCurrent(details):
try:
current_api_url = API_ROUTES.current_url.format(region=region, region_upper=region.upper(), summonerid=summonerID, key=api_key)
tempDict = requests.get(current_api_url).json()
CURRENT_GAMEMODE = tempDict['gameMode']
CURRENT_GAMELENGTH = tempDict['gameLength']
CURRENT_GAMETYPE = tempDict['gameType'].replace('_', ' ')
CURRENT_TIME = calendar.timegm(time.gmtime())
CURRENT_EPOCHTIME = tempDict['gameStartTime'] / 1000
if CURRENT_EPOCHTIME <= 0:
CURRENT_TIMEDIFF = 0
else:
CURRENT_TIMEDIFF = CURRENT_TIME - CURRENT_EPOCHTIME
if CURRENT_TIMEDIFF < 0:
CURRENT_TIMEDIFF = 0
runeIDList = []
runeCount = []
masteryIDList = []
masteryRank = []
i = 0
for participant in tempDict['participants']:
if int(summonerID) == int(participant.get('summonerId')):
CURRENT_TEAM = participant.get('teamId')
CURRENT_CHAMPION = participant.get('championId')
CURRENT_SPELL1 = participant.get('spell1Id')
CURRENT_SPELL2 = participant.get('spell2Id')
for rune in tempDict['participants'][i]['runes']:
runeIDList.append(rune.get('runeId'))
runeCount.append(rune.get('count'))
for mastery in tempDict['participants'][i]['masteries']:
masteryIDList.append(mastery.get('masteryId'))
masteryRank.append(mastery.get('rank'))
else:
i += 1
runeCountOutput = ''
runeBonusOutput = ''
for x in range(len(runeIDList)):
runeCountOutput += ' [' + getCurrentRuneTotal(runeIDList[x], runeCount[x]) + '] '
runeBonusOutput += ' [' + getCurrentRuneBonusTotal(runeIDList[x], runeCount[x]) + '] '
masteryOutput = getCurrentMastery(masteryIDList, masteryRank)
if details == 'runes':
return 'Current Runes: ' + runeCountOutput\
+ ' // Rune Bonuses: ' + runeBonusOutput\
+ ' // ' + lolskill('runes')
elif details == 'masteries':
return 'Current Mastery Distribution: ' + masteryOutput\
+ ' // ' + lolskill('masteries')
else:
return summonerName.upper()\
+ ' is currently playing ' + CURRENT_GAMEMODE + ' ' + CURRENT_GAMETYPE\
+ ' with ' + getChampionbyID(CURRENT_CHAMPION)\
+ ' on the ' + getTeamColor(CURRENT_TEAM)\
+ ' // Elapsed Time: ' + getTimePlayed(CURRENT_TIMEDIFF)\
+ ' // Spells Chosen: ' + getSpellbyID(CURRENT_SPELL1) + ' & ' + getSpellbyID(CURRENT_SPELL2)\
+ ' // Mastery Distribution: ' + masteryOutput\
+ ' // Rune Bonuses: ' + runeBonusOutput\
+ ' // ' + lolnexus()
except:
if details == 'runes':
return 'Summoner ' + summonerName.upper() + ' needs to currently be in a game for current Rune data to display'\
+ ' // ' + lolking('runes')
elif details == 'masteries':
return 'Current Mastery Distribution: ' + getSummonerMastery() + ' // ' + lolskill('masteries')
else:
return 'The summoner ' + summonerName.upper() + ' is not currently in a game.'
def getCurrentMastery(masteryidlist, masteryrank):
offense = 0
defense = 0
utility = 0
for x in range(len(masteryidlist)):
masteryID = masteryidlist[x]
tempDict = requests.get(API_ROUTES.masterystaticdata_url.format(region=region, masteryid=masteryID, key=api_key)).json()
masteryTree = tempDict['masteryTree']
ranks = int(masteryrank[x])
if masteryTree == 'Offense':
offense += ranks
elif masteryTree == 'Defense':
defense += ranks
else:
utility += ranks
return '(' + str(offense) + '/' + str(defense) + '/' + str(utility) + ')'
def getCurrentRuneTotal(runeid, count):
tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json()
runeName = tempDict['name']
return str(count) + 'x ' + runeName
def getCurrentRuneBonusTotal(runeid, count):
tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json()
runeBonus = tempDict['description']
try:
runeBonus.split('/')[1]
except IndexError:
# Single Bonus
value = runeBonus.split()[0]
value = value.replace('+', '').replace('%', '').replace('-', '')
valueCount = float(value) * float(count)
valueCount = round(valueCount, 2)
description = tempDict['description'].split(' (', 1)[0]
description = string.capwords(description)
description = description.replace(value, str(valueCount))
return description
else:
# Hybrid Bonus
value = runeBonus.split()[0]
value = value.replace('+', '').replace('%', '').replace('-', '')
valueCount = float(value) * float(count)
valueCount = round(valueCount, 2)
firstDescription = runeBonus.split('/')[0].strip()
firstDescription = firstDescription.split(' (', 1)[0]
firstDescription = string.capwords(firstDescription)
firstDescription = firstDescription.replace(value, str(valueCount))
value = runeBonus.split('/')[1].strip()
if value.split()[1] == 'sec.':
return firstDescription + ' / 5 Sec.'
else:
value = value.split()[0]
value = value.replace('+', '').replace('%', '').replace('-', '')
valueCount = float(value) * float(count)
valueCount = round(valueCount, 2)
secondDescription = runeBonus.split('/')[1].strip()
secondDescription = secondDescription.split(' (', 1)[0]
secondDescription = string.capwords(secondDescription)
secondDescription = secondDescription.replace(value, str(valueCount))
return firstDescription + ' / ' + secondDescription
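# Example with a hypothetical rune description "+0.59 attack damage" and count 9:
# the single-bonus branch above returns "+5.31 Attack Damage" (0.59 * 9, rounded to 2 places).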
# Get Last Match Stats
def getLast():
tempDict = requests.get(API_ROUTES.last_url.format(region=region, summonerid=summonerID, key=api_key)).json()
LAST_GAMEID = tempDict['games'][0]['gameId']
# LAST_GAMEMODE = tempDict['games'][0]['gameMode']
LAST_SUBTYPE = tempDict['games'][0]['subType'].replace('_', ' ')
LAST_GAMETYPE = tempDict['games'][0]['gameType'].replace('_GAME', '')
LAST_TIMEPLAYED = tempDict['games'][0]['stats']['timePlayed']
LAST_WIN = tempDict['games'][0]['stats']['win']
LAST_GOLDSPENT = tempDict['games'][0]['stats']['goldSpent']
LAST_GOLDEARNED = tempDict['games'][0]['stats']['goldEarned']
LAST_CHAMPION_ID = str(tempDict['games'][0]['championId'])
LAST_IPEARNED = str(tempDict['games'][0]['ipEarned'])
LAST_LEVEL = str(tempDict['games'][0]['stats']['level'])
LAST_SPELL1 = tempDict['games'][0]['spell1']
LAST_SPELL2 = tempDict['games'][0]['spell2']
LAST_CHAMPIONSKILLED = str(tempDict['games'][0]['stats'].get('championsKilled', 0))
LAST_NUMDEATHS = str(tempDict['games'][0]['stats'].get('numDeaths' , 0))
LAST_ASSISTS = str(tempDict['games'][0]['stats'].get('assists', 0))
LAST_TOTALDAMAGECHAMPIONS = str(tempDict['games'][0]['stats']['totalDamageDealtToChampions'])
LAST_MINIONSKILLED = str(tempDict['games'][0]['stats']['minionsKilled'])
LAST_WARDSPLACED = str(tempDict['games'][0]['stats'].get('wardPlaced', 0))
output = summonerName.upper() + ' ' + getWinLoss(LAST_WIN)\
+ ' the last ' + LAST_GAMETYPE + ' ' + LAST_SUBTYPE\
+ ' GAME using ' + getChampionbyID(LAST_CHAMPION_ID)\
+ ' // The game took ' + getTimePlayed(LAST_TIMEPLAYED)\
+ ' // ' + getKDA(LAST_CHAMPIONSKILLED, LAST_NUMDEATHS, LAST_ASSISTS) + ' KDA (' + LAST_CHAMPIONSKILLED + '/' + LAST_NUMDEATHS + '/' + LAST_ASSISTS + ')'\
+ ' // ' + getSpellbyID(LAST_SPELL1) + ' & ' + getSpellbyID(LAST_SPELL2) + ' spells were chosen'\
+ ' // ' + LAST_TOTALDAMAGECHAMPIONS + ' damage was dealt to champions'\
+ ' // ' + LAST_MINIONSKILLED + ' minions were killed'\
+ ' // ' + LAST_WARDSPLACED + ' wards were placed'\
+ ' // Spent ' + str(round(float(LAST_GOLDSPENT) / float(LAST_GOLDEARNED)*100, 1)) + '% of Gold earned [' + str(LAST_GOLDSPENT) + '/' + str(LAST_GOLDEARNED) + ']'\
+ ' // ' + LAST_IPEARNED + ' IP was earned'
# add Official League Match history here
return output
# ====== IRC FUNCTIONS ======
# Extract Nickname
def getNick(data):
nick = data.split('!')[0]
nick = nick.replace(':', ' ')
nick = nick.replace(' ', '')
nick = nick.strip(' \t\n\r')
return nick
def getMessage(data):
if data.find('PRIVMSG'):
try:
message = data.split(channel, 1)[1][2:]
return message
except IndexError:
return 'Index Error'
except:
return 'No message'
else:
return 'Not a message'
# ====== TIMER FUNCTIONS ======
def printit():
threading.Timer(60.0, printit).start()
print "Hello World"
# ===============================
# queue = 13 #sets variable for anti-spam queue functionality
# Connect to server
print '\nConnecting to: ' + server + ' over port ' + str(port)
irc = socket.socket()
irc.connect((server, port))
# Send variables for connection to Twitch chat
irc.send('PASS ' + password + '\r\n')
irc.send('USER ' + nick + ' 0 * :' + botOwner + '\r\n')
irc.send('NICK ' + nick + '\r\n')
irc.send('JOIN ' + channel + '\r\n')
printit()
# Main Program Loop
while True:
ircdata = irc.recv(4096) # gets output from IRC server
ircuser = ircdata.split(':')[1]
ircuser = ircuser.split('!')[0] # determines the sender of the messages
# Check messages for any banned words against banned.txt list
f = open(os.path.dirname(os.path.abspath(__file__)) + '/banned.txt', 'r')
banned = f.readlines()
message = getMessage(ircdata).lower().strip(' \t\n\r')
for i in range(len(banned)):
if message.find(banned[i].strip(' \t\n\r')) != -1:
irc.send('PRIVMSG ' + channel + ' :' + getNick(ircdata) + ', banned words are not allowed. A timeout has been issued.' + '\r\n')
# irc.send('PRIVMSG ' + channel + ' :\/timeout ' + getNick(ircdata) + ' 5\r\n')
break
else:
pass
print 'DEBUG: ' + ircdata.strip(' \t\n\r')
print 'USER: ' + getNick(ircdata).strip(' \t\n\r')
print 'MESSAGE: ' + getMessage(ircdata).strip(' \t\n\r')
print '======================='
# About
if ircdata.find(':!about') != -1:
irc.send('PRIVMSG ' + channel + ' :' + about(getNick(ircdata)) + '\r\n')
# Commands
if ircdata.find(':!commands') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCommands() + '\r\n')
# Last
if ircdata.find(':!last') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getLast() + '\r\n')
# Current
if ircdata.find(':!current') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCurrent('games') + '\r\n')
# Current Runes
if ircdata.find(':!runes') != -1 or ircdata.find(':!rune') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCurrent('runes') + '\r\n')
# Current Mastery
if ircdata.find(':!mastery') != -1 or ircdata.find(':!masteries') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCurrent('masteries') + '\r\n')
# Basic Summoner Data
if ircdata.find(':!summoner') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getSummonerInfo() + '\r\n')
# Seasonal League Rank Data
if ircdata.find(':!league') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getLeagueInfo() + '\r\n')
# Stats
if ircdata.find(':!stats') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getStats() + '\r\n')
# Return op.gg
if ircdata.find(':!opgg') != -1:
irc.send('PRIVMSG ' + channel + ' :' + opgg('') + '\r\n')
# Return lolnexus
if ircdata.find(':!lolnexus') != -1:
irc.send('PRIVMSG ' + channel + ' :' + lolnexus() + '\r\n')
# Return lolking
if ircdata.find(':!lolking') != -1:
irc.send('PRIVMSG ' + channel + ' :' + lolking('') + '\r\n')
# Return lolskill
if ircdata.find(':!lolskill') != -1:
irc.send('PRIVMSG ' + channel + ' :' + lolskill('') + '\r\n')
# Keep Alive
if ircdata.find('PING') != -1:
irc.send('PONG ' + ircdata.split()[1] + '\r\n')
|
mit
| 362,198,703,597,034,500
| 37.894828
| 164
| 0.675074
| false
| 2.769335
| true
| false
| false
|
pacpac1992/mymockup
|
src/widgets/tab.py
|
1
|
3754
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
import wx.lib.ogl as ogl
class Tab_dialog(wx.Dialog):
def __init__(self, parent, title):
super(Tab_dialog, self).__init__(parent, title=title,size=(410,220))
self.parent = parent
self.nombre = wx.TextCtrl(self,-1, pos=(10,10), size=(200,30),style=wx.TE_PROCESS_ENTER)
wx.StaticText(self,-1,'Activo: ',pos=(10,55))
self.lbl_selection = wx.StaticText(self,-1,'',(60, 55),(150, -1))
btn = wx.Button(self,-1,'Aceptar',pos=(10,100))
self.listBox = wx.ListBox(self, -1, (220, 10), (90, 170), [], wx.LB_SINGLE)
up = wx.Button(self,-1,'Arriba',pos=(320,10))
down = wx.Button(self,-1,'Abajo',pos=(320,50))
delete = wx.Button(self,-1,'Eliminar',pos=(320,90))
btn.Bind(wx.EVT_BUTTON,self.crear_tabs)
up.Bind(wx.EVT_BUTTON,self.up)
down.Bind(wx.EVT_BUTTON,self.down)
delete.Bind(wx.EVT_BUTTON,self.delete)
self.nombre.Bind(wx.EVT_TEXT_ENTER, self.add_list)
self.Bind(wx.EVT_LISTBOX, self.onListBox, self.listBox)
def crear_tabs(self,evt):
if self.lbl_selection.GetLabel() != '':
lista = {}
for i in range(0,self.listBox.GetCount()):
lista[i] = self.listBox.GetString(i)
self.parent.draw_tab(None,self.lbl_selection.GetLabel(),lista,False)
self.Destroy()
else:
wx.MessageBox("Seleccione un item", "Message" ,wx.OK | wx.ICON_ERROR)
def add_list(self,evt):
n = self.nombre.GetValue()
self.listBox.Append(n)
self.nombre.SetValue('')
def up(self,evt):
n = self.listBox.GetCount()
r = 0
for i in range(0,n):
if self.listBox.GetString(i) == self.listBox.GetStringSelection():
r = i
dato = self.listBox.GetStringSelection()
if r != 0:
r = r - 1
d = self.listBox.GetString(r)
self.listBox.SetString(r,dato)
self.listBox.SetString(r+1,d)
def down(self,evt):
try:
n = self.listBox.GetCount()
r = 0
for i in range(0,n):
if self.listBox.GetString(i) == self.listBox.GetStringSelection():
r = i
dato = self.listBox.GetStringSelection()
if r <= (n-1):
r = r + 1
d = self.listBox.GetString(r)
self.listBox.SetString(r,dato)
self.listBox.SetString(r-1,d)
except Exception as e:
print(e)
def delete(self,evt):
n = self.listBox.GetCount()
r = 0
for i in range(0,n):
if self.listBox.GetString(i) == self.listBox.GetStringSelection():
r = i
self.listBox.Delete(r)
def onListBox(self,evt):
self.lbl_selection.SetLabel(evt.GetEventObject().GetStringSelection())
class Tab(ogl.DrawnShape):
def __init__(self,lista,active):
ogl.DrawnShape.__init__(self)
n = len(lista)
self.diccionario = lista
i = self.buscarElemento(lista,active)
r = (int(n) * 70 + ((int(n)-1))*4)+50
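# overall width: 70 px per tab plus 4 px between tabs, with 50 px of extra body width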
self.calculate_size(r)
self.tabs(n,r,i)
self.labels(n,r)
self.CalculateSize()
def calculate_size(self,r):
w = r/2
self.SetDrawnPen(wx.BLACK_PEN)
self.SetDrawnBrush(wx.WHITE_BRUSH)
return self.DrawPolygon([(w, 100), (-w,100),(-w,-70),(w,-70),(w,100)])
def tabs(self,n,r,i):
w = r / 2
cp4 = 0
for x in range(0,n):
sp = 70
self.SetDrawnPen(wx.BLACK_PEN)
if x == i:
self.SetDrawnBrush(wx.Brush(wx.Colour(240, 240, 240)))
else:
self.SetDrawnBrush(wx.Brush(wx.Colour(155, 155, 155)))
self.DrawPolygon([((-w + cp4),-70),((-w + cp4),-100),(((-w+cp4)+sp),-100),(((-w+cp4)+sp),-70)])
cp4 = cp4 + 74
def labels(self,items,r):
w = r / 2
ran = 0
for x in xrange(0,items):
self.SetDrawnTextColour(wx.BLACK)
self.SetDrawnFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL))
name = self.diccionario[x]
self.DrawText(str(name), (-w+ran+10, -90))
ran = ran + 74
def buscarElemento(self,lista, elemento):
for i in range(0,len(lista)):
if(lista[i] == elemento):
return i
|
mit
| 5,063,618,514,684,932,000
| 23.86755
| 98
| 0.633724
| false
| 2.453595
| false
| false
| false
|
vegeclic/django-regularcom
|
blog/migrations/0001_initial.py
|
1
|
14127
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TaggedItem'
db.create_table('blog_taggeditem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], related_name='blog_tags')),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('blog', ['TaggedItem'])
# Adding model 'CategoryTranslation'
db.create_table('blog_category_translation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Category'], related_name='translations', null=True)),
))
db.send_create_signal('blog', ['CategoryTranslation'])
# Adding unique constraint on 'CategoryTranslation', fields ['language_code', 'master']
db.create_unique('blog_category_translation', ['language_code', 'master_id'])
# Adding model 'Category'
db.create_table('blog_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
))
db.send_create_signal('blog', ['Category'])
# Adding model 'ArticleTranslation'
db.create_table('blog_article_translation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('body', self.gf('django.db.models.fields.TextField')()),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Article'], related_name='translations', null=True)),
))
db.send_create_signal('blog', ['ArticleTranslation'])
# Adding unique constraint on 'ArticleTranslation', fields ['language_code', 'master']
db.create_unique('blog_article_translation', ['language_code', 'master_id'])
# Adding model 'Article'
db.create_table('blog_article', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=200)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
('main_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_main_image', unique=True, null=True)),
('title_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_title_image', unique=True, null=True)),
('thumb_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_thumb_image', unique=True, null=True)),
))
db.send_create_signal('blog', ['Article'])
# Adding M2M table for field authors on 'Article'
m2m_table_name = db.shorten_name('blog_article_authors')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm['blog.article'], null=False)),
('author', models.ForeignKey(orm['accounts.author'], null=False))
))
db.create_unique(m2m_table_name, ['article_id', 'author_id'])
# Adding M2M table for field categories on 'Article'
m2m_table_name = db.shorten_name('blog_article_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm['blog.article'], null=False)),
('category', models.ForeignKey(orm['blog.category'], null=False))
))
db.create_unique(m2m_table_name, ['article_id', 'category_id'])
# Adding model 'Comment'
db.create_table('blog_comment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('participant', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, to=orm['accounts.Account'], null=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Article'])),
('body', self.gf('django.db.models.fields.TextField')()),
('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
))
db.send_create_signal('blog', ['Comment'])
def backwards(self, orm):
# Removing unique constraint on 'ArticleTranslation', fields ['language_code', 'master']
db.delete_unique('blog_article_translation', ['language_code', 'master_id'])
# Removing unique constraint on 'CategoryTranslation', fields ['language_code', 'master']
db.delete_unique('blog_category_translation', ['language_code', 'master_id'])
# Deleting model 'TaggedItem'
db.delete_table('blog_taggeditem')
# Deleting model 'CategoryTranslation'
db.delete_table('blog_category_translation')
# Deleting model 'Category'
db.delete_table('blog_category')
# Deleting model 'ArticleTranslation'
db.delete_table('blog_article_translation')
# Deleting model 'Article'
db.delete_table('blog_article')
# Removing M2M table for field authors on 'Article'
db.delete_table(db.shorten_name('blog_article_authors'))
# Removing M2M table for field categories on 'Article'
db.delete_table(db.shorten_name('blog_article_categories'))
# Deleting model 'Comment'
db.delete_table('blog_comment')
models = {
'accounts.account': {
'Meta': {'object_name': 'Account'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'unique': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'accounts.author': {
'Meta': {'object_name': 'Author'},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['accounts.Account']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'+'", 'unique': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'})
},
'blog.article': {
'Meta': {'object_name': 'Article'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Author']", 'symmetrical': 'False', 'related_name': "'blog_article_authors'"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['blog.Category']", 'symmetrical': 'False', 'related_name': "'blog_article_categories'", 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_main_image'", 'unique': 'True', 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'thumb_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_thumb_image'", 'unique': 'True', 'null': 'True'}),
'title_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_title_image'", 'unique': 'True', 'null': 'True'})
},
'blog.articletranslation': {
'Meta': {'object_name': 'ArticleTranslation', 'unique_together': "[('language_code', 'master')]", 'db_table': "'blog_article_translation'"},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Article']", 'related_name': "'translations'", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'blog.category': {
'Meta': {'object_name': 'Category'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blog.categorytranslation': {
'Meta': {'object_name': 'CategoryTranslation', 'unique_together': "[('language_code', 'master')]", 'db_table': "'blog_category_translation'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Category']", 'related_name': "'translations'", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'blog.comment': {
'Meta': {'object_name': 'Comment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Article']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'to': "orm['accounts.Account']", 'null': 'True'})
},
'blog.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'related_name': "'blog_tags'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'common.image': {
'Meta': {'object_name': 'Image'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'related_name': "'+'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '200'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'", 'object_name': 'ContentType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog']
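# --- Usage note (illustration, not part of the generated migration) -----------
# A South schema migration like this one is applied from the project root with
# the usual management command, e.g.:
#
#   python manage.py migrate blog
#
# and rolled back by migrating the app to the previous migration number.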
|
agpl-3.0
| -4,488,898,404,940,486,700
| 65.328638
| 216
| 0.590784
| false
| 3.72253
| false
| false
| false
|
Ultimaker/Cura
|
cura/XRayPass.py
|
1
|
1577
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os.path
from UM.Resources import Resources
from UM.Application import Application
from UM.PluginRegistry import PluginRegistry
from UM.View.RenderPass import RenderPass
from UM.View.RenderBatch import RenderBatch
from UM.View.GL.OpenGL import OpenGL
from cura.Scene.CuraSceneNode import CuraSceneNode
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
class XRayPass(RenderPass):
def __init__(self, width, height):
super().__init__("xray", width, height)
self._shader = None
self._gl = OpenGL.getInstance().getBindingsObject()
self._scene = Application.getInstance().getController().getScene()
def render(self):
if not self._shader:
self._shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "xray.shader"))
batch = RenderBatch(self._shader, type = RenderBatch.RenderType.NoType, backface_cull = False, blend_mode = RenderBatch.BlendMode.Additive)
for node in DepthFirstIterator(self._scene.getRoot()):
if isinstance(node, CuraSceneNode) and node.getMeshData() and node.isVisible():
batch.addItem(node.getWorldTransformation(copy = False), node.getMeshData(), normal_transformation=node.getCachedNormalMatrix())
self.bind()
self._gl.glDisable(self._gl.GL_DEPTH_TEST)
batch.render(self._scene.getActiveCamera())
self._gl.glEnable(self._gl.GL_DEPTH_TEST)
self.release()
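# --- Usage note (illustration, not part of the original plugin) ---------------
# How this pass gets hooked up is an assumption here: typically the view that
# owns it creates it with the viewport size and registers it with the renderer
# (something along the lines of renderer.addRenderPass(XRayPass(width, height))),
# after which render() is invoked once per frame while the pass is active.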
|
lgpl-3.0
| -1,777,091,487,277,248,500
| 38.425
| 147
| 0.715916
| false
| 3.754762
| false
| false
| false
|
the-nick-of-time/DnD
|
DnD/modules/resourceModule.py
|
1
|
2471
|
import tkinter as tk
from typing import Union
import lib.components as gui
import lib.resourceLib as res
import lib.settingsLib as settings
class ResourceDisplay(gui.Section):
"""Displays a resource like sorcery points or Hit Dice."""
def __init__(self, container: Union[tk.BaseWidget, tk.Tk], resource: res.Resource,
lockMax=False, **kwargs):
super().__init__(container, **kwargs)
self.resource = resource
self.numbers = tk.Frame(self.f)
self.current = gui.NumericEntry(self.numbers, self.resource.number, self.set_current,
width=5)
self.max = gui.NumericEntry(self.numbers, self.resource.maxnumber, self.set_max,
width=5)
if lockMax:
self.max.disable()
self.value = tk.Label(self.numbers, text='*' + str(self.resource.value))
self.buttonFrame = tk.Frame(self.f)
self.use = tk.Button(self.buttonFrame, text='-', command=self.increment)
self.regain = tk.Button(self.buttonFrame, text='+', command=self.decrement)
self.display = tk.Label(self.buttonFrame, width=3)
self.reset_ = tk.Button(self.buttonFrame, text='Reset', command=self.reset)
self._draw()
def _draw(self):
tk.Label(self.f, text=self.resource.name).grid(row=0, column=0)
self.numbers.grid(row=1, column=0)
self.current.grid(1, 0)
tk.Label(self.numbers, text='/').grid(row=1, column=1)
self.max.grid(1, 2)
self.value.grid(row=1, column=4)
self.buttonFrame.grid(row=2, column=0, columnspan=3)
self.display.grid(row=0, column=0)
self.regain.grid(row=0, column=1)
self.use.grid(row=0, column=2)
self.reset_.grid(row=0, column=3)
def update_view(self):
self.max.set(self.resource.maxnumber)
self.current.set(self.resource.number)
def set_current(self, value):
self.resource.number = value
def set_max(self, value):
self.resource.maxnumber = value
def increment(self):
self.resource.regain(1)
self.update_view()
def decrement(self):
val = self.resource.use(1)
self.display.config(text=str(val))
self.update_view()
def reset(self):
self.resource.reset()
self.update_view()
def rest(self, which: settings.RestLength):
self.resource.rest(which)
self.update_view()
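# --- Illustration only (not part of the original module) ----------------------
# ResourceDisplay expects a resource object exposing the attributes and methods
# used above: name, number, maxnumber, value, use(), regain(), reset(), rest().
# Hypothetical wiring (the constructor arguments for res.Resource are not shown
# in this file and are assumed here):
#
#   root = tk.Tk()
#   hit_dice = res.Resource(...)   # placeholder; see lib.resourceLib for the real signature
#   display = ResourceDisplay(root, hit_dice, lockMax=True)
#   root.mainloop()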
|
gpl-2.0
| 771,843,469,293,149,600
| 34.811594
| 93
| 0.613112
| false
| 3.490113
| false
| false
| false
|
lptorres/noah-inasafe
|
web_api/third_party/simplejson/decoder.py
|
1
|
14670
|
"""Implementation of JSONDecoder
"""
from __future__ import absolute_import
import re
import sys
import struct
from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr
from .scanner import make_scanner, JSONDecodeError
def _import_c_scanstring():
try:
from ._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
# NOTE (3.1.0): JSONDecodeError may still be imported from this module for
# compatibility, but it was never in the __all__
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = fromhex('7FF80000000000007FF0000000000000')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u('"'), '\\': u('\u005c'), '/': u('/'),
'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'),
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
_PY3=PY3, _maxunicode=sys.maxunicode):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not _PY3 and not isinstance(content, text_type):
content = text_type(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at"
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\X escape sequence %r"
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
msg = "Invalid \\uXXXX escape sequence"
esc = s[end + 1:end + 5]
escX = esc[1:2]
if len(esc) != 4 or escX == 'x' or escX == 'X':
raise JSONDecodeError(msg, s, end - 1)
try:
uni = int(esc, 16)
except ValueError:
raise JSONDecodeError(msg, s, end - 1)
end += 5
# Check for surrogate pair on UCS-4 systems
# Note that this will join high/low surrogate pairs
# but will also pass unpaired surrogates through
if (_maxunicode > 65535 and
uni & 0xfc00 == 0xd800 and
s[end:end + 2] == '\\u'):
esc2 = s[end + 2:end + 6]
escX = esc2[1:2]
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
try:
uni2 = int(esc2, 16)
except ValueError:
raise JSONDecodeError(msg, s, end)
if uni2 & 0xfc00 == 0xdc00:
uni = 0x10000 + (((uni - 0xd800) << 10) |
(uni2 - 0xdc00))
end += 6
char = unichr(uni)
# Append the unescaped character
_append(char)
return _join(chunks), end
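# Example (illustration only, not part of the original module): scanning from
# the index just after an opening quote returns the decoded text together with
# the index one past the closing quote:
#   py_scanstring('"abc"', 1)  ->  (u'abc', 5)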
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(state, encoding, strict, scan_once, object_hook,
object_pairs_hook, memo=None,
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
(s, end) = state
# Backwards compatibility
if memo is None:
memo = {}
memo_get = memo.setdefault
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end + 1
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError(
"Expecting property name enclosed in double quotes",
s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting ':' delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
value, end = scan_once(s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError(
"Expecting property name enclosed in double quotes",
s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
(s, end) = state
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
elif nextchar == '':
raise JSONDecodeError("Expecting value or ']'", s, end)
_append = values.append
while True:
value, end = scan_once(s, end)
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1)
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
*strict* controls the parser's behavior when it encounters an
invalid control character in a string. The default setting of
``True`` means that unescaped control characters are parse errors, if
``False`` then control characters will be allowed in strings.
"""
if encoding is None:
encoding = DEFAULT_ENCODING
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.memo = {}
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
if _PY3 and isinstance(s, binary_type):
s = s.decode(self.encoding)
obj, end = self.raw_decode(s)
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj
def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
Optionally, ``idx`` can be used to specify an offset in ``s`` where
the JSON document begins.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
if _PY3 and not isinstance(s, text_type):
raise TypeError("Input string must be text, not bytes")
return self.scan_once(s, idx=_w(s, idx).end())
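# --- Usage sketch (illustration, not part of the original module) -------------
# The public API documented above, in brief:
#
#   decoder = JSONDecoder()
#   decoder.decode('{"a": [1, 2.5, null]}')          # -> {'a': [1, 2.5, None]}
#   obj, end = decoder.raw_decode('{"a": 1} trailing')
#   # obj == {'a': 1}, end == 8; unlike decode(), raw_decode() ignores the
#   # extra data that follows the first JSON document.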
|
gpl-3.0
| 8,479,624,750,722,993,000
| 35.712082
| 79
| 0.519632
| false
| 4.300792
| false
| false
| false
|
miaoski/bsideslv-plc-home
|
hmi.py
|
1
|
1699
|
# -*- coding: utf8 -*-
# This trivial HMI is decoupled from ModBus server
import gevent
from flask import Flask, render_template
from flask_sockets import Sockets
from pymodbus.client.sync import ModbusTcpClient
from time import sleep
import sys
app = Flask(__name__)
sockets = Sockets(app)
try:
myip = sys.argv[1]
except IndexError:
    print 'Usage: python hmi.py 192.168.42.1'
sys.exit(1)
client = ModbusTcpClient(myip)
def read_di(num = 20):
rr = client.read_discrete_inputs(1, num).bits[:num]
di = ['1' if x else '0' for x in rr]
return di
def read_co(num = 20):
rr = client.read_coils(1, num).bits[:num]
di = ['1' if x else '0' for x in rr]
return di
def read_ir(num = 5):
rr = client.read_input_registers(1, num).registers[:num]
di = map(str, rr)
return di
def read_hr(num = 5):
rr = client.read_holding_registers(1, num).registers[:num]
di = map(str, rr)
return di
@sockets.route('/data')
def read_data(ws):
while not ws.closed:
try:
di = read_di()
co = read_co()
ir = read_ir()
hr = read_hr()
except:
print 'Exception. Wait for next run.'
gevent.sleep(1)
continue
ws.send('\n'.join((','.join(di), ','.join(co), ','.join(ir), ','.join(hr))))
gevent.sleep(0.3)
print "Connection Closed!!!", reason
@app.route('/')
def homepage():
return render_template('hmi.html')
# main
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer((myip, 8000), app, handler_class=WebSocketHandler)
server.serve_forever()
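# --- Illustration only (not part of the original script) ----------------------
# Each websocket message produced by read_data() is four comma-separated lines:
# discrete inputs, coils, input registers, holding registers. A Python client
# could unpack one message like this (variable names are placeholders):
#
#   di_line, co_line, ir_line, hr_line = message.split('\n')
#   di = [bit == '1' for bit in di_line.split(',')]
#   hr = [int(v) for v in hr_line.split(',')]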
|
gpl-2.0
| 4,138,471,727,828,687,000
| 23.623188
| 84
| 0.608005
| false
| 3.248566
| false
| false
| false
|
houshengbo/nova_vmware_compute_driver
|
nova/virt/hyperv/vmops.py
|
1
|
25971
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import os
import uuid
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.StrOpt('vswitch_name',
default=None,
help='Default vSwitch Name, '
'if none provided first external is used'),
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='qemu-img is used to convert between '
'different image types'),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help='Attaches the Config Drive image as a cdrom drive '
'instead of a disk drive')
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
CONF.import_opt('use_cow_images', 'nova.config')
class VMOps(baseops.BaseOps):
def __init__(self, volumeops):
super(VMOps, self).__init__()
self._vmutils = vmutils.VMUtils()
self._volumeops = volumeops
def list_instances(self):
""" Return the names of all the instances known to Hyper-V. """
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vms
def get_info(self, instance):
"""Get information about the VM"""
LOG.debug(_("get_info called for instance"), instance=instance)
return self._get_info(instance['name'])
def _get_info(self, instance_name):
vm = self._vmutils.lookup(self._conn, instance_name)
if vm is None:
raise exception.InstanceNotFound(instance=instance_name)
vm = self._conn.Msvm_ComputerSystem(
ElementName=instance_name)[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')
settings_paths = [v.path_() for v in vmsettings]
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
summary_info = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)[1]
info = summary_info[0]
LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
state = constants.HYPERV_POWER_STATE[info.EnabledState]
memusage = str(info.MemoryUsage)
numprocs = str(info.NumberOfProcessors)
uptime = str(info.UpTime)
LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d,"
" mem=%(memusage)s, num_cpu=%(numprocs)s,"
" uptime=%(uptime)s"), locals())
return {'state': state,
'max_mem': info.MemoryUsage,
'mem': info.MemoryUsage,
'num_cpu': info.NumberOfProcessors,
'cpu_time': info.UpTime}
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
""" Create a new VM and start it."""
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is not None:
raise exception.InstanceExists(name=instance['name'])
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
block_device_info)
#If is not a boot from volume spawn
if not (ebs_root):
#Fetch the file, assume it is a VHD file.
vhdfile = self._vmutils.get_vhd_path(instance['name'])
try:
self._cache_image(fn=self._vmutils.fetch_image,
context=context,
target=vhdfile,
fname=instance['image_ref'],
image_id=instance['image_ref'],
user=instance['user_id'],
project=instance['project_id'],
cow=CONF.use_cow_images)
except Exception as exn:
LOG.exception(_('cache image failed: %s'), exn)
self.destroy(instance)
try:
self._create_vm(instance)
if not ebs_root:
self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
constants.IDE_DISK)
else:
self._volumeops.attach_boot_volume(block_device_info,
instance['name'])
#A SCSI controller for volumes connection is created
self._create_scsi_controller(instance['name'])
for vif in network_info:
mac_address = vif['address'].replace(':', '')
self._create_nic(instance['name'], mac_address)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
admin_password)
LOG.debug(_('Starting VM %s '), instance['name'])
self._set_vm_state(instance['name'], 'Enabled')
LOG.info(_('Started VM %s '), instance['name'])
except Exception as exn:
LOG.exception(_('spawn vm failed: %s'), exn)
self.destroy(instance)
raise exn
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
            raise vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
                                          CONF.config_drive_format)
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_password and CONF.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files, extra_md=extra_md)
instance_path = self._vmutils.get_instance_path(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except exception.ProcessExecutionError, e:
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
raise
if not CONF.config_drive_cdrom:
drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
os.remove(configdrive_path_iso)
else:
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
self._attach_ide_drive(instance['name'], configdrive_path, 1, 0,
drive_type)
def _create_vm(self, instance):
"""Create a VM but don't start it. """
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance["name"]
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
[], None, vs_gs_data.GetText_(1))[1:]
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
else:
success = (ret_val == 0)
if not success:
raise vmutils.HyperVException(_('Failed to create VM %s') %
instance["name"])
LOG.debug(_('Created VM %s...'), instance["name"])
vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
vmsetting = [s for s in vmsettings
if s.SettingType == 3][0] # avoid snapshots
memsetting = vmsetting.associators(
wmi_result_class='Msvm_MemorySettingData')[0]
#No Dynamic Memory, so reservation, limit and quantity are identical.
mem = long(str(instance['memory_mb']))
memsetting.VirtualQuantity = mem
memsetting.Reservation = mem
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance["name"])
procsetting = vmsetting.associators(
wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
if CONF.limit_cpu_features:
procsetting.LimitProcessorFeatures = True
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
""" Create an iscsi controller ready to mount volumes """
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
scsicontrldefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
AND InstanceID LIKE '%Default%'")[0]
if scsicontrldefault is None:
raise vmutils.HyperVException(_('Controller not found'))
scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_ResourceAllocationSettingData', scsicontrldefault)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
scsiresource = self._vmutils.add_virt_resource(self._conn,
scsicontrl, vm)
if scsiresource is None:
raise vmutils.HyperVException(
_('Failed to add scsi controller to VM %s') %
vm_name)
def _get_ide_controller(self, vm, ctrller_addr):
#Find the IDE controller for the vm.
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
and r.Address == str(ctrller_addr)]
return ctrller
def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm"""
LOG.debug(_('Creating disk for %(vm_name)s by attaching'
' disk file %(path)s') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
ctrller = self._get_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
resSubType = 'Microsoft Synthetic Disk Drive'
elif drive_type == constants.IDE_DVD:
resSubType = 'Microsoft Synthetic DVD Drive'
#Find the default disk drive object for the vm and clone it.
drivedflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE '%(resSubType)s'\
AND InstanceID LIKE '%%Default%%'" % locals())[0]
drive = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_ResourceAllocationSettingData', drivedflt)
#Set the IDE ctrller as parent.
drive.Parent = ctrller[0].path_()
drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn,
drive, vm)
if new_resources is None:
raise vmutils.HyperVException(
_('Failed to add drive to VM %s') %
vm_name)
drive_path = new_resources[0]
LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') %
locals())
if drive_type == constants.IDE_DISK:
resSubType = 'Microsoft Virtual Hard Disk'
elif drive_type == constants.IDE_DVD:
resSubType = 'Microsoft Virtual CD/DVD Disk'
#Find the default VHD disk object.
drivedefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE '%(resSubType)s' AND \
InstanceID LIKE '%%Default%%' " % locals())[0]
#Clone the default and point it to the image file.
res = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_ResourceAllocationSettingData', drivedefault)
#Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn, res, vm)
if new_resources is None:
raise vmutils.HyperVException(
_('Failed to add %(drive_type)s image to VM %(vm_name)s') %
locals())
LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
locals())
def _create_nic(self, vm_name, mac):
"""Create a (synthetic) nic and attach it to the vm"""
LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
extswitch = self._find_external_network()
if extswitch is None:
raise vmutils.HyperVException(_('Cannot find vSwitch'))
vm = vms[0]
switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
#Find the default nic and clone it to create a new nic for the vm.
#Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
#Linux Integration Components installed.
syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
default_nic_data = [n for n in syntheticnics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_SyntheticEthernetPortSettingData',
default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(
Name=str(uuid.uuid4()),
FriendlyName=vm_name,
ScopeOfResidence="",
VirtualSwitch=extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise vmutils.HyperVException(_('Failed creating port for %s') %
vm_name)
ext_path = extswitch.path_()
LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
% locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
new_nic_data.Address = mac
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
#Add the new nic to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn,
new_nic_data, vm)
if new_resources is None:
raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _find_external_network(self):
"""Find the vswitch that is connected to the physical nic.
Assumes only one physical nic on the host
"""
#If there are no physical nics connected to networks, return.
LOG.debug(_("Attempting to bind NIC to %s ")
% CONF.vswitch_name)
if CONF.vswitch_name:
LOG.debug(_("Attempting to bind NIC to %s ")
% CONF.vswitch_name)
bound = self._conn.Msvm_VirtualSwitch(
ElementName=CONF.vswitch_name)
else:
LOG.debug(_("No vSwitch specified, attaching to default"))
self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
if len(bound) == 0:
return None
if CONF.vswitch_name:
return self._conn.Msvm_VirtualSwitch(
ElementName=CONF.vswitch_name)[0]\
.associators(wmi_result_class='Msvm_SwitchPort')[0]\
.associators(wmi_result_class='Msvm_VirtualSwitch')[0]
else:
return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
.associators(wmi_result_class='Msvm_SwitchPort')[0]\
.associators(wmi_result_class='Msvm_VirtualSwitch')[0]
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
raise exception.InstanceNotFound(instance_id=instance["id"])
self._set_vm_state(instance['name'], 'Reboot')
def destroy(self, instance, network_info=None, cleanup=True):
"""Destroy the VM. Also destroy the associated VHD disk files"""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
return
vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Stop the VM first.
self._set_vm_state(instance['name'], 'Disabled')
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
disks = [r for r in rasds
if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
disk_files = []
volumes = [r for r in rasds
if r.ResourceSubType == 'Microsoft Physical Disk Drive']
volumes_drives_list = []
#collect the volumes information before destroying the VM.
for volume in volumes:
hostResources = volume.HostResource
drive_path = hostResources[0]
#Appending the Msvm_Disk path
volumes_drives_list.append(drive_path)
#Collect disk file information before destroying the VM.
for disk in disks:
disk_files.extend([c for c in disk.Connection])
#Nuke the VM. Does not destroy disks.
(job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
        elif ret_val == 0:
            success = True
        else:
            success = False
if not success:
raise vmutils.HyperVException(_('Failed to destroy vm %s') %
instance['name'])
#Disconnect volumes
for volume_drive in volumes_drives_list:
self._volumeops.disconnect_volume(volume_drive)
#Delete associated vhd disk files.
for disk in disk_files:
vhdfile = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
disk.replace("'", "''") + "'")[0]
LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
% {'vhdfile': vhdfile, 'name': instance['name']})
vhdfile.Delete()
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
self._set_vm_state(instance["name"], 'Paused')
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug(_("Suspend instance"), instance=instance)
self._set_vm_state(instance["name"], 'Suspended')
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
self._set_vm_state(instance["name"], 'Disabled')
def power_on(self, instance):
"""Power on the specified instance"""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def _set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM"""
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
if len(vms) == 0:
return False
(job, ret_val) = vms[0].RequestStateChange(
constants.REQ_POWER_STATE[req_state])
success = False
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
elif ret_val == 0:
success = True
elif ret_val == 32775:
#Invalid state for current operation. Typically means it is
#already in the state requested
success = True
if success:
LOG.info(_("Successfully changed vm state of %(vm_name)s"
" to %(req_state)s") % locals())
else:
msg = _("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") % locals()
LOG.error(msg)
raise vmutils.HyperVException(msg)
def _cache_image(self, fn, target, fname, cow=False, Size=None,
*args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
copy for use by the hypervisor.
The underlying method should specify a kwarg of target representing
where the image will be saved.
fname is used as the filename of the base image. The filename needs
to be unique to a given image.
If cow is True, it will make a CoW image instead of a copy.
"""
@lockutils.synchronized(fname, 'nova-')
def call_if_not_exists(path, fn, *args, **kwargs):
if not os.path.exists(path):
fn(target=path, *args, **kwargs)
if not os.path.exists(target):
LOG.debug(_("use_cow_image:%s"), cow)
if cow:
base = self._vmutils.get_base_vhd_path(fname)
call_if_not_exists(base, fn, *args, **kwargs)
image_service = self._conn.query(
"Select * from Msvm_ImageManagementService")[0]
(job, ret_val) = \
image_service.CreateDifferencingVirtualHardDisk(
Path=target, ParentPath=base)
LOG.debug(
"Creating difference disk: JobID=%s, Source=%s, Target=%s",
job, base, target)
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
else:
success = (ret_val == 0)
if not success:
raise vmutils.HyperVException(
_('Failed to create Difference Disk from '
'%(base)s to %(target)s') % locals())
else:
call_if_not_exists(target, fn, *args, **kwargs)
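# --- Illustration only (not part of the original driver) ----------------------
# The caching contract described in _cache_image above, reduced to its essence:
# the base image is created at most once (a per-image lock guards the check),
# and each instance then receives either a plain copy of the base or a
# copy-on-write (differencing) disk whose parent is the base. Pseudo-Python,
# with placeholder helper names:
#
#   @lockutils.synchronized(image_id, 'nova-')
#   def ensure_base(path):
#       if not os.path.exists(path):
#           fetch_image(target=path)          # placeholder for the real fetch
#
#   ensure_base(base_path)
#   if use_cow:
#       create_differencing_vhd(parent=base_path, child=target_path)
#   else:
#       copy(base_path, target_path)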
|
apache-2.0
| -4,034,157,555,488,214,000
| 42.141196
| 79
| 0.576643
| false
| 4.102844
| true
| false
| false
|
mganeva/mantid
|
qt/applications/workbench/workbench/widgets/plotselector/presenter.py
|
1
|
15293
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import absolute_import, print_function
import os
import re
from .model import PlotSelectorModel
from .view import PlotSelectorView, Column
class PlotSelectorPresenter(object):
"""
Presenter for the plot selector widget. This class can be
responsible for the creation of the model and view, passing in
the GlobalFigureManager as an argument, or the presenter and view
can be passed as arguments (only intended for testing).
"""
def __init__(self, global_figure_manager, view=None, model=None):
"""
Initialise the presenter, creating the view and model, and
setting the initial plot list
:param global_figure_manager: The GlobalFigureManager class
:param view: Optional - a view to use instead of letting the
class create one (intended for testing)
:param model: Optional - a model to use instead of letting
the class create one (intended for testing)
"""
# Create model and view, or accept mocked versions
if view is None:
self.view = PlotSelectorView(self)
else:
self.view = view
if model is None:
self.model = PlotSelectorModel(self, global_figure_manager)
else:
self.model = model
# Make sure the plot list is up to date
self.update_plot_list()
def get_plot_name_from_number(self, plot_number):
return self.model.get_plot_name_from_number(plot_number)
# ------------------------ Plot Updates ------------------------
def update_plot_list(self):
"""
Updates the plot list in the model and the view. Filter text
is applied to the updated selection if required.
"""
plot_list = self.model.get_plot_list()
self.view.set_plot_list(plot_list)
def append_to_plot_list(self, plot_number):
"""
Appends the plot name to the end of the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.append_to_plot_list(plot_number)
self.view.set_visibility_icon(plot_number, self.model.is_visible(plot_number))
def remove_from_plot_list(self, plot_number):
"""
Removes the plot name from the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.remove_from_plot_list(plot_number)
def rename_in_plot_list(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new name for the plot
"""
self.view.rename_in_plot_list(plot_number, new_name)
# ----------------------- Plot Filtering ------------------------
def filter_text_changed(self):
"""
Called by the view when the filter text is changed (e.g. by
typing or clearing the text)
"""
if self.view.get_filter_text():
self.view.filter_plot_list()
else:
self.view.unhide_all_plots()
def is_shown_by_filter(self, plot_number):
"""
:param plot_number: The unique number in GlobalFigureManager
:return: True if shown, or False if filtered out
"""
filter_text = self.view.get_filter_text()
plot_name = self.get_plot_name_from_number(plot_number)
return filter_text.lower() in plot_name.lower()
# ------------------------ Plot Showing ------------------------
def show_single_selected(self):
"""
When a list item is double clicked the view calls this method
to bring the selected plot to the front
"""
plot_number = self.view.get_currently_selected_plot_number()
self._make_plot_active(plot_number)
def show_multiple_selected(self):
"""
Shows multiple selected plots, e.g. from pressing the 'Show'
button with multiple selected plots
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._make_plot_active(plot_number)
def _make_plot_active(self, plot_number):
"""
Make the plot with the given name active - bring it to the
front and make it the choice for overplotting
:param plot_number: The unique number in GlobalFigureManager
"""
try:
self.model.show_plot(plot_number)
except ValueError as e:
print(e)
def set_active_font(self, plot_number):
"""
Set the icon for the active plot to be colored
:param plot_number: The unique number in GlobalFigureManager
"""
active_plot_number = self.view.active_plot_number
if active_plot_number > 0:
try:
self.view.set_active_font(active_plot_number, False)
except TypeError:
pass
# The last active plot could have been closed
# already, so there is nothing to do
self.view.set_active_font(plot_number, True)
self.view.active_plot_number = plot_number
# ------------------------ Plot Hiding -------------------------
def hide_selected_plots(self):
"""
Hide all plots that are selected in the view
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._hide_plot(plot_number)
def _hide_plot(self, plot_number):
"""
Hides a single plot
"""
try:
self.model.hide_plot(plot_number)
except ValueError as e:
print(e)
def toggle_plot_visibility(self, plot_number):
"""
Toggles a plot between hidden and shown
:param plot_number: The unique number in GlobalFigureManager
"""
if self.model.is_visible(plot_number):
self._hide_plot(plot_number)
else:
self._make_plot_active(plot_number)
self.update_visibility_icon(plot_number)
def update_visibility_icon(self, plot_number):
"""
Updates the icon to indicate a plot as hidden or visible
:param plot_number: The unique number in GlobalFigureManager
"""
try:
is_visible = self.model.is_visible(plot_number)
self.view.set_visibility_icon(plot_number, is_visible)
except ValueError:
# There is a chance the plot was closed, which calls an
# update to this method. If we can not get the visibility
# status it is safe to assume the plot has been closed.
pass
# ------------------------ Plot Renaming ------------------------
def rename_figure(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new plot name
"""
try:
self.model.rename_figure(plot_number, new_name)
except ValueError as e:
# We need to undo the rename in the view
self.view.rename_in_plot_list(plot_number, new_name)
print(e)
# ------------------------ Plot Closing -------------------------
def close_action_called(self):
"""
This is called by the view when closing plots is requested
(e.g. pressing close or delete).
"""
selected_plots = self.view.get_all_selected_plot_numbers()
self._close_plots(selected_plots)
def close_single_plot(self, plot_number):
"""
This is used to close plots when a close action is called
that does not refer to the selected plot(s)
:param plot_number: The unique number in GlobalFigureManager
"""
self._close_plots([plot_number])
def _close_plots(self, list_of_plot_numbers):
"""
        Accepts a list of plot numbers to close
        :param list_of_plot_numbers: A list of the plot numbers to close
"""
for plot_number in list_of_plot_numbers:
try:
self.model.close_plot(plot_number)
except ValueError as e:
print(e)
# ----------------------- Plot Sorting --------------------------
def set_sort_order(self, is_ascending):
"""
Sets the sort order in the view
:param is_ascending: If true ascending order, else descending
"""
self.view.set_sort_order(is_ascending)
def set_sort_type(self, sort_type):
"""
Sets the sort order in the view
:param sort_type: A Column enum with the column to sort on
"""
self.view.set_sort_type(sort_type)
self.update_last_active_order()
def update_last_active_order(self):
"""
        Update the sort keys in the view. This is only required when
        changes to the last shown order occur in the model; when a plot
        is renamed the key has already been set.
"""
if self.view.sort_type() == Column.LastActive:
self._set_last_active_order()
def _set_last_active_order(self):
"""
Set the last shown order in the view. This checks the sorting
currently set and then sets the sort keys to the appropriate
values
"""
last_active_values = self.model.last_active_values()
self.view.set_last_active_values(last_active_values)
def get_initial_last_active_value(self, plot_number):
"""
Gets the initial last active value for a plot just added, in
this case it is assumed to not have been shown
:param plot_number: The unique number in GlobalFigureManager
:return: A string with the last active value
"""
return '_' + self.model.get_plot_name_from_number(plot_number)
def get_renamed_last_active_value(self, plot_number, old_last_active_value):
"""
        Gets the last active value for a plot that was renamed. If the
        plot already has a numeric value (i.e. it has been shown) that
        value is retained, otherwise an initial value is generated.
:param plot_number: The unique number in GlobalFigureManager
:param old_last_active_value: The previous last active value
"""
if old_last_active_value.isdigit():
return old_last_active_value
else:
return self.get_initial_last_active_value(plot_number)
# ---------------------- Plot Exporting -------------------------
def export_plots_called(self, extension):
"""
Export plots called from the view, then a single or multiple
plots exported depending on the number currently selected
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
plot_numbers = self.view.get_all_selected_plot_numbers()
if len(plot_numbers) == 1:
self._export_single_plot(plot_numbers[0], extension)
elif len(plot_numbers) > 1:
self._export_multiple_plots(plot_numbers, extension)
def _export_single_plot(self, plot_number, extension):
"""
Called when a single plot is selected to export - prompts for
a filename then tries to save the plot
:param plot_number: The unique number in GlobalFigureManager
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
absolute_path = self.view.get_file_name_for_saving(extension)
        if not absolute_path.endswith(extension):
absolute_path += extension
try:
self.model.export_plot(plot_number, absolute_path)
except ValueError as e:
print(e)
def _export_multiple_plots(self, plot_numbers, extension):
"""
Export all selected plots in the plot_numbers list, first
prompting for a save directory then sanitising plot names to
unique, usable file names
:param plot_numbers: A list of plot numbers to export
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
dir_name = self.view.get_directory_name_for_saving()
# A temporary dictionary holding plot numbers as keys, plot
# names as values
plots = {}
for plot_number in plot_numbers:
plot_name = self.model.get_plot_name_from_number(plot_number)
plot_name = self._replace_special_characters(plot_name)
if plot_name in plots.values():
plot_name = self._make_unique_name(plot_name, plots)
plots[plot_number] = plot_name
self._export_plot(plot_number, plot_name, dir_name, extension)
def _replace_special_characters(self, string):
"""
Removes any characters that are not valid in file names
across all operating systems ('/' for Linux/Mac), more for
Windows
:param string: The string to replace characters in
:return: The string with special characters replace by '-'
"""
return re.sub(r'[<>:"/|\\?*]', r'-', string)
def _make_unique_name(self, name, dictionary):
"""
Given a name and a dictionary, make a unique name that does
not already exist in the dictionary values by appending
' (1)', ' (2)', ' (3)' etc. to the end of the name
:param name: A string with the non-unique name
:param dictionary: A dictionary with string values
:return : The unique plot name
"""
i = 1
while True:
plot_name_attempt = name + ' ({})'.format(str(i))
if plot_name_attempt not in dictionary.values():
break
i += 1
return plot_name_attempt
def _export_plot(self, plot_number, plot_name, dir_name, extension):
"""
Given a plot number, plot name, directory and extension
construct the absolute path name and call the model to save
the figure
:param plot_number: The unique number in GlobalFigureManager
:param plot_name: The name to use for saving
:param dir_name: The directory to save to
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
if dir_name:
filename = os.path.join(dir_name, plot_name + extension)
try:
self.model.export_plot(plot_number, filename)
except ValueError as e:
print(e)
|
gpl-3.0
| 4,486,501,413,745,917,000
| 36.760494
| 86
| 0.589028
| false
| 4.316399
| false
| false
| false
|
DailyActie/Surrogate-Model
|
surrogate/sampling/samLatinHypercube.py
|
1
|
8477
|
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
import numpy as np
def samLatinHypercube(n, samples=None, criterion=None, iterations=None):
"""Generate a latin-hypercube design
:param n: The number of factors to generate samples for
:param samples: The number of samples to generate for each factor (Default: n)
    :param criterion: Allowable values are "center" or "c", "maximin" or "m",
        "centermaximin" or "cm", and "correlate" ("correlation") or "corr". If no
        value is given, the design is simply randomized.
    :param iterations: The number of iterations in the maximin and correlation algorithms
(Default: 5).
:returns: An n-by-samples design matrix that has been normalized so factor values
are uniformly spaced between zero and one.
This code was originally published by the following individuals for use with
Scilab:
- Copyright (C) 2012 - 2013 - Michael Baudin
- Copyright (C) 2012 - Maria Christopoulou
- Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
- Copyright (C) 2009 - Yann Collette
- Copyright (C) 2009 - CEA - Jean-Marc Martinez
web: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
:Example:
A 3-factor design (defaults to 3 samples):
>>> samLatinHypercube(3)
array([[ 0.40069325, 0.08118402, 0.69763298],
[ 0.19524568, 0.41383587, 0.29947106],
[ 0.85341601, 0.75460699, 0.360024 ]])
A 4-factor design with 6 samples:
>>> samLatinHypercube(4, samples=6)
array([[ 0.27226812, 0.02811327, 0.62792445, 0.91988196],
[ 0.76945538, 0.43501682, 0.01107457, 0.09583358],
[ 0.45702981, 0.76073773, 0.90245401, 0.18773015],
[ 0.99342115, 0.85814198, 0.16996665, 0.65069309],
[ 0.63092013, 0.22148567, 0.33616859, 0.36332478],
[ 0.05276917, 0.5819198 , 0.67194243, 0.78703262]])
A 2-factor design with 5 centered samples:
>>> samLatinHypercube(2, samples=5, criterion='center')
array([[ 0.3, 0.5],
[ 0.7, 0.9],
[ 0.1, 0.3],
[ 0.9, 0.1],
[ 0.5, 0.7]])
A 3-factor design with 4 samples where the minimum distance between
all samples has been maximized:
>>> samLatinHypercube(3, samples=4, criterion='maximin')
array([[ 0.02642564, 0.55576963, 0.50261649],
[ 0.51606589, 0.88933259, 0.34040838],
[ 0.98431735, 0.0380364 , 0.01621717],
[ 0.40414671, 0.33339132, 0.84845707]])
A 4-factor design with 5 samples where the samples are as uncorrelated
as possible (within 10 iterations):
>>> samLatinHypercube(4, samples=5, criterion='correlate', iterations=10)
"""
H = None
if samples is None:
samples = n
if criterion is not None:
        assert criterion.lower() in ('center', 'c', 'maximin', 'm',
                                     'centermaximin', 'cm', 'correlate',
                                     'correlation', 'corr'), 'Invalid value for "criterion": {}'.format(criterion)
else:
H = _lhsclassic(n, samples)
if criterion is None:
criterion = 'center'
if iterations is None:
iterations = 5
if H is None:
if criterion.lower() in ('center', 'c'):
H = _lhscentered(n, samples)
elif criterion.lower() in ('maximin', 'm'):
H = _lhsmaximin(n, samples, iterations, 'maximin')
elif criterion.lower() in ('centermaximin', 'cm'):
H = _lhsmaximin(n, samples, iterations, 'centermaximin')
        elif criterion.lower() in ('correlate', 'correlation', 'corr'):
H = _lhscorrelate(n, samples, iterations)
return H
################################################################################
def _lhsclassic(n, samples):
# Generate the intervals
cut = np.linspace(0, 1, samples + 1)
# Fill points uniformly in each interval
u = np.random.rand(samples, n)
a = cut[:samples]
b = cut[1:samples + 1]
rdpoints = np.zeros_like(u)
for j in range(n):
rdpoints[:, j] = u[:, j] * (b - a) + a
# Make the random pairings
H = np.zeros_like(rdpoints)
for j in range(n):
order = np.random.permutation(range(samples))
H[:, j] = rdpoints[order, j]
return H
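# Illustrative note (not part of the original module): with samples=4 the unit
# interval is cut at 0, 0.25, 0.5, 0.75 and 1, one point is drawn uniformly
# inside each strip, and each column is then shuffled independently so every
# factor still covers all four strata.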
################################################################################
def _lhscentered(n, samples):
# Generate the intervals
cut = np.linspace(0, 1, samples + 1)
# Fill points uniformly in each interval
u = np.random.rand(samples, n)
a = cut[:samples]
b = cut[1:samples + 1]
_center = (a + b) / 2
# Make the random pairings
H = np.zeros_like(u)
for j in range(n):
H[:, j] = np.random.permutation(_center)
return H
################################################################################
def _lhsmaximin(n, samples, iterations, lhstype):
maxdist = 0
# Maximize the minimum distance between points
for i in range(iterations):
if lhstype == 'maximin':
Hcandidate = _lhsclassic(n, samples)
else:
Hcandidate = _lhscentered(n, samples)
d = _pdist(Hcandidate)
if maxdist < np.min(d):
maxdist = np.min(d)
H = Hcandidate.copy()
return H
################################################################################
def _lhscorrelate(n, samples, iterations):
mincorr = np.inf
# Minimize the components correlation coefficients
for i in range(iterations):
# Generate a random LHS
Hcandidate = _lhsclassic(n, samples)
R = np.corrcoef(Hcandidate)
if np.max(np.abs(R[R != 1])) < mincorr:
mincorr = np.max(np.abs(R - np.eye(R.shape[0])))
print('new candidate solution found with max,abs corrcoef = {}'.format(mincorr))
H = Hcandidate.copy()
return H
################################################################################
def _pdist(x):
"""Calculate the pair-wise point distances of a matrix
:param x: An m-by-n array of scalars, where there are m points in n dimensions.
:type x: 2d-array
:returns: d array
A 1-by-b array of scalars, where b = m*(m - 1)/2. This array contains
all the pair-wise point distances, arranged in the order (1, 0),
(2, 0), ..., (m-1, 0), (2, 1), ..., (m-1, 1), ..., (m-1, m-2).
:Example:
>>> x = np.array([[0.1629447, 0.8616334],
... [0.5811584, 0.3826752],
... [0.2270954, 0.4442068],
... [0.7670017, 0.7264718],
... [0.8253975, 0.1937736]])
>>> _pdist(x)
array([ 0.6358488, 0.4223272, 0.6189940, 0.9406808, 0.3593699,
0.3908118, 0.3087661, 0.6092392, 0.6486001, 0.5358894])
"""
x = np.atleast_2d(x)
assert len(x.shape) == 2, 'Input array must be 2d-dimensional'
m, n = x.shape
if m < 2:
return []
d = []
for i in range(m - 1):
for j in range(i + 1, m):
d.append((sum((x[j, :] - x[i, :]) ** 2)) ** 0.5)
return np.array(d)
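# Illustrative usage sketch (not part of the original module): exercises the
# sampler end to end. The seed, factor count and sample count below are
# assumptions chosen purely for demonstration.
if __name__ == '__main__':
    np.random.seed(42)  # assumed seed, only to make the demo reproducible
    design = samLatinHypercube(4, samples=6, criterion='maximin', iterations=10)
    print(design.shape)                 # (6, 4): one row per sample, one column per factor
    print(design.min(), design.max())   # all values fall inside the unit hypercube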
|
mit
| 210,019,104,960,525,800
| 33.044177
| 99
| 0.572018
| false
| 3.502893
| false
| false
| false
|
pyannote/pyannote-parser
|
tests/test_repere.py
|
1
|
2075
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014-2015 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import print_function
import pytest
from pyannote.core import Segment
from pyannote.parser import REPEREParser
import tempfile
import os
SAMPLE_ANNOTATION = """uri1 1.0 3.5 speech alice
uri1 3.0 7.5 speech barbara
uri1 6.0 9.0 speech chris
"""
@pytest.fixture
def sample_annotation(request):
_, filename = tempfile.mkstemp()
with open(filename, 'w') as f:
f.write(SAMPLE_ANNOTATION)
def delete():
os.remove(filename)
request.addfinalizer(delete)
return filename
def test_load_annotation(sample_annotation):
parser = REPEREParser()
annotations = parser.read(sample_annotation)
speech1 = annotations(uri="uri1", modality="speech")
assert list(speech1.itertracks(label=True)) == [
(Segment(1, 3.5), 0, 'alice'),
(Segment(3, 7.5), 1, 'barbara'),
(Segment(6, 9), 2, 'chris')]
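# Illustrative note (not in the original test file): judging from
# SAMPLE_ANNOTATION and the assertions above, each REPERE line is expected to
# carry five whitespace-separated fields: uri, start time, end time, modality
# and label.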
|
mit
| 1,854,699,619,890,575,400
| 31.920635
| 79
| 0.729508
| false
| 3.764065
| false
| false
| false
|
Rdbaker/Mealbound
|
tests/models/test_transactions.py
|
1
|
4543
|
"""Test the Transaction models."""
from unittest.mock import patch
import pytest
from ceraon.models.transactions import Transaction
@pytest.mark.usefixtures('db')
class TestTransaction:
"""Transaction tests."""
def test_get_by_id(self, meal, host, guest):
"""Get Transaction by id."""
transaction = Transaction(payer=guest, amount=meal.price, payee=host,
meal=meal)
transaction.save()
retrieved = Transaction.find(transaction.id)
assert retrieved == transaction
@patch('ceraon.models.transactions.stripe')
def test_charge_returns_true_without_error(self, stripe_mock, transaction):
"""Test that charge() returns True if no stripe error is raised."""
assert transaction.charge() is True
@patch('ceraon.models.transactions.stripe')
def test_successful_charge_sets_property(self, stripe_mock, transaction):
"""Test that charge() sets transaction_went_through to True."""
transaction.charge()
assert transaction.transaction_went_through is True
@patch('ceraon.models.transactions.stripe')
def test_failed_charge_returns_false(self, stripe_mock, transaction):
"""Test that charge() returns false if stripe throws an error."""
stripe_mock.Charge.create.side_effect = RuntimeError('failed charge')
assert transaction.charge() is False
@patch('ceraon.models.transactions.stripe')
def test_failed_charge_doesnt_set_attribute(self, stripe_mock, transaction):
"""Test that a failed charge() doesn't set transaction_went_through."""
stripe_mock.Charge.create.side_effect = RuntimeError('failed charge')
transaction.charge()
assert transaction.transaction_went_through is False
def test_cancel_sets_canceled(self, transaction):
"""Test that calling cancel() sets the canceled property."""
transaction.cancel()
assert transaction.canceled is True
@patch('ceraon.models.transactions.stripe')
def test_set_stripe_source_on_user_no_stripe_id(self, stripe_mock, user):
"""Test that setting the stripe customer ID works."""
customer_id = 'this is the stripe customer id'
stripe_mock.Customer.create.return_value.id = customer_id
Transaction.set_stripe_source_on_user(user=user, token='some token')
assert user.stripe_customer_id == customer_id
@patch('ceraon.models.transactions.stripe')
def test_set_stripe_source_on_user_returns_true(self, stripe_mock, user):
"""Test that setting the stripe customer ID returns True."""
customer_id = 'this is the stripe customer id'
stripe_mock.Customer.create.return_value.id = customer_id
assert Transaction.set_stripe_source_on_user(
user=user, token='some token') is True
@patch('ceraon.models.transactions.stripe')
def test_set_stripe_source_on_user_existing_id(self, stripe_mock, user):
"""Test that resetting the stripe customer ID works."""
customer_id = 'this is the stripe customer id'
assert user.stripe_customer_id is None
user.stripe_customer_id = customer_id
assert Transaction.set_stripe_source_on_user(
user=user, token='some token') is True
stripe_mock.Customer.retrieve.assert_called_once()
@patch('ceraon.models.transactions.stripe')
def test_set_stripe_source_on_user_fail(self, stripe_mock, user):
"""Test that a stripe failure returns false."""
stripe_mock.Customer.create.side_effect = RuntimeError('stripe error')
assert Transaction.set_stripe_source_on_user(
user=user, token='some token') is False
@pytest.mark.parametrize('amount,expected', [
(5.00, 0.5),
(5.05, 0.505),
(4.00, 0.5),
(90.00, 9),
(42.10, 4.21),
(2.50, 0.5)
])
def test_operational_overhead_cut(self, transaction, amount, expected):
"""Test that the operational_overhead_cost is as expected."""
transaction.amount = amount
assert transaction.operational_overhead_cut == expected
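    # Inference from the cases above (not stated in this file): the expected
    # values are consistent with a cut of 10% of the amount subject to an
    # apparent 0.50 floor.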
@pytest.mark.parametrize('amount,expected', [
(5.00, 4.5),
(5.05, 4.545),
(4.00, 3.5),
(90.00, 81),
(42.10, 37.89),
(2.50, 2)
])
def test_takehome_amount(self, transaction, amount, expected):
"""Test that the takehome_amount is as expected."""
transaction.amount = amount
assert transaction.takehome_amount == expected
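    # Inference from the cases above (not stated in this file): in every case
    # the takehome amount equals the amount minus the operational overhead cut
    # checked by the previous test (e.g. 42.10 - 4.21 == 37.89).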
|
bsd-3-clause
| 5,709,859,063,645,114,000
| 41.064815
| 80
| 0.657495
| false
| 3.866383
| true
| false
| false
|
ptisserand/ansible
|
lib/ansible/modules/cloud/amazon/cloudfront_distribution.py
|
1
|
85955
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_distribution
short_description: Create, update and delete AWS CloudFront distributions.
description:
- Allows for easy creation, updating and deletion of CloudFront distributions.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author:
- Willem van Ketwich (@wilvk)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
state:
description:
- The desired state of the distribution
present - creates a new distribution or updates an existing distribution.
absent - deletes an existing distribution.
choices: ['present', 'absent']
default: 'present'
distribution_id:
description:
- The id of the cloudfront distribution. This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
e_tag:
description:
- A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
Is determined automatically if not specified.
caller_reference:
description:
- A unique identifier for creating and updating cloudfront distributions. Each caller reference must be unique across all distributions. e.g. a caller
reference used in a web distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
'YYYY-MM-DDTHH:MM:SS.ffffff'.
tags:
description:
- Should be input as a dict() of key-value pairs.
Note that numeric keys or values must be wrapped in quotes. e.g. "Priority:" '1'
purge_tags:
description:
- Specifies whether existing tags will be removed before adding new tags. When I(purge_tags=yes), existing tags are removed and I(tags) are added, if
specified. If no tags are specified, it removes all existing tags for the distribution. When I(purge_tags=no), existing tags are kept and I(tags)
are added, if specified.
default: 'no'
choices: ['yes', 'no']
alias:
description:
- The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only
be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id) as well as
the I(e_tag), or I(caller_reference) of an existing distribution.
aliases:
description:
- A I(list[]) of domain name aliases (CNAMEs) as strings to be used for the distribution. Each alias must be unique across all distribution for the AWS
account.
purge_aliases:
description:
- Specifies whether existing aliases will be removed before adding new aliases. When I(purge_aliases=yes), existing aliases are removed and I(aliases)
are added.
default: 'no'
choices: ['yes', 'no']
default_root_object:
description:
- A config element that specifies the path to request when the user requests the origin. e.g. if specified as 'index.html', this maps to
www.example.com/index.html when www.example.com is called by the user. This prevents the entire distribution origin from being exposed at the root.
default_origin_domain_name:
description:
- The domain name to use for an origin if no I(origins) have been specified. Should only be used on a first run of generating a distribution and not on
subsequent runs. Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
default_origin_path:
description:
- The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
origins:
description:
- A config element that is a I(list[]) of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
Each origin item comprises the attributes
I(id)
I(domain_name) (defaults to default_origin_domain_name if not specified)
I(origin_path) (defaults to default_origin_path if not specified)
I(custom_headers[])
I(header_name)
I(header_value)
I(s3_origin_access_identity_enabled)
I(custom_origin_config)
I(http_port)
I(https_port)
I(origin_protocol_policy)
I(origin_ssl_protocols[])
I(origin_read_timeout)
I(origin_keepalive_timeout)
purge_origins:
description: Whether to remove any origins that aren't listed in I(origins)
default: false
default_cache_behavior:
description:
- A config element that is a complex object specifying the default cache behavior of the distribution. If not specified, the I(target_origin_id) is
defined as the I(target_origin_id) of the first valid I(cache_behavior) in I(cache_behaviors) with defaults.
The default cache behavior comprises the attributes
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
I(lambda_function_arn)
I(event_type)
cache_behaviors:
description:
- A config element that is a I(list[]) of complex cache behavior objects to be specified for the distribution. The order
        of the list is preserved across runs unless C(purge_cache_behaviors) is enabled.
Each cache behavior comprises the attributes
I(path_pattern)
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
purge_cache_behaviors:
description: Whether to remove any cache behaviors that aren't listed in I(cache_behaviors). This switch
also allows the reordering of cache_behaviors.
default: false
custom_error_responses:
description:
- A config element that is a I(list[]) of complex custom error responses to be specified for the distribution. This attribute configures custom http
error messages returned to the user.
Each custom error response object comprises the attributes
I(error_code)
          I(response_page_path)
I(response_code)
I(error_caching_min_ttl)
purge_custom_error_responses:
description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses)
default: false
comment:
description:
- A comment that describes the cloudfront distribution. If not specified, it defaults to a
generic message that it has been created with Ansible, and a datetime stamp.
logging:
description:
- A config element that is a complex object that defines logging for the distribution.
The logging object comprises the attributes
I(enabled)
I(include_cookies)
I(bucket)
I(prefix)
price_class:
description:
- A string that specifies the pricing class of the distribution. As per
U(https://aws.amazon.com/cloudfront/pricing/)
I(price_class=PriceClass_100) consists of the areas
United States
Canada
Europe
I(price_class=PriceClass_200) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
I(price_class=PriceClass_All) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
South America
Australia
choices: ['PriceClass_100', 'PriceClass_200', 'PriceClass_All']
default: aws defaults this to 'PriceClass_All'
enabled:
description:
- A boolean value that specifies whether the distribution is enabled or disabled.
default: 'yes'
choices: ['yes', 'no']
viewer_certificate:
description:
- A config element that is a complex object that specifies the encryption details of the distribution.
Comprises the following attributes
I(cloudfront_default_certificate)
I(iam_certificate_id)
I(acm_certificate_arn)
I(ssl_support_method)
I(minimum_protocol_version)
I(certificate)
I(certificate_source)
restrictions:
description:
      - A config element that is a complex object that describes how a distribution should restrict its content.
The restriction object comprises the following attributes
I(geo_restriction)
I(restriction_type)
I(items[])
web_acl_id:
description:
- The id of a Web Application Firewall (WAF) Access Control List (ACL).
http_version:
description:
- The version of the http protocol to use for the distribution.
choices: [ 'http1.1', 'http2' ]
default: aws defaults this to 'http2'
ipv6_enabled:
description:
- Determines whether IPv6 support is enabled or not.
choices: ['yes', 'no']
default: 'no'
wait:
description:
- Specifies whether the module waits until the distribution has completed processing the creation or update.
choices: ['yes', 'no']
default: 'no'
wait_timeout:
description:
- Specifies the duration in seconds to wait for a timeout of a cloudfront create or update. Defaults to 1800 seconds (30 minutes).
default: 1800
'''
EXAMPLES = '''
# create a basic distribution with defaults and tags
- cloudfront_distribution:
state: present
default_origin_domain_name: www.my-cloudfront-origin.com
tags:
Name: example distribution
Project: example project
Priority: '1'
# update a distribution comment by distribution_id
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by ansible cloudfront.py
# update a distribution comment by caller_reference
- cloudfront_distribution:
state: present
caller_reference: my cloudfront distribution 001
comment: modified by ansible cloudfront.py
# update a distribution's aliases and comment using the distribution_id as a reference
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by cloudfront.py again
aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
# update a distribution's aliases and comment using an alias as a reference
- cloudfront_distribution:
state: present
caller_reference: my test distribution
comment: modified by cloudfront.py again
aliases:
- www.my-distribution-source.com
- zzz.aaa.io
# update a distribution's comment and aliases and tags and remove existing tags
- cloudfront_distribution:
state: present
distribution_id: E15BU8SDCGSG57
comment: modified by cloudfront.py again
aliases:
- tested.com
tags:
Project: distribution 1.2
purge_tags: yes
# create a distribution with an origin, logging and default cache behavior
- cloudfront_distribution:
state: present
caller_reference: unique test distribution id
origins:
- id: 'my test origin-000111'
domain_name: www.example.com
origin_path: /production
custom_headers:
- header_name: MyCustomHeaderName
header_value: MyCustomHeaderValue
default_cache_behavior:
target_origin_id: 'my test origin-000111'
forwarded_values:
query_string: true
cookies:
forward: all
headers:
- '*'
viewer_protocol_policy: allow-all
smooth_streaming: true
compress: true
allowed_methods:
items:
- GET
- HEAD
cached_methods:
- GET
- HEAD
logging:
enabled: true
include_cookies: false
bucket: mylogbucket.s3.amazonaws.com
prefix: myprefix/
enabled: false
comment: this is a cloudfront distribution with logging
# delete a distribution
- cloudfront_distribution:
state: absent
caller_reference: replaceable distribution
'''
RETURN = '''
active_trusted_signers:
description: Key pair IDs that CloudFront is aware of for each trusted signer
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are in use
returned: always
type: bool
sample: false
quantity:
description: Number of trusted signers
returned: always
type: int
sample: 1
items:
      description: List of key pair IDs for the trusted signers
returned: when there are trusted signers
type: list
sample:
- key_pair_id
aliases:
description: Aliases that refer to the distribution
returned: always
type: complex
contains:
items:
description: List of aliases
returned: always
type: list
sample:
- test.example.com
quantity:
description: Number of aliases
returned: always
type: int
sample: 1
arn:
description: Amazon Resource Name of the distribution
returned: always
type: string
sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
cache_behaviors:
description: Cloudfront cache behaviors
returned: always
type: complex
contains:
items:
description: List of cache behaviors
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
description: Id of origin reference by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
quantity:
description: Count of cache behaviors
returned: always
type: int
sample: 1
caller_reference:
description: Idempotency reference given when creating cloudfront distribution
returned: always
type: string
sample: '1484796016700'
comment:
description: Any comments you want to include about the distribution
returned: always
type: string
sample: 'my first cloudfront distribution'
custom_error_responses:
description: Custom error responses to use for error handling
returned: always
type: complex
contains:
items:
description: List of custom error responses
returned: always
type: complex
contains:
error_caching_min_ttl:
          description: Minimum time to cache this error response
returned: always
type: int
sample: 300
error_code:
description: Origin response code that triggers this error response
returned: always
type: int
sample: 500
response_code:
description: Response code to return to the requester
returned: always
type: string
sample: '500'
response_page_path:
description: Path that contains the error page to display
returned: always
type: string
sample: /errors/5xx.html
quantity:
description: Count of custom error response items
returned: always
type: int
sample: 1
default_cache_behavior:
description: Default cache behavior
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
description: Id of origin reference by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
default_root_object:
description: The object that you want CloudFront to request from your origin (for example, index.html)
when a viewer requests the root URL for your distribution
returned: always
type: string
sample: ''
diff:
description: Difference between previous configuration and new configuration
returned: always
type: dict
sample: {}
domain_name:
description: Domain name of cloudfront distribution
returned: always
type: string
sample: d1vz8pzgurxosf.cloudfront.net
enabled:
description: Whether the cloudfront distribution is enabled or not
returned: always
type: bool
sample: true
http_version:
description: Version of HTTP supported by the distribution
returned: always
type: string
sample: http2
id:
description: Cloudfront distribution ID
returned: always
type: string
sample: E123456ABCDEFG
in_progress_invalidation_batches:
description: The number of invalidation batches currently in progress
returned: always
type: int
sample: 0
is_ipv6_enabled:
description: Whether IPv6 is enabled
returned: always
type: bool
sample: true
last_modified_time:
description: Date and time distribution was last modified
returned: always
type: string
sample: '2017-10-13T01:51:12.656000+00:00'
logging:
description: Logging information
returned: always
type: complex
contains:
bucket:
description: S3 bucket logging destination
returned: always
type: string
sample: logs-example-com.s3.amazonaws.com
enabled:
description: Whether logging is enabled
returned: always
type: bool
sample: true
include_cookies:
description: Whether to log cookies
returned: always
type: bool
sample: false
prefix:
description: Prefix added to logging object names
returned: always
type: string
sample: cloudfront/test
origins:
description: Origins in the cloudfront distribution
returned: always
type: complex
contains:
items:
description: List of origins
returned: always
type: complex
contains:
custom_headers:
description: Custom headers passed to the origin
returned: always
type: complex
contains:
quantity:
description: Count of headers
returned: always
type: int
sample: 1
custom_origin_config:
description: Configuration of the origin
returned: always
type: complex
contains:
http_port:
description: Port on which HTTP is listening
returned: always
type: int
sample: 80
https_port:
description: Port on which HTTPS is listening
returned: always
type: int
sample: 443
origin_keepalive_timeout:
description: Keep-alive timeout
returned: always
type: int
sample: 5
origin_protocol_policy:
description: Policy of which protocols are supported
returned: always
type: string
sample: https-only
origin_read_timeout:
description: Timeout for reads to the origin
returned: always
type: int
sample: 30
origin_ssl_protocols:
description: SSL protocols allowed by the origin
returned: always
type: complex
contains:
items:
description: List of SSL protocols
returned: always
type: list
sample:
- TLSv1
- TLSv1.1
- TLSv1.2
quantity:
description: Count of SSL protocols
returned: always
type: int
sample: 3
domain_name:
description: Domain name of the origin
returned: always
type: string
sample: test-origin.example.com
id:
description: ID of the origin
returned: always
type: string
sample: test-origin.example.com
origin_path:
description: Subdirectory to prefix the request from the S3 or HTTP origin
returned: always
type: string
sample: ''
quantity:
description: Count of origins
returned: always
type: int
sample: 1
price_class:
description: Price class of cloudfront distribution
returned: always
type: string
sample: PriceClass_All
restrictions:
description: Restrictions in use by Cloudfront
returned: always
type: complex
contains:
geo_restriction:
description: Controls the countries in which your content is distributed.
returned: always
type: complex
contains:
quantity:
description: Count of restrictions
returned: always
type: int
sample: 1
items:
description: List of country codes allowed or disallowed
returned: always
type: list
sample: xy
restriction_type:
description: Type of restriction
returned: always
type: string
sample: blacklist
status:
description: Status of the cloudfront distribution
returned: always
type: string
sample: InProgress
tags:
description: Distribution tags
returned: always
type: dict
sample:
Hello: World
viewer_certificate:
description: Certificate used by cloudfront distribution
returned: always
type: complex
contains:
acm_certificate_arn:
description: ARN of ACM certificate
returned: when certificate comes from ACM
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate:
description: Reference to certificate
returned: always
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate_source:
description: Where certificate comes from
returned: always
type: string
sample: acm
minimum_protocol_version:
description: Minimum SSL/TLS protocol supported by this distribution
returned: always
type: string
sample: TLSv1
ssl_support_method:
description: Support for pre-SNI browsers or not
returned: always
type: string
sample: sni-only
web_acl_id:
description: ID of Web Access Control List (from WAF service)
returned: always
type: string
sample: abcd1234-1234-abcd-abcd-abcd12345678
'''
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, compare_aws_tags
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, boto3_tag_list_to_ansible_dict
import datetime
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
try:
import botocore
except ImportError:
pass
def change_dict_key_name(dictionary, old_key, new_key):
if old_key in dictionary:
dictionary[new_key] = dictionary.get(old_key)
dictionary.pop(old_key, None)
return dictionary
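# Illustrative note (not part of the original module): for example,
# change_dict_key_name({'http_port': 80}, 'http_port', 'h_t_t_p_port')
# returns {'h_t_t_p_port': 80}; if the old key is absent the dict is unchanged.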
def merge_validation_into_config(config, validated_node, node_name):
if validated_node is not None:
if isinstance(validated_node, dict):
config_node = config.get(node_name)
if config_node is not None:
config_node_items = list(config_node.items())
else:
config_node_items = []
config[node_name] = dict(config_node_items + list(validated_node.items()))
if isinstance(validated_node, list):
config[node_name] = list(set(config.get(node_name) + validated_node))
return config
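# Illustrative note (not part of the original module): merging
# {'bucket': 'logs'} into a config whose 'logging' node is {'enabled': True}
# yields {'logging': {'enabled': True, 'bucket': 'logs'}}; list nodes are
# combined as a de-duplicated union instead.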
def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
if list_items is None:
list_items = []
if not isinstance(list_items, list):
raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
result = {}
if include_quantity:
result['quantity'] = len(list_items)
if len(list_items) > 0:
result['items'] = list_items
return result
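# Illustrative note (not part of the original module):
# ansible_list_to_cloudfront_list(['GET', 'HEAD']) returns
# {'quantity': 2, 'items': ['GET', 'HEAD']}, while an empty or missing list
# produces just {'quantity': 0}, matching the shape CloudFront expects.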
def recursive_diff(dict1, dict2):
left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
for k in (set(dict1.keys()) & set(dict2.keys())):
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
result = recursive_diff(dict1[k], dict2[k])
if result:
left[k] = result[0]
right[k] = result[1]
elif dict1[k] != dict2[k]:
left[k] = dict1[k]
right[k] = dict2[k]
if left or right:
return left, right
else:
return None
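# Illustrative note (not part of the original module):
# recursive_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3}) returns
# ({'b': 2}, {'b': 3}), and two identical dicts return None.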
def create_distribution(client, module, config, tags):
try:
if not tags:
return client.create_distribution(DistributionConfig=config)['Distribution']
else:
distribution_config_with_tags = {
'DistributionConfig': config,
'Tags': {
'Items': tags
}
}
return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error creating distribution")
def delete_distribution(client, module, distribution):
try:
return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
def update_distribution(client, module, config, distribution_id, e_tag):
try:
return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
def tag_resource(client, module, arn, tags):
try:
return client.tag_resource(Resource=arn, Tags=dict(Items=tags))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error tagging resource")
def untag_resource(client, module, arn, tag_keys):
try:
return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error untagging resource")
def list_tags_for_resource(client, module, arn):
try:
response = client.list_tags_for_resource(Resource=arn)
return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error listing tags for resource")
def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
changed = False
to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
if to_remove:
untag_resource(client, module, arn, to_remove)
changed = True
if to_add:
tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
changed = True
return changed
class CloudFrontValidationManager(object):
"""
Manages Cloudfront validations
"""
def __init__(self, module):
self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
self.module = module
self.__default_distribution_enabled = True
self.__default_http_port = 80
self.__default_https_port = 443
self.__default_ipv6_enabled = False
self.__default_origin_ssl_protocols = [
'TLSv1',
'TLSv1.1',
'TLSv1.2'
]
self.__default_custom_origin_protocol_policy = 'match-viewer'
self.__default_custom_origin_read_timeout = 30
self.__default_custom_origin_keepalive_timeout = 5
self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
self.__default_cache_behavior_min_ttl = 0
self.__default_cache_behavior_max_ttl = 31536000
self.__default_cache_behavior_default_ttl = 86400
self.__default_cache_behavior_compress = False
self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
self.__default_cache_behavior_smooth_streaming = False
self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
self.__default_cache_behavior_forwarded_values_query_string = True
self.__default_trusted_signers_enabled = False
self.__valid_price_classes = set([
'PriceClass_100',
'PriceClass_200',
'PriceClass_All'
])
self.__valid_origin_protocol_policies = set([
'http-only',
'match-viewer',
'https-only'
])
self.__valid_origin_ssl_protocols = set([
'SSLv3',
'TLSv1',
'TLSv1.1',
'TLSv1.2'
])
self.__valid_cookie_forwarding = set([
'none',
'whitelist',
'all'
])
self.__valid_viewer_protocol_policies = set([
'allow-all',
'https-only',
'redirect-to-https'
])
self.__valid_methods = set([
'GET',
'HEAD',
'POST',
'PUT',
'PATCH',
'OPTIONS',
'DELETE'
])
self.__valid_methods_cached_methods = [
set([
'GET',
'HEAD'
]),
set([
'GET',
'HEAD',
'OPTIONS'
])
]
self.__valid_methods_allowed_methods = [
self.__valid_methods_cached_methods[0],
self.__valid_methods_cached_methods[1],
self.__valid_methods
]
self.__valid_lambda_function_association_event_types = set([
'viewer-request',
'viewer-response',
'origin-request',
'origin-response'
])
self.__valid_viewer_certificate_ssl_support_methods = set([
'sni-only',
'vip'
])
self.__valid_viewer_certificate_minimum_protocol_versions = set([
'SSLv3',
'TLSv1'
])
self.__valid_viewer_certificate_certificate_sources = set([
'cloudfront',
'iam',
'acm'
])
self.__valid_http_versions = set([
'http1.1',
'http2'
])
self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
def add_missing_key(self, dict_object, key_to_set, value_to_set):
if key_to_set not in dict_object and value_to_set is not None:
dict_object[key_to_set] = value_to_set
return dict_object
def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
if old_key not in dict_object and value_to_set is not None:
dict_object[new_key] = value_to_set
else:
dict_object = change_dict_key_name(dict_object, old_key, new_key)
return dict_object
def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
if key_name in dict_object:
self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
else:
if to_aws_list:
dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
elif value_to_set is not None:
dict_object[key_name] = value_to_set
return dict_object
def validate_logging(self, logging):
try:
if logging is None:
return None
valid_logging = {}
if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
valid_logging['include_cookies'] = logging.get('include_cookies')
valid_logging['enabled'] = logging.get('enabled')
valid_logging['bucket'] = logging.get('bucket')
valid_logging['prefix'] = logging.get('prefix')
return valid_logging
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution logging")
def validate_is_list(self, list_to_validate, list_name):
if not isinstance(list_to_validate, list):
self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
def validate_required_key(self, key_name, full_key_name, dict_object):
if key_name not in dict_object:
self.module.fail_json(msg="%s must be specified." % full_key_name)
def validate_origins(self, client, config, origins, default_origin_domain_name,
default_origin_path, create_distribution, purge_origins=False):
try:
if origins is None:
if default_origin_domain_name is None and not create_distribution:
if purge_origins:
return None
else:
return ansible_list_to_cloudfront_list(config)
if default_origin_domain_name is not None:
origins = [{
'domain_name': default_origin_domain_name,
'origin_path': default_origin_path or ''
}]
else:
origins = []
self.validate_is_list(origins, 'origins')
if not origins and default_origin_domain_name is None and create_distribution:
self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
all_origins = OrderedDict()
new_domains = list()
for origin in config:
all_origins[origin.get('domain_name')] = origin
for origin in origins:
origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
all_origins[origin['domain_name']] = origin
new_domains.append(origin['domain_name'])
if purge_origins:
for domain in list(all_origins.keys()):
if domain not in new_domains:
del(all_origins[domain])
return ansible_list_to_cloudfront_list(list(all_origins.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution origins")
def validate_s3_origin_configuration(self, client, existing_config, origin):
if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
return existing_config['s3_origin_config']['origin_access_identity']
if not origin['s3_origin_access_identity_enabled']:
return None
try:
comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=self.__default_datetime_string,
Comment=comment))
oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
except Exception as e:
self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
return "origin-access-identity/cloudfront/%s" % oai
def validate_origin(self, client, existing_config, origin, default_origin_path):
try:
origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
self.validate_required_key('origin_path', 'origins[].origin_path', origin)
origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
for custom_header in origin.get('custom_headers'):
if 'header_name' not in custom_header or 'header_value' not in custom_header:
self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
else:
origin['custom_headers'] = ansible_list_to_cloudfront_list()
if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
if origin.get("s3_origin_access_identity_enabled") is not None:
s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
if s3_origin_config:
oai = s3_origin_config
else:
oai = ""
origin["s3_origin_config"] = dict(origin_access_identity=oai)
del(origin["s3_origin_access_identity_enabled"])
if 'custom_origin_config' in origin:
self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
else:
origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
custom_origin_config = origin.get('custom_origin_config')
custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
'origins[].custom_origin_config.origin_protocol_policy',
self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
if custom_origin_config.get('origin_ssl_protocols'):
self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
self.__valid_origin_ssl_protocols)
else:
custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
return origin
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating distribution origin")
def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
try:
if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
return ansible_list_to_cloudfront_list(config)
all_cache_behaviors = OrderedDict()
# cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors
# is true (if purge_cache_behaviors is not true, we can't really know the full new order)
if not purge_cache_behaviors:
for behavior in config:
all_cache_behaviors[behavior['path_pattern']] = behavior
for cache_behavior in cache_behaviors:
valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
cache_behavior, valid_origins)
all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
if purge_cache_behaviors:
for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
del(all_cache_behaviors[target_origin_id])
return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
if is_default_cache and cache_behavior is None:
cache_behavior = {}
if cache_behavior is None and valid_origins is not None:
return config
cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
return cache_behavior
def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
try:
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
if not target_origin_id:
target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
if is_default_cache:
cache_behavior_name = 'Default cache behavior'
else:
cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
cache_behavior_name)
cache_behavior['target_origin_id'] = target_origin_id
cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
config.get('viewer_protocol_policy',
self.__default_cache_behavior_viewer_protocol_policy),
self.__valid_viewer_protocol_policies)
cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
try:
if not forwarded_values:
forwarded_values = dict()
existing_config = config.get('forwarded_values', {})
headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
if headers:
headers.sort()
forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
if 'cookies' not in forwarded_values:
forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
forwarded_values['cookies'] = {'forward': forward}
else:
existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
if whitelist:
self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
self.__valid_cookie_forwarding)
forwarded_values['cookies']['forward'] = cookie_forwarding
query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
cache_behavior['forwarded_values'] = forwarded_values
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating forwarded values")
def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
try:
if lambda_function_associations is not None:
self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
for association in lambda_function_associations:
association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
self.__valid_lambda_function_association_event_types)
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
else:
if 'lambda_function_associations' in config:
cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
else:
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating lambda function associations")
def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
try:
if allowed_methods is not None:
self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
temp_allowed_items = allowed_methods.get('items')
self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
self.__valid_methods_allowed_methods)
cached_items = allowed_methods.get('cached_methods')
if 'cached_methods' in allowed_methods:
self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]',
self.__valid_methods_cached_methods)
# we don't care if the order of how cloudfront stores the methods differs - preserving existing
# order reduces likelihood of making unnecessary changes
if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
cache_behavior['allowed_methods'] = config['allowed_methods']
else:
cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
else:
cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
else:
if 'allowed_methods' in config:
cache_behavior['allowed_methods'] = config.get('allowed_methods')
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating allowed methods")
def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
try:
if trusted_signers is None:
trusted_signers = {}
if 'items' in trusted_signers:
valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
else:
valid_trusted_signers = dict(quantity=config.get('quantity', 0))
if 'items' in config:
valid_trusted_signers = dict(items=config['items'])
valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
cache_behavior['trusted_signers'] = valid_trusted_signers
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating trusted signers")
def validate_viewer_certificate(self, viewer_certificate):
try:
if viewer_certificate is None:
return None
if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
"_certificate set to true.")
self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
self.__valid_viewer_certificate_ssl_support_methods)
self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
self.__valid_viewer_certificate_minimum_protocol_versions)
self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
self.__valid_viewer_certificate_certificate_sources)
viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
return viewer_certificate
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating viewer certificate")
def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
try:
if custom_error_responses is None and not purge_custom_error_responses:
return ansible_list_to_cloudfront_list(config)
self.validate_is_list(custom_error_responses, 'custom_error_responses')
result = list()
# build the lookup from the existing configuration so responses that are not being purged can be preserved below
existing_responses = dict((response['error_code'], response) for response in config)
for custom_error_response in custom_error_responses:
self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
if 'response_code' in custom_error_response:
custom_error_response['response_code'] = str(custom_error_response['response_code'])
if custom_error_response['error_code'] in existing_responses:
del(existing_responses[custom_error_response['error_code']])
result.append(custom_error_response)
if not purge_custom_error_responses:
result.extend(existing_responses.values())
return ansible_list_to_cloudfront_list(result)
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating custom error responses")
def validate_restrictions(self, config, restrictions, purge_restrictions=False):
try:
if restrictions is None:
if purge_restrictions:
return None
else:
return config
self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
geo_restriction = restrictions.get('geo_restriction')
self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
geo_restriction_items = geo_restriction.get('items')
if not purge_restrictions:
geo_restriction_items.extend([rest for rest in existing_restrictions if
rest not in geo_restriction_items])
valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
return valid_restrictions
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating restrictions")
def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
try:
config['default_root_object'] = default_root_object or config.get('default_root_object', '')
config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled)
if http_version is not None or config.get('http_version'):
self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
config['http_version'] = http_version or config.get('http_version')
if web_acl_id or config.get('web_a_c_l_id'):
config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
try:
if config is None:
config = {}
if aliases is not None:
if not purge_aliases:
aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
if alias not in aliases])
config['aliases'] = ansible_list_to_cloudfront_list(aliases)
if logging is not None:
config['logging'] = self.validate_logging(logging)
config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
if price_class is not None:
self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
config['price_class'] = price_class
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
def validate_comment(self, config, comment):
config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
return config
def validate_caller_reference(self, caller_reference):
return caller_reference or self.__default_datetime_string
def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
try:
if valid_origins is not None:
valid_origins_list = valid_origins.get('items')
if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
return str(valid_origins_list[0].get('id'))
self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
except Exception as e:
self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
try:
self.validate_is_list(attribute_list, attribute_list_name)
if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
if attribute is not None and attribute not in allowed_list:
self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
def validate_distribution_from_caller_reference(self, caller_reference):
try:
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
distribution_name = 'Distribution'
distribution_config_name = 'DistributionConfig'
distribution_ids = [dist.get('Id') for dist in distributions]
for distribution_id in distribution_ids:
config = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
distribution = config.get(distribution_name)
if distribution is not None:
distribution_config = distribution.get(distribution_config_name)
if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
distribution['DistributionConfig'] = distribution_config
return distribution
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
try:
if caller_reference is not None:
return self.validate_distribution_from_caller_reference(caller_reference)
else:
if aliases:
distribution_id = self.validate_distribution_id_from_alias(aliases)
if distribution_id:
return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
return None
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
def validate_distribution_id_from_alias(self, aliases):
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
if distributions:
for distribution in distributions:
distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
if set(aliases) & set(distribution_aliases):
return distribution['Id']
return None
def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
if distribution_id is None:
distribution_id = self.validate_distribution_id_from_caller_reference(caller_reference=caller_reference)
try:
waiter = client.get_waiter('distribution_deployed')
attempts = 1 + int(wait_timeout / 60)
waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
except botocore.exceptions.WaiterError as e:
self.module.fail_json(msg="Timeout waiting for cloudfront action. Waited for {0} seconds before timeout. "
"Error: {1}".format(to_text(wait_timeout), to_native(e)))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(choices=['present', 'absent'], default='present'),
caller_reference=dict(),
comment=dict(),
distribution_id=dict(),
e_tag=dict(),
tags=dict(type='dict', default={}),
purge_tags=dict(type='bool', default=False),
alias=dict(),
aliases=dict(type='list', default=[]),
purge_aliases=dict(type='bool', default=False),
default_root_object=dict(),
origins=dict(type='list'),
purge_origins=dict(type='bool', default=False),
default_cache_behavior=dict(type='dict'),
cache_behaviors=dict(type='list'),
purge_cache_behaviors=dict(type='bool', default=False),
custom_error_responses=dict(type='list'),
purge_custom_error_responses=dict(type='bool', default=False),
logging=dict(type='dict'),
price_class=dict(),
enabled=dict(type='bool'),
viewer_certificate=dict(type='dict'),
restrictions=dict(type='dict'),
web_acl_id=dict(),
http_version=dict(),
ipv6_enabled=dict(type='bool'),
default_origin_domain_name=dict(),
default_origin_path=dict(),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=1800, type='int')
))
result = {}
changed = True
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=False,
mutually_exclusive=[
['distribution_id', 'alias'],
['default_origin_domain_name', 'distribution_id'],
['default_origin_domain_name', 'alias'],
]
)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudfront', region=region, endpoint=ec2_url, **aws_connect_kwargs)
validation_mgr = CloudFrontValidationManager(module)
state = module.params.get('state')
caller_reference = module.params.get('caller_reference')
comment = module.params.get('comment')
e_tag = module.params.get('e_tag')
tags = module.params.get('tags')
purge_tags = module.params.get('purge_tags')
distribution_id = module.params.get('distribution_id')
alias = module.params.get('alias')
aliases = module.params.get('aliases')
purge_aliases = module.params.get('purge_aliases')
default_root_object = module.params.get('default_root_object')
origins = module.params.get('origins')
purge_origins = module.params.get('purge_origins')
default_cache_behavior = module.params.get('default_cache_behavior')
cache_behaviors = module.params.get('cache_behaviors')
purge_cache_behaviors = module.params.get('purge_cache_behaviors')
custom_error_responses = module.params.get('custom_error_responses')
purge_custom_error_responses = module.params.get('purge_custom_error_responses')
logging = module.params.get('logging')
price_class = module.params.get('price_class')
enabled = module.params.get('enabled')
viewer_certificate = module.params.get('viewer_certificate')
restrictions = module.params.get('restrictions')
purge_restrictions = module.params.get('purge_restrictions')
web_acl_id = module.params.get('web_acl_id')
http_version = module.params.get('http_version')
ipv6_enabled = module.params.get('ipv6_enabled')
default_origin_domain_name = module.params.get('default_origin_domain_name')
default_origin_path = module.params.get('default_origin_path')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
if alias and alias not in aliases:
aliases.append(alias)
distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
update = state == 'present' and distribution
create = state == 'present' and not distribution
delete = state == 'absent' and distribution
if not (update or create or delete):
module.exit_json(changed=False)
if update or delete:
config = distribution['Distribution']['DistributionConfig']
e_tag = distribution['ETag']
distribution_id = distribution['Distribution']['Id']
else:
config = dict()
if update:
config = camel_dict_to_snake_dict(config, reversible=True)
if create or update:
config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
default_origin_path, create, purge_origins)
config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
cache_behaviors, config['origins'], purge_cache_behaviors)
config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
default_cache_behavior, config['origins'], True)
config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
custom_error_responses, purge_custom_error_responses)
valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
if valid_restrictions:
config['restrictions'] = valid_restrictions
valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
config = validation_mgr.validate_comment(config, comment)
config = snake_dict_to_camel_dict(config, capitalize_first=True)
if create:
config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
result = camel_dict_to_snake_dict(result)
result['tags'] = list_tags_for_resource(client, module, result['arn'])
if delete:
if config['Enabled']:
config['Enabled'] = False
result = update_distribution(client, module, config, distribution_id, e_tag)
validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
# e_tag = distribution['ETag']
result = delete_distribution(client, module, distribution)
if update:
changed = config != distribution['Distribution']['DistributionConfig']
if changed:
result = update_distribution(client, module, config, distribution_id, e_tag)
else:
result = distribution['Distribution']
existing_tags = list_tags_for_resource(client, module, result['ARN'])
distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
result = camel_dict_to_snake_dict(result)
result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
result['diff'] = dict()
diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
if diff:
result['diff']['before'] = diff[0]
result['diff']['after'] = diff[1]
if wait and (create or update):
validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
if 'distribution_config' in result:
result.update(result['distribution_config'])
del(result['distribution_config'])
module.exit_json(changed=changed, **result)
if __name__ == '__main__':
main()
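# Illustrative sketch (an assumption about the helper, not part of the module above):
# the validators normalise plain lists into CloudFront's "quantity/items" shape via
# ansible_list_to_cloudfront_list(), e.g.
#
#     ansible_list_to_cloudfront_list(['example.com', 'www.example.com'])
#     # -> {'quantity': 2, 'items': ['example.com', 'www.example.com']}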
|
gpl-3.0
| 7,483,290,099,521,737,000
| 42.302267
| 159
| 0.610645
| false
| 4.467051
| true
| false
| false
|
40223232/w16b_test
|
wsgi.py
|
1
|
27797
|
#@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# The encoding declaration above must appear on the first or second line of the program to take effect
################# (1) Module import section
# Import the cherrypy module; to use cherrypy on the OpenShift platform, it must be installed via setup.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# Import Python's built-in os module; since os is built into Python, it does not need to be installed via setup.py
import os
# Import the random module
import random
# Import the gear module
import gear
################# (2) Global variable settings
# Determine the directory containing the program file; under Windows it ends with a trailing backslash
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# The program is running in the cloud
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# The program is running locally
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''The following is local code applying input() and a for loop. To run the program on OpenShift, besides adopting the CherryPy web framework, the output also has to be converted to HTML printing.
# The data obtained through input() is of type string
toprint = input("What content should be printed?")
# To use the string obtained from input() as an integer, it must be converted with int()
repeat_no = int(input("How many times should it be printed?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) Program class definitions
# The following switches to the CherryPy web framework program structure
# Below is the design of the Hello class; the use of object means Hello inherits all features of object, including its methods and attributes
class Hello(object):
# Startup configuration of the Hello class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# Sessions are stored as files, in the tmp directory under data_dir
'tools.sessions.storage_path' : data_dir+'/tmp',
# The session lifetime is set to 60 minutes
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# Create the required directories as part of instance initialization
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# cherrypy.expose, which starts with @, is a decorator; it marks the member method that follows so users can invoke it directly through a URL
@cherrypy.expose
# The index method is the built-in (default) method among CherryPy class member methods; when the user does not specify a method, the system runs index first
# Methods that take self are member methods of the class; the Python program passes the object's contents between member methods through this self
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def index(self):
outstring = '''
<!DOCTYPE html>
<html>
<head>
40223232
</head>
<body>
<br /><a href ="index">index</a><br />
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def twoDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do2Dgear>
Number of teeth:<input type=text name=N><br />
Module:<input type=text name=M><br />
Pressure angle:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
Number of teeth:<input type=text name=N><br />
Module:<input type=text name=M><br />
Pressure angle:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
import math
# The drawing surface is the canvas named plotarea
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# Draw a straight line in red
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# Draw another straight line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# Draw another straight line in green
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# Draw a circle in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
import math
# The drawing surface is the canvas named plotarea
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# Draw a straight line in red
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# Draw another straight line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# Draw another straight line in green
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# Draw a circle in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def mygeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# Prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# After importing the math functions, the circle constant is pi
# deg is the conversion factor from degrees to radians
deg = pi/180.
#
# Below are the spur gear drawing and the main tkinter canvas drawing
#
# Define a function that draws a spur gear
# midx is the x coordinate of the gear center
# midy is the y coordinate of the gear center
# rp is the pitch circle radius, n is the number of teeth
def 齒輪(midx, midy, rp, n, 顏色):
# Use the angle conversion factor as a global variable
global deg
# The gear involute is drawn as 15 line segments
imax = 15
# Draw a straight line on the given canvas, from the gear center to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# Draw the rp circle; the circle-drawing function is not defined yet
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric measure of tooth size); the module is the pitch diameter divided by the number of teeth
# The module is also the addendum height
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outside radius of the gear
ra=rp+a
print("ra:", ra)
# Draw the ra circle; the circle-drawing function is not defined yet
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base circle radius of the gear
# The base circle is the reference circle from which the involute tooth is generated
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# Draw the rb circle (base circle); the circle-drawing function is not defined yet
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root circle radius
rd=rp-d
# When rd is greater than rb
print("rd:", rd)
# Draw the rd circle (root circle); the circle-drawing function is not defined yet
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radius increment per step when the span from the base circle to the addendum circle is divided into imax segments
# The involute is drawn by splitting the arc into imax segments
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# When i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# Starting from the root circle on the left side; except for the first point (xd,yd) on the root circle, the remaining (xpt,ypt) are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# The last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd is the left-side x coordinate on the root circle, lyd the y coordinate
# The following straight line approximates the arc on the root circle
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# When i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# Starting from the root circle on the right side; except for the first point (xd,yd) on the root circle, the remaining (xpt,ypt) are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# The last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the left-side x coordinate on the addendum circle, lfy the y coordinate
# The following straight line approximates the arc on the addendum circle
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
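# Worked numbers for the call above (rp=300, n=41), derived from the formulas in 齒輪():
#   a  = 2*300/41   ≈ 14.63   (module, also the addendum height)
#   d  = 2.5*300/41 ≈ 18.29   (dedendum)
#   ra = 300 + a    ≈ 314.63  (addendum circle radius)
#   rb = 300*cos(20*deg) ≈ 281.91  (base circle radius)
#   rd = 300 - d    ≈ 281.71  (root circle radius)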
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:amd.20150415215023.1: *3* mygeartest2
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def mygeartest2(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# Note that this imports the spur.py file located under the Lib/site-packages directory
import spur
# Prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# The drawing below uses the spur.py program; the collaborative-design computation that follows must perform the design calculations and drawing according to the user's requirements
# The work is also split among the other team members, who build drawing modules for related parts, similar to spur.py
# midx, midy are the gear center coordinates, rp is the pitch circle radius, n the number of teeth, pa the pressure angle, color the line color
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# The module determines the tooth size; meshing gears must share the same module and pressure angle
# The pressure angle pa is in degrees
pa = 20
# m is the module
m = 20
# Number of teeth of gear 1
n_g1 = 17
# Number of teeth of gear 2
n_g2 = 11
# Number of teeth of gear 3
n_g3 = 13
# Compute the pitch circle radii of the gears
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
# Center coordinates of gear 1 for drawing
x_g1 = 400
y_g1 = 400
# Center coordinates of gear 2; assuming a horizontal layout, all gear centers share the same y coordinate
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# Center coordinates of gear 3
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# Rotate gear 1 clockwise by 90 degrees
# Use ctx.save() and ctx.restore() so each gear is rotated and drawn in its own local coordinates
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# Rotate gear 2 counterclockwise by 90 degrees, then by one extra tooth, so that it meshes with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# Rotate gear 3 counterclockwise by 90 degrees, then back by the positioning angle driven by gear 2, then counterclockwise by one more tooth, so that it meshes with gear 2
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
# pi+pi/n_g2 is the rotation gear 2 has made, relative to the current marker line, after its 90-degree clockwise turn; to convert it into gear 3's rotation angle
# it must be multiplied by the tooth-count ratio of the two gears; if gear 2 is larger, gear 3 turns faster
# The first -pi/2 rotates gear 3's originally vertical positioning line counterclockwise by 90 degrees
# -pi/n_g3 is the extra counterclockwise rotation of one tooth needed, once gear 3's positioning line coincides with gear 2's, so that the teeth mesh
# (pi+pi/n_g2)*n_g2/n_g3: gear 2's original positioning line was turned 90 degrees clockwise,
# but to mesh with gear 1, gear 2 has already moved away from its positioning line by an extra 180 degrees plus one tooth angle; since it drives gear 3's positioning,
# this correction angle must be converted into gear 3's rotation using the speed ratio of gears 2 and 3, hence the factor n_g2/n_g3
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
# Following the meshing-angle calculations for the three spur gears above, the rotation angles of the subsequent driven gears follow by analogy, completing the meshed drawing of 6 gears
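# Worked numbers for the layout above (m=20, n_g1=17, n_g2=11, n_g3=13):
#   rp_g1 = 20*17/2 = 170, rp_g2 = 20*11/2 = 110, rp_g3 = 20*13/2 = 130
#   x_g2  = 400 + 170 + 110 = 680
#   x_g3  = 400 + 170 + 2*110 + 130 = 920
#   gear 3 rotation = -pi/2 - pi/13 + (pi + pi/11)*11/13 = -pi/2 + 11*pi/13 ≈ 1.087 rad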
</script>
<canvas id="plotarea" width="1200" height="1200"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def my3Dgeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# Prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# After importing the math functions, the circle constant is pi
# deg is the conversion factor from degrees to radians
deg = pi/180.
#
# Below are the spur gear drawing and the main tkinter canvas drawing
#
# Define a function that draws a spur gear
# midx is the x coordinate of the gear center
# midy is the y coordinate of the gear center
# rp is the pitch circle radius, n is the number of teeth
def gear(midx, midy, rp, n, 顏色):
# Use the angle conversion factor as a global variable
global deg
# The gear involute is drawn as 15 line segments
imax = 15
# Draw a straight line on the given canvas, from the gear center to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# Draw the rp circle; the circle-drawing function is not defined yet
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric measure of tooth size); the module is the pitch diameter divided by the number of teeth
# The module is also the addendum height
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outside radius of the gear
ra=rp+a
print("ra:", ra)
# Draw the ra circle; the circle-drawing function is not defined yet
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base circle radius of the gear
# The base circle is the reference circle from which the involute tooth is generated
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# Draw the rb circle (base circle); the circle-drawing function is not defined yet
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root circle radius
rd=rp-d
# When rd is greater than rb
print("rd:", rd)
# Draw the rd circle (root circle); the circle-drawing function is not defined yet
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radius increment per step when the span from the base circle to the addendum circle is divided into imax segments
# The involute is drawn by splitting the arc into imax segments
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# When i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# Starting from the root circle on the left side; except for the first point (xd,yd) on the root circle, the remaining (xpt,ypt) are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# The last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd is the left-side x coordinate on the root circle, lyd the y coordinate
# The following straight line approximates the arc on the root circle
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# When i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# Starting from the root circle on the right side; except for the first point (xd,yd) on the root circle, the remaining (xpt,ypt) are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# The last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the left-side x coordinate on the addendum circle, lfy the y coordinate
# The following straight line approximates the arc on the addendum circle
create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# If the user runs doCheck directly, redirect back to the root method
if guess is None:
raise cherrypy.HTTPRedirect("/")
# Fetch the data stored under 'answer' from the session, and handle the case where doCheck is executed directly and the session value cannot be retrieved
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# The guess obtained through the form is of type string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# Each execution of doCheck increments the count by one
cherrypy.session['count'] += 1
# Compare the answer with the guessed number
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# Guessed correctly; fetch the accumulated number of guesses from the session
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# Print the hypertext form for the user to enter input
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
Please enter the integer you are guessing:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) Program startup section
# Configure the static directories or static files relative to the directory the program file is in
application_conf = {'/static':{
'tools.staticdir.on': True,
# A static directory must be created manually under the program's execution directory
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Hello()
root.gear = gear.Gear()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# Running on OpenShift
application = cherrypy.Application(root, config=application_conf)
else:
# Running locally
cherrypy.quickstart(root, config=application_conf)
#@-leo
|
gpl-3.0
| 421,274,124,816,849,400
| 29.535904
| 137
| 0.554152
| false
| 2.049902
| false
| false
| false
|
NeerajM999/recap-python
|
LearnPython/data_structures/binary_tree.py
|
1
|
1761
|
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root_val):
self.root = Node(root_val)
def preorder_traversal(self, start, traversal):
""" Root -> left -> right """
if start:
traversal += (str(start.value) + "-")
traversal = self.preorder_traversal(start.left, traversal)
traversal = self.preorder_traversal(start.right, traversal)
return traversal
def inorder_traversal(self, start, traversal):
""" left -> root -> right """
if start:
traversal = self.inorder_traversal(start.left, traversal)
traversal += (str(start.value) + "-")
traversal = self.inorder_traversal(start.right, traversal)
return traversal
def postorder_traversal(self, start, traversal):
""" left -> right -> root """
if start:
traversal = self.postorder_traversal(start.left, traversal)
traversal = self.postorder_traversal(start.right, traversal)
traversal += (str(start.value) + "-")
return traversal
if __name__ == "__main__":
"""
1
/ \
2 3
/ \ / \
4 5 6 7
"""
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.left = Node(6)
tree.root.right.right = Node(7)
print("preorder-traversal: ", tree.preorder_traversal(tree.root, ""))
print("inorder-traversal: ", tree.inorder_traversal(tree.root, ""))
print("postorder-traversal: ", tree.postorder_traversal(tree.root, ""))
|
gpl-3.0
| 1,749,836,215,305,637,600
| 27.885246
| 75
| 0.571266
| false
| 3.6841
| false
| false
| false
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/model/fields/one2one.py
|
1
|
2080
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from types import NoneType
from trytond.model.fields.field import Field
from trytond.model.fields.many2many import Many2Many
from trytond.pool import Pool
class One2One(Many2Many):
'''
Define one2one field (``int``).
'''
_type = 'one2one'
def get(self, ids, model, name, values=None):
'''
Return target record.
:param ids: a list of ids
:param model: a string with the name of the model
:param name: a string with the name of the field
:param values: a dictionary with the read values
:return: a dictionary with ids as key and target id as value
'''
res = super(One2One, self).get(ids, model, name, values=values)
for i, vals in res.iteritems():
res[i] = vals[0] if vals else None
return res
def set(self, Model, name, ids, value, *args):
'''
Set the values.
'''
pool = Pool()
Relation = pool.get(self.relation_name)
to_delete = []
to_create = []
args = iter((ids, value) + args)
for ids, value in zip(args, args):
relations = Relation.search([
(self.origin, 'in', ids),
])
to_delete.extend(relations)
if value:
for record_id in ids:
to_create.append({
self.origin: record_id,
self.target: value,
})
if to_delete:
Relation.delete(to_delete)
if to_create:
Relation.create(to_create)
def __set__(self, inst, value):
Target = self.get_target()
if isinstance(value, dict):
value = Target(**value)
elif isinstance(value, (int, long)):
value = Target(value)
assert isinstance(value, (Target, NoneType))
Field.__set__(self, inst, value)
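# Usage sketch (an assumption for illustration, not taken from trytond itself):
# a one2one link is declared on a model by naming the relation model and its two
# columns, mirroring the Many2Many signature this class inherits, e.g.
#
#     from trytond.model import ModelSQL, fields
#
#     class Party(ModelSQL):
#         __name__ = 'party.party'
#         identity = fields.One2One('party.party-party.identity',
#             'party', 'identity', 'Identity')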
|
gpl-3.0
| 4,749,240,343,853,297,000
| 32.015873
| 72
| 0.546635
| false
| 4.16
| false
| false
| false
|
Metonimie/Beaglebone
|
programs/server.py
|
1
|
3147
|
#!/usr/bin/env python
"""
A very simple server in python
used to control gpio pins on the beaglebone black.
The server listens for POST requests on port
6410. It has no security at all, which means
that it accepts post-data from everyone.
Send a GET request::
curl http://localhost
Send a POST request::
curl -d "foo=bar&bin=baz" http://localhost
Usage:
nohup python3 server.py &
"""
# TODO: Add basic security
# TODO: Use dictionary for gpio name : file
import http.server
import urllib
PORT = 6410
gpio_path = "/sys/class/gpio/"
# If the param name is in here then we handle the value.
authorized_gpio = ["gpio60"]
class Server(http.server.BaseHTTPRequestHandler):
def prepare_response(self, code):
"""
Prepares the response that will be send back to the requester,
along with the code.
"""
self.send_response(code)
self.send_header("Content-type", "text/html")
self.send_header("Access-Control-Allow-Origin", "*")
self.end_headers()
def handle_gpio(self, key, value):
"""
Very basic gpio handling, converts the value into
an int and then it writes it to the file.
"""
try:
clean_value = int(value)
with open("{}{}/value".format(gpio_path, key), mode="w") as file:
file.write(str(clean_value))
return False
except ValueError as e:
print(e)
except Exception as e:
print("Exception: {}".format(e))
return True
def unsupported(self):
self.wfile.write("Go Away!\n".encode())
def do_GET(self):
self.unsupported()
def do_HEAD(self):
self.unsupported()
def do_POST(self):
"""
Handles the post request.
If error is True then the handling has failed or the request is
invalid
"""
error = False
try:
# The length of the request, in bytes.
length = int(self.headers['content-length'])
# Dictionary containing keys and values from the request.
postvars = urllib.parse.parse_qs(self.rfile.read(length))
for key, value in postvars.items():
clean_key = key.decode()
clean_value = value[0].decode()
print("Received: " + clean_key + " : " + clean_value)
if clean_key in authorized_gpio:
error = self.handle_gpio(clean_key, clean_value)
else:
error = True
except Exception as e:
print(e)
error = True
response = None
if not error:
self.prepare_response(200)
response = "Operation authorized.\n"
else:
self.prepare_response(403)
response = "Go away!\n"
# Write response to the client.
self.wfile.write(response.encode())
if __name__ == "__main__":
server_address = ('', PORT)
httpd = http.server.HTTPServer(server_address, Server)
print('Starting server')
httpd.serve_forever()
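# Client sketch (illustrative, not part of the original script): the handler only
# honours POST keys listed in authorized_gpio, so toggling gpio60 with the standard
# library would look roughly like:
#
#     from urllib import request, parse
#     data = parse.urlencode({'gpio60': 1}).encode()
#     request.urlopen('http://localhost:6410', data=data)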
|
gpl-3.0
| 3,281,151,376,845,631,500
| 28.688679
| 77
| 0.571973
| false
| 4.157199
| false
| false
| false
|
arbrandes/edx-configuration
|
playbooks/roles/supervisor/files/pre_supervisor_checks.py
|
1
|
12593
|
import argparse
import boto.ec2
from boto.utils import get_instance_metadata, get_instance_identity
from boto.exception import AWSConnectionError
import hipchat
import os
import subprocess
import traceback
import socket
import time
# Services that should be checked for migrations.
MIGRATION_COMMANDS = {
'lms': "/edx/bin/edxapp-migrate-lms --noinput --list",
'cms': "/edx/bin/edxapp-migrate-cms --noinput --list",
'xqueue': ". {env_file}; sudo -E -u xqueue {python} {code_dir}/manage.py showmigrations",
'ecommerce': ". {env_file}; sudo -E -u ecommerce {python} {code_dir}/manage.py showmigrations",
'insights': ". {env_file}; sudo -E -u insights {python} {code_dir}/manage.py showmigrations",
'analytics_api': ". {env_file}; sudo -E -u analytics_api {python} {code_dir}/manage.py showmigrations",
'credentials': ". {env_file}; sudo -E -u credentials {python} {code_dir}/manage.py showmigrations",
'discovery': ". {env_file}; sudo -E -u discovery {python} {code_dir}/manage.py showmigrations",
}
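# Illustrative helper (a sketch, not used by the original script): main() renders one
# of the templates above with service-specific paths and then looks for Django's
# "[ ]" marker, which showmigrations prints for migrations that have not been applied.
def has_unapplied_migrations(showmigrations_output):
    """Return True if the showmigrations output still lists an unapplied migration."""
    return '[ ]' in showmigrations_output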
HIPCHAT_USER = "PreSupervisor"
# Max amount of time to wait for tags to be applied.
MAX_BACKOFF = 120
INITIAL_BACKOFF = 1
REGION = get_instance_identity()['document']['region']
def services_for_instance(instance_id):
"""
Get the list of all services named by the services tag in this
instance's tags.
"""
ec2 = boto.ec2.connect_to_region(REGION)
reservations = ec2.get_all_instances(instance_ids=[instance_id])
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
try:
services = instance.tags['services'].split(',')
except KeyError as ke:
msg = "Tag named 'services' not found on this instance({})".format(instance_id)
raise Exception(msg)
for service in services:
yield service
def edp_for_instance(instance_id):
ec2 = boto.ec2.connect_to_region(REGION)
reservations = ec2.get_all_instances(instance_ids=[instance_id])
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
try:
environment = instance.tags['environment']
deployment = instance.tags['deployment']
play = instance.tags['play']
except KeyError as ke:
msg = "{} tag not found on this instance({})".format(ke.message, instance_id)
raise Exception(msg)
return (environment, deployment, play)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Enable all services that are in the services tag of this ec2 instance.")
parser.add_argument("-a","--available",
help="The location of the available services.")
parser.add_argument("-e","--enabled",
help="The location of the enabled services.")
migration_args = parser.add_argument_group("edxapp_migrations",
"Args for running edxapp migration checks.")
migration_args.add_argument("--edxapp-code-dir",
help="Location of the edx-platform code.")
migration_args.add_argument("--edxapp-python",
help="Path to python to use for executing migration check.")
migration_args.add_argument("--edxapp-env",
help="Location of the edxapp environment file.")
xq_migration_args = parser.add_argument_group("xqueue_migrations",
"Args for running xqueue migration checks.")
xq_migration_args.add_argument("--xqueue-code-dir",
help="Location of the xqueue code.")
xq_migration_args.add_argument("--xqueue-python",
help="Path to python to use for executing migration check.")
migration_args.add_argument("--xqueue-env",
help="Location of the xqueue environment file.")
ecom_migration_args = parser.add_argument_group("ecommerce_migrations",
"Args for running ecommerce migration checks.")
ecom_migration_args.add_argument("--ecommerce-python",
help="Path to python to use for executing migration check.")
ecom_migration_args.add_argument("--ecommerce-env",
help="Location of the ecommerce environment file.")
ecom_migration_args.add_argument("--ecommerce-code-dir",
help="Location of the ecommerce code.")
credentials_migration_args = parser.add_argument_group("credentials_migrations",
"Args for running credentials migration checks.")
credentials_migration_args.add_argument("--credentials-python",
help="Path to python to use for executing migration check.")
credentials_migration_args.add_argument("--credentials-env",
help="Location of the credentials environment file.")
credentials_migration_args.add_argument("--credentials-code-dir",
help="Location of the credentials code.")
discovery_migration_args = parser.add_argument_group("discovery_migrations",
"Args for running discovery migration checks.")
discovery_migration_args.add_argument("--discovery-python",
help="Path to python to use for executing migration check.")
discovery_migration_args.add_argument("--discovery-env",
help="Location of the discovery environment file.")
discovery_migration_args.add_argument("--discovery-code-dir",
help="Location of the discovery code.")
insights_migration_args = parser.add_argument_group("insights_migrations",
"Args for running insights migration checks.")
insights_migration_args.add_argument("--insights-python",
help="Path to python to use for executing migration check.")
insights_migration_args.add_argument("--insights-env",
help="Location of the insights environment file.")
insights_migration_args.add_argument("--insights-code-dir",
help="Location of the insights code.")
analyticsapi_migration_args = parser.add_argument_group("analytics_api_migrations",
"Args for running analytics_api migration checks.")
analyticsapi_migration_args.add_argument("--analytics-api-python",
help="Path to python to use for executing migration check.")
analyticsapi_migration_args.add_argument("--analytics-api-env",
help="Location of the analytics_api environment file.")
analyticsapi_migration_args.add_argument("--analytics-api-code-dir",
help="Location of the analytics_api code.")
hipchat_args = parser.add_argument_group("hipchat",
"Args for hipchat notification.")
hipchat_args.add_argument("-c","--hipchat-api-key",
help="Hipchat token if you want to receive notifications via hipchat.")
hipchat_args.add_argument("-r","--hipchat-room",
help="Room to send messages to.")
args = parser.parse_args()
report = []
prefix = None
notify = None
try:
if args.hipchat_api_key:
hc = hipchat.HipChat(token=args.hipchat_api_key)
notify = lambda message: hc.message_room(room_id=args.hipchat_room,
message_from=HIPCHAT_USER, message=message)
except Exception as e:
print("Failed to initialize hipchat, {}".format(e))
traceback.print_exc()
instance_id = get_instance_metadata()['instance-id']
prefix = instance_id
ec2 = boto.ec2.connect_to_region(REGION)
reservations = ec2.get_all_instances(instance_ids=[instance_id])
instance = reservations[0].instances[0]
if instance.instance_profile['arn'].endswith('/abbey'):
print("Running an abbey build. Not starting any services.")
# Needs to exit with 1 instead of 0 to prevent
# services from starting.
exit(1)
time_left = MAX_BACKOFF
backoff = INITIAL_BACKOFF
environment = None
deployment = None
play = None
while time_left > 0:
try:
environment, deployment, play = edp_for_instance(instance_id)
prefix = "{environment}-{deployment}-{play}-{instance_id}".format(
environment=environment,
deployment=deployment,
play=play,
instance_id=instance_id)
break
except Exception as e:
print("Failed to get EDP for {}: {}".format(instance_id, str(e)))
# With the time limit being 2 minutes we will
# try 5 times before giving up.
time.sleep(backoff)
time_left -= backoff
backoff = backoff * 2
if environment is None or deployment is None or play is None:
msg = "Unable to retrieve environment, deployment, or play tag."
print(msg)
if notify:
notify("{} : {}".format(prefix, msg))
exit(0)
#get the hostname of the sandbox
hostname = socket.gethostname()
try:
#get the list of the volumes, that are attached to the instance
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id})
for volume in volumes:
volume.add_tags({"hostname": hostname,
"environment": environment,
"deployment": deployment,
"cluster": play,
"instance-id": instance_id,
"created": volume.create_time })
except Exception as e:
msg = "Failed to tag volumes associated with {}: {}".format(instance_id, str(e))
print(msg)
if notify:
notify(msg)
try:
for service in services_for_instance(instance_id):
if service in MIGRATION_COMMANDS:
services = {
"lms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir},
"cms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir},
"ecommerce": {'python': args.ecommerce_python, 'env_file': args.ecommerce_env, 'code_dir': args.ecommerce_code_dir},
"credentials": {'python': args.credentials_python, 'env_file': args.credentials_env, 'code_dir': args.credentials_code_dir},
"discovery": {'python': args.discovery_python, 'env_file': args.discovery_env, 'code_dir': args.discovery_code_dir},
"insights": {'python': args.insights_python, 'env_file': args.insights_env, 'code_dir': args.insights_code_dir},
"analytics_api": {'python': args.analytics_api_python, 'env_file': args.analytics_api_env, 'code_dir': args.analytics_api_code_dir},
"xqueue": {'python': args.xqueue_python, 'env_file': args.xqueue_env, 'code_dir': args.xqueue_code_dir},
}
if service in services and all(arg is not None for arg in services[service].values()):
serv_vars = services[service]
cmd = MIGRATION_COMMANDS[service].format(**serv_vars)
if os.path.exists(serv_vars['code_dir']):
os.chdir(serv_vars['code_dir'])
# Run migration check command.
output = subprocess.check_output(cmd, shell=True, )
if '[ ]' in output:
raise Exception("Migrations have not been run for {}".format(service))
# Link to available service.
available_file = os.path.join(args.available, "{}.conf".format(service))
link_location = os.path.join(args.enabled, "{}.conf".format(service))
if os.path.exists(available_file):
subprocess.call("sudo -u supervisor ln -sf {} {}".format(available_file, link_location), shell=True)
report.append("Enabling service: {}".format(service))
else:
raise Exception("No conf available for service: {}".format(link_location))
except AWSConnectionError as ae:
msg = "{}: ERROR : {}".format(prefix, ae)
if notify:
notify(msg)
notify(traceback.format_exc())
raise ae
except Exception as e:
msg = "{}: ERROR : {}".format(prefix, e)
print(msg)
if notify:
notify(msg)
traceback.print_exc()
raise e
else:
msg = "{}: {}".format(prefix, " | ".join(report))
print(msg)
if notify:
notify(msg)
|
agpl-3.0
| -2,843,304,026,179,480,000
| 45.640741
| 152
| 0.615262
| false
| 4.182331
| false
| false
| false
|
cyanogen/uchroma
|
uchroma/traits.py
|
1
|
11759
|
#
# uchroma - Copyright (C) 2021 Stefanie Kondik
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# pylint: disable=protected-access, invalid-name, no-member
import enum
import importlib
import sys
from argparse import ArgumentParser
from typing import Iterable
from traitlets import CaselessStrEnum, Container, Dict, Enum, Int, HasTraits, \
List, TraitType, Undefined, UseEnum
from frozendict import frozendict
from uchroma.color import to_color
from uchroma.util import ArgsDict
class ColorTrait(TraitType):
"""
A traitlet which encapsulates a grapefruit.Color and performs
type coercion as needed.
"""
info_text = "a color"
allow_none = True
default_value = 'black'
def __init__(self, *args, **kwargs):
super(ColorTrait, self).__init__(*args, **kwargs)
def validate(self, obj, value):
try:
if value is not None:
value = to_color(value)
except:
self.error(obj, value)
return value
class ColorSchemeTrait(List):
"""
A list of ColorTraits which comprise a scheme
"""
info_text = 'a list of colors'
def __init__(self, trait=ColorTrait(), default_value=(),
minlen=0, maxlen=sys.maxsize, **kwargs):
super(ColorSchemeTrait, self).__init__(trait=trait, default_value=default_value,
minlen=minlen, maxlen=maxlen, **kwargs)
class ColorPresetTrait(UseEnum):
"""
A trait which represents a group of color schemes defined
as a Python Enum.
"""
info_text = 'a predefined color scheme'
def __init__(self, enum_class, default_value=None, **kwargs):
super(ColorPresetTrait, self).__init__(enum_class, default_value=default_value, **kwargs)
class WriteOnceMixin(object):
"""
Mixin for traits which cannot be changed after an initial
value has been set.
"""
write_once = True
def validate(self, obj, value):
if self.name not in obj._trait_values or \
obj._trait_values[self.name] == self.default_value:
return super().validate(obj, value)
self.error(obj, value)
class WriteOnceInt(WriteOnceMixin, Int):
"""
Subclass of Int which may only be written once
"""
pass
class FrozenDict(WriteOnceMixin, Dict):
"""
Subclass of Dict which converts the value to a frozendict on
the first setting.
"""
def validate(self, obj, value):
return frozendict(super().validate(obj, value))
class UseEnumCaseless(UseEnum):
"""
Subclass of UseEnum which allows selection of values using
case insensitive strings
"""
def select_by_name(self, value, default=Undefined):
if value.startswith(self.name_prefix):
# -- SUPPORT SCOPED-NAMES, like: "Color.red" => "red"
value = value.replace(self.name_prefix, "", 1)
keys = [x.lower() for x in self.enum_class.__members__.keys()]
# list.index() raises ValueError on a miss instead of returning -1,
# so catch the miss and fall back to the supplied default.
try:
idx = keys.index(value.lower())
except ValueError:
return default
return self.enum_class[list(self.enum_class.__members__.keys())[idx]]
class WriteOnceUseEnumCaseless(WriteOnceMixin, UseEnumCaseless):
"""
Subclass of UseEnumCaseless which may only be written once.
"""
pass
class DefaultCaselessStrEnum(CaselessStrEnum):
"""
Extension of CaselessStrEnum which handles default values better
"""
def validate(self, obj, value):
if self.default_value and (value is None or value == ''):
value = self.default_value
return super().validate(obj, value)
def is_trait_writable(trait: TraitType) -> bool:
"""
Test if a trait is writable
:param trait: the trait to be tested
:return: True if the trait is writable
"""
if trait.read_only:
return False
if hasattr(trait, 'write_once') and trait.write_once:
return False
return True
def trait_as_dict(trait: TraitType) -> dict:
"""
Convert a trait to a dict for sending over D-Bus or the like
:param trait: the trait to be converted
:return: dict representing this trait
"""
cls = trait.__class__
tdict = {}
for k, v in vars(trait).items():
if k.startswith('__') or k == 'this_class':
continue
if hasattr(cls, k) and getattr(cls, k) == v:
continue
if isinstance(v, Iterable) and len(v) == 0:
continue
if k.startswith('_'):
tdict[k[1:]] = v
else:
tdict[k] = v
if isinstance(trait, UseEnum):
cls = CaselessStrEnum
tdict['values'] = tuple(trait.enum_class.__members__.keys())
if 'enum_class' in tdict:
del tdict['enum_class']
for k, v in tdict.items():
if isinstance(v, TraitType):
tdict[k] = trait_as_dict(v)
if isinstance(v, enum.Enum):
tdict[k] = v.name
if isinstance(v, type):
tdict[k] = '%s.%s' % (v.__module__, v.__name__)
tdict['__class__'] = (cls.__module__, cls.__name__)
return tdict
def class_traits_as_dict(obj: HasTraits, values: dict=None) -> dict:
"""
Create a dict which represents all traits of the given object.
This dict itself can be inspected in a generic API, or it
may be converted back to a (stub) instance of HasTraits. This
facilitates the sending of configurable object properties over
an interface such as D-Bus.
:param obj: an instance of HasTraits
:param value: optional dict of trait values (pulled from obj by default)
:return: dict representing all traits in obj
"""
cls_dt = {}
if isinstance(obj, type) and hasattr(obj, 'class_traits'):
traits = obj.class_traits()
elif isinstance(obj, dict):
traits = obj
elif isinstance(obj, HasTraits):
traits = obj.traits()
values = obj._trait_values
else:
raise TypeError("Object does not support traits")
for k, v in traits.items():
dt = trait_as_dict(v)
if dt is None:
continue
if values is not None and k in values:
dt['__value__'] = values[k]
cls_dt[k] = dt
return cls_dt
def dict_as_trait(obj: dict) -> TraitType:
"""
Create a trait from a dict (trait_as_dict).
"""
if '__class__' not in obj:
raise ValueError("No module and class attribute present")
tobj = obj.copy()
module_name, trait_class = tobj.pop('__class__')
module = importlib.import_module(module_name)
if not hasattr(module, trait_class):
raise TypeError("Unknown class: %s" % trait_class)
cls = getattr(module, trait_class)
if 'trait' in tobj:
tobj['trait'] = dict_as_trait(tobj.pop('trait'))
metadata = {}
if 'metadata' in tobj:
metadata.update(tobj.pop('metadata'))
if issubclass(cls, Enum):
trait = cls(tobj.pop('values'), **tobj)
else:
trait = cls(**tobj)
for k in list(metadata.keys()):
if k in ('name', 'default_args', 'default_kwargs'):
setattr(trait, k, metadata.pop(k))
trait.metadata = metadata
return trait
def dict_as_class_traits(obj: dict) -> HasTraits:
"""
Convert a dict of unpacked traits to a HasTraits instance.
Useful for remote parameter inspection and validation.
:param obj: dict of unpacked traits
:return: the stub HasTraits instance
"""
if not isinstance(obj, dict):
raise TypeError("Object must be a dict (was: %s)" % obj)
traits = {}
values = {}
for k, v in obj.items():
if '__value__' in v:
values[k] = v.pop('__value__')
trait = dict_as_trait(v)
if trait is None:
continue
traits[k] = trait
cls = HasTraits()
cls.add_traits(**traits)
for k, v in values.items():
setattr(cls, k, v)
return cls
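# Hedged round-trip sketch (the Prefs class and its trait names are invented
# for illustration): pack the traits of a HasTraits class into a plain dict,
# send it over D-Bus or similar, and rebuild a validating stub on the far side.
#
#     class Prefs(HasTraits):
#         color = ColorTrait(default_value='red')
#         speed = Int(default_value=5)
#
#     packed = class_traits_as_dict(Prefs)   # plain dict, safe to serialize
#     stub = dict_as_class_traits(packed)    # stub HasTraits on the remote end
#     stub.speed = 10                        # still validated by the rebuilt trait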
def get_args_dict(obj: HasTraits, incl_all=False):
"""
Return a dict of user-configurable traits for an object
:param obj: an instance of HasTraits
:param incl_all: If all items should be included, regardless of RO status
:return: dict of arguments
"""
argsdict = ArgsDict()
for k in sorted(obj._trait_values.keys()):
v = obj._trait_values[k]
trait = obj.traits()[k]
if incl_all or (not trait.get_metadata('hidden') and is_trait_writable(trait)):
argsdict[k] = v
return argsdict
def add_traits_to_argparse(obj: HasTraits, parser: ArgumentParser,
prefix: str=None):
"""
Add all traits from the given object to the argparse context.
:param obj: an instance of HasTraits
:param parser: argparse parser
:param prefix: string to prefix keys with
"""
for key, trait in obj.traits().items():
if trait.get_metadata('config') is not True:
continue
argname = '--%s' % key
if prefix is not None:
argname = '--%s.%s' % (prefix, key)
if isinstance(trait, Container):
parser.add_argument(argname, nargs='+', help=trait.info_text)
elif isinstance(trait, Enum):
parser.add_argument(argname, type=str.lower,
choices=[x.lower() for x in trait.values],
help=trait.info_text)
else:
argtype = str
if hasattr(trait, 'default_value'):
argtype = type(trait.default_value)
parser.add_argument(argname, type=argtype, help=trait.info_text)
def apply_from_argparse(args, traits=None, target: HasTraits=None) -> dict:
"""
Applies arguments added via add_traits_to_argparse to
a target object which implements HasTraits. If a target
is not known, a dict of traits may be passed instead.
Will throw TraitError if validation fails.
:param args: Parsed args from argparse
:param traits: Dictionary of traits (optional)
:param target: Target object (optional)
:return: Dict of the arguments which actually changed
"""
# apply the traits to an empty object, which will run
# the validators on the client
if isinstance(traits, HasTraits):
traits = traits.traits()
traits = traits.copy()
for k, v in traits.items():
if not isinstance(v, TraitType):
if isinstance(v, dict):
traits[k] = dict_as_trait(v)
else:
raise TypeError("A dict or trait object must be supplied")
if target is None:
if traits is None:
raise ValueError("Either traits or target must be specified")
target = HasTraits()
target.add_traits(**traits)
# determine what should actually be changed
argkeys = [k for k, v in vars(args).items() if v is not None]
intersect = set(target.traits().keys()).intersection(set(argkeys))
# apply the argparse flags to the target object
for key in intersect:
if target.traits()[key].get_metadata('config') is not True:
raise ValueError("Trait is not marked as configurable: %s" % key)
setattr(target, key, getattr(args, key))
# if all validators passed, return a dict of the changed args
changed = {}
for key in intersect:
changed[key] = target._trait_values[key]
return changed
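# Hedged usage sketch for the two argparse helpers above; the Renderer class
# and its single trait are invented for illustration.
#
#     class Renderer(HasTraits):
#         speed = Int(default_value=1).tag(config=True)
#
#     renderer = Renderer()
#     parser = ArgumentParser()
#     add_traits_to_argparse(renderer, parser)            # adds a --speed flag
#     args = parser.parse_args(['--speed', '3'])
#     changed = apply_from_argparse(args, target=renderer)
#     # changed == {'speed': 3} and renderer.speed has been validated and set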
|
lgpl-3.0
| 468,926,082,809,482,900
| 28.619647
| 97
| 0.615188
| false
| 3.944649
| false
| false
| false
|
cardmagic/PyAMF
|
pyamf/adapters/_django_db_models_base.py
|
1
|
8476
|
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
`django.db.models` adapter module.
:see: `Django Project <http://www.djangoproject.com>`_
:since: 0.4.1
"""
from django.db.models.base import Model
from django.db.models import fields
from django.db.models.fields import related, files
import datetime
import pyamf
from pyamf.util import imports
class DjangoReferenceCollection(dict):
"""
This helper class holds a dict of klass to pk/objects loaded from the
underlying db.
:since: 0.5
"""
def _getClass(self, klass):
if klass not in self.keys():
self[klass] = {}
return self[klass]
def getClassKey(self, klass, key):
"""
Return an instance based on klass/key.
If an instance cannot be found then `KeyError` is raised.
:param klass: The class of the instance.
:param key: The primary_key of the instance.
:return: The instance linked to the `klass`/`key`.
:rtype: Instance of `klass`.
"""
d = self._getClass(klass)
return d[key]
def addClassKey(self, klass, key, obj):
"""
Adds an object to the collection, based on klass and key.
:param klass: The class of the object.
:param key: The datastore key of the object.
:param obj: The loaded instance from the datastore.
"""
d = self._getClass(klass)
d[key] = obj
class DjangoClassAlias(pyamf.ClassAlias):
def getCustomProperties(self):
self.fields = {}
self.relations = {}
self.columns = []
self.meta = self.klass._meta
for name in self.meta.get_all_field_names():
x = self.meta.get_field_by_name(name)[0]
if isinstance(x, files.FileField):
self.readonly_attrs.update([name])
if isinstance(x, related.RelatedObject):
continue
if not isinstance(x, related.ForeignKey):
self.fields[name] = x
else:
self.relations[name] = x
for k, v in self.klass.__dict__.iteritems():
if isinstance(v, related.ReverseManyRelatedObjectsDescriptor):
self.fields[k] = v.field
parent_fields = []
for field in self.meta.parents.values():
parent_fields.append(field.attname)
del self.relations[field.name]
self.exclude_attrs.update(parent_fields)
props = self.fields.keys()
self.encodable_properties.update(props)
self.decodable_properties.update(props)
def _compile_base_class(self, klass):
if klass is Model:
return
pyamf.ClassAlias._compile_base_class(self, klass)
def _encodeValue(self, field, value):
if value is fields.NOT_PROVIDED:
return pyamf.Undefined
if value is None:
return value
# deal with dates ..
if isinstance(field, fields.DateTimeField):
return value
elif isinstance(field, fields.DateField):
return datetime.datetime(value.year, value.month, value.day, 0, 0, 0)
elif isinstance(field, fields.TimeField):
return datetime.datetime(1970, 1, 1,
value.hour, value.minute, value.second, value.microsecond)
elif isinstance(value, files.FieldFile):
return value.name
return value
def _decodeValue(self, field, value):
if value is pyamf.Undefined:
return fields.NOT_PROVIDED
if isinstance(field, fields.AutoField) and value == 0:
return None
elif isinstance(field, fields.DateTimeField):
# deal with dates
return value
elif isinstance(field, fields.DateField):
if not value:
return None
return datetime.date(value.year, value.month, value.day)
elif isinstance(field, fields.TimeField):
if not value:
return None
return datetime.time(value.hour, value.minute, value.second, value.microsecond)
return value
def getEncodableAttributes(self, obj, **kwargs):
attrs = pyamf.ClassAlias.getEncodableAttributes(self, obj, **kwargs)
if not attrs:
attrs = {}
for name, prop in self.fields.iteritems():
if name not in attrs.keys():
continue
if isinstance(prop, related.ManyToManyField):
attrs[name] = [x for x in getattr(obj, name).all()]
else:
attrs[name] = self._encodeValue(prop, getattr(obj, name))
keys = attrs.keys()
for key in keys:
if key.startswith('_'):
del attrs[key]
for name, relation in self.relations.iteritems():
if '_%s_cache' % name in obj.__dict__:
attrs[name] = getattr(obj, name)
del attrs[relation.column]
if not attrs:
attrs = None
return attrs
def getDecodableAttributes(self, obj, attrs, **kwargs):
attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, **kwargs)
for n in self.decodable_properties:
if n in self.relations:
continue
f = self.fields[n]
attrs[f.attname] = self._decodeValue(f, attrs[n])
# primary key of django object must always be set first for
# relationships with other model objects to work properly
# and dict.iteritems() does not guarantee order
#
# django also forces the use of only one attribute as primary key, so
# our obj._meta.pk.attname check is sufficient)
try:
setattr(obj, obj._meta.pk.attname, attrs[obj._meta.pk.attname])
del attrs[obj._meta.pk.attname]
except KeyError:
pass
return attrs
def getDjangoObjects(context):
"""
Returns a reference to the `django_objects` on the context. If it doesn't
exist then it is created.
:param context: The context to load the `django_objects` index from.
:type context: Instance of :class:`pyamf.BaseContext`
:return: The `django_objects` index reference.
:rtype: Instance of :class:`DjangoReferenceCollection`
:since: 0.5
"""
if not hasattr(context, 'django_objects'):
context.django_objects = DjangoReferenceCollection()
return context.django_objects
def writeDjangoObject(self, obj, *args, **kwargs):
"""
The Django ORM creates new instances of objects for each db request.
This is a problem for PyAMF as it uses the id(obj) of the object to do
reference checking.
We could just ignore the problem, but the objects are conceptually the
same so the effort should be made to attempt to resolve references for a
given object graph.
We create a new map on the encoder context object which contains a dict of
C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
use the primary key to do the reference checking.
:since: 0.5
"""
if not isinstance(obj, Model):
self.writeNonDjangoObject(obj, *args, **kwargs)
return
context = self.context
kls = obj.__class__
s = obj.pk
if s is None:
self.writeNonDjangoObject(obj, *args, **kwargs)
return
django_objects = getDjangoObjects(context)
try:
referenced_object = django_objects.getClassKey(kls, s)
except KeyError:
referenced_object = obj
django_objects.addClassKey(kls, s, obj)
self.writeNonDjangoObject(referenced_object, *args, **kwargs)
def install_django_reference_model_hook(mod):
"""
Called when :module:`pyamf.amf0` or :module:`pyamf.amf3` are imported. Attaches the
:func:`writeDjangoObject` method to the `Encoder` class in that module.
:param mod: The module imported.
:since: 0.4.1
"""
if not hasattr(mod.Encoder, 'writeNonDjangoObject'):
mod.Encoder.writeNonDjangoObject = mod.Encoder.writeObject
mod.Encoder.writeObject = writeDjangoObject
# initialise the module here: hook into pyamf
pyamf.register_alias_type(DjangoClassAlias, Model)
# hook the L{writeDjangoObject} method to the Encoder class on import
imports.when_imported('pyamf.amf0', install_django_reference_model_hook)
imports.when_imported('pyamf.amf3', install_django_reference_model_hook)
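# Rough usage sketch: PyAMF normally pulls this adapter in automatically once
# Django models are imported, so encoding a model instance is enough to
# exercise the class alias and the reference hook above (the Article model is
# hypothetical).
#
#     import pyamf
#     from myapp.models import Article   # hypothetical Django model
#
#     encoded = pyamf.encode(Article.objects.get(pk=1)).getvalue()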
|
mit
| -6,217,738,496,913,844,000
| 28.430556
| 91
| 0.61975
| false
| 4.080886
| false
| false
| false
|
ICOS-Carbon-Portal/data
|
src/main/python/update-restheart/Restheart.py
|
1
|
2242
|
import requests
class Restheart(object):
def __init__(self):
# self._baseUrl = 'http://127.0.0.1:8088/db/' # localhost
self._baseUrl = 'https://restheart.icos-cp.eu/db/' # production
self._verify = 'restheart' in self._baseUrl
def get_records_to_update(self, op, pagesize, collection):
resp = None
try:
url = self.get_url(op, pagesize, collection)
resp = requests.get(url, timeout=10, verify=self._verify)
if resp.status_code != 200:
print(resp.status_code, resp.reason, resp.json())
return resp.json()
except:
print(resp)
def update_record(self, id, record, collection):
url = self._baseUrl + collection + '/' + id
headers = {"Content-Type": "application/json"}
resp = None
try:
resp = requests.patch(url, headers=headers, json=record, timeout=5, verify=self._verify)
if resp.status_code != 200:
print(resp.status_code, resp.reason)
except:
print(resp)
def get_url(self, op, pagesize, collection):
if op == 'geo':
if collection == 'portaluse':
return self._baseUrl + collection + '?filter={"city":{"$exists":0}}&np&pagesize=' + str(pagesize)
elif collection == 'dobjdls':
return self._baseUrl + collection + '?filter={"$and":[{"ip":{"$exists":1}},{"city":{"$exists":0}}]}&np&pagesize=' + str(pagesize)
else:
raise ValueError("Unknown collection: " + collection)
elif op == 'label':
if collection == 'portaluse':
return self._baseUrl + collection + '?np&pagesize=' + str(pagesize)
# return self._baseUrl + collection + '?filter={"_id":{"$oid":"5bb21519f17df4d065e9c53c"}}&np&pagesize=' + str(pagesize)
# return self._baseUrl + collection + '?filter={"filterChange":{"$exists":1}}&np&pagesize=' + str(pagesize)
# return self._baseUrl + collection + '?filter={"previewNetCDF":{"$exists":1}}&np&pagesize=' + str(pagesize)
# return self._baseUrl + collection + '?filter={"previewTimeserie":{"$exists":1}}&np&pagesize=' + str(pagesize)
# return self._baseUrl + collection + '?filter={"$and":[{"filterChange":{"$exists":0}},{"previewNetCDF":{"$exists":0}},{"previewTimeserie":{"$exists":0}}]}&np&pagesize=' + str(pagesize)
else:
raise ValueError("Unknown collection: " + collection)
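# Illustrative calls (the ObjectId and city value are placeholders; the JSON
# shape returned by get_records_to_update depends on the RESTHeart version):
#
#     rh = Restheart()
#     batch = rh.get_records_to_update('geo', 100, 'portaluse')   # docs missing 'city'
#     rh.update_record('5bb21519f17df4d065e9c53c', {'city': 'Lund'}, 'portaluse')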
|
gpl-3.0
| 5,153,336,162,370,622,000
| 37.655172
| 189
| 0.650758
| false
| 3.058663
| false
| false
| false
|
domecraft/Games
|
RPG/classes.py
|
1
|
3530
|
class character:
def __init__(self, name, gender ,health, race, role, status, strength, defense, magic, bounty, income, reputation):
self.name = name
self.health = health
self.status = status
self.strength = strength
self.defense = defense
self.race = race
self.role = role
self.bounty = bounty
self.magic = magic
self.gender = gender
self.income = income
self.reputation = reputation
self.inventory = []
def modify_health(self, amount):
self.health += amount
def set_health(self, amount):
self.health = amount
def set_status(self, status):
self.status = status
def modify_str(self, amount):
self.strength += amount
def modify_def(self, amount):
self.defense += amount
def add_item(self, item):
self.inventory.append(item)
def remove_item(self, item):
if item in self.inventory:
self.inventory.remove(item)
else:
print(item + " is not in your inventory!")
def set_race(self, race):
self.race = race
def modify_bounty(self, amount):
self.bounty += amount
def checkDead(self, health):
if self.health <= 0:
self.status = "dead"
return "dead"
else:
self.status = "alive"
return "alive"
def modify_income(self, amount):
self.income += amount
def modify_reputation(self, amount):
self.reputation += amount
#The following class is used for random npcs that I don't really develop in the storyline.
class basicCharacter:
def __init__(self, name, gender, income, status):
self.name = name
self.gender = gender
self.income = income
self.status = status
def set_status(self, status):
self.status = status
class store:
def __init__(self, name = "General Store" , owner = "Store Owner", alliance = "Rebellion"):
self.name = name
self.store_owner = owner
self.alliance = alliance
self.stock = {
'longsword': {'cost': 10, 'speed': 3, 'strength': 7, 'defense': 2},
'shortsword': {'cost': 8, 'speed': 5, 'strength': 4, 'defense': 2},
'bronze_armor': {'cost': 10, 'speed': -2, 'strength': 1, 'defense': 6},
'silver_armor': {'cost': 20, 'speed': -5, 'strength': 2, 'defense': 12},
'platinum_armor': {'cost': 35, 'speed': -8, 'strength': 4, 'defense': 20}
}
class town:
def __init__(self, name, ruler, alliance, income, population):
self.name = name
self.ruler = ruler
self.alliance = alliance
self.income = income
self.population = population
def set_ruler(self, ruler):
self.ruler = ruler
def set_name(self, name):
self.name = name
def set_alliance(self, alliance):
self.alliance = alliance
def modify_income(self, amount):
self.income += amount
def modify_pop(self, population):
self.population += population
class bar:
def __init__(self, name, owner, income):
self.name = name
self.owner = owner
self.income = income
def set_owner(self, owner):
self.owner = owner
def modify_income(self, amount):
self.income += amount
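# Quick illustration of wiring these classes together (all values invented):
#
#     hero = character("Arden", "male", 100, "elf", "ranger", "alive",
#                      strength=12, defense=8, magic=5, bounty=0,
#                      income=30, reputation=1)
#     shop = store()   # defaults: "General Store" run by "Store Owner"
#     sword = shop.stock['shortsword']
#     if hero.income >= sword['cost']:
#         hero.add_item('shortsword')
#         hero.modify_income(-sword['cost'])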
|
gpl-2.0
| 3,359,393,522,367,290,000
| 29.964912
| 119
| 0.545326
| false
| 3.904867
| false
| false
| false
|
fbergmann/libSEDML
|
examples/python/create_sedml.py
|
1
|
5521
|
#!/usr/bin/env python
##
## @file create_sedml.py
## @brief cerates a SED-ML document.
## @author Frank T. Bergmann
##
## <!--------------------------------------------------------------------------
## This file is part of libSEDML. Please visit http://sed-ml.org for more
## information about SEDML, and the latest version of libSEDML.
##
## Copyright (c) 2013, Frank T. Bergmann
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## ------------------------------------------------------------------------ -->
##
import sys
import os.path
import libsedml
def main (args):
"""Usage: create_sedml output-filename
"""
if (len(args) != 2):
print(main.__doc__)
sys.exit(1);
# create the document
doc = libsedml.SedDocument();
doc.setLevel(1);
doc.setVersion(1);
# create a first model referencing an sbml file
model = doc.createModel();
model.setId("model1");
model.setSource("file.xml");
model.setLanguage("urn:sedml:language:sbml");
# create a second model modifying a variable of that other sbml file
model = doc.createModel();
model.setId("model2");
model.setSource("model1");
model.setLanguage("urn:sedml:sbml");
# change a parameter 'k' to 0.1
change = model.createChangeAttribute();
change.setTarget("/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k']/@value");
change.setNewValue("0.1");
# remove species 's1'
remove = model.createRemoveXML();
remove.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']");
# now for something tricky we want to update the initialConcentration of 'S2' to be
# half what it was in the original model
compute = model.createComputeChange();
compute.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S2']/@initialConcentration");
variable = compute.createVariable();
variable.setId("S2");
variable.setModelReference("model1");
variable.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S2']");
compute.setMath(libsedml.parseFormula("S2 / 2"));
# create simulation
tc = doc.createUniformTimeCourse();
tc.setId("sim1");
tc.setInitialTime(0.0);
tc.setOutputStartTime(0.0);
tc.setOutputEndTime(10.0);
tc.setNumberOfPoints(1000);
# need to set the correct KISAO Term
alg = tc.createAlgorithm();
alg.setKisaoID("KISAO:0000019");
# create a task that uses the simulation and the model above
task = doc.createTask();
task.setId("task1");
task.setModelReference("model1");
task.setSimulationReference("sim1");
# add a DataGenerator to hold the output for time
dg = doc.createDataGenerator();
dg.setId("time");
dg.setName("time");
var = dg.createVariable();
var.setId("v0");
var.setName("time");
var.setTaskReference("task1");
var.setSymbol("urn:sedml:symbol:time");
dg.setMath(libsedml.parseFormula("v0"));
# and one for S1
dg = doc.createDataGenerator();
dg.setId("S1");
dg.setName("S1");
var = dg.createVariable();
var.setId("v1");
var.setName("S1");
var.setTaskReference("task1");
var.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']");
dg.setMath(libsedml.parseFormula("v1"));
# add a report
report = doc.createReport();
report.setId("r1");
report.setName("report 1");
set = report.createDataSet();
set.setId("ds1");
set.setLabel("time");
set.setDataReference("time");
set = report.createDataSet();
set.setId("ds2");
set.setLabel("S1");
set.setDataReference("S1");
# add a 2d plot
plot = doc.createPlot2D();
plot.setId("p1");
plot.setName("S1 Timecourse");
curve = plot.createCurve();
curve.setId("c1");
curve.setName("S1");
curve.setLogX(False);
curve.setLogY(False);
curve.setXDataReference("time");
curve.setYDataReference("S1");
# add a 3D Plot
plot2 = doc.createPlot3D();
plot2.setId("p2");
plot2.setName("dunno");
surf = plot2.createSurface();
surf.setId("surf1");
surf.setName("S1");
surf.setLogX(False);
surf.setLogY(False);
surf.setLogZ(False);
surf.setXDataReference("time");
surf.setYDataReference("S1");
surf.setZDataReference("S1");
# write the document
libsedml.writeSedML(doc, args[1]);
if __name__ == '__main__':
main(sys.argv)
|
bsd-2-clause
| 3,096,274,935,878,346,000
| 32.05988
| 119
| 0.685926
| false
| 3.393362
| false
| false
| false
|
molmod/yaff
|
yaff/pes/colvar.py
|
1
|
13249
|
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Collective variables
This module implements the computation of collective variables and their
derivatives, typically used in advanced sampling methods such as umbrella
sampling or metadynamics. The ``CollectiveVariable`` class is the main item
in this module, which is normally used in conjunction with an instance of the
``Bias`` class. Note that many collective variables such as bond lengths,
bending angles, improper angles, ... are already implemented by the
:mod:`yaff.pes.iclist` module, so no separate implementation needs to be
provided here.
'''
from __future__ import division
import numpy as np
from yaff.log import log
from yaff.pes.dlist import DeltaList
from yaff.pes.iclist import InternalCoordinateList
from yaff.sampling.utils import cell_lower
__all__ = [
'CollectiveVariable', 'CVVolume', 'CVCOMProjection','CVInternalCoordinate',
'CVLinCombIC',
]
class CollectiveVariable(object):
'''Base class for collective variables.'''
def __init__(self, name, system):
"""
**Arguments:**
name
A name for the collective variable.
system
The system for the collective variable.
"""
self.name = name
self.system = system
self.value = np.nan
self.gpos = np.zeros((system.natom, 3), float)
self.vtens = np.zeros((3, 3), float)
def get_conversion(self):
'''Auxiliary routine that allows subclasses to specify the unit
conversion associated with the internal coordinate.
'''
raise NotImplementedError
def get_log(self):
'''Describe the internal coordinate in a format that is suitable for
screen logging.
'''
return '%s' % (self.__class__.__name__)
def compute(self, gpos=None, vtens=None):
"""Compute the collective variable and optionally some derivatives
The only variable inputs for the compute routine are the atomic
positions and the cell vectors.
**Optional arguments:**
gpos
The derivatives of the collective variable towards the Cartesian
coordinates of the atoms. ('g' stands for gradient and 'pos'
for positions.)
This must be a writeable numpy array with shape (N, 3) where N
is the number of atoms.
vtens
The force contribution to the pressure tensor. This is also
known as the virial tensor. It represents the derivative of the
energy towards uniform deformations, including changes in the
shape of the unit cell. (v stands for virial and 'tens' stands
for tensor.) This must be a writeable numpy array with shape (3,
3).
The collective variable value is returned. The optional arguments
are Fortran-style output arguments. When they are present, the
corresponding results are computed and **stored** to the current
contents of the array.
"""
#Subclasses implement their compute code here.
raise NotImplementedError
def get_last_computed_value(self):
"""Return the last value that was computed. It is not assured that this
value reflects the value for the current state of the system. This
is merely a convenience method to obtain the value without
performing an actual computation.
"""
return self.value
class CVInternalCoordinate(CollectiveVariable):
'''
An InternalCoordinate disguised as a CollectiveVariable so that it can
be used together with a BiasPotential.
This is less efficient than using the InternalCoordinate with a
ValenceTerm, so the latter is preferred if it is possible.
'''
def __init__(self, system, ic, comlist=None):
self.system = system
self.ic = ic
self.comlist = comlist
self.dlist = DeltaList(system if comlist is None else comlist)
self.iclist = InternalCoordinateList(self.dlist)
self.iclist.add_ic(ic)
def get_conversion(self):
return self.ic.get_conversion()
def compute(self, gpos=None, vtens=None):
if self.comlist is not None:
self.comlist.forward()
self.dlist.forward()
self.iclist.forward()
self.value = self.iclist.ictab[0]['value']
if gpos is not None: gpos[:] = 0.0
if vtens is not None: vtens[:] = 0.0
if not ((gpos is None) and (vtens is None)):
self.iclist.ictab[0]['grad'] = 1.0
self.iclist.back()
if self.comlist is None:
self.dlist.back(gpos, vtens)
else:
self.comlist.gpos[:] = 0.0
self.dlist.back(self.comlist.gpos, vtens)
self.comlist.back(gpos)
return self.value
class CVVolume(CollectiveVariable):
'''The volume of the simulation cell.'''
def __init__(self, system):
'''
**Arguments:**
system
An instance of the ``System`` class.
'''
if system.cell.nvec == 0:
raise TypeError('Can not compute volume of a non-periodic system.')
CollectiveVariable.__init__(self, 'CVVolume', system)
def get_conversion(self):
return np.power(log.length.conversion, self.system.cell.nvec)
def compute(self, gpos=None, vtens=None):
self.value = self.system.cell.volume
if gpos is not None:
# No dependence on atomic positions
gpos[:] = 0.0
if vtens is not None:
vtens[:] = np.identity(3)*self.value
return self.value
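# Sketch of how a collective variable is typically evaluated; ``system`` is
# assumed to be a periodic yaff System created elsewhere. The gpos/vtens
# arrays are the Fortran-style output arguments described in
# CollectiveVariable.compute above.
#
#     cv = CVVolume(system)
#     gpos = np.zeros((system.natom, 3))
#     vtens = np.zeros((3, 3))
#     volume = cv.compute(gpos=gpos, vtens=vtens)   # derivatives written in place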
class CVCOMProjection(CollectiveVariable):
'''Compute the vector connecting two centers of masses and return the
projection along a selected vector. cv=(r_{COM}^{B}-r_{COM}^{A})[index]
and r_{COM} is a vector with centers of mass of groups A and B:
* first component: projected onto ``a`` vector of cell
* second component: projected onto vector perpendicular to ``a``
and in the plane spanned by ``a`` and ``b``
* third component: projected onto vector perpendicular to ``a`` and
``b``
Note that periodic boundary conditions are NOT taken into account
* the centers of mass are computed using absolute positions; this is
most likely the desired behavior
* the center of mass difference can in principle be periodic, but
the periodicity is not the same as the periodicity of the system,
because of the projection on a selected vector
'''
def __init__(self, system, groups, index):
'''
**Arguments:**
system
An instance of the ``System`` class
groups
List of 2 arrays, each array containing atomic indexes
used to compute one of the centers of mass
index
Selected projection vector:
* if index==0, projection onto ``a`` vector of cell
* if index==1, projection onto vector perpendicular to ``a``
and in the plane spanned by ``a`` and ``b``
* if index==2, projection onto vector perpendicular to ``a``
and ``b``
'''
CollectiveVariable.__init__(self, 'CVCOMProjection', system)
self.index = index
# Safety checks
assert len(groups)==2, "Exactly 2 groups need to be defined"
assert system.cell.nvec==3, "Only 3D periodic systems are supported"
assert self.index in [0,1,2], "Index should be one of 0,1,2"
# Masses need to be defined in order to compute centers of mass
if self.system.masses is None:
self.system.set_standard_masses()
# Define weights w_i such that difference of centers of mass can be
# computed as sum_i w_i r_i
self.weights = np.zeros((system.natom))
self.weights[groups[0]] = -self.system.masses[groups[0]]/np.sum(self.system.masses[groups[0]])
self.weights[groups[1]] = self.system.masses[groups[1]]/np.sum(self.system.masses[groups[1]])
def get_conversion(self):
return log.length.conversion
def compute(self, gpos=None, vtens=None):
'''
Consider a rotation of the entire system such that the ``a`` vector
is aligned with the X-axis, the ``b`` vector is in the XY-plane, and
the ``c`` vector chosen such that a right-handed basis is formed.
The rotated cell is lower-diagonal in the Yaff notation.
In this rotated system, it is fairly simple to compute the required
projections and derivatives, because the projections are simply the
Cartesian components. Values obtained in the rotated system are then
transformed back to the original system.
'''
# Compute rotation that makes cell lower diagonal
_, R = cell_lower(self.system.cell.rvecs)
# The projected vector of centers of mass difference (aka the
# collective variable) in the rotated system
cv_orig = np.sum(self.weights.reshape((-1,1))*self.system.pos, axis=0)
# Transform back to the original system
cv = np.dot(R, cv_orig)
self.value = cv[self.index]
if gpos is not None:
gpos[:] = 0.0
gpos[:,self.index] = self.weights
# Forces (vector) need to be rotated back to original system
gpos[:] = np.einsum('ij,kj', gpos, R.T)
if vtens is not None:
vtens[:] = 0.0
vtens[self.index,self.index:] = cv[self.index:]
vtens[self.index:,self.index] = cv[self.index:]
# Virial (tensor) needs to be rotated back to original system
vtens[:] = np.dot(R.T,np.dot(vtens[:],R))
return self.value
class CVLinCombIC(CollectiveVariable):
'''
A linear combination of InternalCoordinates:
cv = w0*ic0 + w1*ic1 + ...
'''
def __init__(self, system, ics, weights, comlist=None):
'''
**Arguments:**
system
An instance of the ``System`` class.
ics
A list of InternalCoordinate instances.
weights
A list defining the weight of each InternalCoordinate that is
used when computing the linear combination.
**Optional arguments:**
comlist
An instance COMList; if provided, this is used instead of the
normal DeltaList to compute the InternalCoordinates
'''
assert len(weights)==len(ics)
self.system = system
self.ics = ics
self.comlist = comlist
self.dlist = DeltaList(system if comlist is None else comlist)
self.iclist = InternalCoordinateList(self.dlist)
for ic in self.ics:
self.iclist.add_ic(ic)
self.weights = weights
def get_conversion(self):
# Units depend on the particular linear combination of internal
# coordinates
return 1.0
def compute(self, gpos=None, vtens=None):
if self.comlist is not None:
self.comlist.forward()
self.dlist.forward()
self.iclist.forward()
self.value = 0.0
for iic in range(len(self.ics)):
self.value += self.weights[iic]*self.iclist.ictab[iic]['value']
if gpos is not None: gpos[:] = 0.0
if vtens is not None: vtens[:] = 0.0
if not ((gpos is None) and (vtens is None)):
for iic in range(len(self.ics)):
# Derivative of the linear combination to this particular
# internal coordinate
self.iclist.ictab[iic]['grad'] = self.weights[iic]
self.iclist.back()
if self.comlist is None:
self.dlist.back(gpos, vtens)
else:
self.comlist.gpos[:] = 0.0
self.dlist.back(self.comlist.gpos, vtens)
self.comlist.back(gpos)
return self.value
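# Hedged example of a linear-combination CV: an asymmetric stretch built from
# two Bond internal coordinates (atom indices 0, 1, 2 are assumed to be O, H, H
# of a water molecule; Bond is provided by yaff.pes.iclist).
#
#     from yaff.pes.iclist import Bond
#     cv = CVLinCombIC(system, [Bond(0, 1), Bond(0, 2)], [1.0, -1.0])
#     value = cv.compute()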
|
gpl-3.0
| -3,113,228,627,627,625,000
| 37.853372
| 102
| 0.611669
| false
| 4.174228
| false
| false
| false
|
russorat/savage-leads
|
api/models/lead.py
|
1
|
2649
|
from elasticsearch import Elasticsearch,RequestsHttpConnection,NotFoundError
from flask import url_for
import config
import json
class Lead(object):
es = Elasticsearch(config.ES_HOSTS,connection_class=RequestsHttpConnection)
@staticmethod
def create_lead(lead_data):
try:
results = Lead.es.create(index='leads',
doc_type='leads',
body=lead_data
)
if results['created']:
return { 'status': 'success',
'message': '',
'created_id': results['_id'] }
else:
return { 'status': 'failure',
'message': 'failed to create new lead.',
'created_id': '' }
except Exception as e:
print e
return { 'status': 'failure',
'message': 'unknown error',
'created_id': '' }
@staticmethod
def delete_lead(lead_id):
try :
Lead.es.delete(index='leads',
doc_type='leads',
id=lead_id
)
return { 'status': 'success', 'message': '' }
except NotFoundError as e:
return { 'status': 'failure', 'message': 'id not found' }
except Exception as e:
print e
return { 'status': 'failure', 'message': 'unknown error' }
@staticmethod
def get_lead(lead_id):
try:
results = Lead.es.get(
index='leads',
doc_type='leads',
id='%s'%(lead_id),
ignore=404
)
if results and results['found'] :
return {'status':'success','message':'','results':[Lead.from_es_hit(results)]}
return {'status':'success','message':'','results':[]}
except NotFoundError as e:
return { 'status': 'failure', 'message': 'id not found', 'results': [] }
except Exception as e:
print e
return { 'status': 'failure', 'message': 'unknown exception', 'results': [] }
@staticmethod
def get_leads(size,page,search):
try:
results = Lead.es.search(
index='leads',
doc_type='leads',
size=size,
q=search or "*",
sort='last_name:ASC,first_name:ASC'
)
retVal = []
if results and results['hits']['total'] > 0 :
for hit in results['hits']['hits']:
retVal.append(Lead.from_es_hit(hit))
return {'status':'success','message':'','results':retVal}
except Exception as e:
print e
return {'status':'failure','message':'unknown error','results':[]}
@staticmethod
def from_es_hit(hit):
lead = {}
lead['id'] = hit['_id']
for key,val in hit['_source'].items():
lead[key] = val
lead['uri'] = url_for('get_lead', lead_id=lead['id'], _external=True)
return lead
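# Illustrative calls (field names in the payload are assumptions; the methods
# above simply store and index whatever dict is handed to Elasticsearch):
#
#     created = Lead.create_lead({'first_name': 'Ada', 'last_name': 'Lovelace'})
#     if created['status'] == 'success':
#         print(Lead.get_lead(created['created_id']))
#     print(Lead.get_leads(size=25, page=1, search='last_name:Lovelace'))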
|
apache-2.0
| -1,660,902,958,443,782,400
| 29.102273
| 86
| 0.559079
| false
| 3.800574
| false
| false
| false
|
jaantollander/CrowdDynamics
|
crowddynamics/core/tests/test_interactions_benchmark.py
|
1
|
1239
|
import numpy as np
import pytest
from crowddynamics.core.interactions import agent_agent_block_list
from crowddynamics.core.vector2D import unit_vector
from crowddynamics.simulation.agents import Agents, Circular, ThreeCircle, \
AgentGroup
def attributes():
orientation = np.random.uniform(-np.pi, np.pi)
return dict(body_type='adult',
orientation=orientation,
velocity=np.random.uniform(0.0, 1.3, 2),
angular_velocity=np.random.uniform(-1.0, 1.0),
target_direction=unit_vector(orientation),
target_orientation=orientation)
@pytest.mark.parametrize('size', (200, 500, 1000))
@pytest.mark.parametrize('agent_type', (Circular, ThreeCircle))
def test_agent_agent_block_list(benchmark, size, agent_type, algorithm):
# Grow the area with size. Keeps agent density constant.
area_size = np.sqrt(2 * size)
agents = Agents(agent_type=agent_type)
group = AgentGroup(
agent_type=agent_type,
size=size,
attributes=attributes)
agents.add_non_overlapping_group(
group, position_gen=lambda: np.random.uniform(-area_size, area_size, 2))
benchmark(agent_agent_block_list, agents.array)
assert True
|
gpl-3.0
| 4,276,068,190,012,276,700
| 36.545455
| 80
| 0.684423
| false
| 3.580925
| false
| false
| false
|
myshkov/bnn-analysis
|
models/bbb_sampler.py
|
1
|
4851
|
"""
This module implements a Bayes By Backprop-based sampler for NNs.
http://jmlr.org/proceedings/papers/v37/blundell15.pdf
"""
import numpy as np
from keras.models import Sequential
from keras.layers.core import Activation
from keras import backend as K
from keras.engine.topology import Layer
from sampler import Sampler, SampleStats
class BBBSampler(Sampler):
"""
BBB sampler for NNs.
"""
def __init__(self, model=None, batch_size=None, n_epochs=None, **kwargs):
"""
Creates a new BBBSampler object.
"""
super().__init__(**kwargs)
self.sampler_type = 'BBB'
self.model = model
self.batch_size = batch_size if batch_size is not None else self.train_set_size
self.n_epochs = n_epochs
def __repr__(self):
s = super().__repr__()
return s
def _fit(self, n_epochs=None, verbose=0, **kwargs):
""" Fits the model before sampling. """
n_epochs = n_epochs if n_epochs is not None else self.n_epochs
self.model.fit(self.train_x, self.train_y, batch_size=self.batch_size, nb_epoch=n_epochs,
verbose=verbose)
def _sample_predictive(self, test_x=None, return_stats=False, **kwargs):
""" Draws a new sample from the model. """
sample = self.model.predict(test_x, batch_size=self.batch_size)
stats = None
if return_stats:
stats = SampleStats(time=self._running_time())
return [sample], [stats]
@classmethod
def model_from_description(cls, layers, noise_std, weights_std, batch_size, train_size):
""" Creates a BBB model from the specified parameters. """
n_batches = int(train_size / batch_size)
step = .01
class BBBLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super().__init__(**kwargs)
def build(self, input_shape):
input_dim = input_shape[1]
shape = [input_dim, self.output_dim]
eps_std = step
# weights
self.eps_w = K.random_normal([input_shape[0]] + shape, std=eps_std)
self.mu_w = K.variable(np.random.normal(0., 10. * step, size=shape), name='mu_w')
self.rho_w = K.variable(np.random.normal(0., 10. * step, size=shape), name='rho_w')
self.W = self.mu_w + self.eps_w * K.log(1.0 + K.exp(self.rho_w))
self.eps_b = K.random_normal([self.output_dim], std=eps_std)
self.mu_b = K.variable(np.random.normal(0., 10. * step, size=[self.output_dim]), name='mu_b')
self.rho_b = K.variable(np.random.normal(0., 10. * step, size=[self.output_dim]), name='rho_b')
self.b = self.mu_b + self.eps_b * K.log(1.0 + K.exp(self.rho_b))
self.trainable_weights = [self.mu_w, self.rho_w, self.mu_b, self.rho_b]
def call(self, x, mask=None):
return K.squeeze(K.batch_dot(K.expand_dims(x, dim=1), self.W), axis=1) + self.b
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.output_dim)
def log_gaussian(x, mean, std):
return -K.log(std) - (x - mean) ** 2 / (2. * std ** 2)
def sigma_from_rho(rho):
return K.log(1. + K.exp(rho)) / step
def variational_objective(model, noise_std, weights_std, batch_size, nb_batches):
def loss(y, fx):
log_pw = K.variable(0.)
log_qw = K.variable(0.)
for layer in model.layers:
if type(layer) is BBBLayer:
log_pw += K.sum(log_gaussian(layer.W, 0., weights_std))
log_pw += K.sum(log_gaussian(layer.b, 0., weights_std))
log_qw += K.sum(log_gaussian(layer.W, layer.mu_w, sigma_from_rho(layer.rho_w)))
log_qw += K.sum(log_gaussian(layer.b, layer.mu_b, sigma_from_rho(layer.rho_b)))
log_likelihood = K.sum(log_gaussian(y, fx, noise_std))
return K.sum((log_qw - log_pw) / nb_batches - log_likelihood) / batch_size
return loss
model = Sequential()
in_shape = [batch_size, layers[0][0]]
# input
model.add(BBBLayer(layers[1][0], batch_input_shape=in_shape))
model.add(Activation('relu'))
# hidden layers
for l in range(2, len(layers) - 1):
model.add(BBBLayer(layers[l - 1][0]))
model.add(Activation('relu'))
# output layer
model.add(BBBLayer(1))
loss = variational_objective(model, noise_std, weights_std, batch_size, n_batches)
model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
return model
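# Hedged sketch of building the variational model: the layer spec is a list of
# (width,) tuples, since model_from_description only reads element [0] of each
# entry; all numeric values below are made up.
#
#     layers = [(1,), (50,), (50,), (1,)]   # input, two hidden layers, output
#     model = BBBSampler.model_from_description(
#         layers, noise_std=0.1, weights_std=1.0, batch_size=32, train_size=320)
#     # The compiled Keras model is then passed in via BBBSampler(model=model),
#     # along with whatever training data the Sampler base class expects.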
|
mit
| -8,254,993,371,662,850,000
| 35.201493
| 111
| 0.556999
| false
| 3.433121
| false
| false
| false
|
felixbr/nosql-rest-preprocessor
|
nosql_rest_preprocessor/models.py
|
1
|
5131
|
from __future__ import absolute_import, unicode_literals, print_function, division
from nosql_rest_preprocessor import exceptions
from nosql_rest_preprocessor.utils import non_mutating
class BaseModel(object):
required_attributes = set()
optional_attributes = None
immutable_attributes = set()
private_attributes = set()
sub_models = {}
resolved_attributes = {}
@classmethod
def validate(cls, obj):
cls._check_required_attributes(obj)
cls._check_allowed_attributes(obj)
# recurse for sub models
for attr, sub_model in cls.sub_models.items():
if attr in obj.keys():
sub_model.validate(obj[attr])
return obj
@classmethod
@non_mutating
def prepare_response(cls, obj):
# remove non-public attrs
for attr in cls.private_attributes:
obj.pop(attr, None)
# recurse for sub models
for attr, sub_model in cls.sub_models.items():
if attr in obj.keys():
obj[attr] = sub_model.prepare_response(obj[attr])
return obj
@classmethod
def merge_updated(cls, db_obj, new_obj):
cls.validate(new_obj)
merged_obj = {}
# check if previously present immutable attributes should be deleted
for key in cls.immutable_attributes:
if key in db_obj and key not in new_obj:
raise exceptions.ChangingImmutableAttributeError()
# copy attributes into merged_obj
for key, value in new_obj.items():
cls._check_immutable_attrs_on_update(key, value, db_obj)
if key in cls.resolved_attributes and isinstance(value, dict): # ignore resolved attributes in update
merged_obj[key] = db_obj[key]
else:
merged_obj[key] = value
# recurse for sub models
for attr, sub_model in cls.sub_models.items():
merged_obj[attr] = sub_model.merge_updated(db_obj[attr], new_obj[attr])
return merged_obj
@classmethod
def _check_immutable_attrs_on_update(cls, key, value, db_obj):
# check if immutable attributes should be changed
if key in cls.immutable_attributes:
if db_obj[key] != value:
raise exceptions.ChangingImmutableAttributeError()
@classmethod
def _check_required_attributes(cls, obj):
for attr in cls.required_attributes:
if isinstance(attr, tuple):
set_wanted = set(attr[1])
set_contained = set(obj.keys())
if attr[0] == 'one_of':
if len(set_wanted & set_contained) < 1:
raise exceptions.ValidationError()
elif attr[0] == 'either_of':
if len(set_wanted & set_contained) != 1:
raise exceptions.ValidationError()
else:
raise exceptions.ConfigurationError()
else:
if attr not in obj.keys():
raise exceptions.ValidationError()
@classmethod
def _check_allowed_attributes(cls, obj):
if cls.optional_attributes is not None:
required = cls._required_attributes()
for attr in obj.keys():
if attr in required:
continue
allowed = False
for opt_attr in cls.optional_attributes:
if attr == opt_attr:
allowed = True
break
elif isinstance(opt_attr, tuple):
if opt_attr[0] == 'all_of':
if attr in opt_attr[1]: # if one of these is in obj.keys()...
if not set(opt_attr[1]).issubset(obj.keys()): # ...all of them have to be there
raise exceptions.ValidationError()
else:
allowed = True
break
elif opt_attr[0] == 'either_of':
if attr in opt_attr[1]: # if one of these is in obj.keys()...
if next((key for key in opt_attr[1] if key != attr and key in obj.keys()), None): # ...no other key may be present in obj.keys()
raise exceptions.ValidationError()
else:
allowed = True
break
else:
raise exceptions.ConfigurationError()
if not allowed: # if we haven't found attr anywhere in cls.optional_attributes
raise exceptions.ValidationError()
@classmethod
def _required_attributes(cls):
required = set()
for attr in cls.required_attributes:
if isinstance(attr, tuple):
required = required | set(attr[1])
else:
required.add(attr)
return required
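# Minimal subclass sketch (field names are invented) showing how the hooks
# above combine:
#
#     class UserModel(BaseModel):
#         required_attributes = {'email', ('either_of', ('phone', 'mobile'))}
#         immutable_attributes = {'email'}
#         private_attributes = {'password_hash'}
#
#     UserModel.validate({'email': 'a@b.c', 'phone': '123'})        # passes
#     UserModel.prepare_response({'email': 'a@b.c',
#                                 'password_hash': 'x'})            # strips password_hash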
|
mit
| 7,771,158,724,342,860,000
| 33.213333
| 161
| 0.524069
| false
| 4.80881
| false
| false
| false
|
math-a3k/django-ai
|
tests/test_models/migrations/0011_add_is_inferred_and_minor_tweaks.py
|
1
|
2196
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-20 15:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_models', '0010_myunsupervisedlearningtechnique'),
]
operations = [
migrations.AddField(
model_name='mystatisticalmodel',
name='is_inferred',
field=models.BooleanField(
default=False, verbose_name='Is Inferred?'),
),
migrations.AddField(
model_name='mysupervisedlearningtechnique',
name='is_inferred',
field=models.BooleanField(
default=False, verbose_name='Is Inferred?'),
),
migrations.AddField(
model_name='myunsupervisedlearningtechnique',
name='is_inferred',
field=models.BooleanField(
default=False, verbose_name='Is Inferred?'),
),
migrations.AlterField(
model_name='mystatisticalmodel',
name='sm_type',
field=models.SmallIntegerField(blank=True, choices=[
(0, 'General / System'),
(1, 'Classification'),
(2, 'Regression')],
default=0, null=True,
verbose_name='Statistical Technique Type'),
),
migrations.AlterField(
model_name='mysupervisedlearningtechnique',
name='sm_type',
field=models.SmallIntegerField(blank=True, choices=[
(0, 'General / System'),
(1, 'Classification'),
(2, 'Regression')],
default=0, null=True,
verbose_name='Statistical Technique Type'),
),
migrations.AlterField(
model_name='myunsupervisedlearningtechnique',
name='sm_type',
field=models.SmallIntegerField(blank=True, choices=[
(0, 'General / System'),
(1, 'Classification'),
(2, 'Regression')],
default=0, null=True,
verbose_name='Statistical Technique Type'),
),
]
|
lgpl-3.0
| -7,274,611,177,119,370
| 33.857143
| 64
| 0.536885
| false
| 4.565489
| false
| false
| false
|
Makeystreet/makeystreet
|
woot/apps/catalog/views/review.py
|
1
|
5983
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.utils import timezone
from woot.apps.catalog.forms import CreateProductReviewForm,\
CreateShopReviewForm, CreateSpaceReviewForm
from woot.apps.catalog.models.core import Product, Shop, Space, NewProduct
from woot.apps.catalog.models.review import ProductReview, ShopReview,\
SpaceReview
from .helper import get_user_details_json
static_blob = settings.STATIC_BLOB
def all_reviews(request):
product_reviews = ProductReview.objects.all()
shop_reviews = ShopReview.objects.all()
space_reviews = SpaceReview.objects.all()
context = {
'static_blob': static_blob,
'user_details': get_user_details_json(request),
'product_reviews': product_reviews,
'shop_reviews': shop_reviews,
'space_reviews': space_reviews,
}
return render(request, 'catalog/all_reviews.html', context)
def store_review(request, review_id):
try:
user_details = get_user_details_json(request)
review = ShopReview.objects.get(id=review_id)
review.upvotes = review.voteshopreview_set.filter(vote=True)
context = {
'static_blob': static_blob,
'user_details': user_details,
'review': review,
}
return render(request, 'catalog/store_review.html', context)
except ShopReview.DoesNotExist:
raise Http404
def product_review(request, review_id):
try:
user_details = get_user_details_json(request)
review = ProductReview.objects.get(id=review_id)
review.upvotes = review.voteproductreview_set.filter(vote=True)
context = {
'static_blob': static_blob,
'user_details': user_details,
'review': review,
}
return render(request, 'catalog/product_review.html', context)
except ProductReview.DoesNotExist:
raise Http404
def space_review(request, review_id):
try:
user_details = get_user_details_json(request)
review = SpaceReview.objects.get(id=review_id)
review.upvotes = review.votespacereview_set.filter(vote=True)
context = {
'static_blob': static_blob,
'user_details': user_details,
'review': review,
}
return render(request, 'catalog/space_review.html', context)
except SpaceReview.DoesNotExist:
raise Http404
def create_review(request):
if request.method == "POST":
if request.POST.get('val_type', '') == 'PART':
form = CreateProductReviewForm(request.POST)
if form.is_valid():
r = ProductReview()
r.title = form.cleaned_data['val_title']
r.review = form.cleaned_data['val_review']
r.user = request.user
r.rating = form.cleaned_data['val_rating']
r.added_time = timezone.now()
product_data_split = form.cleaned_data['val_part'].split('_')
product_type = product_data_split[0]
product_id = int(product_data_split[1])
if product_type == 'old':
product = Product.objects.get(id=product_id)
r.product = product
elif product_type == 'new':
product = NewProduct.objects.get(id=product_id)
r.product = product
r.save()
return HttpResponseRedirect(reverse('catalog:all_reviews'))
else:
print(form.errors)
elif request.POST.get('val_type', '') == 'SHOP':
form = CreateShopReviewForm(request.POST)
if form.is_valid():
r = ShopReview()
r.title = form.cleaned_data['val_title']
r.review = form.cleaned_data['val_review']
r.user = request.user
r.rating = form.cleaned_data['val_rating']
r.added_time = timezone.now()
shop_data_split = form.cleaned_data['val_shop'].split('_')
shop_type = shop_data_split[0]
shop_id = int(shop_data_split[1])
if shop_type == 'old':
shop = Shop.objects.get(id=shop_id)
r.shop = shop
elif shop_type == 'new':
shop = NewProduct.objects.get(id=shop_id)
r.shop = shop
r.save()
return HttpResponseRedirect(reverse('catalog:all_reviews'))
else:
print(form.errors)
elif request.POST.get('val_type', '') == 'SPACE':
form = CreateSpaceReviewForm(request.POST)
if form.is_valid():
r = SpaceReview()
r.title = form.cleaned_data['val_title']
r.review = form.cleaned_data['val_review']
r.user = request.user
r.rating = form.cleaned_data['val_rating']
r.added_time = timezone.now()
space_data_split = form.cleaned_data['val_space'].split('_')
space_type = space_data_split[0]
space_id = int(space_data_split[1])
if space_type == 'old':
space = Space.objects.get(id=space_id)
r.space = space
elif space_type == 'new':
space = NewProduct.objects.get(id=space_id)
r.space = space
r.save()
return HttpResponseRedirect(reverse('catalog:all_reviews'))
else:
print(form.errors)
else:
pass
context = {
'static_blob': static_blob,
'user_details': get_user_details_json(request),
}
return render(request, 'catalog/create_product_review.html', context)
|
apache-2.0
| 661,690,713,215,756,200
| 33.188571
| 77
| 0.563095
| false
| 4.131906
| false
| false
| false
|
semplea/characters-meta
|
python/alchemy/examples/alchemy_vision_v1.py
|
1
|
1466
|
import json
from os.path import join, dirname
from watson_developer_cloud import AlchemyVisionV1
alchemy_vision = AlchemyVisionV1(api_key='c851400276c1acbd020210847f8677e6d1577c26')
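# Walks through the AlchemyVision endpoints used below: face recognition, image keyword tagging, scene-text extraction, and image-link extraction; each response is printed as indented JSON.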
# Face recognition
with open(join(dirname(__file__), '../resources/face.jpg'), 'rb') as image_file:
print(json.dumps(alchemy_vision.recognize_faces(image_file, knowledge_graph=True), indent=2))
face_url = 'https://upload.wikimedia.org/wikipedia/commons/9/9d/Barack_Obama.jpg'
print(json.dumps(alchemy_vision.recognize_faces(image_url=face_url, knowledge_graph=True), indent=2))
# Image tagging
with open(join(dirname(__file__), '../resources/test.jpg'), 'rb') as image_file:
print(json.dumps(alchemy_vision.get_image_keywords(image_file, knowledge_graph=True,
force_show_all=True), indent=2))
# Text recognition
with open(join(dirname(__file__), '../resources/text.png'), 'rb') as image_file:
print(json.dumps(alchemy_vision.get_image_scene_text(image_file), indent=2))
print(json.dumps(alchemy_vision.get_image_keywords(
image_url='https://upload.wikimedia.org/wikipedia/commons/8/81/Morris-Chair-Ironwood.jpg'), indent=2))
# Image link extraction
print(json.dumps(alchemy_vision.get_image_links(url='http://www.zillow.com/'), indent=2))
with open(join(dirname(__file__), '../resources/example.html'), 'r') as webpage:
print(json.dumps(alchemy_vision.get_image_links(html=webpage.read()), indent=2))
|
mit
| 1,583,828,267,179,628,300
| 47.866667
| 106
| 0.71487
| false
| 3.073375
| false
| true
| false
|
mmclenna/engine
|
sky/tools/create_ios_sdk.py
|
1
|
1820
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import shutil
import sys
import os
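# Builds the combined Flutter iOS SDK: copies the device build as the base, adds the simulator-only tools, and runs lipo to merge the device and simulator Flutter dylibs into one universal binary.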
def main():
parser = argparse.ArgumentParser(description='Creates the Flutter iOS SDK')
parser.add_argument('--dst', type=str, required=True)
parser.add_argument('--device-out-dir', type=str, required=True)
parser.add_argument('--simulator-out-dir', type=str, required=True)
args = parser.parse_args()
device_sdk = os.path.join(args.device_out_dir, 'Flutter')
simulator_sdk = os.path.join(args.simulator_out_dir, 'Flutter')
flutter_framework_binary = 'Flutter.framework/Flutter'
device_dylib = os.path.join(args.device_out_dir,
flutter_framework_binary)
simulator_dylib = os.path.join(args.simulator_out_dir,
flutter_framework_binary)
if not os.path.isdir(device_sdk):
print 'Cannot find iOS device SDK at', device_sdk
return 1
if not os.path.isdir(simulator_sdk):
print 'Cannot find iOS simulator SDK at', simulator_sdk
return 1
if not os.path.isfile(device_dylib):
print 'Cannot find iOS device dylib at', device_dylib
return 1
if not os.path.isfile(simulator_dylib):
    print 'Cannot find iOS simulator dylib at', simulator_dylib
return 1
shutil.rmtree(args.dst, True)
shutil.copytree(device_sdk, args.dst)
sim_tools = 'Tools/iphonesimulator'
shutil.copytree(os.path.join(simulator_sdk, sim_tools),
os.path.join(args.dst, sim_tools))
subprocess.call([
'lipo',
device_dylib,
simulator_dylib,
'-create',
'-output',
os.path.join(args.dst, 'Tools/common/Flutter.framework/Flutter')
])
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
| 4,522,966,620,628,158,000
| 26.575758
| 77
| 0.697802
| false
| 3.427495
| false
| false
| false
|