from abc import ABCMeta, abstractmethod
from .tilfile import *
from .utils import *
RESERVED_BYTE = 0xFF
TYPE_BASE_MASK = 0x0F
TYPE_FLAGS_MASK = 0x30
TYPE_MODIF_MASK = 0xC0
TYPE_FULL_MASK = TYPE_BASE_MASK | TYPE_FLAGS_MASK
BT_UNK = 0x00
BT_VOID = 0x01
BTMT_SIZE0 = 0x00
BTMT_SIZE12 = 0x10
BTMT_SIZE48 = 0x20
BTMT_SIZE128 = 0x30
BT_INT8 = 0x02
BT_INT16 = 0x03
BT_INT32 = 0x04
BT_INT64 = 0x05
BT_INT128 = 0x06
BT_INT = 0x07
BTMT_UNKSIGN = 0x00
BTMT_SIGNED = 0x10
BTMT_USIGNED = 0x20
BTMT_UNSIGNED = BTMT_USIGNED
BTMT_CHAR = 0x30
BT_BOOL = 0x08
BTMT_DEFBOOL = 0x00
BTMT_BOOL1 = 0x10
BTMT_BOOL2 = 0x20
BTMT_BOOL4 = 0x30
BT_FLOAT = 0x09
BTMT_FLOAT = 0x00
BTMT_DOUBLE = 0x10
BTMT_LNGDBL = 0x20
BTMT_SPECFLT = 0x30
BT_LAST_BASIC = BT_FLOAT
BT_PTR = 0x0A
BTMT_DEFPTR = 0x00
BTMT_NEAR = 0x10
BTMT_FAR = 0x20
BTMT_CLOSURE = 0x30
BT_ARRAY = 0x0B
BTMT_NONBASED = 0x10
BTMT_ARRESERV = 0x20
BT_FUNC = 0x0C
BTMT_DEFCALL = 0x00
BTMT_NEARCALL = 0x10
BTMT_FARCALL = 0x20
BTMT_INTCALL = 0x30
BT_COMPLEX = 0x0D
BTMT_STRUCT = 0x00
BTMT_UNION = 0x10
BTMT_ENUM = 0x20
BTMT_TYPEDEF = 0x30
BT_BITFIELD = 0x0E
BTMT_BFLDI8 = 0x00
BTMT_BFLDI16 = 0x10
BTMT_BFLDI32 = 0x20
BTMT_BFLDI64 = 0x30
BT_RESERVED = 0x0F
BTM_CONST = 0x40
BTM_VOLATILE = 0x80
BTE_SIZE_MASK = 0x07
BTE_RESERVED = 0x08
BTE_BITFIELD = 0x10
BTE_OUT_MASK = 0x60
BTE_HEX = 0x00
BTE_CHAR = 0x20
BTE_SDEC = 0x40
BTE_UDEC = 0x60
BTE_ALWAYS = 0x80
BTF_STRUCT = BT_COMPLEX | BTMT_STRUCT
BTF_UNION = BT_COMPLEX | BTMT_UNION
BTF_ENUM = BT_COMPLEX | BTMT_ENUM
BTF_TYPEDEF = BT_COMPLEX | BTMT_TYPEDEF
TAH_BYTE = 0xFE
FAH_BYTE = 0xFF
TAH_HASATTRS = 0x0010
TAUDT_UNALIGNED = 0x0040
TAUDT_MSSTRUCT = 0x0020
TAUDT_CPPOBJ = 0x0080
TAFLD_BASECLASS = 0x0020
TAFLD_UNALIGNED = 0x0040
TAFLD_VIRTBASE = 0x0080
TAPTR_PTR32 = 0x0020
TAPTR_PTR64 = 0x0040
TAPTR_RESTRICT = 0x0060
TAENUM_64BIT = 0x0020
CM_MASK = 0x03
CM_UNKNOWN = 0x00
CM_N8_F16 = 0x01
CM_N64 = 0x01
CM_N16_F32 = 0x02
CM_N32_F48 = 0x03
CM_M_MASK = 0x0C
CM_M_MN = 0x00
CM_M_FF = 0x04
CM_M_NF = 0x08
CM_M_FN = 0x0C
CM_CC_MASK = 0xF0
CM_CC_INVALID = 0x00
CM_CC_UNKNOWN = 0x10
CM_CC_VOIDARG = 0x20
CM_CC_CDECL = 0x30
CM_CC_ELLIPSIS = 0x40
CM_CC_STDCALL = 0x50
CM_CC_PASCAL = 0x60
CM_CC_FASTCALL = 0x70
CM_CC_THISCALL = 0x80
CM_CC_MANUAL = 0x90
CM_CC_SPOILED = 0xA0
CM_CC_RESERVE4 = 0xB0
CM_CC_RESERVE3 = 0xC0
CM_CC_SPECIALE = 0xD0
CM_CC_SPECIALP = 0xE0
CM_CC_SPECIAL = 0xF0
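# Illustrative sketch (added example, not part of the original code): a declared
# type byte packs a base type, flag bits and modifier bits, which the masks above
# pull apart. For instance, a "const struct" declaration byte decomposes as:
_example_decl = BTF_STRUCT | BTM_CONST  # 0x4D
assert _example_decl & TYPE_BASE_MASK == BT_COMPLEX
assert _example_decl & TYPE_FLAGS_MASK == BTMT_STRUCT
assert _example_decl & TYPE_MODIF_MASK == BTM_CONST
del _example_decl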
def print_type(tinfo, name):
typ = tinfo.get_decl_type()
base = typ & TYPE_BASE_MASK
flags = typ & TYPE_FLAGS_MASK
mod = typ & TYPE_MODIF_MASK
res = ""
if mod & BTM_CONST:
res += "const "
if mod & BTM_VOLATILE:
res += "volatile "
if base <= BT_LAST_BASIC:
if base == BT_UNK:
if flags == BTMT_SIZE12:
return "_WORD"
elif flags == BTMT_SIZE48:
return "_QWORD"
elif flags == BTMT_SIZE128:
return "_UNKNOWN"
if base == BT_VOID:
if flags == BTMT_SIZE12:
return "_BYTE"
elif flags == BTMT_SIZE48:
return "_DWORD"
elif flags == BTMT_SIZE128:
return "_OWORD"
else:
return "void"
elif base <= BT_INT:
# BTMT_CHAR (0x30) sets both sign bits, so it must be checked before the sign flags
if flags == BTMT_CHAR:
return res + "char"
elif flags & BTMT_SIGNED:
res += "signed "
elif flags & BTMT_UNSIGNED:
res += "unsigned "
if base == BT_INT8:
return res + "__int8"
elif base == BT_INT16:
return res + "__int16"
elif base == BT_INT32:
return res + "__int32"
elif base == BT_INT64:
return res + "__int64"
elif base == BT_INT128:
return res + "__int128"
elif base == BT_INT:
return res + "int"
elif base == BT_BOOL:
if flags == BTMT_BOOL1:
return "_BOOL1"
elif flags == BTMT_BOOL2:
#return "_BOOL8" if inf.is_64bit() else "_BOOL2"
return "_BOOL2"
elif flags == BTMT_BOOL4:
return "_BOOL4"
else:
return "bool"
elif base == BT_FLOAT:
if flags == BTMT_FLOAT:
return "float"
elif flags == BTMT_DOUBLE:
return "double"
elif flags == BTMT_LNGDBL:
return "long double"
elif flags == BTMT_SPECFLT:
return "short float"
# return "_TBYTE" if ph.flags & PR_USE_TBYTE else "short float"
elif base > BT_LAST_BASIC:
details = tinfo.get_type_details()
return details.print_type(name)
class BaseTypeData:
__metaclass__ = ABCMeta
@abstractmethod
def deserialize(self, til, typestr, fields, fieldcmts):
"""Deserialize a type string into a TypeInfo object.
Args:
til (TIL): Type info library
typestr (TypeString): Type string
fields (TypeString): List of field names
fieldcmts (TypeString): List of field comments
Returns:
BaseTypeData: Deserialized type data
"""
raise NotImplementedError()
@abstractmethod
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
"""Serialize a TypeInfo object into a type string.
Args:
til (TIL): Type info library
tinfo (TypeInfo): Input TypeInfo object
typestr (TypeString): Type string
fields (TypeString): List of field names
fieldcmts (TypeString): List of field comments
"""
raise NotImplementedError()
@abstractmethod
def print_type(self, name):
"""Print this type using name as the type name.
Args:
name (str): Type name
Returns:
str: Printed type
"""
raise NotImplementedError()
class PointerTypeData(BaseTypeData):
"""Representation of ptr_type_data_t"""
def __init__(self):
self.obj_type = None
self.closure = None
self.based_ptr_size = 0
self.taptr_bits = 0
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
flags = typ & TYPE_FLAGS_MASK
if flags == BTMT_CLOSURE:
ptr_size = typestr.read_db()
# Next byte MUST be RESERVED_BYTE
if ptr_size == RESERVED_BYTE:
# and after it ::BT_FUNC
self.closure = til.deserialize(typestr, fields, fieldcmts)
else:
self.based_ptr_size = typestr.read_db()
self.taptr_bits = typestr.read_type_attr()
self.obj_type = til.deserialize(typestr, fields, fieldcmts)
return self
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
# append type byte
typestr.append_db(typ)
til.serialize(self.obj_type, typestr)
def print_type(self, name):
obj_type = self.obj_type
if obj_type.is_func():
details = obj_type.get_type_details()
ret_type = print_type(details.rettype, "")
args = ""
for n, arg in enumerate(details.args):
args += print_type(arg.type, "")
if arg.name:
args += " " + arg.name
if n != len(details.args) - 1:
args += ", "
return "{} (* {})({})".format(ret_type, name, args)
base_type = print_type(obj_type, "")
if name:
return "{}* {}".format(base_type, name)
return "{}*".format(base_type)
class ArrayTypeData(BaseTypeData):
"""Representation of array_type_data_t"""
def __init__(self):
self.elem_type = None
self.base = 0
self.nelems = 0
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
flags = typ & TYPE_FLAGS_MASK
if flags & BTMT_NONBASED:
self.base = 0
self.nelems = typestr.read_dt()
else:
_, self.nelems, self.base = typestr.read_da()
self.elem_type = til.deserialize(typestr, fields, fieldcmts)
return self
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
flags = typ & TYPE_FLAGS_MASK
typestr.append_db(typ)
if flags & BTMT_NONBASED:
typestr.append_dt(self.nelems)
else:
typestr.append_da(self.nelems, self.base)
til.serialize(self.elem_type, typestr)
def print_type(self, name):
elem_type = self.elem_type
array = "[{}]".format(self.nelems)
while elem_type.is_array():
details = elem_type.get_type_details()
array += "[{}]".format(details.nelems)
elem_type = details.elem_type
base_type = print_type(elem_type, "")
return "{} {}{}".format(base_type, name, array)
ALOC_NONE = 0
ALOC_STACK = 1
ALOC_DIST = 2
ALOC_REG1 = 3
ALOC_REG2 = 4
ALOC_RREL = 5
ALOC_STATIC = 6
ALOC_CUSTOM = 7
class ArgLoc:
def __init__(self):
self.type = 0
class RegInfo:
def __init__(self):
self.reg = 0
self.size = 0
class FuncArg:
def __init__(self):
self.argloc = None # argloc_t
self.name = ""
self.cmt = ""
self.type = None # tinfo_t
self.flags = 0
class FuncTypeData(BaseTypeData):
def __init__(self):
self.args = []
self.flags = 0 # FTI_*
self.rettype = None # tinfo_t
self.retloc = None # argloc_t
self.stkargs = None # uval_t
self.spoiled = []  # reginfovec_t
self.cc = 0
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
flags = typ & TYPE_FLAGS_MASK
self.extract_spoiled(typestr)
self.cc = typestr.read_db()
self.flags |= 4 * flags
typestr.read_type_attr()
self.rettype = til.deserialize(typestr, fields, fieldcmts)
cc = self.cc & CM_CC_MASK
if cc > CM_CC_SPECIALE:
if (self.rettype.get_decl_type() & TYPE_FULL_MASK) != BT_VOID:
self.retloc = self.extract_argloc(typestr)
if cc != CM_CC_VOIDARG:
N = typestr.read_dt()
if N > 256:
raise ValueError("Invalid arg count!")
if N > 0:
for n in range(N):
arg = FuncArg()
if fields is not None:
arg.name = fields.read_pstring().decode("ascii")
if fieldcmts is not None:
arg.cmt = fieldcmts.read_pstring().decode("ascii")
fah = typestr.peek_db()
if fah == FAH_BYTE:
typestr.seek(1)
arg.flags = typestr.read_de()
arg.type = til.deserialize(typestr, fields, fieldcmts)
if cc > CM_CC_SPECIALE:
arg.argloc = self.extract_argloc(typestr)
self.args.append(arg)
return self
def extract_spoiled(self, typestr):
# TODO: NOT FULLY TESTED
cm = typestr.peek_db()
if (cm & CM_CC_MASK) == CM_CC_SPOILED:
while True:
typestr.seek(1)
if (cm & ~CM_CC_MASK) == 15:
f = 2 * (typestr.read_db() & 0x1f)
else:
nspoiled = cm & ~CM_CC_MASK
for n in range(nspoiled):
reginfo = RegInfo()
b = typestr.read_db()
if bool(b & 0x80):
size = typestr.read_db()
reg = b & 0x7f
else:
size = (b >> 4) + 1
reg = (b & 0xf) - 1
reginfo.size = size
reginfo.reg = reg
self.spoiled.append(reginfo)
f = 1
cm = typestr.peek_db()
self.flags |= f
if (cm & CM_CC_MASK) != CM_CC_SPOILED:
break
else:
self.flags = 0
def extract_argloc(self, typestr):
# TODO: NOT FULLY TESTED
argloc = ArgLoc()
a = typestr.read_db()
if a == 0xff:
argloc.type = typestr.read_dt()
if argloc.type == ALOC_STACK:
# fills sval
typestr.read_de() # sval
elif argloc.type == ALOC_DIST:
pass
elif argloc.type in (ALOC_REG1, ALOC_REG2):
# fills reginfo (offset and register ndx)
typestr.read_dt() # reginfo
typestr.read_dt() # reginfo << 16
elif argloc.type == ALOC_RREL:
# fills rrel
typestr.read_dt() # rrel_t->reg
typestr.read_de() # rrel_t->off
elif argloc.type == ALOC_STATIC:
# fills sval
typestr.read_de()
else:
b = (a & 0x7f) - 1
if b <= 0x80:
if b & 0x7f:
# argloc.type = ALLOC_REG1
# argloc.sval = b
pass
else:
# argloc.type = ALLOC_STACK
# argloc.sval = 0
pass
else:
c = typestr.read_db() - 1
if c != -1:
# argloc.type = ALLOC_REG2
# argloc.reginfo = b | (c << 16)
pass
return argloc
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
typestr.append_db(typ)
typestr.append_db(self.cc)
til.serialize(self.rettype, typestr)
N = len(self.args)
typestr.append_dt(N)
for arg in self.args:
til.serialize(arg.type, typestr)
def print_type(self, name):
res = print_type(self.rettype, "") + " "
cc = self.cc & CM_CC_MASK
if cc == CM_CC_INVALID:
res += "__bad_cc "
elif cc == CM_CC_CDECL:
res += "__cdecl "
elif cc == CM_CC_STDCALL:
res += "__stdcall "
elif cc == CM_CC_PASCAL:
res += "__pascal "
elif cc == CM_CC_FASTCALL:
res += "__fastcall "
elif cc == CM_CC_THISCALL:
res += "__thiscall "
elif cc in (CM_CC_SPECIALE, CM_CC_SPECIAL):
res += "__usercall "
elif cc == CM_CC_SPECIALP:
res += "__userpurge "
res += name
args = ""
for n, arg in enumerate(self.args):
args += print_type(arg.type, "")
if arg.name:
args += " " + arg.name
if n != len(self.args) - 1:
args += ", "
res += "(" + args + ")"
return res
class UdtMember:
def __init__(self):
self.offset = 0
self.size = 0
self.name = None # qstring
self.cmt = None # qstring
self.type = None # tinfo_t
self.effalign = 0
self.tafld_bits = 0
self.fda = 0
class UdtTypeData(BaseTypeData):
"""Representation of udt_type_data_t"""
def __init__(self):
self.members = []
self.total_size = 0
self.unpadded_size = 0
self.effalign = 0
self.taudt_bits = 0
self.sda = 0
self.pack = 0
self.is_union = False
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
self.is_union = (typ & TYPE_FULL_MASK) == BTF_UNION
N = typestr.read_complex_n()
if N == 0:
raise ValueError("Should have been parsed as typedef")
alpow = N & 7
mcnt = N >> 3
self.pack = alpow
attr = typestr.read_sdacl_attr()
if attr is not None:
self.taudt_bits = attr
for n in range(mcnt):
member = UdtMember()
if fields is not None:
member.name = fields.read_pstring().decode("ascii")
if fieldcmts is not None:
member.cmt = fieldcmts.read_pstring()
member.type = til.deserialize(typestr, fields, fieldcmts)
attr = typestr.read_sdacl_attr()
if attr is not None:
member.tafld_bits = attr
member.fda = attr & 0xf
self.members.append(member)
return self
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
typestr.append_db(typ)
mcnt = len(self.members)
alpow = self.pack
N = mcnt << 3 | alpow & 7
typestr.append_complex_n(N, False)
for member in self.members:
til.serialize(member.type, typestr)
if fields is not None and member.name:
fields.append_pstring(member.name)
if fieldcmts is not None and member.cmt:
fieldcmts.append_pstring(member.cmt)
def print_type(self, name):
res = "union " if self.is_union else "struct "
if self.taudt_bits & TAUDT_MSSTRUCT:
res += "__attribute__((msstruct)) "
if self.taudt_bits & TAUDT_CPPOBJ:
res += "__cppobj "
res += name + " "
for i, member in enumerate(self.members):
if member.tafld_bits & TAFLD_BASECLASS:
res += ": " if i == 0 else ", "
res += print_type(member.type, "")
res += "\n"
res += "{\n"
for member in self.members:
if member.tafld_bits & TAFLD_BASECLASS:
continue
membertype = member.type
res += " "
if membertype.is_ptr() or membertype.is_array():
field = print_type(member.type, member.name)
res += "{};\n".format(field)
elif membertype.is_bitfield():
details = membertype.get_type_details()
flags = membertype.get_type_flags()
if flags == BTMT_BFLDI8:
res += "__int8"
elif flags == BTMT_BFLDI16:
res += "__int16"
elif flags == BTMT_BFLDI32:
res += "__int32"
elif flags == BTMT_BFLDI64:
res += "__int64"
res += " {} : {};\n".format(member.name, details.width)
else:
field = print_type(member.type, "")
res += "{} {};\n".format(field, member.name)
res += "}"
return res
class EnumMember:
def __init__(self):
self.name = None # qstring
self.cmt = None # qstring
self.value = 0
class EnumTypeData(BaseTypeData):
"""Representation of enum_type_data_t"""
def __init__(self):
self.group_sizes = [] # intvec_t (qvector<int>)
self.taenum_bits = 0
self.bte = 0
self.members = []
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
N = typestr.read_complex_n()
if N == 0:
raise ValueError("Should have been parsed as typedef")
attr = typestr.read_type_attr()
if attr is not None:
self.taenum_bits = attr
self.bte = typestr.read_db()
if not (self.bte & BTE_ALWAYS):
raise ValueError("Enum bte must have BTE_ALWAYS set")
mask = self.calc_mask(til)
delta = 0
hi = 0
for m in range(N):
member = EnumMember()
if fields is not None:
member.name = fields.read_pstring().decode("ascii")
if fieldcmts is not None:
member.cmt = fieldcmts.read_pstring().decode("ascii")
lo = typestr.read_de()
if self.taenum_bits & TAENUM_64BIT:
hi = typestr.read_de()
if self.bte & BTE_BITFIELD:
self.group_sizes.append(typestr.read_dt())
delta += to_s32((lo | (hi << 32)) & mask)
member.value = delta
self.members.append(member)
return self
def calc_mask(self, til):
emsize = self.bte & BTE_SIZE_MASK
if emsize != 0:
bytesize = 1 << (emsize - 1)
else:
bytesize = til.get_header().size_e
# elif (ph.flag >> 12) & 1:
# mask = ph.notify(ev_get_default_enum_size)
# else:
# mask = -1
bitsize = bytesize * 8
if bitsize < 64:
return (1 << bitsize) - 1
return 0xffffffffffffffff
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
typestr.append_db(typ)
N = len(self.members)
typestr.append_complex_n(N, False)
typestr.append_db(self.bte)
prev = 0
for member in self.members:
curr = member.value
delta = curr - prev
prev = curr
typestr.append_de(delta)
def print_type(self, name):
res = "enum {}\n".format(name)
res += "{\n"
out = self.bte & BTE_OUT_MASK
for member in self.members:
value = member.value
if out == BTE_HEX:
value = hex(member.value)
res += " {} = {},\n".format(member.name, value)
res += "}"
return res
class TypedefTypeData(BaseTypeData):
"""Representation of typedef_type_data_t"""
def __init__(self):
self.til = None
self.name = None
self.ordinal = 0
self.is_ordref = False
self.resolve = False
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
self.til = til
string = typestr.read_pstring()
if len(string) > 1 and string.startswith(b"#"):
self.is_ordref = True
self.name = string
else:
self.name = string.decode("ascii")
return self
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
typestr.append_db(typ)
typestr.append_pstring(self.name)
def print_type(self, name):
if name:
return "typedef {} {}".format(self.name, name)
else:
return "{}".format(self.name)
class BitfieldTypeData(BaseTypeData):
"""Representation of bitfield_type_data_t"""
def __init__(self):
self.nbytes = 0
self.width = 0
self.is_unsigned = False
def deserialize(self, til, typestr, fields, fieldcmts):
typ = typestr.read_db()
flags = typ & TYPE_FLAGS_MASK
dt = typestr.read_dt()
self.nbytes = 1 << (flags >> 4)
self.width = dt >> 1
self.is_unsigned = dt & 1
typestr.read_type_attr()
return self
def serialize(self, til, tinfo, typestr, fields, fieldcmts):
typ = tinfo.get_decl_type()
typestr.append_db(typ)
typestr.append_dt(self.width << 1 | self.is_unsigned)
def print_type(self, name):
return "{} : {}".format(name, self.width)
|
"""Level Playlist Service."""
from playlist.enums import PlaylistCategory, LevelPlaylistName
from playlist.models import Playlist
from playlist.service.abstract import AbstractPlaylistService
from playlist.utils import formulas
from playlist.utils.definition import ConstraintDefinition
class LevelService(AbstractPlaylistService):
"""Level Service."""
course_number_ranges = {
LevelPlaylistName.lower_division: range(1, 100),
LevelPlaylistName.upper_division: range(100, 200),
LevelPlaylistName.graduate: range(200, 300),
LevelPlaylistName.professional: range(300, 500),
LevelPlaylistName.freshmen_sophomore_seminars: (24, 39, 84,),
LevelPlaylistName.directed_group_study: (98, 198,),
LevelPlaylistName.supervised_independent_study: (99, 199,),
LevelPlaylistName.field_study: (197,),
}
def update(self):
"""Update playlist for specific course number ranges."""
for playlist_name in LevelPlaylistName:
course_number_range = self.course_number_ranges[playlist_name]
definition = ConstraintDefinition(
constraints=[formulas.course_integer_in(course_number_range)]
)
playlist, created = Playlist.objects.get_or_create(
name=str(playlist_name),
category=str(PlaylistCategory.level)
)
self._update(playlist, definition)
level_service = LevelService()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 13:35:33 2020
"""
# For demonstrating loss of significance: compare the algebraically simplified
# form 2x / (1 - x**2) with the cancellation-prone form 1/(1 - x) - 1/(1 + x).
x = 1e-1
for _ in range(25):
print((2 * x) / (1 - x ** 2), "......", (1 / (1 - x)) - (1 / (1 + x)))
x = x * 1e-1
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Speech Hook.
"""
from google.cloud.speech_v1 import SpeechClient
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class GCPSpeechToTextHook(GoogleCloudBaseHook):
"""
Hook for Google Cloud Speech API.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
def __init__(self, gcp_conn_id="google_cloud_default", delegate_to=None):
super().__init__(gcp_conn_id, delegate_to)
self._client = None
def get_conn(self):
"""
Retrieves connection to Cloud Speech.
:return: Google Cloud Speech client object.
:rtype: google.cloud.speech_v1.SpeechClient
"""
if not self._client:
self._client = SpeechClient(credentials=self._get_credentials())
return self._client
def recognize_speech(self, config, audio, retry=None, timeout=None):
"""
Recognizes audio input
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:type config: dict or google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:type audio: dict or google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:type timeout: float
"""
client = self.get_conn()
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognized speech: %s", response)
return response
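# Usage sketch (added example; the connection ID, audio URI and config values are
# illustrative assumptions, not part of the original hook):
#
#   hook = GCPSpeechToTextHook(gcp_conn_id="google_cloud_default")
#   response = hook.recognize_speech(
#       config={"encoding": "LINEAR16", "language_code": "en-US"},
#       audio={"uri": "gs://example-bucket/sample.raw"},
#   )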
|
from twilio.rest.resources.util import transform_params
from twilio.rest.resources import InstanceResource, ListResource
class Sandbox(InstanceResource):
id_key = "pin"
def update(self, **kwargs):
"""
Update your Twilio Sandbox
"""
a = self.parent.update(**kwargs)
self.load(a.__dict__)
class Sandboxes(ListResource):
name = "Sandbox"
instance = Sandbox
def get(self):
"""Request the specified instance resource"""
return self.get_instance(self.uri)
def update(self, **kwargs):
"""
Update your Twilio Sandbox
"""
resp, entry = self.request("POST", self.uri,
body=transform_params(kwargs))
return self.create_instance(entry)
|
# https://www.hackerrank.com/challenges/defaultdict-tutorial/problem
from collections import defaultdict
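# Note (added comment): A_defaultdict maps each word of group A to the list of its
# 1-based positions, so each lookup for a word from group B is O(1) and the stored
# positions can be printed directly.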
A_size, B_size = map(int, input().split())
A_defaultdict = defaultdict(list)
B_list = list()
for i in range(0, A_size):
A_defaultdict[input()].append(i + 1)
for i in range(0, B_size):
B_list = B_list + [input()]
for b in B_list:
if b in A_defaultdict:
print(" ".join(map(str, A_defaultdict[b])))
else:
print(-1)
|
def test_basic_front_end(flask_app):
with flask_app.test_client() as client:
response = client.get(
"/a/random/page"
)
assert response.status_code == 200
def test_basic_front_end_next(flask_app, auth_user):
with flask_app.test_client(user=auth_user) as client:
with client.session_transaction() as sess:
sess["next_url"] = "/another/random/page"
response = client.get(
"/a/random/page"
)
assert response.status_code == 302
def test_basic_front_end_next_unauthorized(flask_app):
with flask_app.test_client() as client:
with client.session_transaction() as sess:
sess["next_url"] = "/another/random/page"
response = client.get(
"/a/random/page"
)
assert response.status_code == 200
|
"""
"""
from django.contrib.auth.models import AbstractUser
from django.db import models
class BPUser(AbstractUser):
is_tester = models.NullBooleanField(db_column='IS_TESTER', null=True, blank=False)
last_login = models.DateTimeField(db_column='LAST_LOGIN', auto_now_add=True, blank=False)
class Meta:
db_table = u'"ATLAS_PANDABIGMON"."AUTH_USER"'
class BPUserSettings(models.Model):
userid = models.IntegerField(db_column='USERID', null=False)
page = models.CharField(db_column='PAGE', max_length=100, null=False)
preferences = models.CharField(db_column='PREFERENCES', max_length=4000)
class Meta:
db_table = u'"ATLAS_PANDABIGMON"."USER_SETTINGS"'
class Visits(models.Model):
visitId = models.BigIntegerField(primary_key=True, db_column='VISITID')
url = models.CharField(null=True, db_column='URL', max_length=1000)
time = models.DateTimeField(db_column='TIME', null=False)
remote = models.CharField(null=True, db_column='REMOTE', max_length=20)
userid = models.IntegerField(null=True, db_column='USERID', blank=True)
service = models.IntegerField(null=True, db_column='SERVICE', blank=True)
class Meta:
db_table= u'"ATLAS_PANDABIGMON"."VISITS"'
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
data_file=path
data=np.genfromtxt(data_file, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
census=np.concatenate((data,new_record),axis=0)
print(census)
#Code starts here
# --------------
#Code starts here
#print(census)
age=np.asarray(census[:,0])
print(age)
max_age=age.max()
min_age=age.min()
age_mean=age.mean()
age_std=np.std(age)
print(max_age , min_age , age_mean,age_std)
# --------------
#Code starts here
mask_0=(census[:,2].astype(int)==0)
mask_1=(census[:,2]==1)
mask_2=(census[:,2]==2)
mask_3=(census[:,2]==3)
mask_4=(census[:,2]==4)
race_0=census[mask_0]
race_1=census[mask_1]
race_2=census[mask_2]
race_3=census[mask_3]
race_4=census[mask_4]
print(race_0)
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
print(len_0, len_1,len_2,len_3,len_4)
minority_race=min(len_0,len_1,len_2,len_3,len_4)
print(minority_race)
#print(census[:,2])
#print(race_0,race_1,race_2,race_3,race_4,len_0)
# --------------
#Code starts here
senior_citizens=census[census[:,0]>60]
senior_citizens_len=len(senior_citizens)
working_hours_sum=sum(senior_citizens[:,6])
print(len(senior_citizens))
avg_working_hours=working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=(high[:,7]).mean()
avg_pay_low=(low[:,7]).astype(float).mean()
print(avg_pay_high)
print(avg_pay_low)
|
# coding:utf-8
from users import (User, UserIp, UserInvitation, Characters, Permission, Role, Message,
UserMessage, UserOnline, PermissionRole, permission_has_role, role_is_admin)
from info import Donate, GuildInfo, Agreement
from prompt import AlivePrompt, LevelPrompt, RacePrompt, JobPrompt, GenderPrompt, MoneyPrompt
from news import News, Sidebar
from tasks import TaskResult
__all__ = ['User', 'UserIp', 'UserOnline', 'UserInvitation', 'Characters',
'GuildInfo', 'Donate', 'Agreement', 'AlivePrompt', 'LevelPrompt', 'RacePrompt', 'JobPrompt',
'GenderPrompt', 'MoneyPrompt', 'News', 'Sidebar', 'Permission', 'Role', 'Message', 'UserMessage',
'PermissionRole', 'permission_has_role', 'role_is_admin','TaskResult']
|
###USER should be able to edit password
import webbrowser
import json
import requests
## assume dictionary of password for each user
def getContent(url):
content = requests.get(url).content
user_dic = json.loads(content)
return user_dic
API_url = "http://127.0.0.1:8000/request/"
response_dict = getContent(API_url + "requestLoginInfo")
email = input("please enter your Email:")
if email not in response_dict.keys():
print("User does not exist, try again")
else:
password = input("Please enter your password:")
if password == response_dict[email]["passWord"]:
userId = response_dict[email]["id"]
url = API_url + "userUpdate?" + "userId=" + str(userId)
webbrowser.open(url, new=2, autoraise=True)
else:
print("incorrect password, please try again")
|
# why do we need reference variables:
# Think of it this way: what happens if a helium-filled balloon is not tied to a string?
# The balloon will just float away!
class Mobile:
def __init__(self, brand, price):
print("Mobile created using location ID: ", id(self))
self.brand = brand
self.price = price
mob1 = Mobile("Apple", 25000)
print("Brand is ", mob1.brand, " and price is ", mob1.price)
mob2 = Mobile("Samsung", 15000)
print("Other brand is ", mob2.brand, " and its price is ", mob2.price)
# Just like a balloon without a ribbon, an object without a reference variable cannot be used later.
class Mobile2:
def __init__(self, brand, price):
self.brand = brand
self.price = price
Mobile2("Redmi", 9990)
# After the above line, the Mobile2 object that was just created is lost and unusable.
# Multiple references
# Can a balloon have multiple ribbons? Yes.
# An object can have multiple references.
# Notice the code below.
class Mobile3:
def __init__(self, brand, price):
print("Inside constructor")
self.brand = brand
self.price = price
print("Brand is: ", self.brand, " and price is ", self.price)
my_mob1 = Mobile3("Apple", 15000)
my_mob2 = my_mob1
print("ID for multiple reference of Same object, i.e my_mob1 ", id(my_mob1))
print("ID for multiple reference of Same object, i.e my_mob2 ", id(my_mob2))
|
import json
from girder import events
from girder.constants import registerAccessFlag, AccessType, TokenScope
from girder.plugins.jobs.constants import JobStatus
from girder.utility.model_importer import ModelImporter
from . import constants
from .rest import ItemTask
from .json_tasks import createItemTasksFromJson, configureItemTaskFromJson, \
runJsonTasksDescriptionForFolder, runJsonTasksDescriptionForItem
from .slicer_cli_tasks import configureItemTaskFromSlicerCliXml, createItemTasksFromSlicerCliXml, \
runSlicerCliTasksDescriptionForFolder, runSlicerCliTasksDescriptionForItem
def _onJobSave(event):
"""
If a job is finalized (i.e. success or failure status) and contains
a temp token, we remove the token.
"""
params = event.info['params']
job = event.info['job']
if 'itemTaskTempToken' in job and params['status'] in (JobStatus.ERROR, JobStatus.SUCCESS):
token = ModelImporter.model('token').load(
job['itemTaskTempToken'], objectId=False, force=True)
if token:
ModelImporter.model('token').remove(token)
# Remove the itemTaskTempToken field from the job
ModelImporter.model('job', 'jobs').update({'_id': job['_id']}, update={
'$unset': {'itemTaskTempToken': True}
}, multi=False)
del job['itemTaskTempToken']
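# Illustrative example (added comment): an upload "reference" this plugin reacts to
# is a JSON object shaped like
#   {"type": "item_tasks.output", "jobId": "<job id>", "id": "<output binding id>"}
# (field names taken from how ref is read in _onUpload below).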
def _onUpload(event):
"""
Look at uploads containing references related to this plugin. If found,
they are used to link item task outputs back to a job document.
"""
try:
ref = json.loads(event.info.get('reference'))
except (ValueError, TypeError):
return
if isinstance(ref, dict) and ref.get('type') == 'item_tasks.output':
jobModel = ModelImporter.model('job', 'jobs')
tokenModel = ModelImporter.model('token')
token = event.info['currentToken']
if tokenModel.hasScope(token, 'item_tasks.job_write:%s' % ref['jobId']):
job = jobModel.load(ref['jobId'], force=True, exc=True)
else:
job = jobModel.load(
ref['jobId'], level=AccessType.WRITE, user=event.info['currentUser'], exc=True)
file = event.info['file']
item = ModelImporter.model('item').load(file['itemId'], force=True)
# Add link to job model to the output item
jobModel.updateJob(job, otherFields={
'itemTaskBindings.outputs.%s.itemId' % ref['id']: item['_id']
})
# Also a link in the item to the job that created it
item['createdByJob'] = job['_id']
ModelImporter.model('item').save(item)
def load(info):
registerAccessFlag(constants.ACCESS_FLAG_EXECUTE_TASK, name='Execute analyses', admin=True)
TokenScope.describeScope(
constants.TOKEN_SCOPE_EXECUTE_TASK, name='Execute tasks', description='Execute item tasks.')
TokenScope.describeScope(
constants.TOKEN_SCOPE_AUTO_CREATE_CLI, 'Item task auto-creation',
'Create new CLIs via automatic introspection.', admin=True)
ModelImporter.model('item').ensureIndex(['meta.isItemTask', {'sparse': True}])
ModelImporter.model('item').exposeFields(level=AccessType.READ, fields='createdByJob')
ModelImporter.model('job', 'jobs').exposeFields(level=AccessType.READ, fields={
'itemTaskId', 'itemTaskBindings'})
events.bind('jobs.job.update', info['name'], _onJobSave)
events.bind('data.process', info['name'], _onUpload)
info['apiRoot'].item_task = ItemTask()
info['apiRoot'].item.route('POST', (':id', 'item_task_slicer_cli_description'),
runSlicerCliTasksDescriptionForItem)
info['apiRoot'].item.route('PUT', (':id', 'item_task_slicer_cli_xml'),
configureItemTaskFromSlicerCliXml)
info['apiRoot'].item.route('POST', (':id', 'item_task_json_description'),
runJsonTasksDescriptionForItem)
info['apiRoot'].item.route('PUT', (':id', 'item_task_json_specs'),
configureItemTaskFromJson)
info['apiRoot'].folder.route('POST', (':id', 'item_task_slicer_cli_description'),
runSlicerCliTasksDescriptionForFolder)
info['apiRoot'].folder.route('POST', (':id', 'item_task_slicer_cli_xml'),
createItemTasksFromSlicerCliXml)
info['apiRoot'].folder.route('POST', (':id', 'item_task_json_description'),
runJsonTasksDescriptionForFolder)
info['apiRoot'].folder.route('POST', (':id', 'item_task_json_specs'),
createItemTasksFromJson)
|
"""Agilent N6705B Power Supply SCIPI Class.
Copyright (c) 2014 The Project Loon Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file
"""
__author__ = "Alfred Cohen"
__email__ = "alfredcohen@google.com"
import time
class Driver(object):
"""Agilent N6705B Power Supply - Driver specific Class."""
def __init__(self, inst, cfg=None):
"""Initialize the specific driver."""
self.i = inst
if not cfg:
self.i.set_error("Configuration File for this instrument not available")
return
self.cfg = cfg["cfg"]
self.power = cfg["power"]
def __del__(self):
"""Destroy and cleanup/shutdown power."""
self.set_power(0)
return
def get_params(self):
"""Dictionary holding all driver specific parameter mappings.
Returns:
Structure in the format:
PARM: p = "SCPI:PARAMETER",
u = "Units" to append,
v = "param = paramdevice"
"""
return {
"Volt": {"p": "VOLT "},
"Curr": {"p": "CURR "},
"VoltMax": {"p": "VOLT:PROT:LEV "},
"CurrMax": {"p": "CURR:PROT:LEV "}
}
def setup(self):
"""Set-up the device according to the supplied configuration."""
# Prevent resetting parameters if specific configuration setting exists.
if "no_setup" in self.cfg and self.cfg["no_setup"]:
return True
ok = []
self.i.set("*RST")
seq = self.get_channels("list", all=True)
for ch in seq:
chan = str(ch)
for param in self.cfg[chan]:
cmd = self.getcmd(param, chan)
if not cmd:
continue
res = self.i.set(cmd)
if res:
ok.append(True)
else:
ok.append(False)
self.i.get("*OPC?")
return all(ok)
def getcmd(self, p="", ch=""):
"""Use the Global Wrapper to get full SCIPI Command for the parameter."""
if not p:
return
channel = ("" if not ch else ", (@%s)"%ch)
cmd = self.i.getcmd(p, self.cfg[str(ch)])
if not cmd:
return
return cmd + channel
def set_power(self, on=0, chnls=None):
"""Turn Power On/Off for all power channels or selected ones."""
ok = []
mode = "ON " if on == 1 else "OFF "
seq = chnls if chnls else self.get_channels("list")
if on == 0:
seq = reversed(seq)
delay = 0.8 if "delay" not in self.power else self.power["delay"]
for ch in seq:
res = self.i.set("OUTP %s, (@%s)"%(mode, ch))
ok.append(res)
time.sleep(delay)
return all(ok)
def get_channels(self, mtype="str", all=True):
"""Obtain All configured Channels in order and return a list or string."""
seq = self.power["seq"] if "seq" in self.power else []
if all and "main" in self.power:
if self.power["main"] not in seq:
seq.append(self.power["main"])
return ",".join(map(str, seq)) if mtype is "str" else seq
def get_meas(self, mtype="VOLT"):
"""Perform Measurement of the specified type for the used channels."""
cmd = "OUTP?" if mtype == "ON" else "MEAS:%s?"%mtype
vals = self.i.geta("%s (@%s)"%(cmd, self.get_channels("str", all=True)))
data = []
if not vals:
return data
for v in vals:
newval = int(v) if mtype == "ON" else float(v)
data.append(newval)
return data
def action(self, *args):
"""Perform Default Driver Action. In this case Turn on/off power."""
return self.set_power(*args)
def get_measure(self):
"""Perform a complete measurement, with all most important readings."""
ok = []
res = {}
self.i.set_error()
raw = {}
for q in ("VOLT", "CURR", "ON"):
meas = self.get_meas(q)
ok.append(bool(meas))
raw[q] = meas
if not all(ok):
self.i.set_error("Measurement Read Fail: OK=%s/%s (%s)"%
(sum(ok), len(ok), self.i.get_error()))
return ({}, False)
chans = self.get_channels("list")
for ch in chans:
key = self.cfg[str(ch)]["name"]
idx = chans.index(ch)
res[key] = {"Volt": raw["VOLT"][idx],
"Curr": raw["CURR"][idx],
"Watt": raw["VOLT"][idx] * raw["CURR"][idx],
"On": raw["ON"][idx]
}
return (res, all(ok))
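# Usage sketch (added example): the configuration layout below is an assumption
# inferred from how self.cfg and self.power are read above, and "inst" stands for
# the instrument wrapper providing set()/get()/geta()/getcmd()/set_error():
#
#   cfg = {
#       "cfg": {"1": {"name": "Main", "Volt": 5.0, "Curr": 1.0}},
#       "power": {"seq": [1], "main": 1, "delay": 0.8},
#   }
#   psu = Driver(inst, cfg)
#   psu.setup()
#   psu.set_power(1)
#   readings, ok = psu.get_measure()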
|
import os
from collections import OrderedDict
from .config import Config
from .utils import Utils, Clr
from .bitbucket import BitBucketApi
class Setup:
def __init__(self):
pass
@staticmethod
def service(package_dir=None, m_service=None):
i = 0
pkg_txt = ' '
pkg_input_hint = ''
if package_dir:
pkg_path = os.path.normpath(package_dir)
pkg_split_path = pkg_path.split(os.sep)
pkg_name = pkg_split_path.pop()
pkg_name = pkg_name.split("pkg_")[1]
pkg_txt = " to " + Clr.WARNING + str(pkg_name) + Clr.RESET + Clr.OK + " package "
pkg_input_hint = Clr.WARNING + "[package " + pkg_name + "]" + Clr.RESET
while True:
if i == 0:
print("\n" + Clr.OK + "Add a service" +
pkg_txt + "or leave empty to skip." + Clr.RESET +
"(Git repository name/slug recommended)")
elif i >= 1:
print("\n" + Clr.OK + "Add another service" + pkg_txt + Clr.RESET + "(leave empty to skip)")
if not m_service:
service = Utils.m_input('Service Name' + pkg_input_hint + ': ')
else:
service = m_service
m_service = None
service_dir = os.path.join(Utils.PROJECTS_DIR, service)
if package_dir:
service_dir = os.path.join(package_dir, service)
if service:
if not os.path.exists(service_dir):
user = Config().get_bit_bucket_username()
password = Config().get_bit_bucket_pass()
print('Fetch service from bit-bucket...')
repo = BitBucketApi(user, password).get_repos(query_filter='name ~ "' + service + '"')
if not repo['values']:
Clr('Repository "' + service + '" does not exist on BitBucket. Add it manually!!!').warn()
i += 1
continue
elif len(repo['values']) == 1:
repo = repo['values'][0]
print('This repo was found on BitBucket "' + Clr.OK + repo['name'] + Clr.RESET +
' (' + repo['links']['html']['href'] + ')".')
confirmation = Utils.m_input('Continue: [n/Y]')
if confirmation in ['y', '']:
BitBucketApi(user, password).clone_service(repo, package_dir)
elif len(repo['values']) >= 2:
c = 0
print("\nMore then one repo is found on BitBucket:")
repos = {}
for r in repo['values']:
repos[r['slug']] = r
c += 1
print(" [" + str(c) + "] " + Clr.OK + r['slug'] + Clr.RESET)
print("Type " + Clr.WARNING + "'all'" + Clr.RESET + " "
"to add them all or enter names separated by space.")
print('')
service = Utils.m_input('Service Name' + pkg_input_hint + ': ')
if service:
if service == 'all':
for slug, s in repos.items():
BitBucketApi(user, password).clone_service(s, package_dir)
else:
services = service.split(' ')
for s in services:
if s in repos:
BitBucketApi(user, password).clone_service(repos[s], package_dir)
else:
print("Service " + Clr.WARNING + service + Clr.RESET + " does not exist!")
i += 1
continue
else:
i += 1
else:
print('Service "' + service + '" already exists. Run ' +
Clr.WARNING + '"oly service ls"' + Clr.RESET +
' to list all available services')
i += 1
else:
break
def package(self, m_service=None):
i = 0
while True:
if not m_service:
if i == 0:
print("\n" + Clr.OK + "Add new or an existing package name" + Clr.RESET + " (leave empty to skip)")
elif i >= 1:
print("\n" + Clr.OK + "Add another package" + Clr.RESET + " (leave empty to skip)")
packages = self.package_list()
if packages:
package = Utils().input_with_help('Existing packages: ', 'Package Name: ', *packages)
else:
package = input('Package Name: ')
if package:
if not os.path.exists(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + package)):
os.makedirs(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + package))
else:
print('')
print('Package "' + package + '" already exists. Run ' +
Clr.WARNING + '"oly package ls"' + Clr.RESET +
' to list all available packages')
self.service(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + package))
i += 1
else:
break
else:
if not os.path.exists(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + m_service)):
os.makedirs(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + m_service))
else:
print('')
print('Package "' + m_service + '" already exists. Run ' +
Clr.WARNING + '"oly package ls"' + Clr.RESET +
' to list all available packages')
self.service(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + m_service))
m_service=None
i += 1
@staticmethod
def package_list():
packages = os.listdir(os.path.join(Utils.PROJECTS_DIR))
names = []
for pkg in packages:
if os.path.isdir(os.path.join(Utils.PROJECTS_DIR, pkg)) and 'pkg_' in pkg:
name = pkg.split('pkg_')[1]
names.append(name)
return names
@staticmethod
def service_list():
packages = os.listdir(os.path.join(Utils.PROJECTS_DIR))
names = []
for pkg in packages:
if os.path.isdir(os.path.join(Utils.PROJECTS_DIR, pkg)) and 'pkg_' not in pkg:
names.append(pkg)
return names
def all_services_list(self, **kwargs):
services = self.service_list()
packages = self.package_list()
services_list = {}
table = OrderedDict([('Name', []), ('Package', []), ('Status', []), ('Ports', [])])
if services:
for srv_name in services:
services_list[srv_name] = srv_name
if 'table' in kwargs and kwargs['table']:
table['Name'].append(srv_name)
table['Package'].append('-')
table['Status'].append(Clr.WARNING + 'Stopped' + Clr.RESET)
table['Ports'].append('-')
if packages:
for pkg_name in packages:
cdirs = os.listdir(os.path.join(Utils.PROJECTS_DIR, 'pkg_' + str(pkg_name)))
for service in cdirs:
service_dir = os.path.join(Utils.PROJECTS_DIR, 'pkg_' + str(pkg_name), service)
if not os.path.isdir(service_dir):
cdirs.remove(service)
if 'plain' in kwargs and kwargs['plain']:
for srv_name in cdirs:
services_list[srv_name] = srv_name
elif 'table' in kwargs and kwargs['table']:
for srv_name in cdirs:
table['Name'].append(srv_name)
table['Package'].append(pkg_name)
table['Status'].append(Clr.WARNING + 'Stopped' + Clr.RESET)
table['Ports'].append('-')
services_list = table
else:
services_list[pkg_name] = cdirs
return services_list
|
# Generated by Django 3.1.3 on 2020-11-23 11:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fit', '0005_auto_20201123_1627'),
]
operations = [
migrations.CreateModel(
name='Pharmacy',
fields=[
('phar_id', models.AutoField(primary_key=True, serialize=False)),
('phar_name', models.CharField(default='', max_length=100)),
('phar_ownerName', models.CharField(default='', max_length=150)),
('phar_idProof', models.ImageField(default='', upload_to='fit/pharmacy')),
('phar_StoreImage', models.ImageField(default='', upload_to='fit/pharmacy')),
('phar_phone', models.IntegerField()),
('phar_address', models.CharField(default='', max_length=500)),
],
),
]
|
#!/usr/bin/python
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_storage_system_facts
short_description: Retrieve facts about the OneView Storage Systems
description:
- Retrieve facts about the Storage Systems from OneView.
version_added: "2.5"
requirements:
- "python >= 2.7.9"
- "hpOneView >= 4.0.0"
author: "Gustavo Hennig (@GustavoHennig)"
options:
storage_hostname:
description:
- Storage System IP or hostname.
name:
description:
- Storage System name.
options:
description:
- "List with options to gather additional facts about a Storage System and related resources.
Options allowed:
C(hostTypes) gets the list of supported host types.
C(storagePools) gets a list of storage pools belonging to the specified storage system.
C(reachablePorts) gets a list of storage system reachable ports. Accepts C(params).
An additional C(networks) list param can be used to restrict the search for only these ones.
C(templates) gets a list of storage templates belonging to the storage system."
- "To gather facts about C(storagePools), C(reachablePorts), and C(templates) it is required to inform
either the argument C(name), C(ip_hostname), or C(hostname). Otherwise, this option will be ignored."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Storage Systems
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
delegate_to: localhost
- debug: var=storage_systems
- name: Gather paginated, filtered and sorted facts about Storage Systems
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
params:
start: 0
count: 3
sort: 'name:descending'
filter: managedDomain=TestDomain
- debug: var=storage_systems
- name: Gather facts about a Storage System by IP (ip_hostname)
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
ip_hostname: "172.18.11.12"
delegate_to: localhost
- debug: var=storage_systems
- name: Gather facts about a Storage System by IP (hostname)
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
hostname: "172.18.11.12"
delegate_to: localhost
- debug: var=storage_systems
- name: Gather facts about a Storage System by name
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
name: "ThreePAR7200-4555"
delegate_to: localhost
- debug: var=storage_systems
- name: Gather facts about a Storage System and all options
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
name: "ThreePAR7200-4555"
options:
- hostTypes
- storagePools
delegate_to: localhost
- debug: var=storage_systems
- debug: var=storage_system_host_types
- debug: var=storage_system_pools
- name: Gather queried facts about Storage System reachable ports (API500 onwards)
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
hostname: "172.18.11.12"
options:
- reachablePorts
params:
networks:
- /rest/fc-networks/01FC123456
- /rest/fc-networks/02FC123456
sort: 'name:descending'
- debug: var=storage_system_reachable_ports
- name: Gather facts about Storage System storage templates (API500 onwards)
oneview_storage_system_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 600
hostname: "172.18.11.12"
options:
- templates
params:
sort: 'name:descending'
- debug: var=storage_system_templates
'''
RETURN = '''
storage_systems:
description: Has all the OneView facts about the Storage Systems.
returned: Always, but can be null.
type: dict
storage_system_host_types:
description: Has all the OneView facts about the supported host types.
returned: When requested, but can be null.
type: dict
storage_system_pools:
description: Has all the OneView facts about the Storage Systems - Storage Pools.
returned: When requested, but can be null.
type: dict
storage_system_reachable_ports:
description: Has all the OneView facts about the Storage Systems reachable ports.
returned: When requested, but can be null.
type: dict
storage_system_templates:
description: Has all the OneView facts about the Storage Systems - Storage Templates.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class StorageSystemFactsModule(OneViewModuleBase):
def __init__(self):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict'),
storage_hostname=dict(type='str')
)
super(StorageSystemFactsModule, self).__init__(additional_arg_spec=argument_spec)
self.resource_client = self.oneview_client.storage_systems
def execute_module(self):
facts = {}
is_specific_storage_system = True
# This allows using both "ip_hostname" and "hostname" regardless api_version
if self.oneview_client.api_version >= 500:
get_method = self.oneview_client.storage_systems.get_by_hostname
else:
get_method = self.oneview_client.storage_systems.get_by_ip_hostname
if self.module.params.get('storage_hostname'):
storage_systems = get_method(self.module.params['storage_hostname'])
elif self.module.params.get('name'):
storage_systems = self.oneview_client.storage_systems.get_by_name(self.module.params['name'])
else:
storage_systems = self.oneview_client.storage_systems.get_all(**self.facts_params)
is_specific_storage_system = False
self.__get_options(facts, storage_systems, is_specific_storage_system)
facts['storage_systems'] = storage_systems
return dict(changed=False, ansible_facts=facts)
def __get_options(self, facts, storage_system, is_specific_storage_system):
if self.options:
if self.options.get('hostTypes'):
facts['storage_system_host_types'] = self.oneview_client.storage_systems.get_host_types()
if storage_system and is_specific_storage_system:
storage_uri = storage_system['uri']
query_params = self.module.params.get('params', {})
if self.options.get('storagePools'):
facts['storage_system_pools'] = self.oneview_client.storage_systems.get_storage_pools(storage_uri)
if self.options.get('reachablePorts'):
facts['storage_system_reachable_ports'] = \
self.oneview_client.storage_systems.get_reachable_ports(storage_uri, **query_params)
if self.options.get('templates'):
facts['storage_system_templates'] = \
self.oneview_client.storage_systems.get_templates(storage_uri, **query_params)
def main():
StorageSystemFactsModule().run()
if __name__ == '__main__':
main()
|
# coding:utf-8
"""
@Time : 2021/6/16 10:03 AM
@Author: chuwt
"""
|
diccionario={'José':27,'Rafa':90,'Sara':30,'Luis':40}
print(diccionario['Rafa'])
print(len(diccionario))
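# Added example (illustrative): membership test and safe lookup with a default;
# 'Ana' is a made-up key that is not in the dictionary.
print('Sara' in diccionario)                 # True
print(diccionario.get('Ana', 'not found'))   # 'not found'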
|
def gcd(a, b):
while b:
a, b = b, a % b
return a
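# Derivation (added comment): we want a Pythagorean triple with a + b + c = 1000.
# Substituting c = 1000 - a - b into a**2 + b**2 = c**2 and simplifying gives
#     b = 1000 * (500 - a) / (1000 - a)
# so b is an integer exactly when (1000 - a) divides 1000 * (500 - a),
# i.e. when gcd(b1, b2) == b2 below.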
for a in range(1, 500):
b1 = 1000 * (500 - a)
b2 = 1000 - a
if gcd(b1, b2) == b2:
b = b1 // b2
c = 1000 - a - b
print(a * b * c)
break
|
from setuptools import setup, find_packages
from hand_obj_interaction import commands
setup(
name='hand_obj_interaction',
version='0.1',
description='2D to 3D',
python_requires='>=3.7',
packages=find_packages()
)
|
from .st import *
from .mt import makeMask
import numpy as np, matplotlib.pyplot as plt, cv2
def Erode_Dilate(mask):
def process(mask_, kernel, iters):
top_erode1 = cv2.erode(mask_, kernel, iterations=iters)
top_dilate = cv2.dilate(top_erode1, kernel, iterations=iters*2)
top_erode2 = cv2.erode(top_dilate, kernel, iterations=iters)
return top_erode2
p1 = process(mask, np.ones([3,3], dtype=np.uint8), iters=2)
p2 = process(p1, np.ones([7,7], dtype=np.uint8), iters=1)
return p2
def Contour_(mask):
msk = mask.copy()
(_, cnts, _) = cv2.findContours(msk, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
area = []
for c_ in cnts:area.append(cv2.contourArea(c_))
for a_ in range(len(area)):
if a_ != np.argmax(area):msk = cv2.drawContours(msk, [cnts[a_]], -1, 0, -1)
return msk
def top(pngImg, yolo_rst):
h, w, c = pngImg.shape
rgb = pngImg[...,:3]
alpha = pngImg[...,-1].reshape(h, w, 1)
if len(yolo_rst) == 6:
top_x1 = yolo_rst[0]; top_y1 = yolo_rst[1]
top_x2 = yolo_rst[2]; top_y2 = yolo_rst[3]
top_h = yolo_rst[4]; top_w = yolo_rst[5]
scale_h = int(top_h/10); scale_w = int(top_w/10)
top_x1_ = top_x1-scale_w; top_y1_ = top_y1-scale_h
top_x2_ = top_x2+scale_w; top_y2_ = top_y2+scale_h
if top_x1_ < 0:top_x1_=0
if top_y1_ < 0:top_y1_=0
if top_x2_ > w:top_x2_=w-1
if top_y2_ > h:top_y2_=h-1
top = rgb[top_y1_:top_y1_+(top_y2_-top_y1_), top_x1_:top_x1_+(top_x2_-top_x1_)]
top_alpha = alpha[top_y1_:top_y1_+(top_y2_-top_y1_), top_x1_:top_x1_+(top_x2_-top_x1_)]
#topSkinMask, topSkinMask_h, topSkinMask_w = makeMask().top(top)
#topSkinMask_ed = Erode_Dilate(topSkinMask)
#top = cv2.bitwise_and(top, top, mask=cv2.bitwise_not(topSkinMask_ed))
#top_alpha = cv2.bitwise_and(top_alpha, top_alpha, mask=cv2.bitwise_not(topSkinMask_ed)).reshape(topSkinMask_h, topSkinMask_w, 1)
topClothesMask = Contour_(cv2.threshold(cv2.cvtColor(top.copy(), cv2.COLOR_RGB2GRAY),1,255,cv2.THRESH_BINARY)[1])
#============= do ed again =============
topClothesMask_ed = Erode_Dilate(topClothesMask)
top = cv2.bitwise_and(top, top, mask=topClothesMask_ed)
top_alpha = cv2.bitwise_and(top_alpha, top_alpha, mask=topClothesMask_ed).reshape(top.shape[0], top.shape[1], 1)
#============= do ed again =============
#============= detect junction =============
top_h_, top_w_ = top.shape[:2]
jct_h, jct_w = top_y2_-(2*top_y2-top_y2_), (top_x2_-top_x1_)
jct = top[(top_h_-jct_h):top_h_, 0:jct_w]
jct_alpha = top_alpha[(top_h_-jct_h):top_h_, 0:jct_w]
jct_mask = Contour_(makeMask().junction(jct))
top[(top_h_-jct_h):top_h_, 0:jct_w] = cv2.bitwise_and(jct, jct, mask=jct_mask)
top_alpha[(top_h_-jct_h):top_h_, 0:jct_w] = cv2.bitwise_and(jct_alpha, jct_alpha, mask=jct_mask).reshape(jct_h, jct_w, 1)
#============= detect junction =============
#============= Combine =============
final_top = np.concatenate([top, top_alpha], axis=2)
#============= Combine =============
show_rgb = rgb.copy()
def putText(img, txt, pos, color):
cv2.putText(img, txt, pos, cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1, cv2.LINE_AA)
putText(img=show_rgb, txt="Top(x1, y1)", pos=(top_x1_+10, top_y1_-10), color=(255, 0, 255))
cv2.rectangle(show_rgb, (top_x1_, top_y1_), (top_x1_+(top_x2_-top_x1_), top_y1_+(top_y2_-top_y1_)), (255,0,255), 3)
putText(img=show_rgb, txt="Junction(x1, y2)", pos=(top_x1_+10, (2*top_y2-top_y2_)-10), color=(255, 0, 0))
cv2.rectangle(show_rgb, (top_x1_, 2*top_y2-top_y2_), (top_x2_, top_y2_), (255,0,0), 2)
# ------ save points as json ------
js = {}
ori_img = {}
ori_img["height"] = int(h); ori_img["width"] = int(w); ori_img["channel"] = int(c)
js["image"] = ori_img
top_js = {}
top_js["x1"] = int(top_x1_); top_js["y1"] = int(top_y1_)
top_js["x2"] = int(top_x1_+(top_x2_-top_x1_)); top_js["y2"] = int(top_y1_+(top_y2_-top_y1_))
js["top"] = top_js
# ------ save points as json ------
#return [top, topSkinMask, topSkinMask_ed, topClothesMask, topClothesMask_ed, show_rgb, final_top, js]
return [top, topClothesMask, topClothesMask_ed, show_rgb, final_top, js]
else:
print("{}".format(yolo_rst))
|
from gym.envs.mujoco import HalfCheetahEnv
import rlkit.torch.pytorch_util as ptu
import gym
import os
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm, TorchOnlineRLAlgorithm
def experiment(variant):
env = gym.make(variant['env_id'])
expl_env = NormalizedBoxEnv(env)
eval_env = NormalizedBoxEnv(env)
obs_dim = expl_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
M = variant['layer_size']
qf1 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
qf2 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf1 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf2 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim,
action_dim=action_dim,
hidden_sizes=[M, M],
)
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
policy,
)
replay_buffer = EnvReplayBuffer(
variant['replay_buffer_size'],
expl_env,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
print('end')
savefile_path = os.path.join('datasets', variant['env_id'], 'full_replay.hdf5')
replay_buffer.save_numpy(savefile_path)
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algorithm="SAC",
env_id="Ant-v2",
version="normal",
layer_size=256,
replay_buffer_size=int(1E6),
algorithm_kwargs=dict(
num_epochs=1000,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=5000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=5.0,
use_automatic_entropy_tuning=True,
),
)
setup_logger('ant_replay_rewscale5', variant=variant)
ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
experiment(variant)
|
from enum import Enum, auto
class Category:
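    """Pairs a human-readable category name with its short key."""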
def __init__(self, name, key):
self.name = name
self.key = key
class CategoryEnum(Enum):
PETI_MIN = Category('5minutovka na míru', '5min')
DOMOV = Category("Z domova", "domov")
ZAHRANICI = Category("Zahraničí", "zahranici")
SPORT = Category("Sport", "sport")
BYZNYS = Category("Byznys", "byznys")
KULTURA = Category("Kultura", "kultura")
ZDRAVI = Category("Zdraví", "zdravi")
CESTOVANI = Category("Cestovaní", "cestovani")
RELAX = Category("Relax", "relax")
VEDA = Category("Věda", "veda")
AUTO = Category("Auto", "auto")
ZIVOTNI_STYL = Category("Životní styl", "zivotni-styl")
HISTORIE = Category("Historie", "historie")
UNKNOWN = Category("", "unknown")
class MetricEnum(Enum):
FRECENCY = "frecency"
POPULARITY = "popularity"
RECENCY = "recency"
|
from google.appengine.ext import vendor
vendor.add('./Lib')
|
import logging
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from rr.models.certificate import Certificate, certificate_validator
from rr.serializers.common import ActiveListSerializer
logger = logging.getLogger(__name__)
class CertificateSerializer(serializers.ModelSerializer):
def validate(self, data):
sp = data['sp'] if 'sp' in data else self.instance.sp
certificate = data['certificate'] if 'certificate' in data else None
signing = data['signing'] if 'signing' in data else False
encryption = data['encryption'] if 'encryption' in data else False
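        # Default to using the certificate for both signing and encryption when neither flag is set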
if not signing and not encryption:
signing = True
encryption = True
certificate_validator(sp, certificate, signing, encryption, serializers.ValidationError)
return data
class Meta:
model = Certificate
fields = ['id', 'sp', 'cn', 'issuer', 'valid_from', 'valid_until', 'key_size', 'certificate', 'signing',
'encryption', 'created_at', 'updated_at', 'end_at', 'validated', 'status']
read_only_fields = ['cn', 'issuer', 'valid_from', 'valid_until', 'key_size', 'created_at', 'updated_at',
'end_at', 'validated', 'status']
validators = [
UniqueTogetherValidator(
queryset=Certificate.objects.filter(end_at=None),
fields=['sp', 'certificate', 'signing', 'encryption']
)
]
def create(self, validated_data):
sp = validated_data.pop('sp', None)
certificate = validated_data.pop('certificate', None).replace(
"-----BEGIN CERTIFICATE-----", "").replace("-----END CERTIFICATE-----", "").strip()
encryption = validated_data.pop('encryption', False)
signing = validated_data.pop('signing', False)
if not signing and not encryption:
signing = True
encryption = True
cert = Certificate.objects.add_certificate(certificate, sp, signing=signing,
encryption=encryption)
user = self.context['request'].user
logger.info("Certificate added for {sp} by {user}"
.format(sp=sp, user=user))
return cert
class CertificateLimitedSerializer(serializers.ModelSerializer):
class Meta:
list_serializer_class = ActiveListSerializer
model = Certificate
fields = ['id', 'sp', 'valid_from', 'valid_until', 'certificate', 'signing', 'encryption', 'status']
read_only_fields = ['valid_from', 'valid_until', 'status']
|
"""
test of drawing relations from relation space
"""
import torch
import numpy as np
from texrel import relations, spaces
def test_prep_equality():
left1 = relations.LeftOf()
left2 = relations.LeftOf()
above1 = relations.Above()
above2 = relations.Above()
assert left1 == left2
assert above1 == above2
assert left1 != above1
assert left2 != above2
assert left1 != above2
assert left2 != above1
assert str(left1) == 'left-of'
assert str(left2) == 'left-of'
assert str(above1) == 'above'
assert str(above2) == 'above'
def test_prepositions():
r = np.random.RandomState()
prep_space = spaces.PrepositionSpace(r=r)
for i in range(5):
print(prep_space.sample())
print('')
prep_space = spaces.PrepositionSpace(r=r, available_preps=['Above', 'RightOf'])
for i in range(5):
print(prep_space.sample())
print('')
def test_relations():
r = np.random.RandomState()
rel_space = spaces.RelationSpace(
prep_space=spaces.PrepositionSpace(r=r),
thing_space=spaces.ThingSpace(
r=r,
shape_space=spaces.IntSpace(r=r, num_ints=9), color_space=spaces.IntSpace(r=r, num_ints=9)))
for i in range(5):
print(rel_space.sample())
print('')
color_space = spaces.IntSpace(r=r, num_ints=2)
shape_space = spaces.IntSpace(r=r, num_ints=2)
thing_space = spaces.ThingSpace(r=r, color_space=color_space, shape_space=shape_space)
prep_space = spaces.PrepositionSpace(r=r, available_preps=['Above', 'RightOf'])
rel_space = spaces.RelationSpace(thing_space=thing_space, prep_space=prep_space)
for i in range(5):
print(rel_space.sample())
print('')
def test_complements():
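    """Complementing a relation twice should give back the original relation."""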
r = np.random.RandomState()
print('')
rel_space = spaces.RelationSpace(
prep_space=spaces.PrepositionSpace(r=r),
thing_space=spaces.ThingSpace(
r=r,
shape_space=spaces.IntSpace(r=r, num_ints=9), color_space=spaces.IntSpace(r=r, num_ints=9)))
rels = []
for i in range(5):
r = rel_space.sample()
rels.append(r)
print(r)
print('')
comps = []
print('comps:')
for r in rels:
comp = r.complement()
print(comp)
comps.append(comp)
assert comp != r
print('')
comp2s = []
print('comp2s:')
for i, comp in enumerate(comps):
comp2 = comp.complement()
print(comp2)
comp2s.append(comp2)
assert comp2 != comp
assert comp2 == rels[i]
print('')
def test_preps_encode_decode():
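    """Round-trip sampled prepositions through their one-hot encoding."""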
r = np.random.RandomState()
prep_space = spaces.PrepositionSpace(r=r)
for i in range(10):
p = prep_space.sample()
p_indices = p.as_onehot_indices(prep_space)
p_onehot = torch.Tensor(p.as_onehot_tensor_size(prep_space)).zero_()
p_onehot[torch.LongTensor(p_indices)] = 1
p2 = relations.Preposition.from_onehot_tensor(prep_space, p_onehot)
print('p', p, 'p2', p2)
assert p2 == p
def test_relations_encode_decode():
r = np.random.RandomState()
rel_space = spaces.RelationSpace(
prep_space=spaces.PrepositionSpace(r=r),
thing_space=spaces.ThingSpace(
r=r,
shape_space=spaces.IntSpace(r=r, num_ints=9), color_space=spaces.IntSpace(r=r, num_ints=9)))
for i in range(10):
r = rel_space.sample()
r_indices = r.as_onehot_indices(rel_space)
r_onehot = torch.Tensor(r.as_onehot_tensor_size(rel_space)).zero_()
r_onehot[torch.LongTensor(r_indices)] = 1
r2 = relations.Relation.from_onehot_tensor(rel_space, r_onehot)
print('r', r, 'r2', r2)
assert r2 == r
def test_relations_encode_decode2():
r = np.random.RandomState()
rel_space = spaces.RelationSpace(
prep_space=spaces.PrepositionSpace(r=r),
thing_space=spaces.ThingSpace(
r=r,
shape_space=spaces.IntSpace(r=r, num_ints=9), color_space=spaces.IntSpace(r=r, num_ints=9)))
for i in range(10):
r = rel_space.sample()
r_onehot = r.encode_onehot(rel_space=rel_space)
r2 = relations.Relation.from_onehot_tensor(rel_space, r_onehot)
print('r', r, 'r2', r2)
assert r2 == r
def test_relations_encode3():
r = np.random.RandomState()
rel_space = spaces.RelationSpace(
prep_space=spaces.PrepositionSpace(r=r),
thing_space=spaces.ThingSpace(
r=r,
shape_space=spaces.IntSpace(r=r, num_ints=9), color_space=spaces.IntSpace(r=r, num_ints=9)))
for i in range(10):
r = rel_space.sample()
indices = r.as_indices(rel_space=rel_space)
print('indices', indices)
r2 = relations.Relation.eat_from_indices(rel_space=rel_space, indices=indices[0])[0]
print('r2.indices', r2.as_indices(rel_space=rel_space))
print('r', r, 'r2', r2)
assert r == r2
|
from enum import Enum
class SpinnakerBootOpCode(Enum):
""" Boot message Operation Codes
"""
HELLO = 0x41
FLOOD_FILL_START = 0x1
FLOOD_FILL_BLOCK = 0x3
FLOOD_FILL_CONTROL = 0x5
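    # Members may optionally be declared as (value, "description") tuples;
    # __new__/__init__ below unpack the value and attach the description as the member docstring.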
def __new__(cls, value, doc=""):
obj = object.__new__(cls)
obj._value_ = value
return obj
def __init__(self, value, doc=""):
self._value_ = value
self.__doc__ = doc
|
from authors.apps.authentication.models import User
from rest_framework.test import APIClient, APITestCase
from rest_framework import status
from authors.apps.articles.models import Article
from authors.apps.articles.extra_methods import create_slug
class BaseTest(APITestCase):
articles_url = '/api/articles/'
    def tearDown(self):
pass
def setUp(self):
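        """Create test users, publish an article, and file a report used by the report-article tests."""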
self.user = User.objects.create(
username="lamech", email="lamech@bg.com", is_superuser=False, is_active=True
)
self.super_user = User.objects.create(
username="edward", email="edward@bg.com", is_superuser=True, is_active=True
)
self.user_two = User.objects.create(
username="ann", email="night@bg.com", is_superuser=False, is_active=True
)
article_data = {
"article": {
"title": "Article 2 title",
"body": "Article body",
"description": "Article description",
"tagList":["python"]
}
}
self.article_data_2 = {
"article": {
"title": "Article 2 title",
"body": "Article body",
"description": "Article description",
"tagList":["python"]
}
}
self.report_data = {
"violation_subject": "rules violation",
"violation_report": "heieijeei"
}
self.report_data_2 = {
"violation_subject": "spam",
"violation_report": "this is the first report"
}
self.client.force_authenticate(user=self.user)
response = self.client.post(self.articles_url, data=article_data, format='json')
slug_one = response.data["article"]["slug"]
self.report_articles_url = self.articles_url + "report-article/"
self.report_article_url_post = self.articles_url + slug_one + "/report-article/"
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.client.force_authenticate(user=self.user_two)
self.client.post(
self.report_article_url_post, data=self.report_data_2, format='json'
)
response = self.client.get(
self.report_articles_url, format='json'
)
self.report_id = response.data.get('reports')[0].get('id')
self.report_article_url = self.articles_url + "report-article/" + str(self.report_id) + "/"
|
# -*- coding: utf-8 -*-
"""
**visualize** module
-------------------
"""
import pandas as pd
__all__ = ['show_hist', 'show_scatter', 'show_plot']
def show_hist(data, bins=10, orientation='vertical'):
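    """Plot a histogram of the given data and return the Matplotlib figure."""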
# df = pd.concat([dataX,dataY], axis=1)
# df.hist(bins=bins, orientation=orientation).get_figure()
return data.plot(kind='hist', bins=bins, orientation=orientation).get_figure()
def show_scatter(dataX, dataY):
df = pd.concat([dataX,dataY], axis=1)
return df.plot(kind="scatter", x=dataX.columns[0], y=dataY.columns[0]).get_figure()
def show_plot(dataX, dataY, method="count", kind='line'):
if method == "count":
return pd.concat([dataX,dataY], axis=1).groupby(dataX.columns[0]).count().plot(kind=kind).get_figure()
elif method == "sum":
return pd.concat([dataX,dataY], axis=1).groupby(dataX.columns[0]).sum().plot(kind=kind).get_figure()
    elif method is None:
return pd.concat([dataX,dataY], axis=1).plot(kind=kind).get_figure()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protos/model/v1/datapoint.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protos.model.v1 import file_pb2 as protos_dot_model_dot_v1_dot_file__pb2
from protos.model.v1 import health_pb2 as protos_dot_model_dot_v1_dot_health__pb2
from protos.model.v1 import math_pb2 as protos_dot_model_dot_v1_dot_math__pb2
from protos.model.v1 import navigation_pb2 as protos_dot_model_dot_v1_dot_navigation__pb2
from protos.model.v1 import text_pb2 as protos_dot_model_dot_v1_dot_text__pb2
from protos.model.v1 import media_pb2 as protos_dot_model_dot_v1_dot_media__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='protos/model/v1/datapoint.proto',
package='v1.model',
syntax='proto3',
serialized_options=b'Z)github.com/FormantIO/genproto/go/v1/model',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1fprotos/model/v1/datapoint.proto\x12\x08v1.model\x1a\x1aprotos/model/v1/file.proto\x1a\x1cprotos/model/v1/health.proto\x1a\x1aprotos/model/v1/math.proto\x1a protos/model/v1/navigation.proto\x1a\x1aprotos/model/v1/text.proto\x1a\x1bprotos/model/v1/media.proto\"\xd9\x06\n\tDatapoint\x12\x16\n\x06stream\x18\x01 \x01(\tR\x06stream\x12\x1c\n\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12\x31\n\x04tags\x18\x03 \x03(\x0b\x32\x1d.v1.model.Datapoint.TagsEntryR\x04tags\x12$\n\x04text\x18\x04 \x01(\x0b\x32\x0e.v1.model.TextH\x00R\x04text\x12-\n\x07numeric\x18\x05 \x01(\x0b\x32\x11.v1.model.NumericH\x00R\x07numeric\x12\x37\n\x0bnumeric_set\x18\x11 \x01(\x0b\x32\x14.v1.model.NumericSetH\x00R\nnumericSet\x12*\n\x06\x62itset\x18\x07 \x01(\x0b\x32\x10.v1.model.BitsetH\x00R\x06\x62itset\x12$\n\x04\x66ile\x18\x08 \x01(\x0b\x32\x0e.v1.model.FileH\x00R\x04\x66ile\x12\'\n\x05image\x18\t \x01(\x0b\x32\x0f.v1.model.ImageH\x00R\x05image\x12\x37\n\x0bpoint_cloud\x18\n \x01(\x0b\x32\x14.v1.model.PointCloudH\x00R\npointCloud\x12\x30\n\x08location\x18\x0b \x01(\x0b\x32\x12.v1.model.LocationH\x00R\x08location\x12<\n\x0clocalization\x18\x0c \x01(\x0b\x32\x16.v1.model.LocalizationH\x00R\x0clocalization\x12*\n\x06health\x18\r \x01(\x0b\x32\x10.v1.model.HealthH\x00R\x06health\x12$\n\x04json\x18\x0e \x01(\x0b\x32\x0e.v1.model.JsonH\x00R\x04json\x12-\n\x07\x62\x61ttery\x18\x0f \x01(\x0b\x32\x11.v1.model.BatteryH\x00R\x07\x62\x61ttery\x12\'\n\x05video\x18\x10 \x01(\x0b\x32\x0f.v1.model.VideoH\x00R\x05video\x12@\n\x0etransform_tree\x18\x12 \x01(\x0b\x32\x17.v1.model.TransformTreeH\x00R\rtransformTree\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x06\n\x04\x64\x61taJ\x04\x08\x06\x10\x07\"\xfa\x02\n\x10\x43ontrolDatapoint\x12\x16\n\x06stream\x18\x01 \x01(\tR\x06stream\x12\x1c\n\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12*\n\x06\x62itset\x18\x03 \x01(\x0b\x32\x10.v1.model.BitsetH\x00R\x06\x62itset\x12\'\n\x05twist\x18\x04 \x01(\x0b\x32\x0f.v1.model.TwistH\x00R\x05twist\x12)\n\x04pose\x18\x05 \x01(\x0b\x32\x13.v1.model.TransformH\x00R\x04pose\x12-\n\x07numeric\x18\x06 \x01(\x0b\x32\x11.v1.model.NumericH\x00R\x07numeric\x12P\n\x14pose_with_covariance\x18\x07 \x01(\x0b\x32\x1c.v1.model.PoseWithCovarianceH\x00R\x12poseWithCovariance\x12\'\n\x05point\x18\x08 \x01(\x0b\x32\x0f.v1.model.PointH\x00R\x05pointB\x06\n\x04\x64\x61taB+Z)github.com/FormantIO/genproto/go/v1/modelb\x06proto3'
,
dependencies=[protos_dot_model_dot_v1_dot_file__pb2.DESCRIPTOR,protos_dot_model_dot_v1_dot_health__pb2.DESCRIPTOR,protos_dot_model_dot_v1_dot_math__pb2.DESCRIPTOR,protos_dot_model_dot_v1_dot_navigation__pb2.DESCRIPTOR,protos_dot_model_dot_v1_dot_text__pb2.DESCRIPTOR,protos_dot_model_dot_v1_dot_media__pb2.DESCRIPTOR,])
_DATAPOINT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='v1.model.Datapoint.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='v1.model.Datapoint.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='v1.model.Datapoint.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1011,
serialized_end=1066,
)
_DATAPOINT = _descriptor.Descriptor(
name='Datapoint',
full_name='v1.model.Datapoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='stream', full_name='v1.model.Datapoint.stream', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='stream', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='v1.model.Datapoint.timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='timestamp', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='v1.model.Datapoint.tags', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tags', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='text', full_name='v1.model.Datapoint.text', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='text', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='numeric', full_name='v1.model.Datapoint.numeric', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numeric', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='numeric_set', full_name='v1.model.Datapoint.numeric_set', index=5,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numericSet', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bitset', full_name='v1.model.Datapoint.bitset', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bitset', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='file', full_name='v1.model.Datapoint.file', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='file', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='image', full_name='v1.model.Datapoint.image', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='image', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='point_cloud', full_name='v1.model.Datapoint.point_cloud', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='pointCloud', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location', full_name='v1.model.Datapoint.location', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='location', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='localization', full_name='v1.model.Datapoint.localization', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='localization', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='health', full_name='v1.model.Datapoint.health', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='health', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='json', full_name='v1.model.Datapoint.json', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='json', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='battery', full_name='v1.model.Datapoint.battery', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='battery', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='video', full_name='v1.model.Datapoint.video', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='video', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='transform_tree', full_name='v1.model.Datapoint.transform_tree', index=16,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='transformTree', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_DATAPOINT_TAGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='v1.model.Datapoint.data',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=223,
serialized_end=1080,
)
_CONTROLDATAPOINT = _descriptor.Descriptor(
name='ControlDatapoint',
full_name='v1.model.ControlDatapoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='stream', full_name='v1.model.ControlDatapoint.stream', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='stream', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='v1.model.ControlDatapoint.timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='timestamp', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bitset', full_name='v1.model.ControlDatapoint.bitset', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bitset', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='twist', full_name='v1.model.ControlDatapoint.twist', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='twist', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pose', full_name='v1.model.ControlDatapoint.pose', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='pose', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='numeric', full_name='v1.model.ControlDatapoint.numeric', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numeric', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pose_with_covariance', full_name='v1.model.ControlDatapoint.pose_with_covariance', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='poseWithCovariance', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='point', full_name='v1.model.ControlDatapoint.point', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='point', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='v1.model.ControlDatapoint.data',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1083,
serialized_end=1461,
)
_DATAPOINT_TAGSENTRY.containing_type = _DATAPOINT
_DATAPOINT.fields_by_name['tags'].message_type = _DATAPOINT_TAGSENTRY
_DATAPOINT.fields_by_name['text'].message_type = protos_dot_model_dot_v1_dot_text__pb2._TEXT
_DATAPOINT.fields_by_name['numeric'].message_type = protos_dot_model_dot_v1_dot_math__pb2._NUMERIC
_DATAPOINT.fields_by_name['numeric_set'].message_type = protos_dot_model_dot_v1_dot_math__pb2._NUMERICSET
_DATAPOINT.fields_by_name['bitset'].message_type = protos_dot_model_dot_v1_dot_math__pb2._BITSET
_DATAPOINT.fields_by_name['file'].message_type = protos_dot_model_dot_v1_dot_file__pb2._FILE
_DATAPOINT.fields_by_name['image'].message_type = protos_dot_model_dot_v1_dot_media__pb2._IMAGE
_DATAPOINT.fields_by_name['point_cloud'].message_type = protos_dot_model_dot_v1_dot_media__pb2._POINTCLOUD
_DATAPOINT.fields_by_name['location'].message_type = protos_dot_model_dot_v1_dot_navigation__pb2._LOCATION
_DATAPOINT.fields_by_name['localization'].message_type = protos_dot_model_dot_v1_dot_navigation__pb2._LOCALIZATION
_DATAPOINT.fields_by_name['health'].message_type = protos_dot_model_dot_v1_dot_health__pb2._HEALTH
_DATAPOINT.fields_by_name['json'].message_type = protos_dot_model_dot_v1_dot_text__pb2._JSON
_DATAPOINT.fields_by_name['battery'].message_type = protos_dot_model_dot_v1_dot_health__pb2._BATTERY
_DATAPOINT.fields_by_name['video'].message_type = protos_dot_model_dot_v1_dot_media__pb2._VIDEO
_DATAPOINT.fields_by_name['transform_tree'].message_type = protos_dot_model_dot_v1_dot_media__pb2._TRANSFORMTREE
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['text'])
_DATAPOINT.fields_by_name['text'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['numeric'])
_DATAPOINT.fields_by_name['numeric'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['numeric_set'])
_DATAPOINT.fields_by_name['numeric_set'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['bitset'])
_DATAPOINT.fields_by_name['bitset'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['file'])
_DATAPOINT.fields_by_name['file'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['image'])
_DATAPOINT.fields_by_name['image'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['point_cloud'])
_DATAPOINT.fields_by_name['point_cloud'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['location'])
_DATAPOINT.fields_by_name['location'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['localization'])
_DATAPOINT.fields_by_name['localization'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['health'])
_DATAPOINT.fields_by_name['health'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['json'])
_DATAPOINT.fields_by_name['json'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['battery'])
_DATAPOINT.fields_by_name['battery'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['video'])
_DATAPOINT.fields_by_name['video'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_DATAPOINT.oneofs_by_name['data'].fields.append(
_DATAPOINT.fields_by_name['transform_tree'])
_DATAPOINT.fields_by_name['transform_tree'].containing_oneof = _DATAPOINT.oneofs_by_name['data']
_CONTROLDATAPOINT.fields_by_name['bitset'].message_type = protos_dot_model_dot_v1_dot_math__pb2._BITSET
_CONTROLDATAPOINT.fields_by_name['twist'].message_type = protos_dot_model_dot_v1_dot_math__pb2._TWIST
_CONTROLDATAPOINT.fields_by_name['pose'].message_type = protos_dot_model_dot_v1_dot_math__pb2._TRANSFORM
_CONTROLDATAPOINT.fields_by_name['numeric'].message_type = protos_dot_model_dot_v1_dot_math__pb2._NUMERIC
_CONTROLDATAPOINT.fields_by_name['pose_with_covariance'].message_type = protos_dot_model_dot_v1_dot_navigation__pb2._POSEWITHCOVARIANCE
_CONTROLDATAPOINT.fields_by_name['point'].message_type = protos_dot_model_dot_v1_dot_math__pb2._POINT
_CONTROLDATAPOINT.oneofs_by_name['data'].fields.append(
_CONTROLDATAPOINT.fields_by_name['bitset'])
_CONTROLDATAPOINT.fields_by_name['bitset'].containing_oneof = _CONTROLDATAPOINT.oneofs_by_name['data']
_CONTROLDATAPOINT.oneofs_by_name['data'].fields.append(
_CONTROLDATAPOINT.fields_by_name['twist'])
_CONTROLDATAPOINT.fields_by_name['twist'].containing_oneof = _CONTROLDATAPOINT.oneofs_by_name['data']
_CONTROLDATAPOINT.oneofs_by_name['data'].fields.append(
_CONTROLDATAPOINT.fields_by_name['pose'])
_CONTROLDATAPOINT.fields_by_name['pose'].containing_oneof = _CONTROLDATAPOINT.oneofs_by_name['data']
_CONTROLDATAPOINT.oneofs_by_name['data'].fields.append(
_CONTROLDATAPOINT.fields_by_name['numeric'])
_CONTROLDATAPOINT.fields_by_name['numeric'].containing_oneof = _CONTROLDATAPOINT.oneofs_by_name['data']
_CONTROLDATAPOINT.oneofs_by_name['data'].fields.append(
_CONTROLDATAPOINT.fields_by_name['pose_with_covariance'])
_CONTROLDATAPOINT.fields_by_name['pose_with_covariance'].containing_oneof = _CONTROLDATAPOINT.oneofs_by_name['data']
_CONTROLDATAPOINT.oneofs_by_name['data'].fields.append(
_CONTROLDATAPOINT.fields_by_name['point'])
_CONTROLDATAPOINT.fields_by_name['point'].containing_oneof = _CONTROLDATAPOINT.oneofs_by_name['data']
DESCRIPTOR.message_types_by_name['Datapoint'] = _DATAPOINT
DESCRIPTOR.message_types_by_name['ControlDatapoint'] = _CONTROLDATAPOINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Datapoint = _reflection.GeneratedProtocolMessageType('Datapoint', (_message.Message,), {
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _DATAPOINT_TAGSENTRY,
'__module__' : 'protos.model.v1.datapoint_pb2'
# @@protoc_insertion_point(class_scope:v1.model.Datapoint.TagsEntry)
})
,
'DESCRIPTOR' : _DATAPOINT,
'__module__' : 'protos.model.v1.datapoint_pb2'
# @@protoc_insertion_point(class_scope:v1.model.Datapoint)
})
_sym_db.RegisterMessage(Datapoint)
_sym_db.RegisterMessage(Datapoint.TagsEntry)
ControlDatapoint = _reflection.GeneratedProtocolMessageType('ControlDatapoint', (_message.Message,), {
'DESCRIPTOR' : _CONTROLDATAPOINT,
'__module__' : 'protos.model.v1.datapoint_pb2'
# @@protoc_insertion_point(class_scope:v1.model.ControlDatapoint)
})
_sym_db.RegisterMessage(ControlDatapoint)
DESCRIPTOR._options = None
_DATAPOINT_TAGSENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
import unittest
from mppsolar.protocols.jk04 import jk04 as pi
class test_jk04_decode(unittest.TestCase):
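    # maxDiff = None lets unittest print the full diff when the large expected dicts differ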
maxDiff = None
def test_getInfo(self):
""" test the decode of a getInfo response"""
protocol = pi()
response = bytes.fromhex(
"55aaeb9003f14a4b2d42324132345300000000000000332e300000000000332e322e330000000876450004000000506f7765722057616c6c203100000000313233340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c2"
)
command = "getInfo"
expected = {
"raw_response": [
"Uªë\x90\x03ñJK-B2A24S\x00\x00\x00\x00\x00\x00\x003.0\x00\x00\x00\x00\x003.2.3\x00\x00\x00\x08vE\x00\x04\x00\x00\x00Power Wall 1\x00\x00\x00\x001234\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Â",
"",
],
"_command": "getInfo",
"_command_description": "BLE Device Information inquiry",
"Header": ["55aaeb90", ""],
"Record Type": ["03", ""],
"Record Counter": [241, ""],
"Device Model": ["JK-B2A24S", ""],
"Hardware Version": ["3.0", ""],
"Software Version": ["3.2.3", ""],
"Device Name": ["Power Wall 1", ""],
"Device Passcode": ["1234", ""],
"Manufacturing Date": ["", ""],
"Serial Number": ["", ""],
"User Data": ["", ""],
"Settings Passcode?": ["", ""],
}
protocol.get_full_command(command)
result = protocol.decode(response, command)
# print(result)
self.assertEqual(result, expected)
def test_getCellData(self):
""" test the decode of a getCellData response"""
protocol = pi()
response = b"U\xaa\xeb\x90\x02\xfd\x01\x04\x13@\x81\xbc\x16@E\xd2\x10@\xed\xd4\x16@\xed\xd4\x16@2\x1e\x17@\xa8\x10\x14@\xe3\x7f\x17@\x15\xa4\x16@\xf7)\x16@2\x1e\x17@\xb1\xf4\x0b@2\xa3\x14@\x9eJ\r@\x9e\xc5\x0f@\xa8\x8b\x16@\x9e6\x17@\xc6\x05\x17@\xe3\x7f\x17@Y\xed\x16@\xe3\x7f\x17@\xcf\xdf\x13@Y\xed\x16@2\xa3\x14@\xab\xe5p>Yk2>&\xef\xf6=>\xb84>p\xfc~>\xab9\xbc>\xde\xd3\xb6>25\x80>672>\xaeG\xf7=\x86\xc4\xfa=g,\x02>\xf6&\x02>\x97S\x01>\xd8\x1d\x01>\x94%\x05>JF\x00>\x8f\xd83>\xe0a\x92>\x05\xf2\xaa>\xd2\xbaU>\xad\xc0\xf8=\xee\x88\xf7=\xd5\xa2@>\x00\x00\x00\x00\x92\xf2\x14@P,7>\x00\x00\x00\x00\xff\xff\xff\x00\x07\x0b\x01\x01\x00X\xb6?\x00\x00\x00\x00\x00\x00\x00Z{\xedK@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\xd2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0/\x00\x00\x00\x00\x00\x00\x00X*@\x00\x0b"
command = "getCellData"
expected = {
"raw_response": [
"Uªë\x90\x02ý\x01\x04\x13@\x81¼\x16@EÒ\x10@íÔ\x16@íÔ\x16@2\x1e\x17@¨\x10\x14@ã\x7f\x17@\x15¤\x16@÷)\x16@2\x1e\x17@±ô\x0b@2£\x14@\x9eJ\r@\x9eÅ\x0f@¨\x8b\x16@\x9e6\x17@Æ\x05\x17@ã\x7f\x17@Yí\x16@ã\x7f\x17@Ïß\x13@Yí\x16@2£\x14@«åp>Yk2>&ïö=>¸4>pü~>«9¼>ÞÓ¶>25\x80>672>®G÷=\x86Äú=g,\x02>ö&\x02>\x97S\x01>Ø\x1d\x01>\x94%\x05>JF\x00>\x8fØ3>àa\x92>\x05òª>ÒºU>\xadÀø=î\x88÷=Õ¢@>\x00\x00\x00\x00\x92ò\x14@P,7>\x00\x00\x00\x00ÿÿÿ\x00\x07\x0b\x01\x01\x00X¶?\x00\x00\x00\x00\x00\x00\x00Z{íK@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00Ò\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0/\x00\x00\x00\x00\x00\x00\x00X*@\x00\x0b",
"",
],
"_command": "getCellData",
"_command_description": "BLE Cell Data inquiry",
"Header": ["55aaeb90", ""],
"Record Type": ["02", ""],
"Record Counter": [253, ""],
"Voltage Cell01": [2.297119379043579, "V"],
"Voltage Cell02": [2.355255365371704, "V"],
"Voltage Cell03": [2.262833833694458, "V"],
"Voltage Cell04": [2.356745958328247, "V"],
"Voltage Cell05": [2.356745958328247, "V"],
"Voltage Cell06": [2.361217975616455, "V"],
"Voltage Cell07": [2.313516616821289, "V"],
"Voltage Cell08": [2.367180585861206, "V"],
"Voltage Cell09": [2.353764772415161, "V"],
"Voltage Cell10": [2.346311330795288, "V"],
"Voltage Cell11": [2.361217975616455, "V"],
"Voltage Cell12": [2.186809778213501, "V"],
"Voltage Cell13": [2.322460651397705, "V"],
"Voltage Cell14": [2.207679271697998, "V"],
"Voltage Cell15": [2.246436595916748, "V"],
"Voltage Cell16": [2.352273941040039, "V"],
"Voltage Cell17": [2.362708568572998, "V"],
"Voltage Cell18": [2.359727382659912, "V"],
"Voltage Cell19": [2.367180585861206, "V"],
"Voltage Cell20": [2.35823655128479, "V"],
"Voltage Cell21": [2.367180585861206, "V"],
"Voltage Cell22": [2.310535192489624, "V"],
"Voltage Cell23": [2.35823655128479, "V"],
"Voltage Cell24": [2.322460651397705, "V"],
"Resistance Cell01": [0.23525111377239227, "Ohm"],
"Resistance Cell02": [0.17423762381076813, "Ohm"],
"Resistance Cell03": [0.12057332694530487, "Ohm"],
"Resistance Cell04": [0.17648407816886902, "Ohm"],
"Resistance Cell05": [0.2490098476409912, "Ohm"],
"Resistance Cell06": [0.36762747168540955, "Ohm"],
"Resistance Cell07": [0.3570851683616638, "Ohm"],
"Resistance Cell08": [0.25040584802627563, "Ohm"],
"Resistance Cell09": [0.17403873801231384, "Ohm"],
"Resistance Cell10": [0.12074218690395355, "Ohm"],
"Resistance Cell11": [0.12244515120983124, "Ohm"],
"Resistance Cell12": [0.12712250649929047, "Ohm"],
"Resistance Cell13": [0.12710174918174744, "Ohm"],
"Resistance Cell14": [0.12629543244838715, "Ohm"],
"Resistance Cell15": [0.126090407371521, "Ohm"],
"Resistance Cell16": [0.13002616167068481, "Ohm"],
"Resistance Cell17": [0.1252681314945221, "Ohm"],
"Resistance Cell18": [0.17563079297542572, "Ohm"],
"Resistance Cell19": [0.2859029769897461, "Ohm"],
"Resistance Cell20": [0.33387771248817444, "Ohm"],
"Resistance Cell21": [0.20872047543525696, "Ohm"],
"Resistance Cell22": [0.12146124988794327, "Ohm"],
"Resistance Cell23": [0.12086664140224457, "Ohm"],
"Resistance Cell24": [0.18812115490436554, "Ohm"],
"Resistance Cell25": [0.0, "Ohm"],
"Average Cell Voltage": [2.327305316925049, "V"],
"Delta Cell Voltage": [0.178879976272583, "V"],
"Highest Cell": [8, ""],
"Lowest Cell": [12, ""],
"Flags": ["0101", ""],
"uptime": ["0D3H23M12S", ""],
"Checksum": ["0b", ""],
"Highest Cell Voltage": [2.367180585861206, "V"],
"Lowest Cell Voltage": [2.186809778213501, "V"],
}
protocol.get_full_command(command)
result = protocol.decode(response, command)
# print(result)
self.assertEqual(result, expected)
|
""" Tests for QUInt and QInt """
import unittest
from qublets import QPU, QUInt
class QUIntTest(unittest.TestCase):
def test_num_qubits(self):
q1 = QUInt.zeros(2)
q2 = QUInt.zeros(10)
self.assertEqual(q1.num_qubits, 2, "quint count mismatch")
self.assertEqual(q2.num_qubits, 10, "quint count mismatch")
def test_shared_qpu(self):
qpu = QPU(4)
q1 = QUInt.zeros(2, qpu=qpu)
q2 = QUInt.zeros(2, qpu=qpu)
self.assertEqual(q1.qpu, qpu, "quint qpu mismatch")
self.assertEqual(q2.qpu, qpu, "quint qpu mismatch")
def test_qubit_access(self):
q1 = QUInt.zeros(2)
qb1 = q1[0]
qb2 = q1[1]
self.assertEqual(qb1.quint, q1, "qubit access mismatch")
self.assertEqual(qb1.qubit, 0, "qubit access mismatch")
self.assertEqual(qb2.quint, q1, "qubit access mismatch")
self.assertEqual(qb2.qubit, 1, "qubit access mismatch")
def test_qubit_access_out_of_range(self):
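        """Accessing a qubit index beyond the register size should raise."""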
q1 = QUInt.zeros(2)
def invalid_access():
q1[2].measure()
self.assertRaises(Exception, invalid_access)
|
import os
from flask import Flask, render_template, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
import config
# Configure Flask app
app = Flask(__name__, static_url_path='/templates')
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Database
db = SQLAlchemy(app)
# Import + Register Blueprints
from app.podcastml import podcastml as podcastml # pylint: disable=C0413
app.register_blueprint(podcastml)
# HTTP error handling
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
|
import click
import json
import os
from lib.common.config import CONFIG
from lib.services.avatar import AvatarService
@click.group()
def common_cmd():
pass
@common_cmd.command('list:ships')
def list_ships():
if not os.path.exists('data/ship.json'):
        print('Data does not exist, please update the data first.')
return
    with open('data/ship.json', 'r') as fd:
        ships = json.load(fd)
for ship in ships:
if not ship:
continue
print(ship['id'], ship['name'])
@common_cmd.command('convert')
def convert():
"""Convert quotes from zh-cn to zh-tw"""
import requests
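    # The kcwiki MediaWiki parse API is called with uselang=zh-tw so the wiki's
    # language converter returns the subtitle text in traditional Chinese.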
host = 'https://zh.kcwiki.org/api.php'
payload = {
'action': 'parse',
'contentmodel': 'wikitext',
'format': 'json',
'uselang': 'zh-tw'
}
    with open('data/subtitles.json', 'r') as fd:
        content = json.dumps(json.load(fd), ensure_ascii=False)
payload['text'] = content
rep = requests.post(host, payload)
text = rep.json()['parse']['text']['*']
import re
match = re.search(r'<p>(.*?)</p>', text, re.DOTALL)
content = match.group(1).strip()
with open('data/test.out', 'w') as fd:
fd.write(content)
content = re.sub(r'<.*?/>', '', content)
content = re.sub(r'<.*?>', '', content)
data = json.loads(content)
print(data['1'])
@common_cmd.command('weibo:share')
def weibo_share():
"""推特头像微博分享测试命令"""
SAVE_DIR = CONFIG['twitter']['save_dir']
FILE_NAME = 'KanColleStaffAvatar'
AvatarService.weibo_share(''.join([SAVE_DIR, '/', FILE_NAME, '.png']))
|
import sutil.math.core as core
import sutil.math.integrate as integrate
# Heaviside function
def heaviside(x):
if x < 0:
return 0
elif x == 0:
return 0.5
else:
return 1
# Indicator function
def indicator(x, subset):
if x in subset:
return 1
else:
return 0
# Sawtooth function
def sawtooth(x):
return x % 1
# Triangle function
def triangle(x):
return 2 * abs((x / 2) - int((x / 2) + 0.5))
# Gamma function
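# Approximates gamma(n) by integrating x^(n-1) * e^(-x) over [0, 10000] with the trapezoidal rule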
def gamma(n):
def f(x):
return core.power(x, n - 1) * (1 / core.exp(x))
v = integrate.trapezoidal(0, 10000, f, parts=10000)
return v
|
import collections
import pandas as pd
import warnings
from typing import Union, Optional, List, Dict, Tuple
from autogluon.core.constants import MULTICLASS, BINARY, REGRESSION
from .constants import NULL, CATEGORICAL, NUMERICAL, TEXT
# TODO: This file may later be merged with the type inference logic in tabular.
def is_categorical_column(data: pd.Series,
valid_data: pd.Series,
threshold: int = None,
ratio: Optional[float] = None,
oov_ratio_threshold: Optional[float] = None,
is_label: bool = False) -> bool:
"""Check whether the column is a categorical column.
If the number of unique elements in the column is smaller than
min(#Total Sample * ratio, threshold),
it will be treated as a categorical column.
Parameters
----------
data
The column data
valid_data
Additional validation data
threshold
The threshold for detecting categorical column
ratio
The ratio detecting categorical column
oov_ratio_threshold
The out-of-vocabulary ratio between training and validation.
This is used to determine if the column is a categorical column.
Usually, a categorical column can tolerate a small OOV ratio
is_label
Whether the column is a label column.
Returns
-------
is_categorical
Whether the column is a categorical column
"""
if data.dtype.name == 'category':
return True
else:
if threshold is None:
if is_label:
threshold = 100
oov_ratio_threshold = 0
ratio = 0.1
else:
threshold = 20
oov_ratio_threshold = 0
ratio = 0.1
threshold = min(int(len(data) * ratio), threshold)
data_value_counts = data.value_counts(dropna=False)
key_set = set(data_value_counts.keys())
if len(data_value_counts) < threshold:
valid_value_counts = valid_data.value_counts(dropna=False)
total_valid_num = len(valid_data)
oov_num = 0
for k, v in zip(valid_value_counts.keys(), valid_value_counts.values):
if k not in key_set:
oov_num += v
if is_label and oov_num != 0:
return False
if oov_num / total_valid_num > oov_ratio_threshold:
return False
return True
return False
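# Illustrative note (not part of the library API): with the default non-label settings
# (threshold=20, ratio=0.1), a 1000-row column is treated as categorical only if it has
# fewer than min(int(1000 * 0.1), 20) = 20 unique values and its out-of-vocabulary ratio
# on the validation data does not exceed oov_ratio_threshold.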
def is_numerical_column(data: pd.Series,
valid_data: Optional[pd.Series] = None):
"""Try to identify if a column is a numerical column.
We adopted a very simple rule to verify if the column is a numerical column.
Parameters
----------
data
The training data series
valid_data
The validation data series
Returns
-------
is_numerical
Whether the column is a numerical column
"""
    try:
        pd.to_numeric(data)
        if valid_data is not None:
            pd.to_numeric(valid_data)
        return True
    except (ValueError, TypeError):
        return False
def infer_column_problem_types(
train_df: pd.DataFrame,
valid_df: pd.DataFrame,
label_columns: Union[str, List[str]],
problem_type: Optional[str] = None,
provided_column_types: Optional[Dict] = None) -> Tuple[collections.OrderedDict, str]:
"""Infer the column types of the data frame + the problem type
Parameters
----------
train_df
The training Pandas DataFrame
valid_df
The validation Pandas DataFrame
label_columns
The chosen label column names
problem_type
The type of the problem
provided_column_types
Additional dictionary that you can use to specify the columns types that you know.
{'col_name': TYPE}
Returns
-------
column_types
Dictionary of column types
If the column does not contain any useful information, we will filter the column with
type = NULL
problem_type
The inferred problem type
"""
if isinstance(label_columns, str):
label_columns = [label_columns]
elif isinstance(label_columns, (list, tuple)):
pass
else:
        raise NotImplementedError(f'label_columns must be a str, list, or tuple; got {label_columns!r}.')
label_set = set(label_columns)
assert len(label_set) == 1, 'Currently, only a single label column is supported.'
column_types = collections.OrderedDict()
# Process all feature columns
for col_name in train_df.columns:
is_label = col_name in label_set
if provided_column_types is not None and col_name in provided_column_types:
column_types[col_name] = provided_column_types[col_name]
continue
if is_label:
num_train_missing = train_df[col_name].isnull().sum()
num_valid_missing = valid_df[col_name].isnull().sum()
if num_train_missing > 0:
raise ValueError(f'Label column "{col_name}" contains missing values in the '
f'training data frame. You may want to filter your data because '
f'missing label is currently not supported.')
if num_valid_missing > 0:
raise ValueError(f'Label column "{col_name}" contains missing values in the '
f'validation data frame. You may want to filter your data because '
f'missing label is currently not supported.')
if problem_type == MULTICLASS or problem_type == BINARY:
column_types[col_name] = CATEGORICAL
continue
elif problem_type == REGRESSION:
column_types[col_name] = NUMERICAL
continue
# Identify columns that provide no information
idx = train_df[col_name].first_valid_index()
if idx is None or len(train_df[col_name].unique()) == 1:
# No valid index, thus, we will just ignore the column
            if not is_label:
                column_types[col_name] = NULL
                continue
else:
warnings.warn(f'Label column "{col_name}" contains only one label. You may want'
f' to check your dataset again.')
# Use the following way for type inference
# 1) Inference categorical column
# 2) Inference numerical column
# 3) All the other columns are treated as text column
if is_categorical_column(train_df[col_name], valid_df[col_name],
is_label=is_label):
column_types[col_name] = CATEGORICAL
elif is_numerical_column(train_df[col_name], valid_df[col_name]):
column_types[col_name] = NUMERICAL
else:
column_types[col_name] = TEXT
problem_type = infer_problem_type(column_types, label_columns[0], train_df, problem_type)
return column_types, problem_type
def printable_column_type_string(column_types):
ret = 'Column Types:\n'
for col_name, col_type in column_types.items():
ret += f' - "{col_name}": {col_type}\n'
return ret
def infer_problem_type(column_types, label_column, data_df,
provided_problem_type=None):
"""Inference the type of the problem based on type of the column and
the training data.
Also, it will try to check the correctness of the column types and the provided problem_type.
Parameters
----------
column_types
Type of the columns
label_column
The label column
data_df
The dataframe
provided_problem_type
The provided problem type
Returns
-------
problem_type
Type of the problem
"""
if provided_problem_type is not None:
if provided_problem_type == MULTICLASS or provided_problem_type == BINARY:
err_msg = f'Provided problem type is "{provided_problem_type}" while the number of ' \
f'unique value in the label column is {len(data_df[label_column].unique())}'
if provided_problem_type == BINARY and len(data_df[label_column].unique()) != 2:
raise AssertionError(err_msg)
elif provided_problem_type == MULTICLASS and len(data_df[label_column].unique()) <= 2:
raise AssertionError(err_msg)
return provided_problem_type
else:
if column_types[label_column] == CATEGORICAL:
if len(data_df[label_column].value_counts()) == 2:
return BINARY
else:
return MULTICLASS
elif column_types[label_column] == NUMERICAL:
return REGRESSION
else:
            raise ValueError(f'The label column "{label_column}" has type'
                             f' "{column_types[label_column]}", which is not supported yet.')
|
import tkinter as tk
import tkinter.ttk as ttk
import math as m
# main definitions
import time
# rpyc service definition
import rpyc
class MyService(rpyc.Service):
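    """rpyc service exposing the Tk GUI so remote clients can add quad sliders, set values, and print messages."""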
def __init__(self, tkRoot):
rpyc.Service.__init__(self)
self.root = tkRoot
print('Instantiated MyService with root: {}'.format(self.root))
def exposed_addQuadSlider(self, name, setMin = 15, setMax = -15, setRes = 0.2, cb = None, **options):
return self.root.addQuadSlider(name=name, setMin= setMin, setMax=setMax, setRes = setRes, cb = cb, **options)
def exposed_printMessage(self, msg):
return self.root.printMsg(msg)
def exposed_setValue(self,QuadSliderKey, SliderKey, value):
return self.root.setValue(QuadSliderKey, SliderKey, value)
class SliderWithExponent(ttk.Frame):
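    """A horizontal scale paired with an exponent spinbox; the combined value (slider * 10**exponent) is published via self.value."""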
def __init__(self, master, name, initialVal=50, **options):
self.minSlider = 0
self.maxSlider = 10
self.minExp = -5
self.maxExp = 0
ttk.Frame.__init__(self, master, **options,)
self.sliderVar = tk.DoubleVar()
self.slider = tk.Scale(self, name="scale", orient=tk.HORIZONTAL, length = 250,
from_=0, to=10, variable = self.sliderVar, command= self.sliderCb, takefocus=True)
self.slider.config(resolution = 0.05)
# with Windows OS
self.slider.bind("<MouseWheel>", self.mouse_wheel)
# with Linux OS
self.slider.bind("<Button-4>", self.mouse_wheel)
self.slider.bind("<Button-5>", self.mouse_wheel)
self.exponent = tk.Spinbox(self, name="spinbox",
from_=-5, to=0, increment = 1, command= self.sliderCb)
# with Windows OS
self.exponent.bind("<MouseWheel>", self.mouse_wheel)
# with Linux OS
self.exponent.bind("<Button-4>", self.mouse_wheel)
self.exponent.bind("<Button-5>", self.mouse_wheel)
self.set(initialVal)
self.label = ttk.Label(self, name="label",)
self.value = tk.DoubleVar()
separator = ttk.Frame(self)
ttk.Label(separator, text = name, width = 10).grid(row=0, column=0)
ttk.Separator(separator, orient=tk.HORIZONTAL, ).grid(row=0, column=1, sticky=tk.E+tk.W)
separator.pack()
self.slider.pack(fill='x', side='left')
self.exponent.pack(fill='x', side='left')
self.label.pack(fill='x', side='left')
self.sliderCb()
def sliderCb(self, event=None):
sliderVal = self.children["scale"].get()
try:
expVal = int(self.children["spinbox"].get())
value = sliderVal * 10**expVal
self.value.set(value)
self.label.config(text="{:.2e}".format(value))
except:
return #there is a race condition: self.children["spinbox"].get() can yield '' in case it is changed from somewhere else
def mouse_wheel(self, event):
source = event.widget
if isinstance(source, tk.Spinbox):
# respond to Linux or Windows wheel event
if event.num == 5 or event.delta == -120:
source.invoke('buttondown')
if event.num == 4 or event.delta == 120:
source.invoke('buttonup')
return
elif isinstance(source, tk.Scale):
val = float(source.get())
# respond to Linux or Windows wheel event
if event.num == 5 or event.delta == -120:
val -= source.cget('resolution')
if event.num == 4 or event.delta == 120:
val += source.cget('resolution')
source.set(val)
return
def set(self, newVal):
# print("set slider to {}; min: {}, max{}".format(newVal, self.minSlider*10**self.minExp, self.maxSlider*10**self.maxExp))
if newVal <= self.minSlider*10**self.minExp:
self.sliderVar.set(self.minSlider)
self.exponent.delete(0,"end")
self.exponent.insert(0,self.minExp)
self.sliderCb()
return
if newVal >= self.maxSlider*10**self.maxExp:
self.sliderVar.set(self.maxSlider)
self.exponent.delete(0,"end")
self.exponent.insert(0,self.maxExp)
self.sliderCb()
return
exp = m.floor(m.log10(newVal))
valNorm = newVal / 10**exp
self.sliderVar.set(valNorm)
self.exponent.delete(0,"end")
self.exponent.insert(0,exp)
self.sliderCb()
return
class QuadrupleSliderWithEnable(ttk.Frame):
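    """Bundles P/PI/PD SliderWithExponent widgets with a setpoint scale and an enable checkbox; changes are forwarded to the cb callback as keyword arguments."""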
def __init__(self, master, name, setMin = 15, setMax = -15, setRes = 0.2, cb = None, **options):
self.cb = cb
        ttk.Frame.__init__(self, master, **options)  # match the declared ttk.Frame base class
sliderFrame = tk.Frame(self)
ttk.Label(sliderFrame, text=name).pack()
self.sliders = {
"sliderP" : SliderWithExponent(sliderFrame, "SliderP"),
"sliderPI" : SliderWithExponent(sliderFrame, "SliderPI"),
"sliderPD" : SliderWithExponent(sliderFrame, "SliderPD"),
}
self.valueP = self.sliders["sliderP"].value
self.valuePI = self.sliders["sliderPI"].value
self.valuePD = self.sliders["sliderPD"].value
self.valueP.trace('w', self.varChange)
self.valuePI.trace('w', self.varChange)
self.valuePD.trace('w', self.varChange)
self.sliders["sliderP"].pack()
self.sliders["sliderPI"].pack()
self.sliders["sliderPD"].pack()
SetpointEnableFrame = tk.Frame(self)
quitBtn = tk.Button(self, text='Remove Quad', command=self.destroy) #TODO: it's not deleted from the widget list :-(
self.setpointVar = tk.DoubleVar(self)
self.sliders["sliderSetpoint"] = tk.Scale(SetpointEnableFrame, name="scale", orient=tk.VERTICAL,
from_=setMin, to=setMax, resolution = setRes, takefocus=True,
command= self.sliderCb)
# with Windows OS
self.sliders["sliderSetpoint"].bind("<MouseWheel>", self.mouse_wheel)
# with Linux OS
self.sliders["sliderSetpoint"].bind("<Button-4>", self.mouse_wheel)
self.sliders["sliderSetpoint"].bind("<Button-5>", self.mouse_wheel)
self.enabledVar = tk.BooleanVar(self)
enableBtn = ttk.Checkbutton(SetpointEnableFrame, text = "enabled", variable = self.enabledVar, command = None)
self.enabledVar.set(True)
self.setpointVar.trace('w', self.varChange)
self.enabledVar.trace('w', self.varChange)
self.sliders["sliderSetpoint"].pack()
enableBtn.pack()
quitBtn.pack()
sliderFrame.pack(fill='x', side='left')
SetpointEnableFrame.pack(fill='x', side='left')
def mouse_wheel(self, event):
source = event.widget
val = float(source.get())
# respond to Linux or Windows wheel event
if event.num == 5 or event.delta == -120:
val -= source.cget('resolution')
if event.num == 4 or event.delta == 120:
val += source.cget('resolution')
source.set(val)
return
def sliderCb(self, event=None):
sliderVal = self.sliders["sliderSetpoint"].get()
self.setpointVar.set(sliderVal)
def set(self, element, value):
self.sliders[element].set(value)
self.varChange()
def varChange(self, *argv):
        values = {
'valueP' : self.valueP.get(),
'valuePI' : self.valuePI.get(),
'valuePD' : self.valuePD.get(),
'valueSetPoint' : self.setpointVar.get(),
'valueEnabled' : self.enabledVar.get()
}
if self.cb:
            self.cb(**values)
class GUI():
def __init__(self):
self.widgetList = {}
self.root = tk.Tk()
#now show the container
self.root.title('This is my frame for all and everything')
self.root.geometry("500x500")
tk.Button(self.root, text="Finish",
command=lambda : self.root.event_generate("<Destroy>", when="now")).pack(anchor=tk.CENTER)
# self.root.after(2000, self.scheduledAdd)
def showGui(self):
self.root.mainloop()
def addQuadSlider(self, name, setMin = 15, setMax = -15, setRes = 0.2, cb = None, **options):
newFrame = QuadrupleSliderWithEnable(None, name=name, setMin= setMin, setMax=setMax, setRes = setRes, cb = cb, **options)
self.widgetList[name] = newFrame
newFrame.pack()
def setValue(self, QuadSliderKey, SliderKey, value):
self.widgetList[QuadSliderKey].set(SliderKey, value)
def printMsg(self, msg):
print(msg)
return 42
def callback(**kwargs):
for key, value in kwargs.items():
print ("%s == %s" %(key, value))
# the main logic
if __name__ == '__main__':
print("start")
guiRoot = GUI()
# guiRoot.addQuadSlider("Pitch-Control", cb = callback)
# guiRoot.addQuadSlider("Roll-Control", cb = callback, setMin=30, setMax=-30, setRes=1)
# guiRoot.widgetList["Pitch-Control"].set('sliderSetpoint', 20)
# guiRoot.widgetList["Pitch-Control"].set('sliderPI', 5)
# guiRoot.widgetList["Pitch-Control"].set('sliderPD', 0.00345)
# guiRoot.widgetList["Pitch-Control"].set('sliderP', 0.0125)
# start the rpyc server
from rpyc.utils.server import ThreadedServer
from threading import Thread
myServiceInstance = MyService(guiRoot)
server = ThreadedServer(myServiceInstance, port = 12345)
t = Thread(target = server.start)
t.daemon = True
t.start()
guiRoot.showGui()
print("Server shutdown")
|
from crispy_forms_gds.helper import FormHelper
from crispy_forms_gds.layout import Button
from crispy_forms_gds.layout import Layout
from django import forms
from django.contrib.auth.models import User
from django.db import transaction
from importer import models
from importer.chunker import chunk_taric
from importer.management.commands.run_import_batch import run_batch
from importer.namespaces import TARIC_RECORD_GROUPS
from workbaskets.validators import WorkflowStatus
class UploadTaricForm(forms.ModelForm):
status = forms.ChoiceField(choices=WorkflowStatus.choices, required=True)
taric_file = forms.FileField(required=True)
commodities = forms.BooleanField(
label="Commodities Only",
required=False,
initial=False,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
*self.fields,
Button("submit", "Upload"),
)
@transaction.atomic
def save(self, user: User, commit=True):
batch = super().save(commit)
if self.data.get("commodities") is not None:
record_group = TARIC_RECORD_GROUPS["commodities"]
else:
record_group = None
chunk_taric(self.files["taric_file"], batch, record_group=record_group)
run_batch(batch=batch.name, username=user.username, status=self.data["status"])
return batch
class Meta:
model = models.ImportBatch
fields = ["name", "split_job", "dependencies"]
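# Illustrative only: a minimal sketch of how this form might be wired into a
# Django view. The view function, URL name, and template below are assumptions
# and are not part of this module.
#
#     from django.shortcuts import redirect, render
#
#     def upload_taric(request):
#         if request.method == "POST":
#             form = UploadTaricForm(request.POST, request.FILES)
#             if form.is_valid():
#                 form.save(user=request.user)
#                 return redirect("import_batch_list")
#         else:
#             form = UploadTaricForm()
#         return render(request, "importer/upload.html", {"form": form})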
|
from systems.commands import profile
import copy
class ProfileComponent(profile.BaseProfileComponent):
def priority(self):
return 7
def run(self, name, config):
networks = self.pop_values('network', config)
subnets = self.pop_values('subnets', config)
provider = self.pop_value('provider', config)
domain = self.pop_value('domain', config)
listeners = self.pop_info('listeners', config)
groups = self.pop_values('groups', config)
firewalls = self.pop_values('firewalls', config)
if not provider or not networks or not listeners:
self.command.error("Load balancer {} requires 'network', 'provider', and 'listeners' fields".format(name))
def process_network(network):
self.exec('lb save',
load_balancer_provider_name = provider,
load_balancer_name = name,
load_balancer_fields = self.interpolate(config,
network = network,
provider = provider,
domain = domain
),
network_name = network,
subnet_names = subnets,
domain_name = domain,
group_names = groups,
firewall_names = firewalls,
test = self.test
)
def process_listener(listener):
listener_config = copy.deepcopy(listeners[listener])
certificate = self.pop_value('certificate', listener_config)
self.exec('lb listener save',
load_balancer_name = name,
load_balancer_listener_name = listener,
load_balancer_listener_fields = self.interpolate(listener_config,
load_balancer = name,
network = network,
provider = provider,
domain = domain
),
network_name = network,
certificate_name = certificate,
test = self.test
)
if self.profile.include_inner('load_balancer_listener'):
self.run_list(listeners.keys(), process_listener)
self.run_list(networks, process_network)
def scope(self, instance):
return { 'network': instance.network.name }
def variables(self, instance):
variables = {
'provider': instance.provider_type,
'groups': self.get_names(instance.groups),
'subnets': self.get_names(instance.subnets),
'firewalls': self.get_names(instance.firewalls),
'listeners': {}
}
if instance.domain:
variables['domain'] = instance.domain.name
for listener in instance.loadbalancerlistener_relation.all():
listener_config = self.get_variables(listener)
if listener.certificate:
listener_config['certificate'] = listener.certificate.name
variables['listeners'][listener.name] = listener_config
return variables
def destroy(self, name, config):
networks = self.pop_values('network', config)
def process_network(network):
self.exec('lb remove',
load_balancer_name = name,
network_name = network,
force = True
)
self.run_list(networks, process_network)
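# Illustrative only: the rough shape of the `config` dict that run() expects,
# inferred from the pop_* calls above; every value below is a placeholder.
#
#     config = {
#         'provider': 'aws',
#         'network': ['primary'],
#         'subnets': ['web'],
#         'domain': 'example.com',
#         'groups': ['lb'],
#         'firewalls': ['https'],
#         'listeners': {
#             'https': {'certificate': 'wildcard'},
#         },
#     }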
|
import sys
import argparse
from pathlib import Path
import pandas as pd
import yaml
def create_configs_from_inputs_csv(exp, scenarios_csv_file_path, simulation_settings_path, config_path,
run_script_path, update_check_rho=False):
"""
Create one simulation configuration file per scenario.
Parameters
----------
exp : str, experiment identifier
scenarios_csv_file_path : str or Path, simulation scenario input csv file
simulation_settings_path : str or Path, YAML file with simulation settings
config_path : str or Path, destination for scenario specific config files
run_script_path : str or Path, destination for shell scripts for running simulation scenarios
update_check_rho : bool (Default=False), if True, recompute rho check values. Set to True if manual capacity levels set.
Returns
-------
    run_script_file_path : Path, shell script containing all of the simulation run commands
"""
# Read scenarios file in DataFrame
scenarios_df = pd.read_csv(scenarios_csv_file_path)
# Read settings file
with open(simulation_settings_path, 'rt') as settings_file:
settings = yaml.safe_load(settings_file)
#print(settings)
global_vars = {}
run_script_file_path = Path(run_script_path, f'{exp}_run.sh')
with open(run_script_file_path, 'w') as bat_file:
# Iterate over rows in scenarios file
for row in scenarios_df.iterrows():
scenario = int(row[1]['scenario'].tolist())
global_vars['arrival_rate'] = row[1]['arrival_rate'].tolist()
global_vars['mean_los_obs'] = row[1]['mean_los_obs'].tolist()
global_vars['num_erlang_stages_obs'] = int(row[1]['num_erlang_stages_obs'])
global_vars['mean_los_ldr'] = float(row[1]['mean_los_ldr'])
global_vars['num_erlang_stages_ldr'] = int(row[1]['num_erlang_stages_ldr'])
global_vars['mean_los_pp_noc'] = float(row[1]['mean_los_pp_noc'])
global_vars['mean_los_pp_c'] = float(row[1]['mean_los_pp_c'])
global_vars['num_erlang_stages_pp'] = int(row[1]['num_erlang_stages_pp'])
global_vars['mean_los_csect'] = float(row[1]['mean_los_csect'])
global_vars['num_erlang_stages_csect'] = int(row[1]['num_erlang_stages_csect'])
global_vars['c_sect_prob'] = float(row[1]['c_sect_prob'])
config = {}
config['locations'] = settings['locations']
cap_obs = int(row[1]['cap_obs'].tolist())
cap_ldr = int(row[1]['cap_ldr'].tolist())
cap_pp = int(row[1]['cap_pp'].tolist())
config['locations'][1]['capacity'] = cap_obs
config['locations'][2]['capacity'] = cap_ldr
config['locations'][4]['capacity'] = cap_pp
# Write scenario config file
config['scenario'] = scenario
config['run_settings'] = settings['run_settings']
config['output'] = settings['output']
config['random_number_streams'] = settings['random_number_streams']
config['routes'] = settings['routes']
config['global_vars'] = global_vars
config_file_path = Path(config_path) / f'{exp}_scenario_{scenario}.yaml'
with open(config_file_path, 'w', encoding='utf-8') as config_file:
yaml.dump(config, config_file)
run_line = f"obflow_sim {config_file_path} --loglevel=WARNING\n"
bat_file.write(run_line)
# Create output file processing line
# output_proc_line = f'python obflow_stat.py {output_path_} {exp_suffix_} '
# output_proc_line += f"--run_time {settings['run_settings']['run_time']} "
# output_proc_line += f"--warmup_time {settings['run_settings']['warmup_time']} --include_inputs "
# output_proc_line += f"--scenario_inputs_path {scenarios_csv_path_} --process_logs "
# output_proc_line += f"--stop_log_path {settings['paths']['stop_logs']} "
# output_proc_line += f"--occ_stats_path {settings['paths']['occ_stats']}"
# bat_file.write(output_proc_line)
# Update load and rho check values in case capacity levels were changed manually
if update_check_rho:
scenarios_df['check_load_obs'] = \
scenarios_df.apply(lambda x: x.arrival_rate * x.mean_los_obs, axis=1)
scenarios_df['check_load_ldr'] = \
scenarios_df.apply(lambda x: x.arrival_rate * x.mean_los_ldr, axis=1)
scenarios_df['check_load_pp'] = \
scenarios_df.apply(
lambda x: x.arrival_rate * (
x.c_sect_prob * x.mean_los_pp_c + (1 - x.c_sect_prob) * x.mean_los_pp_noc),
axis=1)
scenarios_df['check_rho_obs'] = \
scenarios_df.apply(lambda x: round(x.check_load_obs / x.cap_obs, 2), axis=1)
scenarios_df['check_rho_ldr'] = \
scenarios_df.apply(lambda x: round(x.check_load_ldr / x.cap_ldr, 2), axis=1)
scenarios_df['check_rho_pp'] = \
scenarios_df.apply(lambda x: round(x.check_load_pp / x.cap_pp, 2), axis=1)
# Rewrite scenarios input file with updated rho_checks
scenarios_df.to_csv(scenarios_csv_file_path, index=False)
print(f'Config files written to {Path(config_path)}')
return run_script_file_path
def create_run_script_chunks(run_script_file_path, run_script_chunk_size):
"""
    Split the shell script of simulation run commands into multiple files, each
    (except perhaps the last one) having ``run_script_chunk_size`` lines.
Parameters
----------
run_script_file_path : str or Path
run_script_chunk_size : int
Returns
-------
No return value - creates multiple output files of simulation run commands.
"""
base_script_path = Path(run_script_file_path).parent
stem = Path(run_script_file_path).stem
with open(run_script_file_path, 'r') as batf:
bat_lines = batf.readlines()
num_lines = len(bat_lines)
num_full_chunks = num_lines // run_script_chunk_size
if num_full_chunks == 0:
start = 0
end = num_lines
chunk = bat_lines[slice(start, end)]
chunk_bat_file = Path(base_script_path, f'{stem}_{start + 1}_{end}.sh')
with open(chunk_bat_file, 'w') as chunkf:
for line in chunk:
chunkf.write(f'{line}')
else:
for i in range(num_full_chunks):
start = i * run_script_chunk_size
end = start + run_script_chunk_size
chunk = bat_lines[slice(start, end)]
chunk_bat_file = Path(base_script_path, f'{stem}_{start + 1}_{end}.sh')
with open(chunk_bat_file, 'w') as chunkf:
for line in chunk:
chunkf.write(f'{line}')
# Write out any remaining partial chunks
        if end < num_lines:
start = end
end = num_lines
chunk_bat_file = Path(base_script_path, f'{stem}_{start + 1}_{end}.sh')
chunk = bat_lines[start:]
with open(chunk_bat_file, 'w') as chunkf:
for line in chunk:
chunkf.write(f'{line}')
def process_command_line(argv=None):
"""
Parse command line arguments
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
Return a Namespace representing the argument list.
"""
# Create the parser
parser = argparse.ArgumentParser(prog='create_configs',
description='Create scenario related files for obflowsim')
# Add arguments
parser.add_argument(
"exp", type=str,
help="Experiment identifier. Used in filenames."
)
parser.add_argument(
'scenario_inputs_file_path', type=str,
help="Scenario inputs csv file"
)
parser.add_argument(
'sim_settings_file_path', type=str,
help="Simulation experiment settings YAML file"
)
parser.add_argument(
'configs_path', type=str,
help="Destination directory for the scenario config files"
)
parser.add_argument(
'run_script_path', type=str,
help="Destination directory for the scripts for running the simulations."
)
parser.add_argument(
'--chunk_size', '-c', type=int, default=None,
help="Number of run simulation commands in each script file."
)
parser.add_argument('--update_rho_checks', '-u', dest='update_rho', action='store_true',
help='Use flag if capacity levels explicitly set')
# do the parsing
args = parser.parse_args(argv)
return args
def main(argv=None):
# Parse command line arguments
args = process_command_line(argv)
run_script_file_path = create_configs_from_inputs_csv(args.exp, args.scenario_inputs_file_path,
args.sim_settings_file_path,
args.configs_path,
args.run_script_path, args.update_rho)
if args.chunk_size:
create_run_script_chunks(run_script_file_path, args.chunk_size)
if __name__ == '__main__':
sys.exit(main())
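# Example invocation (illustrative; file and directory names are placeholders):
#
#     python create_configs.py exp11 scenarios/exp11_scenarios.csv \
#         settings/exp11_settings.yaml config/ run/ \
#         --chunk_size 25 --update_rho_checks
#
# This writes one YAML config per scenario to config/, a shell script
# exp11_run.sh of obflow_sim commands to run/, and (because --chunk_size is
# given) splits that script into chunk files of at most 25 lines each.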
|
from tests.util import BaseTest
class Test_I2041(BaseTest):
def error_code(self) -> str:
return "I2041"
def test_pass_1(self):
code = """
import os.path as pat
from os import path
"""
result = self.run_flake8(code)
assert result == []
def test_pass_2(self):
code = """
from os import path as pat
"""
result = self.run_flake8(code)
assert result == []
def test_fail_1(self):
code = """
from os.path import join
"""
result = self.run_flake8(code)
self.assert_error_at(result, "I2041", 1, 1)
def test_fail_2(self):
code = """
from os import path, environ
"""
result = self.run_flake8(code)
self.assert_error_at(result, "I2041", 1, 1)
|
import argparse
import cv2
import random
from glob import glob
import time
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
code_dir = os.path.dirname(os.path.dirname(current_dir))
sys.path.insert(0, code_dir)
from tools.utils import mkdir
from data.folder_dataset import make_dataset
def main(args):
frame_paths = make_dataset(args.data_root, recursive=True, from_vid=False)
random.shuffle(frame_paths)
mkdir(args.out_dir)
i = len(glob(os.path.join(args.out_dir, "*.png")))
global frame
for path in frame_paths:
try:
original_frame = cv2.imread(path)
frame = original_frame.copy()
success, (x, y) = annotate()
if success:
out_path = os.path.join(args.out_dir, f"{i:05d}_{x}_{y}.jpg")
cv2.imwrite(out_path, original_frame)
i += 1
        except Exception:
print(f"Skipping {path}")
time.sleep(3)
def annotate():
cv2.namedWindow('image')
cv2.setMouseCallback('image', get_x_y)
while True:
cv2.imshow('image', frame)
k = cv2.waitKey(20) & 0xFF
if k == 27:
break
elif k == ord('a'):
return True, (mouseX, mouseY)
return False, (None, None)
def get_x_y(event, x, y, flags, param):
global mouseX, mouseY
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(frame, (x, y), 10, (255, 0, 0), -1)
mouseX, mouseY = x, y
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, required=True)
parser.add_argument('--out_dir', type=str, required=True)
args = parser.parse_args()
main(args)
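# Usage notes (derived from the code above; the script name is a placeholder):
#
#     python annotate_points.py --data_root /path/to/frames --out_dir /path/to/annotations
#
# For each image shown, left-click to mark a point (a circle is drawn on the
# preview), press 'a' to accept the click position and save the original frame
# as {index}_{x}_{y}.jpg in out_dir, or press Esc to skip the image.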
|
"""
This module provides the molior UserRole database model.
UserRole : sqlAlchemy table
Constraint :
* the primary key is on the pair user_id/project_id
Columns :
* user_id
* user
* project_id
* project
* role
UserRoleEnum : array of possible roles
"""
from sqlalchemy import Column, ForeignKey, Enum, PrimaryKeyConstraint
from sqlalchemy.orm import relationship
from .database import Base
from .user import User
from .project import Project
USER_ROLES = ["member", "manager", "owner"]
class UserRole(Base): # pylint: disable=too-few-public-methods
"""
Database model for a UserRole.
"""
__tablename__ = "userrole"
__table_args__ = (PrimaryKeyConstraint("user_id", "project_id"),)
user_id = Column(ForeignKey("molioruser.id"))
user = relationship(User)
project_id = Column(ForeignKey("project.id"))
project = relationship(Project)
role = Column(Enum(*USER_ROLES, name="role_enum"))
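# Illustrative only: a minimal sketch of creating a UserRole row, assuming a
# SQLAlchemy session is available (session setup is not part of this module).
#
#     role = UserRole(user_id=some_user.id, project_id=some_project.id, role="manager")
#     session.add(role)
#     session.commit()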
|
from django.db import models
from backends.mosso import cloudfiles_upload_to
class Photo(models.Model):
title = models.CharField(max_length=50)
image = models.ImageField(upload_to=cloudfiles_upload_to)
def __unicode__(self):
return self.title
|
from .NamedObject import NamedObject
from .PPtr import PPtr
class Material(NamedObject):
def __init__(self, reader):
super().__init__(reader=reader)
version = self.version
self.m_Shader = PPtr(reader) # Shader
if version[0] == 4 and version[1] >= 1: # 4.x
self.m_ShaderKeywords = reader.read_string_array()
if version[0] >= 5: # 5.0 and up
self.m_ShaderKeywords = reader.read_aligned_string()
self.m_LightmapFlags = reader.read_u_int()
if version[0] > 5 or (version[0] == 5 and version[1] >= 6): # 5.6 and up
self.m_EnableInstancingVariants = reader.read_boolean()
# var m_DoubleSidedGI = a_Stream.read_boolean() //2017 and up
reader.align_stream()
if version[0] > 4 or (version[0] == 4 and version[1] >= 3): # 4.3 and up
self.m_CustomRenderQueue = reader.read_int()
if version[0] > 5 or (version[0] == 5 and version[1] >= 1): # 5.1 and up
stringTagMapSize = reader.read_int()
self.stringTagMap = {}
for _ in range(stringTagMapSize):
first = reader.read_aligned_string()
second = reader.read_aligned_string()
self.stringTagMap[first] = second
if version[0] > 5 or (version[0] == 5 and version[1] >= 6): # 5.6 and up
self.disabledShaderPasses = reader.read_string_array()
self.m_SavedProperties = UnityPropertySheet(reader)
class UnityTexEnv:
def __init__(self, reader):
self.m_Texture = PPtr(reader) # Texture
self.m_Scale = reader.read_vector2()
self.m_Offset = reader.read_vector2()
class UnityPropertySheet:
def __init__(self, reader):
m_TexEnvsSize = reader.read_int()
self.m_TexEnvs = {}
for i in range(m_TexEnvsSize):
key = reader.read_aligned_string()
self.m_TexEnvs[key] = UnityTexEnv(reader)
m_FloatsSize = reader.read_int()
self.m_Floats = {}
for i in range(m_FloatsSize):
key = reader.read_aligned_string()
self.m_Floats[key] = reader.read_float()
m_ColorsSize = reader.read_int()
self.m_Colors = {}
for i in range(m_ColorsSize):
key = reader.read_aligned_string()
self.m_Colors[key] = reader.read_color4()
|
#! python3
# takt-tickets.py is used to look for new Jira tickets or updated comments and notify
# via Slack.
import jira, shelve, requests
NOTIFICATIONS = {
'slack':True,
'email':False
}
notifyMsg = ''
# Create file to save settings
settings = shelve.open(".settings")
# Add setting to shelve if missing.
def getConfig( x, prompt, new=None ):
try:
thesettings = settings[x]
        if new is not None:
settings[x] = new
except Exception:
        if new is None:
settings[x] = input(prompt)
else:
settings[x] = new
thesettings = settings[x]
return thesettings
def addTrailingSlash(url):
url = url.strip()
if url[-1] != '/':
url += '/'
return(url)
# Save URL, credentials, and project name.
def setSettings():
getConfig('jiraURL', 'Enter Jira project URL: ')
getConfig('username', 'Enter Email Address: ')
getConfig('password', 'Enter Password: ')
getConfig('project', 'Enter Project Name: ')
if NOTIFICATIONS['slack'] == True:
getConfig('slack', 'Enter Slack Webhook URL: ')
# Login to Jira
def login():
global jira
options = {
'server': settings['jiraURL']}
try:
jira = jira.JIRA(options, basic_auth=(settings['username'], settings['password']))
# Test for bad credentials.
except Exception as e:
if 'Unauthorized (401)' in e.text:
print('Bad username or password; clearing cache.')
settings.clear()
setSettings()
login()
setSettings()
login()
# Pull issues and look for any new ones / changes
issues = jira.search_issues('project=' + settings['project'])
issueCount = getConfig('issueCount', '', len(issues))
if issueCount != settings['issueCount']:
    notifyMsg += str(int(settings['issueCount']) - int(issueCount)) + ' new issues.\n'
for issue in issues:
loopMsg = ''
newComments = ''
exists = True
issueLink = '<' + addTrailingSlash(settings['jiraURL']) + 'browse/' + str(issue) + '|' + str(issue) + '>'
issueData = {'assignee':str(issue.fields.assignee), 'commentcount':len(jira.comments(issue)), 'status':str(issue.fields.status)}
if str(issue) not in notifyMsg:
try:
oldData = settings[str(issue)]
if oldData['assignee'] != issueData['assignee']:
loopMsg += ' has been assigned to ' + issueData['assignee']
if oldData['commentcount'] != issueData['commentcount']:
newCommentCount = int(issueData['commentcount']) - int(oldData['commentcount'])
loopMsg += ' has ' + str(newCommentCount) + ' new comments'
try:
for i in range(0,newCommentCount):
newComments += "Comment: " + jira.comment(issue,jira.comments(issue)[-(newCommentCount-i)]).body + "\n"
except:
continue
if oldData['status'] != issueData['status']:
loopMsg += ' and status has changed to ' + issueData['status']
if len(loopMsg) > 0:
loopMsg = issueLink + loopMsg + "\n" + newComments
except KeyError:
loopMsg += 'New issue: ' + issueLink + ' ' + issueData['status'] + ' ' + issueData['assignee'] + ' ' + issue.fields.summary + '\n'
notifyMsg += loopMsg
#print(str(issue), issue.fields.assignee,len(jira.comments(issue)), issue.fields.status)
getConfig(str(issue), '', issueData)
# Notify on changes
if notifyMsg != '':
data = {'text':notifyMsg}
try:
print(data)
except Exception as e:
print(e)
if NOTIFICATIONS['slack'] != False:
# https://api.slack.com/apps
requests.post(getConfig('slack', 'Enter Slack Webhook URL: '), json=data)
print(settings['slack'])
# TODO: Add email notifications
settings.close()
|
# Generated by Django 3.1.8 on 2021-07-19 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Videos",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("video_id", models.CharField(max_length=500, unique=True)),
("title", models.CharField(max_length=150)),
("description", models.CharField(max_length=200)),
("published_at", models.DateTimeField()),
("thumbnail_url", models.URLField()),
("channel_title", models.CharField(max_length=150)),
("channel_id", models.CharField(max_length=100)),
],
),
]
|
from itertools import repeat
import yaml
import csv
import netmiko
import subprocess
from platform import system as system_name
from concurrent.futures import ThreadPoolExecutor, as_completed
import re
from netmiko import ConnectHandler
def ping_ip(ip):
param = "-n" if system_name().lower() == "windows" else "-c"
command = ["ping", param, "3", ip]
reply = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ip_is_reachable = reply.returncode == 0
return ip_is_reachable
def ping_ip_addresses(ip_list, limit=5):
reachable = []
unreachable = []
with ThreadPoolExecutor(max_workers=limit) as executor:
results = executor.map(ping_ip, ip_list)
for ip, status in zip(ip_list, results):
if status:
reachable.append(ip)
else:
unreachable.append(ip)
return reachable, unreachable
def dlink_get_hostname_sn(device):
with ConnectHandler(**device) as ssh:
ssh.enable()
output = ssh.send_command("...")
match_sn = re.search(r"...", output)
if match_sn:
sn = match_sn.group(1)
else:
sn = None
prompt = ssh.find_prompt()
hostname = re.search(r"...", prompt).group(1)
return (device["host"], hostname, sn)
def cisco_get_hostname_sn(device):
with ConnectHandler(**device) as ssh:
ssh.enable()
output = ssh.send_command("sh version")
match_sn = re.search(r"Processor board ID (\S+)", output)
if match_sn:
sn = match_sn.group(1)
else:
sn = None
prompt = ssh.find_prompt()
hostname = re.search(r"(\S+)[>#]", prompt).group(1)
return (device["host"], hostname, sn)
def get_host_sn_write_to_file(devices, filename, limit=10):
with ThreadPoolExecutor(max_workers=limit) as executor:
future_list = []
for device in devices:
function = device.pop("function")
future = executor.submit(function, device)
future_list.append(future)
with open(filename, "w") as f:
wr = csv.writer(f)
wr.writerow(["vendor", "ip", "hostname", "serial number"])
for device, f in zip(devices, future_list):
output = f.result()
vendor = device["device_type"]
wr.writerow([vendor, *output])
def collect_info_from_devices(devices_list, output_filename):
vendor_device_type_map = {
"Cisco": "cisco_ios",
"D-LINK": "dlink_ds",
}
vendor_function_map = {
"Cisco": cisco_get_hostname_sn,
"D-LINK": dlink_get_hostname_sn,
}
base_params = {
"username": "cisco",
"password": "cisco",
"secret": "cisco",
"timeout": 10,
}
devices = [
{
**base_params,
"host": device["ip"],
"device_type": vendor_device_type_map[device["vendor"]],
"function": vendor_function_map[device["vendor"]],
}
for device in devices_list
]
get_host_sn_write_to_file(devices, output_filename)
def main():
with open("devices.csv") as f:
reader = csv.DictReader(f)
netmiko_support = [
row for row in reader if row["vendor"] in ("Cisco", "D-LINK")
]
    # Get the list of IP addresses and check which ones are reachable
check_ip = [dev["ip"] for dev in netmiko_support]
reach, unreach = ping_ip_addresses(check_ip)
    print(f"Unreachable addresses:\n{unreach}")
    # Connect only to the addresses that respond to ping
reachable_devices = [dev for dev in netmiko_support if dev["ip"] in reach]
collect_info_from_devices(reachable_devices, "collected_params_results.csv")
if __name__ == "__main__":
main()
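# Illustrative only: main() expects a devices.csv file in the working directory
# with at least "vendor" and "ip" columns, for example:
#
#     vendor,ip
#     Cisco,192.168.100.1
#     D-LINK,192.168.100.2
#
# Only rows whose vendor is Cisco or D-LINK are processed, additional columns
# are ignored, and results are written to collected_params_results.csv.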
|
"""Kraken - maths.vec2 module.
Classes:
Vec2 -- Vector 2 object.
"""
import math
from kraken.core.kraken_system import ks
from math_object import MathObject
class Vec2(MathObject):
"""Vector 2 object."""
def __init__(self, x=0.0, y=0.0):
"""Initializes x, y values for Vec2 object."""
super(Vec2, self).__init__()
if ks.getRTValTypeName(x) == 'Vec2':
self._rtval = x
else:
self._rtval = ks.rtVal('Vec2')
if isinstance(x, Vec2):
self.set(x=x.x, y=x.y)
else:
self.set(x=x, y=y)
def __str__(self):
"""String representation of the Vec2 object.
Returns:
str: String representation of the Vec2 object."""
return "Vec2(" + str(self.x) + "," + str(self.y) + ")"
@property
def x(self):
"""Gets x value of this vector.
Returns:
float: X value of this vector.
"""
return self._rtval.x.getSimpleType()
@x.setter
def x(self, value):
"""Sets x value from the input value.
Args:
value (float): Value to set the x property as.
Returns:
bool: True if successful.
"""
self._rtval.x = ks.rtVal('Scalar', value)
return True
@property
def y(self):
"""Gets y value of this vector.
Returns:
float: Y value of this vector.
"""
return self._rtval.y.getSimpleType()
@y.setter
def y(self, value):
"""Sets y value from the input value.
Args:
value (float): Value to set the y property as.
Returns:
bool: True if successful.
"""
self._rtval.y = ks.rtVal('Scalar', value)
return True
def __eq__(self, other):
return self.equal(other)
def __ne__(self, other):
return not self.equal(other)
def __add__(self, other):
return self.add(other)
def __sub__(self, other):
return self.subtract(other)
def __mul__(self, other):
return self.multiply(other)
def __div__(self, other):
return self.divide(other)
def clone(self):
"""Returns a clone of the Vec2.
Returns:
Vec2: The cloned Vec2
"""
vec2 = Vec2()
vec2.x = self.x
vec2.y = self.y
return vec2
def set(self, x, y):
"""Sets the x and y value from the input values.
Args:
x (float): Value to set the x property as.
            y (float): Value to set the y property as.
Returns:
bool: True if successful.
"""
self._rtval.set('', ks.rtVal('Scalar', x), ks.rtVal('Scalar', y))
return True
    def setNull(self):
"""Setting all components of the vec2 to 0.0.
Returns:
bool: True if successful.
"""
self._rtval.setNull('')
return True
def equal(self, other):
"""Checks equality of this vec2 with another.
Args:
other (Vec2): other vector to check equality with.
Returns:
bool: True if equal.
"""
return self._rtval.equal('Boolean', other._rtval).getSimpleType()
    def almostEqual(self, other, precision=None):
        """Checks almost equality of this Vec2 with another, using either an
        explicit precision or, if none is given, the default precision.
        Args:
            other (Vec2): other vector to check equality with.
            precision (float): Optional precision value.
        Returns:
            bool: True if almost equal.
        """
        if precision is not None:
            return self._rtval.almostEqual('Boolean', other._rtval,
                                           ks.rtVal('Scalar', precision)).getSimpleType()
        return self._rtval.almostEqual('Boolean', other._rtval).getSimpleType()
def component(self, i):
"""Gets the component of this Vec2 by index.
Args:
i (int): index of the component to return.
Returns:
float: Component of this Vec2.
"""
return self._rtval.component('Scalar', ks.rtVal('Size', i)).getSimpleType()
# Sets the component of this vector by index
def setComponent(self, i, v):
"""Sets the component of this Vec2 by index.
Args:
i (int): index of the component to set.
v (float): Value to set component as.
Returns:
bool: True if successful.
"""
        self._rtval.setComponent('', ks.rtVal('Size', i),
                                 ks.rtVal('Scalar', v))
        return True
def add(self, other):
"""Overload method for the add operator.
Args:
other (Vec2): Other vector to add to this one.
Returns:
Vec2: New Vec2 of the sum of the two Vec2's.
"""
return Vec2(self._rtval.add('Vec2', other._rtval))
def subtract(self, other):
"""Overload method for the subtract operator.
Args:
other (Vec2): other vector to subtract from this one.
Returns:
Vec2: New Vec2 of the difference of the two Vec2's.
"""
return Vec2(self._rtval.subtract('Vec2', other._rtval))
def multiply(self, other):
"""Overload method for the multiply operator.
Args:
other (Vec2): other vector to multiply from this one.
Returns:
Vec2: New Vec2 of the product of the two Vec2's.
"""
return Vec2(self._rtval.multiply('Vec2', other._rtval))
def divide(self, other):
"""Divides this vector and an other.
Args:
other (Vec2): other vector to divide by.
Returns:
Vec2: Quotient of the division of this vector by the other.
"""
return Vec2(self._rtval.divide('Vec2', other._rtval))
def multiplyScalar(self, other):
"""Product of this vector and a scalar.
Args:
other (float): Scalar value to multiply this vector by.
Returns:
Vec2: Product of the multiplication of the scalar and this vector.
"""
return Vec2(self._rtval.multiplyScalar('Vec2', ks.rtVal('Scalar', other)))
def divideScalar(self, other):
"""Divides this vector and a scalar.
Args:
other (float): Value to divide this vector by.
Returns:
Vec2: Quotient of the division of the vector by the scalar.
"""
return Vec2(self._rtval.divideScalar('Vec2', ks.rtVal('Scalar', other)))
def negate(self):
"""Gets the negated version of this vector.
Returns:
Vec2: Negation of this vector.
"""
return Vec2(self._rtval.negate('Vec2'))
def inverse(self):
"""Get the inverse vector of this vector.
Returns:
Vec2: Inverse of this vector.
"""
return Vec2(self._rtval.inverse('Vec2'))
def dot(self, other):
"""Gets the dot product of this vector and another.
Args:
other (Vec2): Other vector.
Returns:
float: Dot product.
"""
return self._rtval.dot('Scalar', other._rtval).getSimpleType()
def cross(self, other):
"""Gets the cross product of this vector and another.
Args:
other (Vec2): Other vector.
Returns:
            Vec2: Cross product.
"""
return Vec2(self._rtval.cross('Vec2', other._rtval))
def lengthSquared(self):
"""Get the squared length of this vector.
Returns:
            float: Squared length of this vector.
"""
return self._rtval.lengthSquared('Scalar').getSimpleType()
def length(self):
"""Gets the length of this vector.
Returns:
float: Length of this vector.
"""
return self._rtval.length('Scalar').getSimpleType()
def unit(self):
"""Gets a unit vector of this one.
Returns:
Vec2: New unit vector from this one.
"""
return Vec2(self._rtval.unit('Vec2'))
def unit_safe(self):
"""Gets a unit vector of this one, no error reported if cannot be
made unit.
Returns:
Vec2: New unit vector.
"""
return Vec2(self._rtval.unit_safe('Vec2'))
def setUnit(self):
"""Sets this vector to a unit vector and returns the previous
length.
Returns:
            float: The previous length of this vector.
"""
return self._rtval.setUnit('Scalar').getSimpleType()
def normalize(self):
"""Gets a normalized vector from this vector.
Returns:
float: Previous length.
"""
return self._rtval.normalize('Scalar').getSimpleType()
def clamp(self, min, max):
"""Clamps this vector per component by a min and max vector.
Args:
            min (Vec2): Per-component minimum values.
            max (Vec2): Per-component maximum values.
        Returns:
            Vec2: New Vec2 with each component clamped between min and max.
"""
return Vec2(self._rtval.clamp('Vec2', min._rtval, max._rtval))
def unitsAngleTo(self, other):
"""Gets the angle (self, in radians) of this vector to another one
note expects both vectors to be units (else use angleTo)
Args:
other (Vec2): other vector to get angle to.
Returns:
float: Angle.
"""
return self._rtval.unitsAngleTo('Scalar', other._rtval).getSimpleType()
def angleTo(self, other):
"""Gets the angle (self, in radians) of this vector to another one.
Args:
other (Vec2): other vector to get angle to.
Returns:
float: Angle.
"""
return self._rtval.angleTo('Scalar', other._rtval).getSimpleType()
# Returns the distance of this vector to another one
    def distanceTo(self, other):
        """Returns the distance of this vector to another one.
        Args:
            other (Vec2): the other vector to measure the distance to.
        Returns:
            float: Distance to the other vector.
"""
return self._rtval.distanceTo('Scalar', other._rtval).getSimpleType()
def linearInterpolate(self, other, t):
"""Linearly interpolates this vector with another one based on a scalar
blend value (0.0 to 1.0).
Args:
other (Vec2): vector to blend to.
t (float): Blend value.
Returns:
Vec2: New vector blended between this and the input vector.
"""
        return Vec2(self._rtval.linearInterpolate('Vec2', other._rtval, ks.rtVal('Scalar', t)))
def distanceToLine(self, lineP0, lineP1):
"""Returns the distance of this vector to a line defined by two points
on the line.
Args:
lineP0 (Vec2): point 1 of the line.
lineP1 (Vec2): point 2 of the line.
Returns:
float: Distance to the line.
"""
return self._rtval.distanceToLine('Scalar', lineP0._rtval, lineP1._rtval).getSimpleType()
def distanceToSegment(self, segmentP0, segmentP1):
"""Returns the distance of this vector to a line segment defined by the
start and end points of the line segment
Args:
segmentP0 (Vec2): point 1 of the segment.
segmentP1 (Vec2): point 2 of the segment.
Returns:
float: Distance to the segment.
"""
return self._rtval.distanceToSegment('Scalar', segmentP0._rtval, segmentP1._rtval).getSimpleType()
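# Illustrative only: basic usage, assuming a Kraken/Fabric Engine session where
# `ks` can create RTVals (this module cannot run standalone).
#
#     v1 = Vec2(1.0, 2.0)
#     v2 = Vec2(3.0, 4.0)
#     v3 = v1 + v2          # Vec2(4.0, 6.0)
#     d = v1.dot(v2)        # 11.0
#     n = v3.unit()         # unit-length copy of v3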
|
class Warrior:
def __init__(self):
self.health = 50
self.attack = 5
@property
def is_alive(self):
return self.health > 0
def take_hit(self, hit):
self.health -= hit
return hit
def do_attack(self, other1, other2 = None, **options):
hit = options.get('hit', self.attack)
return other1.take_hit(hit)
class Lancer(Warrior):
def __init__(self):
super().__init__()
self.health = 50
self.attack = 6
def do_attack(self, other1, other2 = None, **options):
damage = super().do_attack(other1)
damage2 = super().do_attack(other2, None, hit = damage / 2) if other2 else 0
return damage + damage2
class Vampire(Warrior):
def __init__(self):
super().__init__()
self.health = 40
self.attack = 4
self.vampirism = 0.5
def do_attack(self, other1, other2 = None, **options):
damage = super().do_attack(other1)
self.health += damage * self.vampirism
return damage
class Defender(Warrior):
def __init__(self):
super().__init__()
self.health = 60
self.attack = 3
self.defense = 2
def take_hit(self, hit):
return super().take_hit(max(0, hit - self.defense))
class Knight(Warrior):
def __init__(self):
super().__init__()
self.attack = 7
def fight(unit_1, unit_2):
while True:
unit_1.do_attack(unit_2)
if not unit_2.is_alive:
return True
unit_2.do_attack(unit_1)
if not unit_1.is_alive:
return False
class Army:
def __init__(self):
self.units = []
def add_units(self, klass, count):
for i in range(count):
self.units.append(klass())
def cleanup(self):
front_warrior_dead = self.units and not self.units[0].is_alive
self.units = [u for u in self.units if u.is_alive]
return front_warrior_dead
def all_dead(self):
return self.units == []
class Battle:
def fight(self, army1, army2):
army1_turn = True
while not army1.all_dead() and not army2.all_dead():
if army1_turn:
army1.units[0].do_attack(*army2.units[:2])
else:
army2.units[0].do_attack(*army1.units[:2])
army1_turn = not army1_turn
front_warrior_dead1 = army1.cleanup()
front_warrior_dead2 = army2.cleanup()
if front_warrior_dead1 or front_warrior_dead2:
army1_turn = True
return army2.all_dead()
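# Illustrative only: a quick check of the duel logic (pure Python, safe to run
# in a REPL).
#
#     chuck = Warrior()
#     bruce = Warrior()
#     fight(chuck, bruce)   # -> True: with equal stats the first attacker wins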
|
import sys
def merge_in_order(seq_a, seq_b):
seq_a = seq_a.copy()
seq_b = seq_b.copy()
result = []
while seq_a and seq_b:
if seq_a[0] > seq_b[0]:
result.append(seq_b.pop(0))
else:
result.append(seq_a.pop(0))
while seq_a:
result.append(seq_a.pop(0))
while seq_b:
result.append(seq_b.pop(0))
return result
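# Example: merge_in_order([1, 3, 5], [2, 4]) returns [1, 2, 3, 4, 5]; both input
# lists are copied first, so the caller's sequences are left unchanged.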
if __name__ == "__main__":
    print("You can pass a path to a file containing the data as the first argument")
if len(sys.argv) > 2:
print('Wrong Invocation! Pass a path to file or do not pass anything')
else:
filename = sys.argv[1] if len(sys.argv) == 2 else 'rosalind_mer.txt'
with open(filename) as file:
size_a = int(file.readline().strip())
seq_a = [int(number) for number in file.readline().split()][:size_a]
size_b = int(file.readline().strip())
seq_b = [int(number) for number in file.readline().split()][:size_b]
for number in merge_in_order(seq_a, seq_b):
print(number, end=' ')
|
[x1 for <error descr="Assignment expression cannot be used as a target here">(x1 := 2)</error> in (1, 2, 3)]
for <error descr="Assignment expression cannot be used as a target here">(x1 := 2)</error> in (1, 2, 3):
pass
|
"""The tests for the Dialogflow component."""
import copy
import json
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import dialogflow, intent_script
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
SESSION_ID = "a9b84cec-46b6-484e-8f31-f65dba03ae6d"
INTENT_ID = "c6a74079-a8f0-46cd-b372-5a934d23591c"
INTENT_NAME = "tests"
REQUEST_ID = "19ef7e78-fe15-4e94-99dd-0c0b1e8753c3"
REQUEST_TIMESTAMP = "2017-01-21T17:54:18.952Z"
CONTEXT_NAME = "78a5db95-b7d6-4d50-9c9b-2fc73a5e34c3_id_dialog_context"
@pytest.fixture
async def calls(hass, fixture):
"""Return a list of Dialogflow calls triggered."""
calls = []
@callback
def mock_service(call):
"""Mock action call."""
calls.append(call)
hass.services.async_register("test", "dialogflow", mock_service)
return calls
@pytest.fixture
async def fixture(hass, aiohttp_client):
"""Initialize a Home Assistant server for testing this module."""
await async_setup_component(hass, dialogflow.DOMAIN, {"dialogflow": {}})
await async_setup_component(
hass,
intent_script.DOMAIN,
{
"intent_script": {
"WhereAreWeIntent": {
"speech": {
"type": "plain",
"text": """
{%- if is_state("device_tracker.paulus", "home")
and is_state("device_tracker.anne_therese",
"home") -%}
You are both home, you silly
{%- else -%}
Anne Therese is at {{
states("device_tracker.anne_therese")
}} and Paulus is at {{
states("device_tracker.paulus")
}}
{% endif %}
""",
}
},
"GetZodiacHoroscopeIntent": {
"speech": {
"type": "plain",
"text": "You told us your sign is {{ ZodiacSign }}.",
}
},
"CallServiceIntent": {
"speech": {"type": "plain", "text": "Service called"},
"action": {
"service": "test.dialogflow",
"data_template": {"hello": "{{ ZodiacSign }}"},
"entity_id": "switch.test",
},
},
}
},
)
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"dialogflow", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
webhook_id = result["result"].data["webhook_id"]
return await aiohttp_client(hass.http.app), webhook_id
class _Data:
_v1 = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"action": "GetZodiacHoroscopeIntent",
"actionIncomplete": False,
"parameters": {"ZodiacSign": "virgo"},
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME,
},
"fulfillment": {"speech": "", "messages": [{"type": 0, "speech": ""}]},
"score": 1,
},
"status": {"code": 200, "errorType": "success"},
"sessionId": SESSION_ID,
"originalRequest": None,
}
_v2 = {
"responseId": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"queryResult": {
"queryText": "my zodiac sign is virgo",
"action": "GetZodiacHoroscopeIntent",
"allRequiredParamsPresent": True,
"parameters": {"ZodiacSign": "virgo"},
"intent": {
"name": INTENT_ID,
"webhookState": "true",
"displayName": INTENT_NAME,
},
"fulfillment": {"text": "", "messages": [{"type": 0, "speech": ""}]},
"intentDetectionConfidence": 1,
},
"status": {"code": 200, "errorType": "success"},
"session": SESSION_ID,
"originalDetectIntentRequest": None,
}
@property
def v1(self):
return copy.deepcopy(self._v1)
@property
def v2(self):
return copy.deepcopy(self._v2)
Data = _Data()
async def test_v1_data():
"""Test for version 1 api based on message."""
assert dialogflow.get_api_version(Data.v1) == 1
async def test_v2_data():
"""Test for version 2 api based on message."""
assert dialogflow.get_api_version(Data.v2) == 2
async def test_intent_action_incomplete_v1(fixture):
"""Test when action is not completed."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"]["actionIncomplete"] = True
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert await response.text() == ""
async def test_intent_action_incomplete_v2(fixture):
"""Test when action is not completed."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"]["allRequiredParamsPresent"] = False
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert await response.text() == ""
async def test_intent_slot_filling_v1(fixture):
"""Test when Dialogflow asks for slot-filling return none."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"].update(
resolvedQuery="my zodiac sign is",
speech="",
actionIncomplete=True,
parameters={"ZodiacSign": ""},
contexts=[
{
"name": CONTEXT_NAME,
"parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
"lifespan": 2,
},
{
"name": "tests_ha_dialog_context",
"parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
"lifespan": 2,
},
{
"name": "tests_ha_dialog_params_zodiacsign",
"parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
"lifespan": 1,
},
],
fulfillment={
"speech": "What is the ZodiacSign?",
"messages": [{"type": 0, "speech": "What is the ZodiacSign?"}],
},
score=0.77,
)
data["result"]["metadata"].update(webhookForSlotFillingUsed="true")
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert await response.text() == ""
async def test_intent_request_with_parameters_v1(fixture):
"""Test a request with parameters."""
mock_client, webhook_id = fixture
data = Data.v1
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You told us your sign is virgo."
async def test_intent_request_with_parameters_v2(fixture):
"""Test a request with parameters."""
mock_client, webhook_id = fixture
data = Data.v2
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You told us your sign is virgo."
async def test_intent_request_with_parameters_but_empty_v1(fixture):
"""Test a request with parameters but empty value."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"].update(parameters={"ZodiacSign": ""})
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You told us your sign is ."
async def test_intent_request_with_parameters_but_empty_v2(fixture):
"""Test a request with parameters but empty value."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"].update(parameters={"ZodiacSign": ""})
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You told us your sign is ."
async def test_intent_request_without_slots_v1(hass, fixture):
"""Test a request without slots."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"].update(
resolvedQuery="where are we",
action="WhereAreWeIntent",
parameters={},
contexts=[],
)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "Anne Therese is at unknown and Paulus is at unknown"
hass.states.async_set("device_tracker.paulus", "home")
hass.states.async_set("device_tracker.anne_therese", "home")
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You are both home, you silly"
async def test_intent_request_without_slots_v2(hass, fixture):
"""Test a request without slots."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"].update(
queryText="where are we",
action="WhereAreWeIntent",
parameters={},
outputContexts=[],
)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "Anne Therese is at unknown and Paulus is at unknown"
hass.states.async_set("device_tracker.paulus", "home")
hass.states.async_set("device_tracker.anne_therese", "home")
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You are both home, you silly"
async def test_intent_request_calling_service_v1(fixture, calls):
"""Test a request for calling a service.
If this request is done async the test could finish before the action
has been executed. Hard to test because it will be a race condition.
"""
mock_client, webhook_id = fixture
data = Data.v1
data["result"]["action"] = "CallServiceIntent"
call_count = len(calls)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert len(calls) == call_count + 1
call = calls[-1]
assert call.domain == "test"
assert call.service == "dialogflow"
assert call.data.get("entity_id") == ["switch.test"]
assert call.data.get("hello") == "virgo"
async def test_intent_request_calling_service_v2(fixture, calls):
"""Test a request for calling a service.
If this request is done async the test could finish before the action
has been executed. Hard to test because it will be a race condition.
"""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"]["action"] = "CallServiceIntent"
call_count = len(calls)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert len(calls) == call_count + 1
call = calls[-1]
assert call.domain == "test"
assert call.service == "dialogflow"
assert call.data.get("entity_id") == ["switch.test"]
assert call.data.get("hello") == "virgo"
async def test_intent_with_no_action_v1(fixture):
"""Test an intent with no defined action."""
mock_client, webhook_id = fixture
data = Data.v1
del data["result"]["action"]
assert "action" not in data["result"]
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You have not defined an action in your Dialogflow intent."
async def test_intent_with_no_action_v2(fixture):
"""Test an intent with no defined action."""
mock_client, webhook_id = fixture
data = Data.v2
del data["queryResult"]["action"]
assert "action" not in data["queryResult"]
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You have not defined an action in your Dialogflow intent."
async def test_intent_with_unknown_action_v1(fixture):
"""Test an intent with an action not defined in the conf."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"]["action"] = "unknown"
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "This intent is not yet configured within Home Assistant."
async def test_intent_with_unknown_action_v2(fixture):
"""Test an intent with an action not defined in the conf."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"]["action"] = "unknown"
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "This intent is not yet configured within Home Assistant."
|
import typing as ty
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
import flamedisx as fd
export, __all__ = fd.exporter()
o = tf.newaxis
SIGNAL_NAMES = dict(photoelectron='s1', electron='s2')
class MakeFinalSignals(fd.Block):
"""Common code for MakeS1 and MakeS2"""
model_attributes = ('check_acceptances',)
# Whether to check acceptances are positive at the observed events.
# This is recommended, but you'll have to turn it off if your
# likelihood includes regions where only anomalous sources make events.
check_acceptances = True
# Prevent pycharm warnings:
source: fd.Source
gimme: ty.Callable
gimme_numpy: ty.Callable
quanta_name: str
signal_name: str
def _simulate(self, d):
d[self.signal_name] = stats.norm.rvs(
loc=(d[self.quanta_name + 's_detected']
* self.gimme_numpy(self.quanta_name + '_gain_mean')),
scale=(d[self.quanta_name + 's_detected']**0.5
* self.gimme_numpy(self.quanta_name + '_gain_std')))
# Call add_extra_columns now, since s1 and s2 are known and derived
# observables from it (cs1, cs2) might be used in the acceptance.
# TODO: This is a bit of a kludge
self.source.add_extra_columns(d)
d['p_accepted'] *= self.gimme_numpy(self.signal_name + '_acceptance')
def _annotate(self, d):
m = self.gimme_numpy(self.quanta_name + '_gain_mean')
s = self.gimme_numpy(self.quanta_name + '_gain_std')
mle = d[self.quanta_name + 's_detected_mle'] = \
(d[self.signal_name] / m).clip(0, None)
scale = mle**0.5 * s / m
for bound, sign, intify in (('min', -1, np.floor),
('max', +1, np.ceil)):
# For detected quanta the MLE is quite accurate
# (since fluctuations are tiny)
# so let's just use the relative error on the MLE)
d[self.quanta_name + 's_detected_' + bound] = intify(
mle + sign * self.source.max_sigma * scale
            ).clip(0, None).astype(int)
def _compute(self,
quanta_detected, s_observed,
data_tensor, ptensor):
# Lookup signal gain mean and std per detected quanta
mean_per_q = self.gimme(self.quanta_name + '_gain_mean',
data_tensor=data_tensor,
ptensor=ptensor)[:, o, o]
std_per_q = self.gimme(self.quanta_name + '_gain_std',
data_tensor=data_tensor,
ptensor=ptensor)[:, o, o]
mean = quanta_detected * mean_per_q
std = quanta_detected ** 0.5 * std_per_q
# add offset to std to avoid NaNs from norm.pdf if std = 0
result = tfp.distributions.Normal(
loc=mean, scale=std + 1e-10
).prob(s_observed)
# Add detection/selection efficiency
result *= self.gimme(SIGNAL_NAMES[self.quanta_name] + '_acceptance',
data_tensor=data_tensor, ptensor=ptensor)[:, o, o]
return result
def check_data(self):
if not self.check_acceptances:
return
s_acc = self.gimme_numpy(self.signal_name + '_acceptance')
if np.any(s_acc <= 0):
raise ValueError(f"Found event with non-positive {self.signal_name} "
f"acceptance: did you apply and configure "
"your cuts correctly?")
@export
class MakeS1(MakeFinalSignals):
quanta_name = 'photoelectron'
signal_name = 's1'
dimensions = ('photoelectrons_detected', 's1')
special_model_functions = ('reconstruction_bias_s1',)
model_functions = (
'photoelectron_gain_mean',
'photoelectron_gain_std',
's1_acceptance') + special_model_functions
photoelectron_gain_mean = 1.
photoelectron_gain_std = 0.5
def s1_acceptance(self, s1, s1_min=2, s1_max=70):
return tf.where((s1 < s1_min) | (s1 > s1_max),
tf.zeros_like(s1, dtype=fd.float_type()),
tf.ones_like(s1, dtype=fd.float_type()))
@staticmethod
def reconstruction_bias_s1(sig):
""" Dummy method for pax s2 reconstruction bias mean. Overwrite
it in source specific class. See x1t_sr1.py for example.
"""
reconstruction_bias = tf.ones_like(sig, dtype=fd.float_type())
return reconstruction_bias
def _compute(self, data_tensor, ptensor,
photoelectrons_detected, s1):
return super()._compute(
quanta_detected=photoelectrons_detected,
s_observed=s1,
data_tensor=data_tensor, ptensor=ptensor)
@export
class MakeS2(MakeFinalSignals):
quanta_name = 'electron'
signal_name = 's2'
dimensions = ('electrons_detected', 's2')
special_model_functions = ('reconstruction_bias_s2',)
model_functions = (
('electron_gain_mean',
'electron_gain_std',
's2_acceptance')
+ special_model_functions)
@staticmethod
def electron_gain_mean(z, *, g2=20):
return g2 * tf.ones_like(z)
electron_gain_std = 5.
def s2_acceptance(self, s2, s2_min=2, s2_max=6000):
return tf.where((s2 < s2_min) | (s2 > s2_max),
tf.zeros_like(s2, dtype=fd.float_type()),
tf.ones_like(s2, dtype=fd.float_type()))
@staticmethod
def reconstruction_bias_s2(sig):
""" Dummy method for pax s2 reconstruction bias mean. Overwrite
it in source specific class. See x1t_sr1.py for example.
"""
reconstruction_bias = tf.ones_like(sig, dtype=fd.float_type())
return reconstruction_bias
def _compute(self, data_tensor, ptensor,
electrons_detected, s2):
return super()._compute(
quanta_detected=electrons_detected,
s_observed=s2,
data_tensor=data_tensor, ptensor=ptensor)
|
from model import get_model
from data_loader import get_data
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import categorical_crossentropy
import os
os.system("pip install wandb -q")
import wandb
from wandb.keras import WandbCallback
growth_rate = int(input("Growth rate: "))
d = int(input("Number of dense blocks: "))
layers = []
for i in range(d):
layers.append(int(input(f"Number of layers in dense block {i+1}: ")))
B = input("Bottleneck (y/n): ")
B = (B == 'y')
C = input("Compression (y/n): ")
C = (C == 'y')
depth = 2 * sum(layers) + len(layers) + 1
# Run name and model name are same
run_name = 'k'+str(growth_rate)+'d'+str(depth)
if B:
run_name+='B'
if C:
run_name+='C'
print(run_name)
wandb.init(id=run_name, project='dense-net-implementation', resume=True )
data = int(input('Dataset : 1. cifar100; 2. cifar10\nEnter(1/2): '))
classes = None
if data == 1:
tr_ds, te_ds = get_data('cifar100')
classes = 100
elif data == 2:
tr_ds, te_ds = get_data('cifar10')
classes = 10
model = get_model(growth_rate=growth_rate
, layers_per_block = layers
, classes=classes
, compression_ratio=0.5 if C else 1
, bottleneck_ratio=4 if B else 1)
optimizer = SGD(momentum=0.9, nesterov=True)
model.compile(optimizer=optimizer, loss=categorical_crossentropy, metrics=['accuracy'])
model.fit(tr_ds, epochs=5, validation_data=te_ds,
          callbacks=[WandbCallback()])
|
import os
import sys
import re
import yaml
import numpy as np
import pandas as pd
import math
from tqdm import tqdm
from scipy import interpolate
sys.path.append("..")
class MaterialLoader:
def __init__(self,
wl_s=0.2,
wl_e=2.0):
super().__init__()
self.db_path = './db'
self.db_info = self.load_db_info()
self.default_wavelength_range = (wl_s, wl_e)
self.db_shelf = dict()
self.failed_material = []
self.success_material = []
def load_db_info(self):
"""[summery]
load data base info from database/library.yml
Returns:
[db info] -- [total information from load from library.yml]
"""
info_path = 'library.yml'
fileYml = open(os.path.join(self.db_path,info_path), encoding='UTF-8')
db_info = yaml.safe_load(fileYml)
return db_info
def load_material(self, shelfs):
"""[summary]
using in self.material_list()
load material data path from db_info
Arguments:
shelfs {[str]} -- [shelfs name list]
Returns:
[material_names, material_data] -- [material_data]
"""
material_names = []
material_data = {}
divider = None
for shelf in shelfs:
for material in self.db_shelf[shelf]:
if 'BOOK' in material.keys():
material_names.append(material['BOOK'])
for data in material['content']:
if 'data' in data.keys():
material_data['%s_%s'%(material['BOOK'], data['PAGE'])] = self.material_info_split(divider, data['name'])
material_data['%s_%s'%(material['BOOK'], data['PAGE'])]['path'] = data['data']
material_data['%s_%s'%(material['BOOK'], data['PAGE'])]['divider'] = divider
elif 'DIVIDER' in data.keys():
divider = data['DIVIDER']
return material_names, material_data
def material_info_split(self, divider, info):
material_info = {}
info_split = info.split(':')
if len(info_split) > 1:
material_info['year'], material_info['author'] = self.rex_author_info(info_split[0])
material_info['n'], material_info['k'] = self.rex_nk_info(info_split[1])
material_info['wavelength_start'], material_info['wavelength_end'] = self.rex_wavelength_info(info_split[1])
material_info['degree'] = self.rex_degree_info(info_split[1])
material_info['model'] = self.rex_model_info(info_split[1])
else:
material_info['year'], material_info['author'] = self.rex_author_info(info_split[0])
material_info['n'], material_info['k'] = True, False
material_info['wavelength_start'], material_info['wavelength_end'] = None, None
material_info['degree'] = None
material_info['model'] = None
return material_info
def rex_author_info(self, info):
try:
year = re.findall('[0-9]{4}', info)[0]
author = info.split(year)[0]
except:
year = None
author = info
return year, author
def rex_nk_info(self, info):
try:
nk = re.findall('n,k', info)
if nk:
n = True
k = True
else:
n = True
k = False
except:
n = False
k = False
return n, k
def rex_wavelength_info(self, info):
try:
wavelength_range = re.findall(r'-?\d+\.\d*\d*?', info)
if len(wavelength_range) == 2:
wavelength_start, wavelength_end = wavelength_range[0], wavelength_range[1]
else:
wavelength_start = wavelength_range[0]
wavelength_end = wavelength_range[0]
except:
wavelength_start = None
wavelength_end = None
return wavelength_start, wavelength_end
def rex_degree_info(self, info):
degree = re.findall(r'-?\d+?°C', info)
if len(degree) == 0:
return None
return degree[0]
def rex_model_info(self, info):
model = re.findall('Brendel-Bormann model', info)
if len(model) != 0:
return 'Brendel-Bormann model'
model = re.findall('Lorentz-Drude model', info)
if len(model) != 0:
return 'Lorentz-Drude model'
model = re.findall('DFT calculations', info)
if len(model) != 0:
return 'DFT calculations'
return None
def load_total_material(self):
# print(len(self.material_list))
# print(type(self.material_list))
total_material = {}
for material_name, material_info in self.material_list[1].items():
try:
material_path = material_info['path']
# print(material_path)
wl, n, k= self.load_material_parameter(material_path)
# print(material)
# material_info = self.material
total_material[material_name] = {
'wl': wl,
'n': n,
'k': k
}
self.success_material.append(material_name)
except ValueError as ve:
self.failed_material.append(material_name)
print('Load %s failed' % material_name)
print('Material wavelength bound is out of range!')
except MemoryError as Me:
self.failed_material.append(material_name)
print('Load %s failed!' % material_name)
print('Material wavelength out of memory!')
# print(total_material, len(total_material))
return total_material
def load_total_material_generator(self):
for material_name, material_info in tqdm(self.material_list[1].items()):
try:
# print(material_name)
material_path = material_info['path']
wl, n, k= self.load_material_parameter(material_path)
self.success_material.append(material_name)
yield material_name, [wl, n, k]
except ValueError as ve:
self.failed_material.append(material_name)
print('Load %s failed' % material_name)
print('Material wavelength bound is out of range!')
except MemoryError as Me:
self.failed_material.append(material_name)
print('Load %s failed!' % material_name)
print('Material wavelength out of memory!')
def load_select_material(self, select_material):
selected_material = {}
for material in select_material:
material_info = self.material_list[1][material]
wl, n, k= self.load_material_parameter(material_info['path'])
selected_material[material] = {
'wl': wl,
'n': n,
'k': k
}
return selected_material
def extract_data_nk(self, datas):
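# Records are either tabulated ('tabulated nk'/'n'/'k' columns) or a dispersion formula evaluated on a grid; wavelengths are scaled by 1000 (apparently um -> nm) before interpolation onto an integer grid.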
datas_type = datas['DATA'][0]['type']
wl = []
n = []
k = []
if datas_type == 'tabulated nk':
datas = datas['DATA'][0]['data'].split('\n')
for data in datas:
data = data.strip().split(' ')
if len(data) == 3:
wl.append(float(data[0]) * 1000)
n.append(float(data[1]))
k.append(float(data[2]))
elif datas_type == 'tabulated n':
datas = datas['DATA'][0]['data'].split('\n')
for data in datas:
data = data.split(' ')
wl.append(float(data[0]) * 1000)
n.append(float(data[1]))
k.append(0)
elif datas_type == 'tabulated k':
datas = datas['DATA'][0]['data'].split('\n')
for data in datas:
data = data.split(' ')
wl.append(float(data[0]) * 1000)
n.append(0)
k.append(float(data[1]))
elif datas_type.split(' ')[0] == 'formula':
coefficients = list(map(float, datas['DATA'][0]['coefficients'].split(' ')))
wavelength_range = list(map(float, datas['DATA'][0]['wavelength_range'].split(' ')))
print(wavelength_range)
wl_tmp = list(np.arange(wavelength_range[0], wavelength_range[1], 0.001))
wl = [1000*w for w in wl_tmp]
if datas_type == 'formula 1':
n = [self.formula_1(w, coefficients) for w in wl_tmp]
elif datas_type == 'formula 2':
n = [self.cauchy_model(w, coefficients) for w in wl_tmp]
elif datas_type == 'formula 4':
n = [self.formula_4(w, coefficients) for w in wl_tmp]
elif datas_type == 'formula 5':
n = [self.formula_5(w, coefficients) for w in wl_tmp]
elif datas_type == 'formula 6':
n = [self.formula_6(w, coefficients) for w in wl_tmp]
elif datas_type == 'formula 8':
n = [self.formula_8(w, coefficients) for w in wl_tmp]
k = [0 for x in range(len(wl))]
fwl = np.arange(math.ceil(min(wl)), int(max(wl)), 1)
fn = interpolate.interp1d(np.array(wl), np.array(n), kind='quadratic')
fk = interpolate.interp1d(np.array(wl), np.array(k), kind='quadratic')
return fwl, fn(fwl), fk(fwl)
def load_material_parameter(self, path):
fileYml = open(os.path.join(self.db_path, 'data', path), encoding='UTF-8')
datas = yaml.safe_load(fileYml)
wl, n, k = self.extract_data_nk(datas)
return wl, n, k
def formula_1(self, wavelength, coefficients):
"""[summary]
Arguments:
wavelength {[type]} -- [description]
coefficients {[type]} -- [description]
"""
wavelength_square = pow(wavelength, 2)
if len(coefficients) == 3:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))
elif len(coefficients) == 5:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))\
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2)))
elif len(coefficients) == 7:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))\
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2)))\
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2)))
elif len(coefficients) == 9:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))\
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2)))\
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2)))\
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2)))
elif len(coefficients) == 11:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2)))
elif len(coefficients) == 13:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2))) \
+ ((coefficients[11] * wavelength_square)/(wavelength_square - pow(coefficients[12], 2)))
elif len(coefficients) == 15:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2))) \
+ ((coefficients[11] * wavelength_square)/(wavelength_square - pow(coefficients[12], 2))) \
+ ((coefficients[13] * wavelength_square)/(wavelength_square - pow(coefficients[14], 2)))
elif len(coefficients) == 17:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2))) \
+ ((coefficients[11] * wavelength_square)/(wavelength_square - pow(coefficients[12], 2))) \
+ ((coefficients[13] * wavelength_square)/(wavelength_square - pow(coefficients[14], 2))) \
+ ((coefficients[15] * wavelength_square)/(wavelength_square - pow(coefficients[16], 2)))
return math.sqrt(n_square)
def formula_4(self, wavelength, coefficients):
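# Mixed dispersion form: n^2 = C0 + C1*w^C2/(w^2 - C3^C4) + C5*w^C6/(w^2 - C7^C8), plus optional power-law terms C9*w^C10, C11*w^C12, ...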
wavelength_square = pow(wavelength, 2)
if len(coefficients) == 9:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
elif len(coefficients) == 11:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
n_square += coefficients[9] * pow(wavelength, coefficients[10])
elif len(coefficients) == 13:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
n_square += coefficients[9] * pow(wavelength, coefficients[10])
n_square += coefficients[11] * pow(wavelength, coefficients[12])
elif len(coefficients) == 15:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
n_square += coefficients[9] * pow(wavelength, coefficients[10])
n_square += coefficients[11] * pow(wavelength, coefficients[12])
n_square += coefficients[13] * pow(wavelength, coefficients[14])
return math.sqrt(n_square)
def formula_5(self, wavelength, coefficients):
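# Single power-law term: n^2 = 1 + C0 + C1 * w^C2.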
n_square = 1
if len(coefficients) == 3:
n_square += coefficients[0] + coefficients[1]*pow(wavelength, coefficients[2])
return math.sqrt(n_square)
def formula_6(self, wavelength, coefficients):
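# Gas-like dispersion in inverse wavelength squared (note: wavelength_sqrt actually holds w**-2): n = 1 + C0 + sum_i C_i / (C_{i+1} - w**-2).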
wavelength_sqrt = pow(wavelength, -2)
if len(coefficients) == 11:
n = 1 + coefficients[0] \
+ (coefficients[1]/(coefficients[2] - wavelength_sqrt)) \
+ (coefficients[3]/(coefficients[4] - wavelength_sqrt)) \
+ (coefficients[5]/(coefficients[6] - wavelength_sqrt)) \
+ (coefficients[7]/(coefficients[8] - wavelength_sqrt)) \
+ (coefficients[9]/(coefficients[10] - wavelength_sqrt))
elif len(coefficients) == 9:
n = 1 + coefficients[0] \
+ (coefficients[1]/(coefficients[2] - wavelength_sqrt)) \
+ (coefficients[3]/(coefficients[4] - wavelength_sqrt)) \
+ (coefficients[5]/(coefficients[6] - wavelength_sqrt)) \
+ (coefficients[7]/(coefficients[8] - wavelength_sqrt))
elif len(coefficients) == 7:
n = 1 + coefficients[0] \
+ (coefficients[1]/(coefficients[2] - wavelength_sqrt)) \
+ (coefficients[3]/(coefficients[4] - wavelength_sqrt)) \
+ (coefficients[5]/(coefficients[6] - wavelength_sqrt))
elif len(coefficients) == 5:
n = 1 + coefficients[0] \
+ (coefficients[1]/(coefficients[2] - wavelength_sqrt)) \
+ (coefficients[3]/(coefficients[4] - wavelength_sqrt))
elif len(coefficients) == 3:
n = 1 + coefficients[0] \
+ (coefficients[1]/(coefficients[2] - wavelength_sqrt))
return n
def formula_8(self, wavelength, coefficients):
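# Not implemented: returns None for every wavelength, so materials using this formula will likely fail at the downstream interpolation step.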
pass
def cauchy_model(self, wavelength, coefficients):
"""[cauchy model]
Arguments:
wavelength {[type]} -- [description]
coefficients {[type]} -- [description]
"""
if len(coefficients) == 3:
n = coefficients[0] \
+ (coefficients[1] * pow(wavelength, coefficients[2]))
elif len(coefficients) == 5:
n = coefficients[0] \
+ (coefficients[1] * pow(wavelength, coefficients[2])) \
+ (coefficients[3] * pow(wavelength, coefficients[4]))
elif len(coefficients) == 7:
n = coefficients[0] \
+ (coefficients[1] * pow(wavelength, coefficients[2])) \
+ (coefficients[3] * pow(wavelength, coefficients[4])) \
+ (coefficients[5] * pow(wavelength, coefficients[6]))
elif len(coefficients) == 9:
n = coefficients[0] \
+ (coefficients[1] * pow(wavelength, coefficients[2])) \
+ (coefficients[3] * pow(wavelength, coefficients[4])) \
+ (coefficients[5] * pow(wavelength, coefficients[6])) \
+ (coefficients[7] * pow(wavelength, coefficients[8]))
elif len(coefficients) == 11:
n = coefficients[0] \
+ (coefficients[1] * pow(wavelength, coefficients[2])) \
+ (coefficients[3] * pow(wavelength, coefficients[4])) \
+ (coefficients[5] * pow(wavelength, coefficients[6])) \
+ (coefficients[7] * pow(wavelength, coefficients[8])) \
+ (coefficients[9] * pow(wavelength, coefficients[10]))
return n
@property
def shelf(self):
"""[summery]
load data base shelf from db_info
Returns:
[shelf] -- [database shelf]
"""
shelfs_name = []
for data in self.db_info:
self.db_shelf[data['SHELF']] = data['content']
shelfs_name.append(data['SHELF'])
return shelfs_name
@property
def material_list(self, shelfs='total', shelfs_list=None):
"""[summary]
Keyword Arguments:
shelfs {str} -- [description] (default: {'total'})
shelfs_list {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
if shelfs == 'total':
materials_name, material_data = self.load_material(self.shelf)
elif shelfs != 'total':
materials_name, material_data = self.load_material(shelfs_list)
return materials_name, material_data
def load_material_data(self, path):
fileYml = open(os.path.join(self.db_path, path), encoding='UTF-8')
db_info = yaml.safe_load(fileYml)
return db_info
if __name__ == "__main__":
ml = MaterialLoader()
print("Load Total %d Material, %d Optical Constants"%(len(ml.material_list[0]), len(ml.material_list[1])))
# ml.load_total_material()
for sample in ml.load_total_material_generator():
print(sample[0])
print("Success Load %s materials." % len(ml.success_material))
print("Failed Load %s materials." % len(ml.failed_material))
for failed in ml.failed_material:
print("Failed Load %s " % failed)
|
from pathlib import Path
import pandas as pd
#------------------------------------------------------
fd_out='./out/a00_clean_01_tumor'
f_in='./out/a00_clean_00_load/tumor.csv'
l_cat=['None', 'IC', 'Small', 'Medium', 'Large']
#------------------------------------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)
df=pd.read_csv(f_in, index_col=0).fillna(0)
###################################################################
#remove duplicates (no duplicates)
df=df.loc[~df.index.duplicated()]
#remove error
df['tmp']=df['r_tumor_size']+df['l_tumor_size']
df=df.loc[~((df['sidetumor']==0) & (df['tmp']>0)), :] #no error
df=df.loc[~((df['sidetumor']!=0) & (df['tmp']==0)), :] #error: 1025, 1044
df=df.drop('tmp', axis=1)
#clean
df.columns=['side', 'size_r', 'size_l']
df['side']=df['side'].replace([0, 1, 2, 3], ['None', 'Right', 'Left', 'Both'])
df['size_r']=df['size_r'].replace([0, 1, 2, 3, 4], l_cat)
df['size_l']=df['size_l'].replace([0, 1, 2, 3, 4], l_cat)
df['tumor']=(df['side']!='None').astype('int')
df['tumor']=df['tumor'].replace([0, 1], ['None', 'Tumor'])
df.to_csv(f'{fd_out}/data.csv')
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import os
import pandas as pd
from edfi_schoology_extractor.helpers.csv_writer import df_to_csv
def describe_when_writing_dataframe_to_csv():
def describe_given_DataFrame_has_data():
def the_output_should_not_include_the_index(fs):
path = "a/b.csv"
df = pd.DataFrame([{"a": 1}, {"a": 2}])
expected = "a\n1\n2\n"
# Arrange
fs.create_dir("a")
# Act
df_to_csv(df, path)
# Assert
with open(path) as f:
contents = f.read()
assert expected == contents
def describe_given_DataFrame_is_empty():
def it_should_write_file_for_empty_DataFrame(fs):
# Arrange
path = "path"
# Act
df_to_csv(pd.DataFrame(), path)
# Assert
assert os.path.exists(path)
def and_the_file_should_be_empty(fs):
# Arrange
path = "path"
# Act
df_to_csv(pd.DataFrame(), path)
# Assert
with open(path) as f:
contents = f.read()
assert "\n" == contents
|
# MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import argparse
import logging
import time
# Import the various model objects
from OSMBase import get_model_instances
import OSMKerasFingerprint
import OSMKerasDragon
import OSMKerasCoulomb
import OSMKerasMeta
import OSMKerasEnsemble
import OSMTemplate
import OSMSKLearnRegress
import OSMSKLearnClassify
import OSMTensorFlow
import OSMBinding
__version__ = "0.3"
# ===================================================================================================
# A utility class to parse the program runtime arguments
# and setup a logger to receive classification output.
# ===================================================================================================
class ExecEnv(object):
"""Utility class to setup the runtime environment and logging"""
# Static class variables.
args = None
log = None
cmdLine = ""
modelInstances = [] # Model instances are singletons.
def __init__(self):
"""Parse runtime arguments on object creation and maintain the runtime environment"""
# Start a console logger to complain about any bad args (file logger defined below).
file_log_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console_log_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
ExecEnv.log = self.setup_logging(console_log_format)
# Parse the runtime args
parser = argparse.ArgumentParser(
description="OSM_QSAR. Classification of OSM ligands using machine learning techniques.")
# --dir
parser.add_argument("--dir", dest="workDirectory", default="./Work/",
help=('The work directory where log files and data files are found.'
' Use a Linux style directory specification with trailing forward'
' slash "/" (default "./Work/").'
" Important - to run OSM_QSAR this directory must exist, it will not be created."
' Model specific files (models, statistics and graphics) are in the '
'subdirectories "/<WorkDir>/postfix/...".'))
# --data
parser.add_argument("--data", dest="dataFilename", default="OSMData.csv",
help=('The input data filename (default "OSMData.csv").'
" Important - to run OSM_QSAR this file must exist in the Work directory."
' If this flag is not specified then OSM_QSAR attempts to read the'
' data file at "/<WorkDir>/OSMData.csv".'
" See the additional OSM_QSAR documentation for the format of this file."))
# --dragon
parser.add_argument("--dragon", dest="dragonFilename", default="Dragon.csv",
help=('The Dragon QSAR data filename (default "Dragon.csv").'
" Important - to run OSM_QSAR this file must exist in the Work directory."
' If this flag is not specified then OSM_QSAR attempts to read the'
' data file at "/<WorkDir>/Dragon.csv".'
" See the additional OSM_QSAR documentation for the format of this file."))
# --depend
parser.add_argument("--depend", dest="dependVar", default="default",
help=('The regression or classifier dependent variable.'
" This variable must exist in the data dictionary. The variables in the data"
' directory can be listed using the "--vars" flag. The default dependent variable'
' for a regression model is "pIC50" (log10 IC50 potency in uMols). The default'
' variable for a classifier model is "ION_ACTIVITY".'))
# --indep
parser.add_argument("--indep", dest="indepList", default="default",
help=('The regression or classifier independent variables.'
" Important - some models (NNs) will not accept this flag and will issue a warning."
" Specified variables must exist in the data dictionary. The variables in the data"
' directory can be listed using the "--vars" flag. The independent variables are '
' specified in a comma-delimited list "Var1, Var2, ..., Varn". Quotes must be present.'
' For regression and classifier models the default independent variable'
' is the Morgan molecular fingerprint "MORGAN2048".'))
# --vars
parser.add_argument("--vars", dest="varFlag", action="store_true",
help=("Lists all the data variables available in the data dictionary and exits."))
# --load
parser.add_argument("--load", dest="loadFilename", default="noload",
help=("Loads the saved model and generates statistics and graphics but does no "
" further training."
' If the model is a Neural Network then the "--epoch" flag can be optionally'
" specified to read to a particular NN epoch model."
" This file is always located in the model <postfix> subdirectory of the Work"
' directory. For example, specifying "--load mymodel --epoch 1000 --train 0" for '
' a KERAS NN loads "./<WorkDir>/<postfix>/mymodel_1000.krs" and generates'
' model statistics and graphics without further training'))
# --save
parser.add_argument("--save", dest="saveFilename", default="OSMClassifier",
help=('File name to save the model (default "OSMClassifier").'
' The model file is always saved to the model postfix subdirectory.'
' Neural Networks append the training epoch to the file name.'
' For example, if a KERAS NN classifier is saved and if this flag is not specified '
' the model is saved to: "./<WorkDir>/postfix/OSMClassifier_<epoch>.krs".'
' The postfix directory is created if it does not exist.'))
# --stats
parser.add_argument("--stats", dest="statsFilename", default="OSMStatistics.csv",
help=('File to append the test and train model statistics (default "OSMStatistics.csv").'
' The statistics file is always saved to subdirectories of the model <postfix>'
' directory. Test data statistics are always appended to the specified statistics'
' file in the "./<WorkDir>/<postfix>/test/" and training data statistics are appended'
' to the specified statistics file in the "./<WorkDir>/<postfix>/train" directory.'
' The postfix directory and the "test" and "train" subdirectories are created'
' if they do not exist. The statistics file(s) are created if they do not exist.'))
# --extend
parser.add_argument("--extend", dest="extendFlag", action="store_true",
help=(' The "--extend" flag generates all training data statistics and graphics.'
' Additional training graphics and statistics are added to the'
' "./<WorkDir>/<postfix>/train" directory.'
' The directory is created if it does not exist. The statistics file is created'
' if it does not exist.'
' Warning - the "--extend" flag may substantially increase OSM_QSAR runtime.'))
# --coulomb
parser.add_argument("--coulomb", dest="coulombFlag", action="store_true",
help=(' The "--coulomb" flag generates Coulomb Matrices (uses deepchem).'))
# --clean
parser.add_argument("--clean", dest="cleanFlag", action="store_true",
help=('Deletes all files in the "test" and "train" subdirectories of the model '
' <postfix> directory before OSM_QSAR executes.'
' Any model files in the <postfix> directory are not deleted.'))
# --log
parser.add_argument("--log", dest="logFilename", default="OSM_QSAR.log",
help=('Log file. Appends the log to any existing logs (default "OSM_QSAR.log").'
'The log file always resides in the work directory.'))
# --newlog
parser.add_argument("--newlog", dest="newLogFilename", default="nonewlog", nargs='?',
help='Flush an existing log file (file name argument optional, default "OSM_QSAR.log").'
'The log file always resides in the work directory.')
# --model
parser.add_argument("--model", dest="modelDescriptions", action="store_true",
help=("Lists all defined regression and classification models and exits."))
# --classify
parser.add_argument("--classify", dest="classifyType", default="seq",
help=("Specify which classification model OSM_QSAR will execute using the model"
' postfix code (default "seq"). For more information on current models specify'
' the "--model" flag.'))
# --epoch
parser.add_argument("--epoch", dest="epoch", default=-1, type=int,
help=(" Compulsory when loading Neural Networks and other iterative models."
" Used to specify which training epoch to load'"
" and retrain. Ignored if not valid for model. Example:"
'"--classify mod --load OSMClassifier -- epoch 1000 --train 0"'
' loads the KERAS "mod" model from "./<WorkDir>/mod/OSMClassifier_1000.krs"'
' and generates model statistics and graphics without further training'))
parser.add_argument("--crossval", dest="crossVal", default=-1.0, type=float,
help=(" Randomly shuffle the test/training set for every checkpoint"
" this is done by shuffling data into a training and test set."
" The float number is the proportion of the data that is held out for testing."))
parser.add_argument("--shuffle", dest="shuffle", default=-1.0, type=float,
help=(" Randomly shuffle the test/training set at the beginning (only) of model training"
" this is done by shuffling data into a training and test set."
" The float number is the proportion of the data that is held out for testing."))
parser.add_argument("--holdout", dest="holdOut", default=0.0, type=float,
help=(" The proportion of the training dataset in the range [0, 1] that is used for"
" training validation (default 0.0). This is very useful for checking pre-trained "
" Neural Network models to see if they have over-fitted the training data."))
# --train
parser.add_argument("--train", dest="train", default=-1, type=int,
help=("The number of training epochs (iterations). Ignored if not valid for model."))
# --check
parser.add_argument("--check", dest="checkPoint", default=-1, type=int,
help=('Number of training iterations between each save of the model. Statistics are generated'
' at each checkpoint. Must be used with --train e.g. "--train 2000 --check 500".'))
# --version
parser.add_argument("--version", action="version", version=__version__)
ExecEnv.args = parser.parse_args()
################# File Logging is here ########################################################
ExecEnv.args.logFilename = os.path.join(ExecEnv.args.workDirectory,ExecEnv.args.logFilename)
if ExecEnv.args.newLogFilename != "nonewlog" and ExecEnv.args.newLogFilename is not None:
ExecEnv.args.newLogFilename = os.path.join(ExecEnv.args.workDirectory,ExecEnv.args.newLogFilename)
log_append = False
self.setup_file_logging(ExecEnv.args.newLogFilename, log_append, file_log_format)
elif ExecEnv.args.newLogFilename != "nonewlog": # No filename supplied (optional arg).
ExecEnv.args.newLogFilename = os.path.join(ExecEnv.args.workDirectory,"OSM_QSAR.log")
log_append = False
self.setup_file_logging(ExecEnv.args.newLogFilename, log_append, file_log_format)
else:
log_append = True
self.setup_file_logging(ExecEnv.args.logFilename, log_append, file_log_format)
# Check to see if the postfix directory and subdirectories exist and create if necessary.
postfix_directory = os.path.join(ExecEnv.args.workDirectory, ExecEnv.args.classifyType)
test_directory = os.path.join(postfix_directory, "test")
train_directory = os.path.join(postfix_directory, "train")
# Append the postfix directory to the environment file names.
ExecEnv.args.postfixDirectory = postfix_directory
ExecEnv.args.testDirectory = test_directory
ExecEnv.args.trainDirectory = train_directory
ExecEnv.args.dataFilename = os.path.join(ExecEnv.args.workDirectory, ExecEnv.args.dataFilename)
ExecEnv.args.dragonFilename = os.path.join(ExecEnv.args.workDirectory, ExecEnv.args.dragonFilename)
if ExecEnv.args.loadFilename != "noload":
ExecEnv.args.loadFilename = os.path.join(postfix_directory,ExecEnv.args.loadFilename)
ExecEnv.args.saveFilename = os.path.join(postfix_directory,ExecEnv.args.saveFilename)
# Check that the data file exists and terminate if not.
if not os.path.exists(ExecEnv.args.dataFilename):
ExecEnv.log.error('The OSM_QSAR data file: "%s" does not exist.', ExecEnv.args.dataFilename)
ExecEnv.log.error('Please examine the "--dir", "--data" and "--help" flags.')
sys.exit()
# Set up the classification variables.
ExecEnv.setup_variables()
cmd_line = ""
for argStr in sys.argv:
cmd_line += argStr + " "
ExecEnv.cmdLine = cmd_line
################# Models are created here ########################################################
# The model objects are held as singletons.
ExecEnv.modelInstances = get_model_instances(ExecEnv.args, ExecEnv.log)
# List the available models and exit.
if ExecEnv.args.modelDescriptions:
ExecEnv.log.info(ExecEnv.list_available_models())
sys.exit()
# Check for a valid model.
if ExecEnv.selected_model() is None:
ExecEnv.log.error('Unknown model prefix %s', ExecEnv.args.classifyType)
ExecEnv.log.info(ExecEnv.list_available_models())
sys.exit()
################# Other house keeping ########################################################
# Check that the work directory exists and terminate if not.
if not os.path.isdir(ExecEnv.args.workDirectory):
ExecEnv.log.error('The OSM_QSAR work directory: "%s" does not exist.', ExecEnv.args.workDirectory)
ExecEnv.log.error("Create or Rename the work directory.")
ExecEnv.log.error('Please examine the --dir" and "--help" flags.')
sys.exit()
try:
if not os.path.isdir(postfix_directory):
ExecEnv.log.info('The model <postfix> directory: "%s" does not exist. Creating it.', postfix_directory)
os.makedirs(postfix_directory)
if not os.path.isdir(test_directory):
ExecEnv.log.info('The model <postfix> directory: "%s" does not exist. Creating it.', test_directory)
os.makedirs(test_directory)
if not os.path.isdir(train_directory):
ExecEnv.log.info('The model <postfix> directory: "%s" does not exist. Creating it.', train_directory)
os.makedirs(train_directory)
except OSError:
ExecEnv.log.error("Could not create directory")
ExecEnv.log.error("Check the work directory: %s permissions.", ExecEnv.args.workDirectory)
sys.exit()
if ExecEnv.args.cleanFlag:
# clean the postfix subdirectories if the "--clean" flag is specified.
ExecEnv.log.info('"--clean" specified, deleting all files in directory: "%s"', test_directory)
ExecEnv.log.info('"--clean" specified, deleting all files in directory: "%s"', train_directory)
try:
for file_name in os.listdir(test_directory):
file_path = os.path.join(test_directory, file_name)
if os.path.isfile(file_path):
os.unlink(file_path)
for file_name in os.listdir(train_directory):
file_path = os.path.join(train_directory, file_name)
if os.path.isfile(file_path):
os.unlink(file_path)
except OSError:
ExecEnv.log.error('Specified the "--clean" flag. Could not delete file(s)')
ExecEnv.log.error("Check <postfix> subdirectories and file permissions.")
sys.exit()
@staticmethod
def list_available_models():
model_str = "A list of available classification models:\n\n"
for model in ExecEnv.modelInstances:
model_name = model.model_name() + "\n"
model_str += model_name
model_str += "=" * len(model_name) + "\n"
model_postfix = "--classify "+ model.model_postfix() + "\n"
model_str += model_postfix
model_str += "-" * len(model_postfix) + "\n"
model_str += model.model_description() + "\n\n"
return model_str
@staticmethod
def selected_model():
model = None
for instance in ExecEnv.modelInstances:
if instance.model_postfix() == ExecEnv.args.classifyType:
model = instance
break
return model
def setup_logging(self, log_format):
"""Set up Python logging"""
logger = logging.getLogger("OSMLogger")
logger.setLevel(logging.INFO) # Default output level.
# Create a console log
console_log = logging.StreamHandler()
console_log.setLevel(logging.DEBUG) # Output debug to screen
console_log.setFormatter(log_format)
logger.addHandler(console_log)
return logger
def setup_file_logging(self, log_filename, append, log_format):
"""Set up Python logging to log file"""
# Create a file log.
if append:
file_log = logging.FileHandler(log_filename, mode='a')
else:
file_log = logging.FileHandler(log_filename, mode='w')
file_log.setLevel(logging.INFO) # Info level and above to file.
file_log.setFormatter(log_format)
ExecEnv.log.addHandler(file_log)
if not append:
ExecEnv.log.info("Flushed logfile: %s", log_filename)
ExecEnv.log.info("Logging to file: %s", log_filename)
@staticmethod
def setup_variables():
if ExecEnv.args.indepList == "default": return # no var list required.
var_list = [x.strip() for x in ExecEnv.args.indepList.split(',')]
if len(var_list) == 0:
ExecEnv.log.error('The "--indep" argument: %s is incorrectly formatted.', ExecEnv.args.indepList)
ExecEnv.log.error('Please examine the "--indep" and "--help" flags.')
sys.exit()
ExecEnv.args.indepList = var_list
|
import argparse
import operator
import os
import random
import sys
from typing import Tuple
from rotkehlchen.assets.asset import Asset
from rotkehlchen.config import default_data_directory
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.db.utils import AssetBalance, LocationData
from rotkehlchen.fval import FVal
from rotkehlchen.serialization.deserialize import deserialize_location
from rotkehlchen.typing import FilePath, Location, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.misc import ts_now
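# Randomly split `number` into `parts_number` positive integer parts that sum to `number`; the last part absorbs whatever remains.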
def divide_number_in_parts(number: int, parts_number: int):
if parts_number > number:
raise ValueError("Number of parts can't be higher than the number")
number_rest = number
for i in range(1, parts_number + 1):
if (i == parts_number):
yield number_rest
break
else:
new_number = random.randint(1, (number_rest - (parts_number - i)) // 2)
number_rest -= new_number
yield new_number
class StatisticsFaker():
def __init__(self, args: argparse.Namespace) -> None:
user_path = FilePath(os.path.join(str(default_data_directory()), args.user_name))
self.db = DBHandler(
user_data_dir=user_path,
password=args.user_password,
msg_aggregator=MessagesAggregator(),
)
def _clean_tables(self) -> None:
cursor = self.db.conn.cursor()
cursor.execute('DELETE from timed_location_data;')
cursor.execute('DELETE from timed_balances;')
self.db.conn.commit()
@staticmethod
def _get_amounts(args: argparse.Namespace) -> Tuple[int, int, int]:
if not isinstance(args.min_amount, int) or args.min_amount < 0:
print('Invalid minimum amount given')
sys.exit(1)
min_amount = args.min_amount
if not isinstance(args.max_amount, int) or args.max_amount < min_amount:
print('Invalid max amount given')
sys.exit(1)
max_amount = args.max_amount
invalid_starting_amount = (
not isinstance(args.starting_amount, int) or
args.starting_amount < min_amount or
args.starting_amount > max_amount
)
if invalid_starting_amount:
print('Invalid starting amount given')
sys.exit(1)
starting_amount = args.starting_amount
return starting_amount, min_amount, max_amount
@staticmethod
def _get_timestamps(args: argparse.Namespace) -> Tuple[Timestamp, Timestamp]:
if not isinstance(args.from_timestamp, int) or args.from_timestamp < 0:
print('Invalid from timestamp given')
sys.exit(1)
from_ts = Timestamp(args.from_timestamp)
if args.to_timestamp is None:
to_ts = ts_now()
else:
if not isinstance(args.to_timestamp, int) or args.to_timestamp < from_ts:
print('Invalid to timestamp given')
sys.exit(1)
to_ts = Timestamp(args.to_timestamp)
return from_ts, to_ts
def create_fake_data(self, args: argparse.Namespace) -> None:
self._clean_tables()
from_ts, to_ts = StatisticsFaker._get_timestamps(args)
starting_amount, min_amount, max_amount = StatisticsFaker._get_amounts(args)
total_amount = starting_amount
locations = [deserialize_location(location) for location in args.locations.split(',')]
assets = [Asset(symbol) for symbol in args.assets.split(',')]
go_up_probability = FVal(args.go_up_probability)
# Add the first distribution of location data
location_data = []
for idx, value in enumerate(divide_number_in_parts(starting_amount, len(locations))):
location_data.append(LocationData(
time=from_ts,
location=locations[idx].serialize_for_db(),
usd_value=str(value),
))
# add the location data + total to the DB
self.db.add_multiple_location_data(location_data + [LocationData(
time=from_ts,
location=Location.TOTAL.serialize_for_db(),
usd_value=str(total_amount),
)])
# Add the first distribution of assets
assets_data = []
for idx, value in enumerate(divide_number_in_parts(starting_amount, len(assets))):
assets_data.append(AssetBalance(
time=from_ts,
asset=assets[idx],
amount=str(random.randint(1, 20)),
usd_value=str(value),
))
self.db.add_multiple_balances(assets_data)
while from_ts < to_ts:
print(f'At timestamp: {from_ts}/{to_ts} with total net worth: ${total_amount}')
new_location_data = []
new_assets_data = []
from_ts += args.seconds_between_balance_save
# remaining_loops = to_ts - from_ts / args.seconds_between_balance_save
add_usd_value = random.choice([100, 350, 500, 625, 725, 915, 1000])
add_amount = random.choice([
FVal('0.1'), FVal('0.23'), FVal('0.34'), FVal('0.69'), FVal('1.85'), FVal('2.54'),
])
go_up = (
# If any asset's usd value is about to go below zero, go up
any(FVal(a.usd_value) - FVal(add_usd_value) < 0 for a in assets_data) or
# If total is going under the min amount go up
total_amount - add_usd_value < min_amount or
# If "dice roll" matched and we won't go over the max amount go up
(
add_usd_value + total_amount < max_amount and
FVal(random.random()) <= go_up_probability
)
)
if go_up:
total_amount += add_usd_value
action = operator.add
else:
total_amount -= add_usd_value
action = operator.sub
for idx, value in enumerate(divide_number_in_parts(add_usd_value, len(locations))):
new_location_data.append(LocationData(
time=from_ts,
location=location_data[idx].location,
usd_value=str(action(FVal(location_data[idx].usd_value), value)),
))
# add the location data + total to the DB
self.db.add_multiple_location_data(new_location_data + [LocationData(
time=from_ts,
location=Location.TOTAL.serialize_for_db(),
usd_value=str(total_amount),
)])
for idx, value in enumerate(divide_number_in_parts(add_usd_value, len(assets))):
old_amount = FVal(assets_data[idx].amount)
new_amount = action(old_amount, add_amount)
if new_amount < FVal('0'):
new_amount = old_amount + FVal('0.01')
new_assets_data.append(AssetBalance(
time=from_ts,
asset=assets[idx],
amount=str(new_amount),
usd_value=str(action(FVal(assets_data[idx].usd_value), value)),
))
self.db.add_multiple_balances(new_assets_data)
location_data = new_location_data
assets_data = new_assets_data
|
import os
from dca.DCA import DCA
from dca.schemes import (
DCALoggers,
DelaunayGraphParams,
ExperimentDirs,
GeomCAParams,
HDBSCANParams,
REData,
)
import typer
import CL_utils
import numpy as np
import pickle
app = typer.Typer()
@app.command()
def CL_mode_truncation(cleanup: int = 1):
experiment_path = f"output/CL_mode_truncation/"
dataset_path = "representations/contrastive_learning/"
# Load representations
with open(os.path.join(dataset_path, "Df_train.pkl"), "rb") as f:
Rdata = pickle.load(f)
Rrepresentations, Rlabels = Rdata["R"], Rdata["class_labels"]
with open(os.path.join(dataset_path, "Df_holdout.pkl"), "rb") as f:
Edata = pickle.load(f)
Erepresentations, Elabels = Edata["E"], Edata["class_labels"]
# Extract initial R and E splits
Eclasses = [0]
n_classes = 12
R = CL_utils.get_representations_by_class(
Rrepresentations, Rlabels, [0, 1, 2, 3, 4, 5, 6]
)
E = CL_utils.get_representations_by_class(Erepresentations, Elabels, Eclasses)
output = []
while len(Eclasses) <= n_classes:
experiment_id = f"n_Eclasses{len(Eclasses)}"
print("Current Eclasses: {0}".format(Eclasses))
data_config = REData(
R=R, E=E, input_array_dir=os.path.join(experiment_id, "logs")
)
experiment_config = ExperimentDirs(
experiment_dir=experiment_path,
experiment_id=experiment_id,
precomputed_folder=os.path.join(experiment_id, "logs"),
)
graph_config = DelaunayGraphParams(
unfiltered_edges_dir=os.path.join(experiment_id, "logs"),
filtered_edges_dir=os.path.join(experiment_id, "logs"),
)
hdbscan_config = HDBSCANParams(
clusterer_dir=os.path.join(experiment_id, "logs"),
)
geomCA_config = GeomCAParams(
comp_consistency_threshold=0.75, comp_quality_threshold=0.45
)
exp_loggers = DCALoggers(experiment_config.logs_dir)
dca = DCA(
experiment_config,
graph_config,
hdbscan_config,
geomCA_config,
loggers=exp_loggers,
)
dca_scores = dca.fit(data_config)
output.append(dca_scores)
if cleanup:
dca.cleanup()
# Add a class to E and obtain the data
new_class = Eclasses[-1] + 1
new_class_representations = CL_utils.get_representations_by_class(
Erepresentations, Elabels, [new_class]
)
Eclasses.append(new_class)
E = np.concatenate([E, new_class_representations])
print("E set updated with class {0}".format(new_class))
return output
if __name__ == "__main__":
typer.run(CL_mode_truncation)
|
import discord
import typing
import asyncio
import time
from discord.ext import commands
from nerdlandbot.translations.Translations import get_text as translate
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.helpers.emoji import poll_emojis, yes, no, drum
from nerdlandbot.helpers.constants import POLL_MAX_TIMEOUT, POLL_DEFAULT_TIMEOUT
class Poll(commands.Cog, name="Simple Poll"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="poll", brief="poll_brief", usage="poll_usage", help="poll_help")
@commands.guild_only()
async def poll(self, ctx: commands.Context, *, input_str: str):
"""
Create a poll with either yes or no as an answer or self submitted options.
Poll will be open for an amount of time determined by the user.
Syntax: question (multiple words) timeout (numerals) <options> (words split by ;)
Example: This is a question? 10 option 1; option 2; option 3
:param input_str: input to be parsed according to the above syntax
"""
poller_id = ctx.message.author.id
#check if there is a question
if '?' not in input_str:
await ctx.send(translate("poll_no_questionmark", await culture(ctx)))
return
input_split = input_str.split('?',1)
question = input_split[0]
numbers_and_options = input_split[1].strip()
# parse timeout and options
if len(numbers_and_options) > 0:
first_word = numbers_and_options.split()[0]
if not first_word.isdigit():
await ctx.send(translate("poll_no_timeout", await culture(ctx)).format(POLL_DEFAULT_TIMEOUT))
timeout_s = POLL_DEFAULT_TIMEOUT * 60
else:
timeout_s = int(first_word) * 60
if timeout_s > POLL_MAX_TIMEOUT * 60:
await ctx.send(translate("poll_max_timeout", await culture(ctx)).format(POLL_MAX_TIMEOUT))
timeout_s = POLL_MAX_TIMEOUT * 60
# parse options
options = numbers_and_options.split(first_word, 1)[1].strip() if first_word.isdigit() else numbers_and_options.strip()
if len(options) > 0:
options_list = options.split(';')
is_yes_no = False
else:
is_yes_no = True
else:
is_yes_no = True
await ctx.send(translate("poll_no_timeout", await culture(ctx)).format(POLL_DEFAULT_TIMEOUT))
timeout_s = POLL_DEFAULT_TIMEOUT * 60
# create message to send to channel
txt = translate("poll_start", await culture(ctx)).format(poller_id,question)
# add options to message
options_dict = dict()
if is_yes_no:
txt += translate("poll_yes_no", await culture(ctx)).format(yes,no)
options_dict[yes] = translate("yes", await culture(ctx))
options_dict[no] = translate("no", await culture(ctx))
else:
i = 1
for option in options_list:
txt += "{} - {}\n".format(poll_emojis[i],option)
options_dict[poll_emojis[i]] = option
i += 1
msg = await ctx.send(txt)
# add reactions to message
if is_yes_no:
await msg.add_reaction(yes)
await msg.add_reaction(no)
else:
i = 1
for option in options_list:
await msg.add_reaction(poll_emojis[i])
i += 1
# wait until timeout
await asyncio.sleep(timeout_s)
# refresh message
msg = await ctx.fetch_message(msg.id)
# get the reactions
reactions = msg.reactions
reactions_dict = dict()
for reaction in reactions:
reactions_dict[reaction.emoji] = reaction.count
reactions_sorted = sorted(reactions_dict.items(), key=lambda x: x[1], reverse=True)
# send poll results
txt = translate("poll_results", await culture(ctx)).format(drum,poller_id,question)
for reaction in reactions_sorted:
try:
option_str = options_dict[reaction[0]].strip()
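# subtract 1 so the bot's own seed reaction is not counted as a vote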
count = reaction[1] - 1
txt += translate("poll_votes", await culture(ctx)).format(option_str,count)
except KeyError:
pass
# send message with results
await ctx.send(txt)
def setup(bot: commands.Bot):
bot.add_cog(Poll(bot))
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
import six
from six.moves.urllib import parse
from heatclient.common import base
from heatclient.common import utils
class ResourceType(base.Resource):
def __repr__(self):
if isinstance(self._info, six.string_types):
return "<ResourceType %s>" % self._info
else:
return "<ResourceType %s>" % self._info.get('resource_type')
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
def _add_details(self, info):
if isinstance(info, six.string_types):
self.resource_type = info
elif isinstance(info, dict):
self.resource_type = info.get('resource_type')
self.description = info.get('description')
class ResourceTypeManager(base.BaseManager):
resource_class = ResourceType
KEY = 'resource_types'
def list(self, **kwargs):
"""Get a list of resource types.
:rtype: list of :class:`ResourceType`
"""
url = '/%s' % self.KEY
params = {}
if 'filters' in kwargs:
filters = kwargs.pop('filters')
params.update(filters)
if 'with_description' in kwargs:
with_description = kwargs.pop('with_description')
params.update({'with_description': with_description})
if params:
url += '?%s' % parse.urlencode(params, True)
return self._list(url, self.KEY)
def get(self, resource_type, with_description=False):
"""Get the details for a specific resource_type.
:param resource_type: name of the resource type to get the details for
:param with_description: return result with description or not
"""
url_str = '/%s/%s' % (
self.KEY,
parse.quote(encodeutils.safe_encode(resource_type), ''))
resp = self.client.get(url_str,
params={'with_description': with_description})
body = utils.get_response_body(resp)
return body
def generate_template(self, resource_type, template_type='cfn'):
url_str = '/%s/%s/template' % (
self.KEY,
parse.quote(encodeutils.safe_encode(resource_type), ''))
if template_type:
url_str += '?%s' % parse.urlencode(
{'template_type': template_type}, True)
resp = self.client.get(url_str)
body = utils.get_response_body(resp)
return body
|
import json
from edge.blast import blast_genome
from edge.models import Operation
from Bio.Seq import Seq
class CrisprTarget(object):
def __init__(self, fragment_id, fragment_name, strand, subject_start, subject_end, pam):
self.fragment_id = fragment_id
self.fragment_name = fragment_name
self.strand = strand
self.subject_start = subject_start
self.subject_end = subject_end
self.pam = pam
def to_dict(self):
return self.__dict__
def match_pam(pam, query):
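# Case-insensitive comparison of a candidate sequence against the PAM pattern; 'n' in the PAM acts as a wildcard that matches any base (e.g. NGG).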
if len(query) != len(pam):
return False
for p, q in zip(pam.lower(), query.lower()):
if p != 'n' and p != q:
return False
return True
def target_followed_by_pam(blast_res, pam):
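# The PAM must sit immediately after the matched protospacer: just past subject_end on the + strand, just before it (reverse-complemented) on the - strand.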
fragment = blast_res.fragment.indexed_fragment()
if blast_res.strand() > 0:
pam_start = blast_res.subject_end + 1
pam_end = pam_start + len(pam) - 1
query = fragment.get_sequence(bp_lo=pam_start, bp_hi=pam_end)
else:
pam_end = blast_res.subject_end - 1
pam_start = pam_end - len(pam) + 1
query = fragment.get_sequence(bp_lo=pam_start, bp_hi=pam_end)
query = str(Seq(query).reverse_complement())
if match_pam(pam, query) is True:
subject_start = blast_res.subject_start
subject_end = blast_res.subject_end
subject_start = fragment.circ_bp(subject_start)
subject_end = fragment.circ_bp(subject_end)
return CrisprTarget(blast_res.fragment_id, blast_res.fragment.name,
blast_res.strand(), subject_start, subject_end, pam)
return None
def find_crispr_target(genome, guide, pam):
"""
Find sequences on genome that have exact match to guide, followed by pam
sequence.
"""
guide_matches = blast_genome(genome, 'blastn', guide)
targets = []
for res in guide_matches:
if res.query_start == 1 and res.query_end == len(guide):
target = target_followed_by_pam(res, pam)
if target is not None:
targets.append(target)
return targets
def crispr_dsb(genome, guide, pam, genome_name=None, notes=None):
targets = find_crispr_target(genome, guide, pam)
if len(targets) == 0:
return None
if genome_name is None or genome_name.strip() == "":
genome_name = "%s after CRISPR-Cas9 wt (double stranded break) using guide %s"\
% (genome.name, guide)
new_genome = genome.update()
new_genome.name = genome_name
new_genome.notes = notes
new_genome.save()
op = CrisprOp.get_operation(guide=guide, pam=pam)
op.genome = new_genome
op.save()
for target in targets:
if target.strand > 0:
annotation_start = target.subject_start
annotation_end = target.subject_end
else:
annotation_start = target.subject_end
annotation_end = target.subject_start
new_fragment_id = None
with new_genome.update_fragment_by_fragment_id(target.fragment_id) as f:
new_fragment_id = f.id
with new_genome.annotate_fragment_by_fragment_id(new_fragment_id) as f:
feature = 'CRISPR-Cas9 (pam %s) target' % pam
f.annotate(annotation_start, annotation_end, feature,
'event', target.strand, operation=op)
return new_genome
class CrisprOp(object):
@staticmethod
def check(genome, guide, pam, genome_name=None, notes=None):
return find_crispr_target(genome, guide, pam)
@staticmethod
def get_operation(guide, pam, genome_name=None, notes=None):
params = dict(guide=guide, pam=pam)
op = Operation(type=Operation.CRISPR_DSB[0], params=json.dumps(params))
return op
@staticmethod
def perform(genome, guide, pam, genome_name, notes):
return crispr_dsb(genome, guide, pam, genome_name=genome_name, notes=notes)
|
##
# Contains TranscriptomeIndexListView, TranscriptomeIndexDetailView, and needed serializer
##
from django.utils.decorators import method_decorator
from rest_framework import filters, generics, serializers
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_api.exceptions import InvalidFilters
from data_refinery_api.utils import check_filters
from data_refinery_common.models import OrganismIndex
class OrganismIndexSerializer(serializers.ModelSerializer):
organism_name = serializers.StringRelatedField(source="organism", read_only=True)
download_url = serializers.SerializerMethodField()
class Meta:
model = OrganismIndex
fields = (
"id",
"assembly_name",
"organism_name",
"database_name",
"release_version",
"index_type",
"salmon_version",
"download_url",
"result_id",
"last_modified",
)
read_only_fields = fields
def get_download_url(self, obj):
computed_file = obj.get_computed_file()
if computed_file is not None:
return computed_file.s3_url
return None
@method_decorator(
name="get",
decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
name="organism__name",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Organism name. Eg. `MUS_MUSCULUS`",
),
openapi.Parameter(
name="length",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Short hand for `index_type` Eg. `short` or `long`",
),
openapi.Parameter(
name="salmon_version",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Eg. `salmon 0.13.1`",
),
openapi.Parameter(
name="index_type",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Eg. `TRANSCRIPTOME_LONG`",
),
]
),
)
class TranscriptomeIndexListView(generics.ListAPIView):
"""
List all Transcriptome Indices. These are a special type of process result,
necessary for processing other SRA samples.
"""
serializer_class = OrganismIndexSerializer
filter_backends = (
DjangoFilterBackend,
filters.OrderingFilter,
)
filterset_fields = ["salmon_version", "index_type"]
ordering_fields = ("created_at", "salmon_version")
ordering = ("-created_at",)
def get_queryset(self):
invalid_filters = check_filters(
self, special_filters=["organism__name", "result_id", "length"]
)
if invalid_filters:
raise InvalidFilters(invalid_filters=invalid_filters)
queryset = OrganismIndex.public_objects.all()
organism_name = self.request.query_params.get("organism__name", None)
if organism_name is not None:
queryset = queryset.filter(organism__name=organism_name.upper())
# https://github.com/AlexsLemonade/refinebio/issues/2459
# It looks like when we set `result_id` as a filterset field,
# django_forms goes nuts and tries to call __str__ on every single
# computational result in our database trying to find all of the
# different possible computational_results. So let's just take care of
# this one ourselves.
result_id = self.request.query_params.get("result_id", None)
if result_id is not None:
queryset = queryset.filter(result_id=result_id)
length = self.request.query_params.get("length", None)
if length is not None:
index_type = "TRANSCRIPTOME_{}".format(length.upper())
queryset = queryset.filter(index_type=index_type)
return queryset
@method_decorator(
name="get",
decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
name="id",
in_=openapi.IN_PATH,
type=openapi.TYPE_NUMBER,
description="Transcriptome Index Id eg `1`",
),
]
),
)
class TranscriptomeIndexDetailView(generics.RetrieveAPIView):
"""
Gets the S3 url associated with the organism and length, along with other metadata about
the transcriptome index we have stored.
"""
serializer_class = OrganismIndexSerializer
lookup_field = "id"
queryset = OrganismIndex.public_objects.all()
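# Hedged wiring sketch (not part of the original module): one way these views
# could be registered in a urls.py. The URL paths are assumptions, not taken
# from the project's actual routing.
#
#   from django.urls import path
#
#   urlpatterns = [
#       path("transcriptome_indices/", TranscriptomeIndexListView.as_view()),
#       path("transcriptome_indices/<int:id>/", TranscriptomeIndexDetailView.as_view()),
#   ]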
|
# -*- coding: utf-8 -*-
"""
String
collection of string related functions
Created on Sat May 2 19:05:23 2020
@author: Tim
"""
# return copy of given list of strings with suffix appended to each string
def string_append(strings: list, suffix: str) -> list:
    temp = []
    for s in strings:
        temp.append(s + suffix)
    return temp
# Test Cases
suffix = ".jpg"
lst1 = ["tim", "star", "brooke"]
lst2 = string_append(lst1, suffix)
# print the new list with suffix
print(lst2)
# here we can see that the original list has not been mutated
print(lst1)
|
from typing import List, Union
from pythonequipmentdrivers import Scpi_Instrument
from time import sleep
import numpy as np
class Agilent_N893XX(Scpi_Instrument):
"""
Agilent_N893XX(address)
address : str, address of the connected power supply
    object for accessing basic functionality of the Agilent_N893XX supply
"""
def set_state(self, state: bool) -> None:
"""
set_state(state)
Enables/disables the output of the supply
Args:
state (bool): Supply state (True == enabled, False == disabled)
"""
self.instrument.write(f'OUTP:STAT {1 if state else 0}')
def get_state(self) -> bool:
"""
get_state()
        Retrieves the current state of the output of the supply.
Returns:
bool: Supply state (True == enabled, False == disabled)
"""
response = self.instrument.query('OUTP:STAT?')
return ('1' in response)
def on(self) -> None:
"""
on()
        Enables the relay for the power supply's output; equivalent to
set_state(True).
"""
self.set_state(True)
def off(self) -> None:
"""
off()
        Disables the relay for the power supply's output; equivalent to
set_state(False).
"""
self.set_state(False)
def toggle(self, return_state: bool = False) -> Union[None, bool]:
"""
toggle(return_state=False)
Reverses the current state of the Supply's output
If return_state = True the boolean state of the supply after toggle()
is executed will be returned.
Args:
return_state (bool, optional): Whether or not to return the state
of the supply after changing its state. Defaults to False.
Returns:
Union[None, bool]: If return_state == True returns the Supply state
(True == enabled, False == disabled), else returns None
"""
self.set_state(self.get_state() ^ True)
if return_state:
return self.get_state()
def set_voltage(self, voltage: float) -> None:
"""
set_voltage(voltage)
Sets the output voltage setpoint for the supply.
Args:
voltage (float): output voltage setpoint in Volts DC.
"""
self.instrument.write(f'SOUR:VOLT {float(voltage)}')
def get_voltage(self) -> float:
"""
get_voltage()
        Retrieves the current value of the output voltage setpoint.
Returns:
float: Output voltage setpoint in Volts DC.
"""
response = self.instrument.query('SOUR:VOLT?')
return float(response)
def set_current(self, current: float) -> None:
"""
set_current(current)
Sets the current limit threshold for the power supply.
Args:
current (float): Current Limit setpoint in Amps DC.
"""
self.instrument.write(f'SOUR:CURR {float(current)}')
def get_current(self) -> float:
"""
get_current()
        Retrieves the current limit threshold for the power supply.
Returns:
float: Current Limit setpoint in Amps DC.
"""
response = self.instrument.query('SOUR:CURR?')
return float(response)
def measure_voltage(self) -> float:
"""
measure_voltage()
        Retrieves a measurement of the voltage present across the supply's output.
Returns:
float: Measured Voltage in Volts DC
"""
response = self.instrument.query('MEAS:VOLT?')
return float(response)
def measure_current(self) -> float:
"""
measure_current()
        Retrieves a measurement of the current flowing through the supply's output.
Returns:
float: Measured Current in Amps DC.
"""
response = self.instrument.query('MEAS:CURR?')
return float(response)
def measure_power(self) -> float:
"""
measure_power()
        Retrieves a measurement of the power drawn from the supply.
Note: This command is only supported in the SGI Version of the supply.
Returns:
float: Measured power in Watts.
"""
response = self.instrument.query('MEAS:POW?')
return float(response)
def set_over_voltage_protection(self, voltage: float) -> None:
"""
set_over_voltage_protection(voltage)
Configures the OVP setpoint of the supply.
Args:
voltage (float): Over voltage protection set-point in Volts DC.
"""
self.instrument.write(f'SOUR:VOLT:PROT {float(voltage)}')
def get_over_voltage_protection(self) -> float:
"""
        get_over_voltage_protection()
        Retrieves the current value of the OVP setpoint of the supply.
Returns:
float: Over voltage protection set-point in Volts DC.
"""
response = self.instrument.query('SOUR:VOLT:PROT?')
return float(response)
def set_over_current_protection(self, current: float) -> None:
"""
set_over_current_protection(current)
Configures the OCP setpoint of the supply.
Args:
current (float): Over current protection set-point in Amps DC.
"""
self.instrument.write(f'SOUR:CURR:LIM {float(current)}')
def get_over_current_protection(self) -> float:
"""
        get_over_current_protection()
        Retrieves the current value of the OCP setpoint of the supply.
Returns:
float: Over current protection set-point in Amps DC.
"""
response = self.instrument.query('SOUR:CURR:LIM?')
return float(response)
def pop_error_queue(self) -> Union[str, None]:
"""
pop_error_queue()
Retrieves a summary information of the error at the front of the error
queue (FIFO). Information consists of an error number and some
descriptive text. If the error queue is empty this function returns
None. To clear the queue either repeatedly pop elements off the queue
until it is empty or call the self.cls() method.
Returns:
Union[str, None]: Error summary information for the first item in
the error queue or None if the queue is empty.
"""
response = self.instrument.query('SYST:ERR?')
if response[0] == '0':
return None
return response.strip()
def error_queue(self) -> List:
"""
error_queue()
Retrieves the summary information for all errors currently in the error
queue (FIFO), clearing it in the process. Information for each error
consists of an error number and some descriptive text. If the error
queue is empty this function returns an empty list.
Returns:
List: a list of error summary information for the errors in the
                error queue, ordered by occurrence.
"""
queue = []
while True:
error = self.pop_error_queue()
if error is None:
break
queue.append(error)
return queue
def set_local(self, state: bool) -> None:
"""
set_local(state)
Forces the supply to local or remote state.
Args:
state (bool): local state, if True the supply can be locally
operated through the front panel else the front panel is locked
from manual use and the supply must be adjusted remotely.
"""
self.instrument.write(f'SYST:LOCAL {1 if state else 0}')
def get_local(self) -> bool:
"""
get_local()
Returns whether the supply is in a local or remote state.
Returns:
bool: local state, if True the supply can be locally operated
through the front panel else the front panel is locked from
manual use and the supply must be adjusted remotely.
"""
response = self.instrument.query('SYST:LOCAL?')
return ('ON' in response)
def pulse(self, level: float, duration: float) -> None:
"""
pulse(level, duration)
Generates a square pulse with height and duration specified by level
and duration. The supply will start and return to the previous voltage
level set on the supply before the execution of pulse(). "level" can be
less than or greater than the previous voltage setpoint.
Args:
level (float): Voltage level of pulse in Volts DC
duration (float): Duration of the pulse in seconds
"""
start_level = self.get_voltage()
self.set_voltage(level)
sleep(duration)
self.set_voltage(start_level)
def ramp(self, start: float, stop: float,
n: int = 100, dt: float = 0.01) -> None:
"""
ramp(start, stop, n=100, dt=0.01)
Generates a linear ramp on the supply's voltage specified by the
parameters start, stop, n, and dt.
The input of the supply should be enabled before executing this
command. "start" can be higher than "stop" or vise-versa. The minimum
dt is limited by the communication speed of the interface used to
communicate with this device.
Args:
start (float): Initial voltage setpoint of the ramp in Volts DC.
stop (float): Final voltage setpoint of the ramp in Volts DC.
n (int, optional): Number of points in the ramp between "start" and
"stop". Defaults to 100.
dt (float, optional): Time between changes in the value of the
setpoint in seconds. Defaults to 0.01.
"""
for v in np.linspace(float(start), float(stop), int(n)):
self.set_voltage(v)
sleep(dt)
def slew(self, start: float, stop: float, n: int = 100,
dt: float = 0.01, dwell: float = 0) -> None:
"""
        slew(start, stop, n=100, dt=0.01, dwell=0)
Generates a triangular waveform on the supply's voltage specified by
the parameters start, stop, n, and dt.
        Optionally, a dwell can be added at the top of the waveform to create
a trapezoidal voltage shape.
The input of the supply should be enabled before executing this
command. "start" can be higher than "stop" or vise-versa. The minimum
dt is limited by the communication speed of the interface used to
communicate with this device.
Args:
start (float): Initial voltage setpoint of the ramp in Volts DC.
stop (float): Midpoint voltage setpoint of the ramp in Volts DC.
n (int, optional): Number of points in the ramp between "start" and
"stop". Defaults to 100.
dt (float, optional): Time between changes in the value of the
setpoint in seconds. Defaults to 0.01.
dwell (float, optional): Time to dwell at the "stop" value before
ramping back to "start". Defaults to 0.
"""
self.ramp(start, stop, n=n, dt=dt)
sleep(dwell)
self.ramp(stop, start, n=n, dt=dt)
# def clear_errors(self):
# response = self.read_error().split(',')
# if response[0] != '+0':
# self.clear_errors()
# def get_ident(self):
# ''' Return full ID of scope '''
# return self.visa.query("*IDN?")
# def parallel_poweron(self):
# ''' Parallel Operation PowerON Command '''
# return self.visa.set("*RCL0")
# def clear(self):
# ''' initialize supply '''
# self.visa.write('CLS')
# return
# def get_Iout(self):
# 'Return output current'
# return float(self.visa.query('MEAS:CURR?'))
# def get_Iset(self):
# """Float of the current setpoint"""
# return float(self.visa.query('CURR?'))
# def get_Vout(self):
# ''' float of measured output voltage '''
# return float(self.visa.query('MEAS:VOLT?'))
# def get_Vset(self):
# """Float of the voltage setpoint"""
# return float(self.visa.query('VOLT?'))
# def read_error(self):
# return self.visa.query('SYST:ERR?')
# def set_Vout(self, setting):
# ''' CHAR, int or float 5V 5 V 500 MV '''
# self.visa.write('VOLTAGE ' + str(setting))
# return
# def set_Iout(self, setting):
# ''' CHAR, int or float 5A 5 A 50 MA '''
# self.visa.write('CURRENT ' + str(setting))
# return
# def set_OVP(self, setting):
# ''' CHAR, int or float 5V 5 V 500 MV '''
# self.visa.write('VOLTAGE:PROTECTION ' + str(setting))
# return
# def set_state(self, setting):
# '''
# Enable/disable output\n
# setting = "ON" or "OFF"\n
# This is a duplicate method to match other device drivers
# '''
# self.visa.write('OUTPUT '+ str(setting))
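# Hedged usage sketch (not part of the original driver): the VISA resource
# string below is a placeholder; substitute the address of the connected supply.
#
#   supply = Agilent_N893XX('USB0::0x0957::0x8B18::MY12345678::INSTR')
#   supply.set_voltage(12.0)
#   supply.set_current(2.0)
#   supply.on()
#   print(supply.measure_voltage(), supply.measure_current())
#   supply.ramp(start=12.0, stop=5.0, n=50, dt=0.02)
#   supply.off()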
|
#! /usr/bin/python
# -*- coding: iso-8859-15
num = int(input("Escriba número: "))
if num > 0:
print("positivo")
elif num < 0:
print("negativo")
else:
print("0")
|
from __future__ import absolute_import
from PyQt4.QtGui import QToolBar, QLabel, QPixmap, QApplication, QCursor
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot, Qt
from views.core import centraltabwidget
from gui.wellplot.subplots.wellplotwidget import WellPlotWidget
from globalvalues.appsettings import AppSettings
from gui.wellplot.settings.templatesettingsdialog import TemplateSettingsDialog
from gui.signals.wellplotsignals import WellPlotSignals
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
import logging
logger = logging.getLogger('console')
__Instance = None
class Communicator(QtCore.QObject):
hideToolbar = QtCore.pyqtSignal()
showToolbar = QtCore.pyqtSignal()
# LogSettingsToolbar Singleton
def LogSettingsToolbar(*args, **kw):
global __Instance
if __Instance is None:
__Instance = __LogSettingsToolbar(*args, **kw)
return __Instance
class __LogSettingsToolbar(QToolBar):
def __init__(self, parent=None):
        QToolBar.__init__(self, parent)
self.parent = parent
        self.toolbar = None
        # Defaults for attributes read in actionSettingsTriggered(); assumed to be
        # assigned by the owning widget before the settings dialog is opened.
        self.well = None
        self.logSet = None
self.wellPlotSignals = WellPlotSignals()
self.communicator = Communicator()
self.initUI()
def initUI(self):
#self.toolBarRHS = QtGui.QToolBar(self.parent)
#self.setObjectName(_fromUtf8("toolBarRHS"))
self.actionSettings = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"settings_eclipse.gif"), '&Settings', self.parent)
self.actionSettings.setObjectName(_fromUtf8("actionSettings"))
self.actionSettings.setStatusTip('Settings')
#self.actionZoomIn = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"zoom-in-5.png"), '&Zoom in', self.parent)
#self.actionZoomIn.setObjectName(_fromUtf8("actionZoomIn"))
#self.actionZoomIn.setStatusTip('Zoom in')
#self.actionZoomOut = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"zoom-out-5.png"), '&Zoom out', self.parent)
#self.actionZoomOut.setObjectName(_fromUtf8("actionZoomOut"))
#self.actionZoomOut.setStatusTip('Zoom out')
self.actionPoint = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"pointer_edit-select.png"), '&Point', self.parent)
self.actionPoint.setObjectName(_fromUtf8("actionPoint"))
self.actionPoint.setStatusTip('Point')
#self.actionPan = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"hand-cursor-16.png"), '&Pan', self.parent)
#self.actionPan.setObjectName(_fromUtf8("actionPan"))
#self.actionPan.setStatusTip('Pan')
self.actionZoomInVertical = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"zoom-in-5_vertical.png"), '&Vertical zoom in', self.parent)
self.actionZoomInVertical.setObjectName(_fromUtf8("actionZoomInVertical"))
self.actionZoomInVertical.setStatusTip('Zoom in vertically')
self.actionZoomOutVertical = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"zoom-out-5_vertical.png"), '&Vertical zoom out', self.parent)
self.actionZoomOutVertical.setObjectName(_fromUtf8("actionZoomOutVertical"))
self.actionZoomOutVertical.setStatusTip('Zoom out vertically')
#self.actionZoomBox = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"zoom-in-5_box2.png"), '&Box zoom', self.parent)
#self.actionZoomBox.setObjectName(_fromUtf8("actionZoomBox"))
#self.actionZoomBox.setStatusTip('Box zoom')
self.actionLevelLine = QtGui.QAction(QtGui.QIcon(AppSettings.ACTIONS_ICON_PATH+"snap-orto_hx.png"), '&Level', self.parent)
self.actionLevelLine.setObjectName(_fromUtf8("actionLevelLine"))
self.actionLevelLine.setStatusTip('Level line')
self.addAction(self.actionSettings)
self.addSeparator()
#self.addAction(self.actionZoomIn)
#self.addAction(self.actionZoomOut)
self.addAction(self.actionZoomInVertical)
self.addAction(self.actionZoomOutVertical)
#self.addAction(self.actionZoomBox)
self.addSeparator()
self.addAction(self.actionPoint)
#self.addAction(self.actionPan)
        self.addAction(self.actionLevelLine)
self.actionSettings.triggered.connect(self.actionSettingsTriggered)
#self.actionZoomIn.triggered.connect(self.actionZoomInTriggered)
#self.actionZoomOut.triggered.connect(self.actionZoomOutTriggered)
self.actionZoomInVertical.triggered.connect(self.actionZoomInVerticalTriggered)
self.actionZoomOutVertical.triggered.connect(self.actionZoomOutVerticalTriggered)
#self.actionZoomBox.triggered.connect(self.actionZoomBoxTriggered)
self.actionPoint.triggered.connect(self.actionPointTriggered)
#self.actionPan.triggered.connect(self.actionPanTriggered)
self.actionLevelLine.triggered.connect(self.actionHzLineTriggered)
self.wellPlotSignals.settingsOpenFired.connect(self.showSettingsSlot)
logger.debug("--initUI() showToolbar.emit()")
def emitShowToolbarSignal(self):
logger.debug(">>emitShowToolbarSignal() ")
self.communicator.showToolbar.emit()
@pyqtSlot()
def showSettingsSlot(self):
'''RMB on well plot, select settings triggers a signal intercepted here'''
self.actionSettingsTriggered()
def actionSettingsTriggered(self):
logger.debug(">>actionSettings()")
centralWidget = centraltabwidget.CentralTabWidget()
currentWidget = centralWidget.currentWidget()
#for widget in centralWidget.children():
if isinstance(currentWidget, WellPlotWidget):
logger.debug("--actionSettingsTriggered "+str(currentWidget.data))
logPlotData = currentWidget.logPlotData
            if self.well is not None:
                if self.logSet is None:
                    dialog = TemplateSettingsDialog(logPlotData, self.well, parent=self)
                else:
                    dialog = TemplateSettingsDialog(logPlotData, self.well, self.logSet, parent=self)
dialog.show()
'''
#Leave out for now, see TODO 4/7/15 for notes on options to connect up
def actionZoomInTriggered(self):
self.wellPlotSignals.toolbarZoomIn.emit()
def actionZoomOutTriggered(self):
self.wellPlotSignals.toolbarZoomOut.emit()
'''
def actionZoomInVerticalTriggered(self):
self.wellPlotSignals.toolbarZoomInVertical.emit()
def actionZoomOutVerticalTriggered(self):
self.wellPlotSignals.toolbarZoomOutVertical.emit()
'''
#Box zoom is more for a single track plot - eg a log QC rather than multiple tracks
#May want to add a test to initiation - if well plot, hide, if single track plot show
def actionZoomBoxTriggered(self):
logger.debug(">>actionZoomBox()")
self.wellPlotSignals.toolbarZoomBox.emit()
'''
def actionPointTriggered(self):
logger.debug(">>actionPoint()")
self.wellPlotSignals.toolbarPoint.emit()
'''
#Leave out for now, not core functionality
def actionPanTriggered(self):
logger.debug(">>actionPan()")
self.wellPlotSignals.toolbarPan.emit()
'''
def actionHzLineTriggered(self):
logger.debug(">>actionHzLineTriggered()")
self.wellPlotSignals.toolbarHzLine.emit()
|
import numpy as np
from glob import glob
from os.path import basename
def load_features(folder):
dataset = None
classmap = {}
for class_idx, filename in enumerate(glob('%s/*.csv' % folder)):
class_name = basename(filename)[:-4]
classmap[class_idx] = class_name
samples = np.loadtxt(filename, dtype=float, delimiter=',')
labels = np.ones((len(samples), 1)) * class_idx
samples = np.hstack((samples, labels))
dataset = samples if dataset is None else np.vstack((dataset, samples))
return dataset, classmap
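# Hedged usage sketch (not part of the original module): assumes a folder of
# per-class CSV feature files, e.g. 'features/'. The last column of the
# returned dataset holds the numeric class label.
#
#   dataset, classmap = load_features('features')
#   X, y = dataset[:, :-1], dataset[:, -1]
#   print(classmap, X.shape, y.shape)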
|
class Solution:
def firstUniqChar(self, s: str) -> int:
d = {}
seen = set()
for idx, c in enumerate(s):
if c not in seen:
d[c] = idx
seen.add(c)
elif c in d:
del d[c]
return min(d.values()) if d else -1
# Time: O(N)
# Space: O(N)
import collections
class Solution:
def firstUniqChar(self, s: str) -> int:
count = collections.Counter(s)
# find the index
for idx, ch in enumerate(s):
if count[ch] == 1:
return idx
return -1
# Time: O(N)
# Space: O(1)
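# Quick sanity checks for either implementation above (hedged example, not
# part of the original snippet):
#
#   assert Solution().firstUniqChar("leetcode") == 0
#   assert Solution().firstUniqChar("loveleetcode") == 2
#   assert Solution().firstUniqChar("aabb") == -1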
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import pyodbc
import yaml
import pprint
from sqlalchemy import create_engine, Column, MetaData, Table, Index
from sqlalchemy import Integer, String, Text, Float, Boolean, BigInteger, Numeric, SmallInteger,Unicode,UnicodeText
import ConfigParser, os
fileLocation = os.path.dirname(os.path.realpath(__file__))
inifile=fileLocation+'/sdeloader.cfg'
config = ConfigParser.ConfigParser()
config.read(inifile)
destination=config.get('Database','destination')
sourcePath=config.get('Files','sourcePath')
print "connecting to DB"
engine = create_engine(destination)
connection = engine.connect()
metadata = MetaData()
print "Setting up Tables"
invTypes = Table('invTypes',metadata,
Column('typeID',BigInteger,primary_key=True, autoincrement=False),
Column('groupID',Integer),
Column('typeName',Unicode(100)),
Column('description',UnicodeText),
Column('mass',Float),
Column('volume',Float),
Column('capacity',Float),
Column('portionSize',Integer),
Column('raceID',SmallInteger),
Column('basePrice',Numeric(scale=4,precision=19)),
Column('published',Boolean),
Column('marketGroupID',BigInteger),
Column('iconID',BigInteger),
Column('soundID',BigInteger)
)
Index('invTypes_groupid',invTypes.c.groupID)
trnTranslations = Table('trnTranslations',metadata,
Column('tcID',Integer,autoincrement=False),
Column('keyID',Integer,autoincrement=False),
Column('languageID',Unicode,autoincrement=False),
Column('text',UnicodeText)
)
certMasteries = Table('certMasteries',metadata,
Column('typeID',Integer),
Column('masteryLevel',Integer),
Column('certID',Integer))
invTraits = Table('invTraits',metadata,
Column('traitID',Integer,primary_key=True),
Column('typeID',Integer),
Column('skillID',Integer),
Column('bonus',Float),
Column('bonusText',Text),
Column('unitID',Integer))
metadata.create_all(engine,checkfirst=True)
print "opening Yaml"
with open(sourcePath+'typeIDs.yaml','r') as yamlstream:
print "importing"
trans = connection.begin()
typeids=yaml.load(yamlstream,Loader=yaml.CSafeLoader)
print "Yaml Processed into memory"
for typeid in typeids:
connection.execute(invTypes.insert(),
typeID=typeid,
groupID=typeids[typeid].get('groupID',0),
typeName=typeids[typeid].get('name',{}).get('en','').decode('utf-8'),
description=typeids[typeid].get('description',{}).get('en','').decode('utf-8'),
mass=typeids[typeid].get('mass',0),
volume=typeids[typeid].get('volume',0),
capacity=typeids[typeid].get('capacity',0),
portionSize=typeids[typeid].get('portionSize'),
raceID=typeids[typeid].get('raceID'),
basePrice=typeids[typeid].get('basePrice'),
published=typeids[typeid].get('published',0),
marketGroupID=typeids[typeid].get('marketGroupID'),
iconID=typeids[typeid].get('iconID'),
soundID=typeids[typeid].get('soundID'))
if typeids[typeid].has_key("masteries"):
for level in typeids[typeid]["masteries"]:
for cert in typeids[typeid]["masteries"][level]:
connection.execute(certMasteries.insert(),
typeID=typeid,
masteryLevel=level,
certID=cert)
if (typeids[typeid].has_key('name')):
for lang in typeids[typeid]['name']:
connection.execute(trnTranslations.insert(),tcID=8,keyID=typeid,languageID=lang.decode('utf-8'),text=typeids[typeid]['name'][lang].decode('utf-8'))
if (typeids[typeid].has_key('description')):
for lang in typeids[typeid]['description']:
connection.execute(trnTranslations.insert(),tcID=33,keyID=typeid,languageID=lang.decode('utf-8'),text=typeids[typeid]['description'][lang].decode('utf-8'))
if (typeids[typeid].has_key('traits')):
for skill in typeids[typeid]['traits']:
for trait in typeids[typeid]['traits'][skill]:
result=connection.execute(invTraits.insert(),
typeID=typeid,
skillID=skill,
bonus=typeids[typeid]['traits'][skill][trait].get('bonus'),
bonusText=typeids[typeid]['traits'][skill][trait].get('bonusText',{}).get('en',''),
unitID=typeids[typeid]['traits'][skill][trait].get('unitID'))
traitid=result.inserted_primary_key
for languageid in typeids[typeid]['traits'][skill][trait].get('bonusText',{}):
connection.execute(trnTranslations.insert(),tcID=1001,keyID=traitid[0],languageID=languageid.decode('utf-8'),text=typeids[typeid]['traits'][skill][trait]['bonusText'][languageid].decode('utf-8'))
trans.commit()
|
# Generated by Django 3.1.1 on 2020-09-23 10:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AbstractObject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='test_abstract_object', max_length=50)),
],
options={
'db_table': 'abstract_models',
'default_permissions': (),
},
),
]
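# Hedged sketch (not part of the original migration): a models.py definition
# consistent with the migration above would look roughly like this.
#
#   from django.db import models
#
#   class AbstractObject(models.Model):
#       name = models.CharField(default='test_abstract_object', max_length=50)
#
#       class Meta:
#           db_table = 'abstract_models'
#           default_permissions = ()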
|
from rest_framework.test import APITestCase
from rest_framework.exceptions import APIException
from ..custom_validations import validate_avatar as vd
class ValidationTest(APITestCase):
""" Class to test custom validations."""
    def setUp(self):
"""
Sample avatars
"""
self.avatar_1 = "test.pdf"
self.avatar_2 = "test.docx"
self.avatar_3 = "test.py"
self.avatar_4 = None
def test_pdf_avatar(self):
"""
If user uploads .pdf
"""
with self.assertRaises(APIException) as e:
vd(self.avatar_1)
self.assertIn('files are accepted', str(e.exception))
def test_doc_avatar(self):
"""
If user uploads .docx
"""
with self.assertRaises(APIException) as e:
vd(self.avatar_2)
self.assertIn('files are accepted', str(e.exception))
def test_py_avatar(self):
"""
If user uploads .py
"""
with self.assertRaises(APIException) as e:
vd(self.avatar_3)
self.assertIn('files are accepted', str(e.exception))
def test_no_avatar(self):
"""
If user does not provide avatar
"""
response = vd(self.avatar_4)
self.assertEqual(response, True)
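# Hedged sketch (not part of the test module): a validate_avatar implementation
# consistent with these tests. The exact set of accepted extensions and the
# error wording are assumptions beyond what the tests pin down.
#
#   from rest_framework.exceptions import APIException
#
#   def validate_avatar(avatar):
#       if avatar is None:
#           return True
#       if not str(avatar).lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
#           raise APIException('Only image files are accepted.')
#       return True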
|
#!/usr/bin/env python
from distutils.core import setup
# patch distutils if it can't cope with the "classifiers" or
# "download_url" keywords
from sys import version
if version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
import streamxmlwriter
setup(name="streamxmlwriter",
version=streamxmlwriter.__version__,
description="Simple library for incrementally writing XML files of arbitrary size",
long_description=streamxmlwriter.__doc__,
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Text Processing :: Markup :: XML",
],
author="Filip Salomonsson",
author_email="filip.salomonsson@gmail.com",
url="http://github.com/filipsalomonsson/streamxmlwriter/tree/master",
py_modules=["streamxmlwriter"],
license="MIT",
)
|
from django.test import TestCase
from . models import Image, Profile, Comment, Like
# Create your tests here.
class ProfileTestClass(TestCase):
"""
    class that tests the characteristics of the Profile model
"""
def test_instance(self):
self.assertTrue(isinstance(self.profile,Profile))
def setUp(self):
self.profile = Profile(profile_photo ='test_profile_photo', bio = 'test_bio')
def tearDown(self):
Profile.objects.all().delete()
def test_save_profile(self):
self.profile.save_profile()
all_profiles = Profile.objects.all()
self.assertTrue(len(all_profiles)>0)
def test_delete_profile(self):
"""
method that tests the delete_profile method
"""
self.profile.save_profile()
profile2 = Profile(profile_photo ='test_profile_photo2',bio = 'test_bio2')
profile2.save_profile()
self.profile.delete_profile()
all_profiles = Profile.objects.all()
self.assertTrue(len(all_profiles)==1)
def test_find_profile(self):
"""
method that tests the find_profile method
"""
self.profile.save_profile()
profile2 = Profile(profile_photo ='test_profile_photo2',bio = 'test_bio2')
profile2.save_profile()
search_profile = Profile.find_profile('test_bio2')
self.assertFalse(len(search_profile)==1)
class ImageTestClass(TestCase):
"""
A class that tests the Image class model
"""
def test_instance(self):
"""
method that tests if image objects are instantiated correctly
"""
self.assertTrue(isinstance(self.image,Image))
def setUp(self):
"""
        method that runs at the beginning of each test
"""
self.image = Image(image = 'image_url',image_name ='vin' , image_caption='hey there',)
def tearDown(self):
Image.objects.all().delete()
def test_save_image(self):
"""
method that tests the save method of model image
"""
self.image.save_image()
all_images= Image.objects.all()
self.assertTrue(len(all_images)>0)
def test_delete_images(self):
"""
method that tests the delete_images method
"""
self.image.save_image()
new_image = Image(image = 'image_url2',image_name ='vin2' , image_caption='hey there2',)
new_image.save_image()
self.image.delete_image()
all_images = Image.objects.all()
self.assertTrue(len(all_images)==1)
def test_update_caption(self):
"""
method that tests the update caption
"""
self.image.save_image()
image = Image.objects.get(image ='image_url')
image.update_caption('new caption')
image = Image.objects.get(image ='image_url')
self.assertTrue(image.image_caption=='new caption')
def test_get_image_by_id(self):
"""
method that tests the get image by id function of image model
"""
pass
# found_img = self.image_test.get_image_by_id(self.image_test.id)
# img = Image.objects.filter(id=self.image_test.id)
# self.assertTrue(found_img,img)
class CommentTestClass(TestCase):
"""
class that tests the characteristics of the Comment model
"""
def test_instance(self):
"""
Test that checks if the created comment is an instance of the class Comment
"""
self.assertTrue(isinstance(self.new_comment,Comment))
def setUp(self):
"""
        method that runs at the beginning of each test
"""
self.new_comment = Comment(comment= "this is a test comment")
self.new_comment.save()
def tearDown(self):
Comment.objects.all().delete()
def test_save_comment(self):
"""
method that tests save method of the Comment model
"""
self.new_comment.save_comment()
all_comments = Comment.objects.all()
self.assertTrue(len(all_comments)>0)
def test_delete_comment(self):
"""
        method that tests the delete_comment method
"""
self.new_comment.save_comment()
comment2 = Comment(comment='this is the second test comment')
comment2.save_comment()
self.new_comment.delete_comment()
all_comments = Comment.objects.all()
self.assertTrue(len(all_comments)==1)
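# Hedged sketch (not part of the test module): the kind of model helpers these
# tests rely on. Method names come from the tests; field types and bodies are
# assumptions.
#
#   class Comment(models.Model):
#       comment = models.TextField()
#
#       def save_comment(self):
#           self.save()
#
#       def delete_comment(self):
#           self.delete()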
|
import os
import argparse
from collections import OrderedDict
from datetime import datetime
import torch
import numpy as np
from torch.utils.data import DataLoader
from lib.dataset import ScanReferDataset
from models.snt import ShowAndTell
from models.tdbu import ShowAttendAndTell
from models.retr import Retrieval2D
from lib.conf import get_config, get_samples, verify_visual_feat
from lib.eval_helper import eval_cap
import h5py
def get_dataloader(batch_size, num_workers, shuffle, sample_list, scene_list, run_config, split):
dataset = ScanReferDataset(
split=split,
sample_list=sample_list,
scene_list=scene_list,
run_config=run_config
)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
collate_fn=dataset.collate_fn)
return dataset, dataloader
def get_model(args, run_config, dataset):
model_selection = args.model
feat_size = 0
add_global, add_target, add_context = verify_visual_feat(args.visual_feat)
if add_global:
feat_size += run_config.GLOBAL_FEATURE_SIZE
if add_target:
feat_size += run_config.TARGET_FEATURE_SIZE
assert feat_size != 0
if add_context and model_selection == 'satnt':
print("Using Show, Attend and Tell.")
model = ShowAttendAndTell(
device='cuda',
max_desc_len=run_config.MAX_DESC_LEN,
vocabulary=dataset.vocabulary,
embeddings=dataset.glove,
emb_size=run_config.EMBEDDING_SIZE,
feat_size=feat_size,
context_size=run_config.PROPOSAL_FEATURE_SIZE,
feat_input={'add_global': add_global, 'add_target': add_target},
hidden_size=run_config.DECODER_HIDDEN_SIZE,
)
elif model_selection == 'snt' and not add_context:
model = ShowAndTell(
device='cuda',
max_desc_len=run_config.MAX_DESC_LEN,
vocabulary=dataset.vocabulary,
embeddings=dataset.glove,
emb_size=run_config.EMBEDDING_SIZE,
feat_size=feat_size,
feat_input={'add_global': add_global, 'add_target': add_target},
hidden_size=run_config.DECODER_HIDDEN_SIZE,
)
else:
        raise NotImplementedError('Requested model {} is not implemented.'.format(model_selection))
# Load checkpoint
if args.ckpt_path is not None:
checkpoint = torch.load(args.ckpt_path)
# print(checkpoint.keys())
try:
model.load_state_dict(checkpoint, strict=True)
print("Loaded checkpoint from {}".format(args.ckpt_path))
except KeyError:
print("Checkpoint has the following keys available: ")
print(checkpoint.keys())
exit(0)
else:
print("No checkpoint specified. Please specify one by --ckpt_path.")
exit(0)
# to CUDA
model = model.cuda()
return model
def get_retrieval_model(args, run_config, train_dataset):
stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if args.tag: stamp += "_" + args.tag.upper()
retrieval_directory = os.path.join(run_config.PATH.OUTPUT_ROOT, stamp)
os.makedirs(retrieval_directory, exist_ok=True)
feat_size = run_config.TARGET_FEATURE_SIZE - 4
train_scene_list = train_dataset.scene_list
scanrefer = h5py.File(run_config.PATH.DB_PATH, 'r')
scanrefer_box_features = scanrefer['boxfeat']
scanrefer_oids = scanrefer['objectids']
# only take features that are in the train set and describe the target object.
ordered_train_feature_matrix = []
for sample_id, v in scanrefer_box_features.items():
if sample_id.split('-')[0] in train_scene_list:
target_object_id = int(sample_id.split('-')[1].split('_')[0])
object_ids = np.array(scanrefer_oids[sample_id])
target_idx = np.where(object_ids == int(target_object_id))[0]
object_feature = np.array(v)[target_idx, :].reshape(-1, feat_size)
ordered_train_feature_matrix.append((sample_id, object_feature))
ordered_train_feature_matrix = OrderedDict(ordered_train_feature_matrix)
model = Retrieval2D(
db_path=os.path.join(retrieval_directory, 'train_memory_map.dat'),
feat_size=feat_size,
vis_feat_dict=ordered_train_feature_matrix,
lang_ids=train_dataset.lang_ids
)
model.cuda()
model.eval()
return model, retrieval_directory
def eval_caption(args):
run_config = get_config(
exp_type=args.exp_type,
dataset=args.dataset,
viewpoint=args.viewpoint,
box=args.box
)
train_samples, train_scenes = get_samples(mode='train', key_type=run_config.TYPES.KEY_TYPE)
val_samples, val_scenes = get_samples(mode='val', key_type=run_config.TYPES.KEY_TYPE)
print('Number of training samples: ', len(train_samples))
print('Number of validation samples: ', len(val_samples))
train_dset, train_dloader = get_dataloader(
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False,
sample_list=train_samples,
scene_list=train_scenes,
run_config=run_config,
split='train'
)
val_dset, val_dloader = get_dataloader(
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False,
sample_list=val_samples,
scene_list=val_scenes,
run_config=run_config,
split='val'
)
retr_dir = None
folder = args.folder
if args.exp_type == 'ret':
model, retr_dir = get_retrieval_model(args=args, run_config=run_config, train_dataset=train_dset)
elif args.exp_type == 'nret':
model = get_model(args=args, run_config=run_config, dataset=val_dset)
else:
raise NotImplementedError('exp_type {} is not implemented.'.format(args.exp_type))
# evaluate
if retr_dir is not None:
folder = retr_dir
assert folder is not None
bleu, cider, rouge, meteor = eval_cap(
_global_iter_id=0,
model=model,
dataset=val_dset,
dataloader=val_dloader,
phase='val',
folder=folder,
max_len=run_config.MAX_DESC_LEN,
mode=args.exp_type,
extras=args.extras,
is_eval=True
)
# report
print("\n----------------------Evaluation-----------------------")
print("[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][0], max(bleu[1][0]), min(bleu[1][0])))
print("[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][1], max(bleu[1][1]), min(bleu[1][1])))
print("[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][2], max(bleu[1][2]), min(bleu[1][2])))
print("[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][3], max(bleu[1][3]), min(bleu[1][3])))
print("[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(cider[0], max(cider[1]), min(cider[1])))
print("[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(rouge[0], max(rouge[1]), min(rouge[1])))
print("[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(meteor[0], max(meteor[1]), min(meteor[1])))
print()
def eval_main(args):
# setting
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
# reproducibility
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
eval_caption(args)
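# Hedged sketch (not part of the original script): a minimal argument parser
# covering the attributes accessed above. Flag names mirror those attributes;
# defaults and choices are assumptions.
#
#   if __name__ == '__main__':
#       parser = argparse.ArgumentParser()
#       parser.add_argument('--exp_type', choices=['ret', 'nret'], required=True)
#       parser.add_argument('--model', default='snt')
#       parser.add_argument('--dataset', default='scanrefer')
#       parser.add_argument('--viewpoint', default=None)
#       parser.add_argument('--box', default=None)
#       parser.add_argument('--visual_feat', default='global-target')
#       parser.add_argument('--ckpt_path', default=None)
#       parser.add_argument('--folder', default=None)
#       parser.add_argument('--tag', default='')
#       parser.add_argument('--extras', default=None)
#       parser.add_argument('--batch_size', type=int, default=8)
#       parser.add_argument('--num_workers', type=int, default=4)
#       parser.add_argument('--gpu', default='0')
#       parser.add_argument('--seed', type=int, default=42)
#       eval_main(parser.parse_args())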
|
from collections import defaultdict
from dataclasses import dataclass
import structlog
from eth_utils import encode_hex, is_binary_address, to_canonical_address, to_hex
from gevent.lock import RLock
from web3.exceptions import BadFunctionCallOutput
from raiden.constants import (
EMPTY_BALANCE_HASH,
EMPTY_SIGNATURE,
LOCKSROOT_OF_NO_LOCKS,
NULL_ADDRESS_BYTES,
UINT256_MAX,
UNLOCK_TX_GAS_LIMIT,
)
from raiden.exceptions import (
BrokenPreconditionError,
DepositOverLimit,
DuplicatedChannelError,
InvalidChannelID,
InvalidSettleTimeout,
RaidenRecoverableError,
RaidenUnrecoverableError,
SamePeerAddress,
WithdrawMismatch,
)
from raiden.network.proxies.metadata import SmartContractMetadata
from raiden.network.proxies.utils import (
get_channel_participants_from_open_event,
log_transaction,
raise_on_call_returned_empty,
)
from raiden.network.rpc.client import JSONRPCClient, check_address_has_code
from raiden.network.rpc.transactions import check_transaction_threw
from raiden.transfer.channel import compute_locksroot
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.state import PendingLocksState
from raiden.transfer.utils import hash_balance_data
from raiden.utils.formatting import format_block_id, to_checksum_address
from raiden.utils.packing import pack_balance_proof, pack_signed_balance_proof, pack_withdraw
from raiden.utils.signer import recover
from raiden.utils.smart_contracts import safe_gas_limit
from raiden.utils.typing import (
TYPE_CHECKING,
AdditionalHash,
Address,
Any,
BalanceHash,
BlockExpiration,
BlockNumber,
BlockSpecification,
ChainID,
ChannelID,
Dict,
LockedAmount,
Locksroot,
NamedTuple,
Nonce,
Optional,
Signature,
T_ChannelID,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
TokenNetworkRegistryAddress,
WithdrawAmount,
typecheck,
)
from raiden_contracts.constants import (
CONTRACT_TOKEN_NETWORK,
ChannelInfoIndex,
ChannelState,
MessageTypeId,
ParticipantInfoIndex,
)
from raiden_contracts.contract_manager import ContractManager
if TYPE_CHECKING:
# pylint: disable=unused-import
from raiden.network.proxies.proxy_manager import ProxyManager
log = structlog.get_logger(__name__)
def raise_if_invalid_address_pair(address1: Address, address2: Address) -> None:
msg = "The null address is not allowed as a channel participant."
assert NULL_ADDRESS_BYTES not in (address1, address2), msg
msg = "Addresses must be in binary"
assert is_binary_address(address1) and is_binary_address(address2), msg
if address1 == address2:
raise SamePeerAddress("Using the same address for both participants is forbiden.")
class ChannelData(NamedTuple):
channel_identifier: ChannelID
settle_block_number: BlockNumber
state: ChannelState
class ParticipantDetails(NamedTuple):
address: Address
deposit: TokenAmount
withdrawn: WithdrawAmount
is_closer: bool
balance_hash: BalanceHash
nonce: Nonce
locksroot: Locksroot
locked_amount: LockedAmount
class ParticipantsDetails(NamedTuple):
our_details: ParticipantDetails
partner_details: ParticipantDetails
class ChannelDetails(NamedTuple):
chain_id: ChainID
token_address: TokenAddress
channel_data: ChannelData
participants_data: ParticipantsDetails
@dataclass
class TokenNetworkMetadata(SmartContractMetadata):
token_network_registry_address: Optional[TokenNetworkRegistryAddress]
class TokenNetwork:
def __init__(
self,
jsonrpc_client: JSONRPCClient,
contract_manager: ContractManager,
proxy_manager: "ProxyManager",
metadata: TokenNetworkMetadata,
block_identifier: BlockSpecification,
) -> None:
if not is_binary_address(metadata.address):
raise ValueError("Expected binary address format for token nework")
check_address_has_code(
client=jsonrpc_client,
address=Address(metadata.address),
contract_name=CONTRACT_TOKEN_NETWORK,
expected_code=metadata.runtime_bytecode,
given_block_identifier=block_identifier,
)
self.contract_manager = contract_manager
proxy = jsonrpc_client.new_contract_proxy(
abi=metadata.abi, contract_address=Address(metadata.address)
)
# These are constants
self._chain_id = proxy.contract.functions.chain_id().call()
self._token_address = TokenAddress(
to_canonical_address(proxy.contract.functions.token().call())
)
self.address = TokenNetworkAddress(metadata.address)
self.proxy = proxy
self.client = jsonrpc_client
self.node_address = self.client.address
self.metadata = metadata
self.token = proxy_manager.token(
token_address=self.token_address(), block_identifier=block_identifier
)
# Forbids concurrent operations on the same channel. This is important
# because some operations conflict with each other. E.g. deposit and
# close, in this case if the node is lucky the close will be performed
# before the deposit, and the deposit transactions will not be sent.
#
# Note: unlock doesn't have to be synchronized, after settlement the
# channel doesn't exist anymore.
self.channel_operations_lock: Dict[Address, RLock] = defaultdict(RLock)
self.opening_channels_count = 0
def chain_id(self) -> ChainID:
""" Return the token of this manager. """
return self._chain_id
def token_address(self) -> TokenAddress:
""" Return the token of this manager. """
return self._token_address
def channel_participant_deposit_limit(
self, block_identifier: BlockSpecification
) -> TokenAmount:
""" Return the deposit limit of a channel participant. """
return TokenAmount(
self.proxy.contract.functions.channel_participant_deposit_limit().call(
block_identifier=block_identifier
)
)
def token_network_deposit_limit(self, block_identifier: BlockSpecification) -> TokenAmount:
""" Return the token of this manager. """
return TokenAmount(
self.proxy.contract.functions.token_network_deposit_limit().call(
block_identifier=block_identifier
)
)
def safety_deprecation_switch(self, block_identifier: BlockSpecification) -> bool:
return self.proxy.contract.functions.safety_deprecation_switch().call(
block_identifier=block_identifier
)
def new_netting_channel(
self, partner: Address, settle_timeout: int, given_block_identifier: BlockSpecification
) -> ChannelID:
""" Creates a new channel in the TokenNetwork contract.
Args:
partner: The peer to open the channel with.
settle_timeout: The settle timeout to use for this channel.
given_block_identifier: The block identifier of the state change that
prompted this proxy action
Returns:
The ChannelID of the new netting channel.
"""
raise_if_invalid_address_pair(self.node_address, partner)
timeout_min = self.settlement_timeout_min()
timeout_max = self.settlement_timeout_max()
invalid_timeout = settle_timeout < timeout_min or settle_timeout > timeout_max
if invalid_timeout:
msg = (
f"settle_timeout must be in range [{timeout_min}, "
f"{timeout_max}], is {settle_timeout}"
)
raise InvalidSettleTimeout(msg)
# Currently only one channel per address pair is allowed. I.e. if the
# node sends two transactions to open a channel with the same partner
# in a row, the second transaction will fail. This lock prevents the
# second, wasteful transaction from happening.
with self.channel_operations_lock[partner]:
# check preconditions
try:
existing_channel_identifier = self.get_channel_identifier_or_none(
participant1=self.node_address,
participant2=partner,
block_identifier=given_block_identifier,
)
network_total_deposit = self.token.balance_of(
address=Address(self.address), block_identifier=given_block_identifier
)
limit = self.token_network_deposit_limit(block_identifier=given_block_identifier)
safety_deprecation_switch = self.safety_deprecation_switch(given_block_identifier)
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if existing_channel_identifier is not None:
raise BrokenPreconditionError(
"A channel with the given partner address already exists."
)
if network_total_deposit >= limit:
raise BrokenPreconditionError(
"Cannot open another channel, token network deposit limit reached."
)
if safety_deprecation_switch:
raise BrokenPreconditionError("This token network is deprecated.")
log_details = {
"node": to_checksum_address(self.node_address),
"contract": to_checksum_address(self.address),
"peer1": to_checksum_address(self.node_address),
"peer2": to_checksum_address(partner),
"settle_timeout": settle_timeout,
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "new_netting_channel", log_details):
self.opening_channels_count += 1
try:
channel_identifier = self._new_netting_channel(
partner, settle_timeout, log_details
)
finally:
self.opening_channels_count -= 1
log_details["channel_identifier"] = str(channel_identifier)
return channel_identifier
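    # Hedged usage sketch (not part of the original proxy): opening a channel
    # through an already-constructed TokenNetwork instance. The partner address
    # and settle timeout are placeholders.
    #
    #   channel_id = token_network.new_netting_channel(
    #       partner=partner_address,
    #       settle_timeout=500,
    #       given_block_identifier='latest',
    #   )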
def _new_netting_channel(
self, partner: Address, settle_timeout: int, log_details: Dict[Any, Any]
) -> ChannelID:
checking_block = self.client.get_checking_block()
gas_limit = self.proxy.estimate_gas(
checking_block,
"openChannel",
participant1=self.node_address,
participant2=partner,
settle_timeout=settle_timeout,
)
if not gas_limit:
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="openChannel",
transaction_executed=False,
required_gas=self.metadata.gas_measurements["TokenNetwork.openChannel"],
block_identifier=failed_at_blocknumber,
)
existing_channel_identifier = self.get_channel_identifier_or_none(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
)
if existing_channel_identifier is not None:
raise DuplicatedChannelError("Channel with given partner address already exists")
network_total_deposit = self.token.balance_of(
address=Address(self.address), block_identifier=failed_at_blockhash
)
limit = self.token_network_deposit_limit(block_identifier=failed_at_blockhash)
if network_total_deposit >= limit:
raise DepositOverLimit(
"Could open another channel, token network deposit limit has been reached."
)
if self.safety_deprecation_switch(block_identifier=failed_at_blockhash):
raise RaidenRecoverableError("This token network is deprecated.")
raise RaidenRecoverableError(
f"Creating a new channel will fail - Gas estimation failed for "
f"unknown reason. Reference block {failed_at_blockhash} "
f"{failed_at_blocknumber}."
)
else:
gas_limit = safe_gas_limit(
gas_limit, self.metadata.gas_measurements["TokenNetwork.openChannel"]
)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
"openChannel",
gas_limit,
participant1=self.node_address,
participant2=partner,
settle_timeout=settle_timeout,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
failed_at_blockhash = encode_hex(failed_receipt["blockHash"])
existing_channel_identifier = self.get_channel_identifier_or_none(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
)
if existing_channel_identifier is not None:
raise DuplicatedChannelError(
"Channel with given partner address already exists"
)
network_total_deposit = self.token.balance_of(
address=Address(self.address), block_identifier=failed_at_blockhash
)
limit = self.token_network_deposit_limit(block_identifier=failed_at_blockhash)
if network_total_deposit >= limit:
raise DepositOverLimit(
"Could open another channel, token network deposit limit has been reached."
)
if self.safety_deprecation_switch(block_identifier=failed_at_blockhash):
raise RaidenRecoverableError("This token network is deprecated.")
raise RaidenRecoverableError("Creating new channel failed.")
receipt = self.client.get_transaction_receipt(transaction_hash)
channel_identifier: ChannelID = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=encode_hex(receipt["blockHash"]),
).channel_identifier
return channel_identifier
def get_channel_identifier(
self, participant1: Address, participant2: Address, block_identifier: BlockSpecification
) -> ChannelID:
"""Return the channel identifier for the opened channel among
`(participant1, participant2)`.
Raises:
            RaidenRecoverableError: If there is no open channel among
`(participant1, participant2)`. Note this is the case even if
there is a channel in a settled state.
BadFunctionCallOutput: If the `block_identifier` points to a block
prior to the deployment of the TokenNetwork.
            SamePeerAddress: If both addresses are equal.
"""
raise_if_invalid_address_pair(participant1, participant2)
channel_identifier = self.proxy.contract.functions.getChannelIdentifier(
participant=to_checksum_address(participant1),
partner=to_checksum_address(participant2),
).call(block_identifier=block_identifier)
if channel_identifier == 0:
msg = (
f"getChannelIdentifier returned 0, meaning "
f"no channel currently exists between "
f"{to_checksum_address(participant1)} and "
f"{to_checksum_address(participant2)}"
)
raise RaidenRecoverableError(msg)
return channel_identifier
def get_channel_identifier_or_none(
self, participant1: Address, participant2: Address, block_identifier: BlockSpecification
) -> Optional[ChannelID]:
""" Returns the channel identifier if an open channel exists, else None. """
try:
return self.get_channel_identifier(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
)
except RaidenRecoverableError:
return None
def _detail_participant(
self,
channel_identifier: ChannelID,
detail_for: Address,
partner: Address,
block_identifier: BlockSpecification,
) -> ParticipantDetails:
""" Returns a dictionary with the channel participant information. """
raise_if_invalid_address_pair(detail_for, partner)
data = self.proxy.contract.functions.getChannelParticipantInfo(
channel_identifier=channel_identifier, participant=detail_for, partner=partner
).call(block_identifier=block_identifier)
return ParticipantDetails(
address=detail_for,
deposit=data[ParticipantInfoIndex.DEPOSIT],
withdrawn=data[ParticipantInfoIndex.WITHDRAWN],
is_closer=data[ParticipantInfoIndex.IS_CLOSER],
balance_hash=data[ParticipantInfoIndex.BALANCE_HASH],
nonce=data[ParticipantInfoIndex.NONCE],
locksroot=data[ParticipantInfoIndex.LOCKSROOT],
locked_amount=data[ParticipantInfoIndex.LOCKED_AMOUNT],
)
def _detail_channel(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> ChannelData:
""" Returns a ChannelData instance with the channel specific information.
If no specific channel_identifier is given then it tries to see if there
is a currently open channel and uses that identifier.
"""
raise_if_invalid_address_pair(participant1, participant2)
if channel_identifier is None:
channel_identifier = self.get_channel_identifier(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
)
elif not isinstance(channel_identifier, T_ChannelID): # pragma: no unittest
raise InvalidChannelID("channel_identifier must be of type T_ChannelID")
elif channel_identifier <= 0 or channel_identifier > UINT256_MAX:
raise InvalidChannelID(
"channel_identifier must be larger then 0 and smaller then uint256"
)
channel_data = self.proxy.contract.functions.getChannelInfo(
channel_identifier=channel_identifier,
participant1=participant1,
participant2=participant2,
).call(block_identifier=block_identifier)
return ChannelData(
channel_identifier=channel_identifier,
settle_block_number=channel_data[ChannelInfoIndex.SETTLE_BLOCK],
state=channel_data[ChannelInfoIndex.STATE],
)
def detail_participants(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> ParticipantsDetails:
""" Returns a ParticipantsDetails instance with the participants'
channel information.
Note:
For now one of the participants has to be the node_address
"""
if self.node_address not in (participant1, participant2):
raise ValueError("One participant must be the node address")
if self.node_address == participant2:
participant1, participant2 = participant2, participant1
if channel_identifier is None:
channel_identifier = self.get_channel_identifier(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
)
elif not isinstance(channel_identifier, T_ChannelID): # pragma: no unittest
raise InvalidChannelID("channel_identifier must be of type T_ChannelID")
elif channel_identifier <= 0 or channel_identifier > UINT256_MAX:
raise InvalidChannelID(
"channel_identifier must be larger then 0 and smaller then uint256"
)
our_data = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=participant1,
partner=participant2,
block_identifier=block_identifier,
)
partner_data = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=participant2,
partner=participant1,
block_identifier=block_identifier,
)
return ParticipantsDetails(our_details=our_data, partner_details=partner_data)
def detail(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> ChannelDetails:
""" Returns a ChannelDetails instance with all the details of the
channel and the channel participants.
Note:
For now one of the participants has to be the node_address
"""
if self.node_address not in (participant1, participant2):
raise ValueError("One participant must be the node address")
if self.node_address == participant2:
participant1, participant2 = participant2, participant1
channel_data = self._detail_channel(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
participants_data = self.detail_participants(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_data.channel_identifier,
)
chain_id = self.chain_id()
return ChannelDetails(
chain_id=chain_id,
token_address=self.token_address(),
channel_data=channel_data,
participants_data=participants_data,
)
def settlement_timeout_min(self) -> int:
""" Returns the minimal settlement timeout for the token network. """
return self.proxy.contract.functions.settlement_timeout_min().call()
def settlement_timeout_max(self) -> int:
""" Returns the maximal settlement timeout for the token network. """
return self.proxy.contract.functions.settlement_timeout_max().call()
def channel_is_opened(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
""" Returns true if the channel is in an open state, false otherwise. """
try:
channel_data = self._detail_channel(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
except RaidenRecoverableError:
return False
return channel_data.state == ChannelState.OPENED
def channel_is_closed(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
""" Returns true if the channel is in a closed state, false otherwise. """
try:
channel_data = self._detail_channel(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
except RaidenRecoverableError:
return False
return channel_data.state == ChannelState.CLOSED
def channel_is_settled(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
""" Returns true if the channel is in a settled state, false otherwise. """
try:
channel_data = self._detail_channel(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
except RaidenRecoverableError:
return False
return channel_data.state >= ChannelState.SETTLED
def can_transfer(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
""" Returns True if the channel is opened and the node has deposit in
it.
Note: Having a deposit does not imply having a balance for off-chain
transfers. """
opened = self.channel_is_opened(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
if opened is False:
return False
deposit = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=participant1,
partner=participant2,
block_identifier=block_identifier,
).deposit
return deposit > 0
def set_total_deposit(
self,
given_block_identifier: BlockSpecification,
channel_identifier: ChannelID,
total_deposit: TokenAmount,
partner: Address,
) -> None:
""" Set channel's total deposit.
        `total_deposit` has to be monotonically increasing; this is enforced
        by the `TokenNetwork` smart contract. It is done for the same reason
        that balance proofs have a monotonically increasing transferred
        amount: it simplifies the analysis of bad behavior and the code that
        handles outdated balance proofs.
Races to `set_total_deposit` are handled by the smart contract, where
largest total deposit wins. The end balance of the funding accounts is
undefined. E.g.
- Acc1 calls set_total_deposit with 10 tokens
- Acc2 calls set_total_deposit with 13 tokens
- If Acc2's transaction is mined first, then Acc1 token supply is left intact.
- If Acc1's transaction is mined first, then Acc2 will only move 3 tokens.
Races for the same account don't have any unexpected side-effect.
Raises:
            DepositMismatch: If the newly requested total deposit is lower
                than the existing total deposit on-chain at the
                `given_block_identifier`.
            RaidenRecoverableError: If the channel was closed while the
                deposit was in transit.
            RaidenUnrecoverableError: If the transaction was successful but
                the deposited amount is not as large as the requested value.
            RuntimeError: If the token address is empty.
            ValueError: If an argument is of an invalid type.
"""
typecheck(total_deposit, int)
        if total_deposit <= 0 or total_deposit > UINT256_MAX:
msg = f"Total deposit {total_deposit} is not in range [1, {UINT256_MAX}]"
raise BrokenPreconditionError(msg)
# `channel_operations_lock` is used to serialize conflicting channel
# operations. E.g. this deposit and a close.
#
        # A channel deposit with the ERC20 standard has two requirements:
        # first, the user's account must have enough balance; second, the
        # token network must have an allowance at least as high as the value
        # of the deposit. To prevent another thread from concurrently changing
        # these values and invalidating the deposit, the token lock is
        # acquired. This does not prevent conflicting operations from being
        # requested, but it does enforce an order, which makes it easier to
        # reason about errors.
#
# The account balance is checked implicitly by the gas estimation. If
# there is not enough balance in the account either a require or assert
# is hit, which makes the gas estimation fail. However, this only works
# for transactions that have been mined, so there can not be two
# transactions in-flight that move tokens from the same address.
#
        # The token network allowance is a bit trickier, because consecutive
        # calls to approve are not idempotent. If two channels are being opened
# at the same time, the transactions must not be ordered as `approve +
# approve + deposit + deposit`, since the second approve will overwrite
# the first and at least one of the deposits will fail. Because of
# this, the allowance can not change until the deposit is done.
with self.channel_operations_lock[partner], self.token.token_lock:
try:
queried_channel_identifier = self.get_channel_identifier_or_none(
participant1=self.node_address,
participant2=partner,
block_identifier=given_block_identifier,
)
channel_onchain_detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=given_block_identifier,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=given_block_identifier,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=given_block_identifier,
)
current_balance = self.token.balance_of(
address=self.node_address, block_identifier=given_block_identifier
)
safety_deprecation_switch = self.safety_deprecation_switch(
block_identifier=given_block_identifier
)
token_network_deposit_limit = self.token_network_deposit_limit(
block_identifier=given_block_identifier
)
channel_participant_deposit_limit = self.channel_participant_deposit_limit(
block_identifier=given_block_identifier
)
network_total_deposit = self.token.balance_of(
address=Address(self.address), block_identifier=given_block_identifier
)
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier="latest",
)
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if queried_channel_identifier != channel_identifier:
msg = (
f"There is a channel open between "
f"{to_checksum_address(self.node_address)} and "
f"{to_checksum_address(partner)}. However the channel id "
f"on-chain {queried_channel_identifier} and the provided "
f"id {channel_identifier} do not match."
)
raise BrokenPreconditionError(msg)
if safety_deprecation_switch:
msg = "This token_network has been deprecated."
raise BrokenPreconditionError(msg)
if channel_onchain_detail.state != ChannelState.OPENED:
msg = (
f"The channel was not opened at the provided block "
f"({given_block_identifier}). This call should never have "
f"been attempted."
)
raise BrokenPreconditionError(msg)
amount_to_deposit = total_deposit - our_details.deposit
if total_deposit <= our_details.deposit:
msg = (
f"Current total deposit ({our_details.deposit}) is already larger "
f"than the requested total deposit amount ({total_deposit})"
)
raise BrokenPreconditionError(msg)
total_channel_deposit = total_deposit + partner_details.deposit
if total_channel_deposit > UINT256_MAX:
raise BrokenPreconditionError("Deposit overflow")
if total_deposit > channel_participant_deposit_limit:
msg = (
f"Deposit of {total_deposit} is larger than the "
f"channel participant deposit limit"
)
raise BrokenPreconditionError(msg)
if network_total_deposit + amount_to_deposit > token_network_deposit_limit:
msg = (
f"Deposit of {amount_to_deposit} will have "
f"exceeded the token network deposit limit."
)
raise BrokenPreconditionError(msg)
if current_balance < amount_to_deposit:
msg = (
f"new_total_deposit - previous_total_deposit = {amount_to_deposit} can "
f"not be larger than the available balance {current_balance}, "
f"for token at address {to_checksum_address(self.token.address)}"
)
raise BrokenPreconditionError(msg)
log_details = {
"node": to_checksum_address(self.node_address),
"contract": to_checksum_address(self.address),
"participant": to_checksum_address(self.node_address),
"receiver": to_checksum_address(partner),
"channel_identifier": channel_identifier,
"total_deposit": total_deposit,
"previous_total_deposit": our_details.deposit,
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "set_total_deposit", log_details):
self._set_total_deposit(
channel_identifier=channel_identifier,
total_deposit=total_deposit,
previous_total_deposit=our_details.deposit,
partner=partner,
log_details=log_details,
)
def _set_total_deposit(
self,
channel_identifier: ChannelID,
total_deposit: TokenAmount,
partner: Address,
previous_total_deposit: TokenAmount,
log_details: Dict[Any, Any],
) -> None:
checking_block = self.client.get_checking_block()
amount_to_deposit = TokenAmount(total_deposit - previous_total_deposit)
# If there are channels being set up concurrently either the
# allowance must be accumulated *or* the calls to `approve` and
# `setTotalDeposit` must be serialized. This is necessary otherwise
# the deposit will fail.
#
# Calls to approve and setTotalDeposit are serialized with the
# deposit_lock to avoid transaction failure, because with two
# concurrent deposits, we may have the transactions executed in the
# following order
#
# - approve
# - approve
# - setTotalDeposit
# - setTotalDeposit
#
# in which case the second `approve` will overwrite the first,
# and the first `setTotalDeposit` will consume the allowance,
# making the second deposit fail.
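        # `approve` is called with the delta rather than the new total,
        # because `setTotalDeposit` pulls exactly
        # `total_deposit - previous_total_deposit` tokens from this account
        # via `transferFrom`.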
self.token.approve(allowed_address=Address(self.address), allowance=amount_to_deposit)
gas_limit = self.proxy.estimate_gas(
checking_block,
"setTotalDeposit",
channel_identifier=channel_identifier,
participant=self.node_address,
total_deposit=total_deposit,
partner=partner,
)
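        # `estimate_gas` is expected to return None when the transaction would
        # revert at the checking block; in that case the `else` branch below
        # inspects the chain state to report why the deposit cannot be done.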
if gas_limit:
gas_limit = safe_gas_limit(
gas_limit, self.metadata.gas_measurements["TokenNetwork.setTotalDeposit"]
)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
"setTotalDeposit",
gas_limit,
channel_identifier=channel_identifier,
participant=self.node_address,
total_deposit=total_deposit,
partner=partner,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
# Because the gas estimation succeeded it is known that:
# - The channel id was correct, i.e. this node and partner are
                #   participants of the channel with id `channel_identifier`.
# - The channel was open.
# - The account had enough tokens to deposit
# - The account had enough balance to pay for the gas (however
# there is a race condition for multiple transactions #3890)
failed_at_blockhash = encode_hex(failed_receipt["blockHash"])
failed_at_blocknumber = failed_receipt["blockNumber"]
if failed_receipt["cumulativeGasUsed"] == gas_limit:
msg = (
f"setTotalDeposit failed and all gas was used "
f"({gas_limit}). Estimate gas may have underestimated "
f"setTotalDeposit, or succeeded even though an assert is "
f"triggered, or the smart contract code has an "
f"conditional assert."
)
raise RaidenRecoverableError(msg)
safety_deprecation_switch = self.safety_deprecation_switch(
block_identifier=failed_at_blockhash
)
if safety_deprecation_switch:
msg = "This token_network has been deprecated."
raise RaidenRecoverableError(msg)
# Query the channel state when the transaction was mined
# to check for transaction races
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=failed_at_blockhash,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
                    detail_for=partner,
                    partner=self.node_address,
block_identifier=failed_at_blockhash,
)
channel_data = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
if channel_data.state == ChannelState.CLOSED:
msg = "Deposit failed because the channel was closed meanwhile"
raise RaidenRecoverableError(msg)
if channel_data.state == ChannelState.SETTLED:
msg = "Deposit failed because the channel was settled meanwhile"
raise RaidenRecoverableError(msg)
if channel_data.state == ChannelState.REMOVED:
msg = "Deposit failed because the channel was settled and unlocked meanwhile"
raise RaidenRecoverableError(msg)
deposit_amount = total_deposit - our_details.deposit
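                # Note: `deposit_amount` is recomputed against the on-chain
                # deposit at the failure block, so it may differ from
                # `amount_to_deposit` if a concurrent deposit was mined in the
                # meantime.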
# If an overflow is possible then we are interacting with a bad token.
# This must not crash the client, because it is not a Raiden bug,
# and otherwise this could be an attack vector.
total_channel_deposit = total_deposit + partner_details.deposit
if total_channel_deposit > UINT256_MAX:
raise RaidenRecoverableError("Deposit overflow")
total_deposit_done = our_details.deposit >= total_deposit
if total_deposit_done:
raise RaidenRecoverableError("Requested total deposit was already performed")
token_network_deposit_limit = self.token_network_deposit_limit(
block_identifier=failed_receipt["blockHash"]
)
network_total_deposit = self.token.balance_of(
address=Address(self.address), block_identifier=failed_receipt["blockHash"]
)
if network_total_deposit + deposit_amount > token_network_deposit_limit:
msg = (
f"Deposit of {deposit_amount} would have "
f"exceeded the token network deposit limit."
)
raise RaidenRecoverableError(msg)
channel_participant_deposit_limit = self.channel_participant_deposit_limit(
block_identifier=failed_receipt["blockHash"]
)
if total_deposit > channel_participant_deposit_limit:
msg = (
f"Deposit of {total_deposit} is larger than the "
f"channel participant deposit limit"
)
raise RaidenRecoverableError(msg)
                has_sufficient_balance = (
                    self.token.balance_of(self.node_address, failed_at_blocknumber)
                    >= amount_to_deposit
                )
if not has_sufficient_balance:
raise RaidenRecoverableError(
"The account does not have enough balance to complete the deposit"
)
allowance = self.token.allowance(
owner=self.node_address,
spender=Address(self.address),
block_identifier=failed_at_blockhash,
)
if allowance < amount_to_deposit:
msg = (
f"The allowance of the {amount_to_deposit} deposit changed. "
f"Check concurrent deposits "
f"for the same token network but different proxies."
)
raise RaidenRecoverableError(msg)
latest_deposit = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=failed_at_blockhash,
).deposit
if latest_deposit < total_deposit:
raise RaidenRecoverableError("The tokens were not transferred")
# Here, we don't know what caused the failure. But because we are
# dealing with an external token contract, it is assumed that it is
# malicious and therefore we raise a Recoverable error here.
raise RaidenRecoverableError("Unlocked failed for an unknown reason")
else:
# The latest block can not be used reliably because of reorgs,
# therefore every call using this block has to handle pruned data.
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="setTotalDeposit",
transaction_executed=False,
required_gas=self.metadata.gas_measurements["TokenNetwork.setTotalDeposit"],
block_identifier=failed_at_blocknumber,
)
safety_deprecation_switch = self.safety_deprecation_switch(
block_identifier=failed_at_blockhash
)
if safety_deprecation_switch:
msg = "This token_network has been deprecated."
raise RaidenRecoverableError(msg)
allowance = self.token.allowance(
owner=self.node_address,
spender=Address(self.address),
block_identifier=failed_at_blockhash,
)
            has_sufficient_balance = (
                self.token.balance_of(self.node_address, failed_at_blocknumber)
                >= amount_to_deposit
            )
if allowance < amount_to_deposit:
msg = (
"The allowance is insufficient. Check concurrent deposits "
"for the same token network but different proxies."
)
raise RaidenRecoverableError(msg)
            if not has_sufficient_balance:
                msg = "The address doesn't have enough tokens"
raise RaidenRecoverableError(msg)
queried_channel_identifier = self.get_channel_identifier_or_none(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=failed_at_blockhash,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
                detail_for=partner,
                partner=self.node_address,
block_identifier=failed_at_blockhash,
)
channel_data = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
token_network_deposit_limit = self.token_network_deposit_limit(
block_identifier=failed_at_blockhash
)
channel_participant_deposit_limit = self.channel_participant_deposit_limit(
block_identifier=failed_at_blockhash
)
total_channel_deposit = total_deposit + partner_details.deposit
network_total_deposit = self.token.balance_of(
Address(self.address), failed_at_blocknumber
)
            # This check can only be done while the channel is in the open or
            # closed state, because from the settled state onwards the channel
            # id is removed from the smart contract.
is_invalid_channel_id = (
channel_data.state in (ChannelState.OPENED, ChannelState.CLOSED)
and queried_channel_identifier != channel_identifier
)
if is_invalid_channel_id:
msg = (
f"There is an open channel with the id {channel_identifier}. "
f"However addresses {to_checksum_address(self.node_address)} "
f"and {to_checksum_address(partner)} are not participants of "
f"that channel. The correct id is {queried_channel_identifier}."
)
raise RaidenUnrecoverableError(msg) # This error is considered a bug
if channel_data.state == ChannelState.CLOSED:
msg = "Deposit was prohibited because the channel is closed"
raise RaidenRecoverableError(msg)
if channel_data.state == ChannelState.SETTLED:
msg = "Deposit was prohibited because the channel is settled"
raise RaidenRecoverableError(msg)
if channel_data.state == ChannelState.REMOVED:
msg = "Deposit was prohibited because the channel is settled and unlocked"
raise RaidenRecoverableError(msg)
if our_details.deposit >= total_deposit:
msg = "Attempted deposit has already been done"
raise RaidenRecoverableError(msg)
# Check if deposit is being made on a nonexistent channel
if channel_data.state == ChannelState.NONEXISTENT:
msg = (
f"Channel between participant {to_checksum_address(self.node_address)} "
f"and {to_checksum_address(partner)} does not exist"
)
raise RaidenUnrecoverableError(msg)
if total_channel_deposit >= UINT256_MAX:
raise RaidenRecoverableError("Deposit overflow")
if total_deposit > channel_participant_deposit_limit:
msg = (
f"Deposit of {total_deposit} exceeded the "
f"channel participant deposit limit"
)
raise RaidenRecoverableError(msg)
if network_total_deposit + amount_to_deposit > token_network_deposit_limit:
msg = f"Deposit of {amount_to_deposit} exceeded the token network deposit limit."
raise RaidenRecoverableError(msg)
raise RaidenRecoverableError(
f"Deposit gas estimatation failed for unknown reasons. Reference "
f"block {failed_at_blockhash} {failed_at_blocknumber}."
)
def set_total_withdraw(
self,
given_block_identifier: BlockSpecification,
channel_identifier: ChannelID,
total_withdraw: WithdrawAmount,
expiration_block: BlockExpiration,
participant_signature: Signature,
partner_signature: Signature,
participant: Address,
partner: Address,
) -> None:
""" Set total token withdraw in the channel to total_withdraw.
Raises:
ValueError: If provided total_withdraw is not an integer value.
"""
if not isinstance(total_withdraw, int):
raise ValueError("total_withdraw needs to be an integer number.")
if total_withdraw <= 0:
raise ValueError("total_withdraw should be larger than zero.")
# `channel_operations_lock` is used to serialize conflicting channel
# operations. E.g. this withdraw and a close.
with self.channel_operations_lock[partner]:
try:
channel_onchain_detail = self._detail_channel(
participant1=participant,
participant2=partner,
block_identifier=given_block_identifier,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=participant,
partner=partner,
block_identifier=given_block_identifier,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=participant,
block_identifier=given_block_identifier,
)
given_block_number = self.client.get_block(given_block_identifier)["number"]
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if channel_onchain_detail.state != ChannelState.OPENED:
msg = (
f"The channel was not opened at the provided block "
f"({given_block_identifier}). This call should never have "
f"been attempted."
)
raise RaidenUnrecoverableError(msg)
if our_details.withdrawn >= total_withdraw:
msg = (
f"The provided total_withdraw amount on-chain is "
f"{our_details.withdrawn}. Requested total withdraw "
f"{total_withdraw} did not increase."
)
raise WithdrawMismatch(msg)
total_channel_deposit = our_details.deposit + partner_details.deposit
total_channel_withdraw = total_withdraw + partner_details.withdrawn
if total_channel_withdraw > total_channel_deposit:
msg = (
f"The total channel withdraw amount "
f"{total_channel_withdraw} is larger than the total channel "
f"deposit of {total_channel_deposit}."
)
raise WithdrawMismatch(msg)
if expiration_block <= given_block_number:
msg = (
f"The current block number {given_block_number} is "
f"already at expiration block {expiration_block} or "
"later."
)
raise BrokenPreconditionError(msg)
if participant_signature == EMPTY_SIGNATURE:
msg = "set_total_withdraw requires a valid participant signature"
raise RaidenUnrecoverableError(msg)
if partner_signature == EMPTY_SIGNATURE:
msg = "set_total_withdraw requires a valid partner signature"
raise RaidenUnrecoverableError(msg)
canonical_identifier = CanonicalIdentifier(
chain_identifier=self.proxy.contract.functions.chain_id().call(),
token_network_address=self.address,
channel_identifier=channel_identifier,
)
participant_signed_data = pack_withdraw(
participant=participant,
total_withdraw=total_withdraw,
canonical_identifier=canonical_identifier,
expiration_block=expiration_block,
)
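            # The partner signs the very same withdraw message, so
            # `partner_signed_data` below is packed from identical fields and
            # only the recovered signer is expected to differ.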
try:
participant_recovered_address = recover(
data=participant_signed_data, signature=participant_signature
)
except Exception: # pylint: disable=broad-except
raise RaidenUnrecoverableError(
"Couldn't verify the participant withdraw signature"
)
else:
if participant_recovered_address != participant:
raise RaidenUnrecoverableError("Invalid withdraw participant signature")
partner_signed_data = pack_withdraw(
participant=participant,
total_withdraw=total_withdraw,
canonical_identifier=canonical_identifier,
expiration_block=expiration_block,
)
try:
partner_recovered_address = recover(
data=partner_signed_data, signature=partner_signature
)
except Exception: # pylint: disable=broad-except
raise RaidenUnrecoverableError(
"Couldn't verify the partner withdraw signature"
)
else:
if partner_recovered_address != partner:
raise RaidenUnrecoverableError("Invalid withdraw partner signature")
log_details = {
"node": to_checksum_address(participant),
"contract": to_checksum_address(self.address),
"participant": to_checksum_address(participant),
"partner": to_checksum_address(partner),
"total_withdraw": total_withdraw,
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "set_total_withdraw", log_details):
self._set_total_withdraw(
channel_identifier=channel_identifier,
total_withdraw=total_withdraw,
expiration_block=expiration_block,
participant=participant,
partner=partner,
partner_signature=partner_signature,
participant_signature=participant_signature,
log_details=log_details,
)
def _set_total_withdraw(
self,
channel_identifier: ChannelID,
total_withdraw: WithdrawAmount,
expiration_block: BlockExpiration,
participant: Address,
partner: Address,
partner_signature: Signature,
participant_signature: Signature,
log_details: Dict[Any, Any],
) -> None:
checking_block = self.client.get_checking_block()
gas_limit = self.proxy.estimate_gas(
checking_block,
"setTotalWithdraw",
channel_identifier=channel_identifier,
participant=participant,
total_withdraw=total_withdraw,
expiration_block=expiration_block,
partner_signature=partner_signature,
participant_signature=participant_signature,
)
if gas_limit:
gas_limit = safe_gas_limit(
gas_limit, self.metadata.gas_measurements["TokenNetwork.setTotalWithdraw"]
)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
function_name="setTotalWithdraw",
startgas=gas_limit,
channel_identifier=channel_identifier,
participant=participant,
total_withdraw=total_withdraw,
expiration_block=expiration_block,
partner_signature=partner_signature,
participant_signature=participant_signature,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
# Because the gas estimation succeeded it is known that:
# - The channel was open.
# - The total withdraw amount increased.
# - The account had enough balance to pay for the gas (however
# there is a race condition for multiple transactions #3890)
failed_at_blockhash = encode_hex(failed_receipt["blockHash"])
failed_at_blocknumber = failed_receipt["blockNumber"]
if failed_receipt["cumulativeGasUsed"] == gas_limit:
msg = (
f"update transfer failed and all gas was used "
f"({gas_limit}). Estimate gas may have underestimated "
f"update transfer, or succeeded even though an assert is "
f"triggered, or the smart contract code has an "
f"conditional assert."
)
raise RaidenUnrecoverableError(msg)
# Query the current state to check for transaction races
detail = self._detail_channel(
participant1=participant,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=participant,
partner=partner,
block_identifier=failed_at_blockhash,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=participant,
block_identifier=failed_at_blockhash,
)
total_withdraw_done = our_details.withdrawn >= total_withdraw
if total_withdraw_done:
raise RaidenRecoverableError("Requested total withdraw was already performed")
if detail.state > ChannelState.OPENED:
msg = (
f"setTotalWithdraw failed because the channel closed "
f"before the transaction was mined. "
f"current_state={detail.state}"
)
raise RaidenRecoverableError(msg)
if detail.state < ChannelState.OPENED:
msg = (
f"setTotalWithdraw failed because the channel never "
f"existed. current_state={detail.state}"
)
raise RaidenUnrecoverableError(msg)
if expiration_block <= failed_at_blocknumber:
msg = (
f"setTotalWithdraw failed because the transaction was "
f"mined after the withdraw expired "
f"expiration_block={expiration_block} "
f"transation_mined_at={failed_at_blocknumber}"
)
raise RaidenRecoverableError(msg)
total_channel_deposit = our_details.deposit + partner_details.deposit
total_channel_withdraw = total_withdraw + partner_details.withdrawn
if total_channel_withdraw > total_channel_deposit:
msg = (
f"The total channel withdraw amount "
f"{total_channel_withdraw} became larger than the total channel "
f"deposit of {total_channel_deposit}."
)
raise WithdrawMismatch(msg)
raise RaidenUnrecoverableError("SetTotalwithdraw failed for an unknown reason")
else:
# The transaction would have failed if sent, figure out why.
# The latest block can not be used reliably because of reorgs,
# therefore every call using this block has to handle pruned data.
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="total_withdraw",
transaction_executed=False,
required_gas=self.metadata.gas_measurements["TokenNetwork.setTotalWithdraw"],
block_identifier=failed_at_blocknumber,
)
detail = self._detail_channel(
participant1=participant,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=participant,
partner=partner,
block_identifier=failed_at_blockhash,
)
if detail.state > ChannelState.OPENED:
msg = (
f"cannot call setTotalWithdraw on a channel that is not open. "
f"current_state={detail.state}"
)
raise RaidenRecoverableError(msg)
if detail.state < ChannelState.OPENED:
msg = (
f"cannot call setTotalWithdraw on a channel that does not exist. "
f"current_state={detail.state}"
)
raise RaidenUnrecoverableError(msg)
if expiration_block <= failed_at_blocknumber:
msg = (
f"setTotalWithdraw would have failed because current block "
f"has already reached the withdraw expiration "
f"expiration_block={expiration_block} "
f"transation_checked_at={failed_at_blocknumber}"
)
raise RaidenRecoverableError(msg)
total_withdraw_done = our_details.withdrawn >= total_withdraw
if total_withdraw_done:
raise RaidenRecoverableError("Requested total withdraw was already performed")
raise RaidenUnrecoverableError(
f"setTotalWithdraw gas estimation failed for an unknown reason. "
f"Reference block {failed_at_blockhash} {failed_at_blocknumber}."
)
def close(
self,
channel_identifier: ChannelID,
partner: Address,
balance_hash: BalanceHash,
nonce: Nonce,
additional_hash: AdditionalHash,
non_closing_signature: Signature,
closing_signature: Signature,
given_block_identifier: BlockSpecification,
) -> None:
""" Close the channel using the provided balance proof.
Note:
            This method must *not* be called without updating the application
            state, otherwise the node may accept new transfers which cannot be
            used, because the closer is not allowed to update the balance
            proof submitted on-chain after closing.
Raises:
RaidenRecoverableError: If the close call failed but it is not
critical.
RaidenUnrecoverableError: If the operation was illegal at the
`given_block_identifier` or if the channel changes in a way that
cannot be recovered.
"""
canonical_identifier = CanonicalIdentifier(
chain_identifier=self.proxy.contract.functions.chain_id().call(),
token_network_address=self.address,
channel_identifier=channel_identifier,
)
our_signed_data = pack_signed_balance_proof(
msg_type=MessageTypeId.BALANCE_PROOF,
nonce=nonce,
balance_hash=balance_hash,
additional_hash=additional_hash,
canonical_identifier=canonical_identifier,
partner_signature=non_closing_signature,
)
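        # The closing signature covers the partner's balance proof data
        # together with the partner's own signature (`non_closing_signature`),
        # so this node explicitly authorizes the exact balance proof it is
        # about to submit.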
try:
our_recovered_address = recover(data=our_signed_data, signature=closing_signature)
except Exception: # pylint: disable=broad-except
raise RaidenUnrecoverableError("Couldn't verify the closing signature")
else:
if our_recovered_address != self.node_address:
raise RaidenUnrecoverableError("Invalid closing signature")
if non_closing_signature != EMPTY_SIGNATURE:
partner_signed_data = pack_balance_proof(
nonce=nonce,
balance_hash=balance_hash,
additional_hash=additional_hash,
canonical_identifier=canonical_identifier,
)
try:
partner_recovered_address = recover(
data=partner_signed_data, signature=non_closing_signature
)
# InvalidSignature is raised by raiden.utils.signer.recover if signature
# is not bytes or has the incorrect length
#
# ValueError is raised if the PublicKey instantiation failed, let it
# propagate because it's a memory pressure problem.
#
# Exception is raised if the public key recovery failed.
except Exception: # pylint: disable=broad-except
raise RaidenUnrecoverableError("Couldn't verify the non-closing signature")
else:
if partner_recovered_address != partner:
raise RaidenUnrecoverableError("Invalid non-closing signature")
try:
channel_onchain_detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=given_block_identifier,
channel_identifier=channel_identifier,
)
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
onchain_channel_identifier = channel_onchain_detail.channel_identifier
if onchain_channel_identifier != channel_identifier:
msg = (
f"The provided channel identifier does not match the value "
f"on-chain at the provided block ({given_block_identifier}). "
f"This call should never have been attempted. "
f"provided_channel_identifier={channel_identifier}, "
f"onchain_channel_identifier={channel_onchain_detail.channel_identifier}"
)
raise RaidenUnrecoverableError(msg)
if channel_onchain_detail.state != ChannelState.OPENED:
msg = (
f"The channel was not open at the provided block "
f"({given_block_identifier}). This call should never have "
f"been attempted."
)
raise RaidenUnrecoverableError(msg)
log_details = {
"node": to_checksum_address(self.node_address),
"contract": to_checksum_address(self.address),
"partner": to_checksum_address(partner),
"nonce": nonce,
"balance_hash": encode_hex(balance_hash),
"additional_hash": encode_hex(additional_hash),
"non_closing_signature": encode_hex(non_closing_signature),
"closing_signature": encode_hex(closing_signature),
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "close", log_details):
self._close(
channel_identifier=channel_identifier,
partner=partner,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
non_closing_signature=non_closing_signature,
closing_signature=closing_signature,
log_details=log_details,
)
def _close(
self,
channel_identifier: ChannelID,
partner: Address,
balance_hash: BalanceHash,
nonce: Nonce,
additional_hash: AdditionalHash,
non_closing_signature: Signature,
closing_signature: Signature,
log_details: Dict[Any, Any],
) -> None:
# `channel_operations_lock` is used to serialize conflicting channel
# operations. E.g. this close and a deposit or withdraw.
with self.channel_operations_lock[partner]:
checking_block = self.client.get_checking_block()
gas_limit = self.proxy.estimate_gas(
checking_block,
"closeChannel",
channel_identifier=channel_identifier,
non_closing_participant=partner,
closing_participant=self.node_address,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
non_closing_signature=non_closing_signature,
closing_signature=closing_signature,
)
if gas_limit:
gas_limit = safe_gas_limit(
gas_limit, self.metadata.gas_measurements["TokenNetwork.closeChannel"]
)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
"closeChannel",
gas_limit,
channel_identifier=channel_identifier,
non_closing_participant=partner,
closing_participant=self.node_address,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
non_closing_signature=non_closing_signature,
closing_signature=closing_signature,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
# Because the gas estimation succeeded it is known that:
# - The channel existed.
# - The channel was at the state open.
# - The account had enough balance to pay for the gas
# (however there is a race condition for multiple
# transactions #3890)
#
# So the only reason for the transaction to fail is if our
# partner closed it before (assuming exclusive usage of the
# account and no compiler bugs)
# These checks do not have problems with race conditions because
# `poll`ing waits for the transaction to be confirmed.
mining_block = failed_receipt["blockNumber"]
if failed_receipt["cumulativeGasUsed"] == gas_limit:
msg = (
"update transfer failed and all gas was used. Estimate gas "
"may have underestimated update transfer, or succeeded even "
"though an assert is triggered, or the smart contract code "
"has an conditional assert."
)
raise RaidenUnrecoverableError(msg)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=mining_block,
)
if partner_details.is_closer:
msg = "Channel was already closed by channel partner first."
raise RaidenRecoverableError(msg)
raise RaidenUnrecoverableError("closeChannel call failed")
else:
# The transaction would have failed if sent, figure out why.
# The latest block can not be used reliably because of reorgs,
# therefore every call using this block has to handle pruned data.
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="closeChannel",
                    transaction_executed=False,
required_gas=self.metadata.gas_measurements["TokenNetwork.closeChannel"],
block_identifier=failed_at_blocknumber,
)
detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
if detail.state < ChannelState.OPENED:
msg = (
f"cannot call close channel has not been opened yet. "
f"current_state={detail.state}"
)
raise RaidenUnrecoverableError(msg)
if detail.state >= ChannelState.CLOSED:
msg = (
f"cannot call close on a channel that has been closed already. "
f"current_state={detail.state}"
)
raise RaidenRecoverableError(msg)
raise RaidenUnrecoverableError(
f"close channel gas estimation failed for an unknown "
f"reason. Reference block {failed_at_blockhash} "
f"{failed_at_blocknumber}."
)
def update_transfer(
self,
channel_identifier: ChannelID,
partner: Address,
balance_hash: BalanceHash,
nonce: Nonce,
additional_hash: AdditionalHash,
closing_signature: Signature,
non_closing_signature: Signature,
given_block_identifier: BlockSpecification,
) -> None:
        if balance_hash == EMPTY_BALANCE_HASH:
raise RaidenUnrecoverableError("update_transfer called with an empty balance_hash")
if nonce <= 0 or nonce > UINT256_MAX:
raise RaidenUnrecoverableError("update_transfer called with an invalid nonce")
canonical_identifier = CanonicalIdentifier(
chain_identifier=self.proxy.contract.functions.chain_id().call(),
token_network_address=self.address,
channel_identifier=channel_identifier,
)
partner_signed_data = pack_balance_proof(
nonce=nonce,
balance_hash=balance_hash,
additional_hash=additional_hash,
canonical_identifier=canonical_identifier,
)
our_signed_data = pack_signed_balance_proof(
msg_type=MessageTypeId.BALANCE_PROOF_UPDATE,
nonce=nonce,
balance_hash=balance_hash,
additional_hash=additional_hash,
canonical_identifier=canonical_identifier,
partner_signature=closing_signature,
)
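        # Two different packed messages are verified here: the closing
        # participant signed a plain balance proof, while this node
        # counter-signed a BALANCE_PROOF_UPDATE message that additionally
        # embeds the closing signature. Both signatures are later passed to
        # `updateNonClosingBalanceProof`.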
try:
partner_recovered_address = recover(
data=partner_signed_data, signature=closing_signature
)
our_recovered_address = recover(data=our_signed_data, signature=non_closing_signature)
# InvalidSignature is raised by raiden.utils.signer.recover if signature
# is not bytes or has the incorrect length
#
# ValueError is raised if the PublicKey instantiation failed, let it
# propagate because it's a memory pressure problem.
#
# Exception is raised if the public key recovery failed.
except Exception: # pylint: disable=broad-except
raise RaidenUnrecoverableError("Couldn't verify the balance proof signature")
else:
if our_recovered_address != self.node_address:
raise RaidenUnrecoverableError("Invalid balance proof signature")
if partner_recovered_address != partner:
raise RaidenUnrecoverableError("Invalid update transfer signature")
# Check the preconditions for calling updateNonClosingBalanceProof at
# the time the event was emitted.
try:
channel_onchain_detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=given_block_identifier,
channel_identifier=channel_identifier,
)
closer_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=given_block_identifier,
)
given_block_number = self.client.get_block(given_block_identifier)["number"]
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
# The latest channel is of no importance for the update transfer
            # precondition checks; the only constraint that has to be satisfied
            # is that the channel whose id was provided is in the correct
            # state. For this reason `getChannelIdentifier` is not called, as
# for version 0.4.0 that would return the identifier of the latest
# channel.
if channel_onchain_detail.state != ChannelState.CLOSED:
msg = (
f"The channel was not closed at the provided block "
f"({given_block_identifier}). This call should never have "
f"been attempted."
)
raise RaidenUnrecoverableError(msg)
if channel_onchain_detail.settle_block_number < given_block_number:
msg = (
"update transfer cannot be called after the settlement "
"period, this call should never have been attempted."
)
raise RaidenUnrecoverableError(msg)
if closer_details.nonce == nonce:
msg = (
"update transfer was already done, this call should never "
"have been attempted."
)
raise RaidenRecoverableError(msg)
log_details = {
"contract": to_checksum_address(self.address),
"node": to_checksum_address(self.node_address),
"partner": to_checksum_address(partner),
"nonce": nonce,
"balance_hash": encode_hex(balance_hash),
"additional_hash": encode_hex(additional_hash),
"closing_signature": encode_hex(closing_signature),
"non_closing_signature": encode_hex(non_closing_signature),
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "update_transfer", log_details):
self._update_transfer(
channel_identifier=channel_identifier,
partner=partner,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
closing_signature=closing_signature,
non_closing_signature=non_closing_signature,
log_details=log_details,
)
def _update_transfer(
self,
channel_identifier: ChannelID,
partner: Address,
balance_hash: BalanceHash,
nonce: Nonce,
additional_hash: AdditionalHash,
closing_signature: Signature,
non_closing_signature: Signature,
log_details: Dict[Any, Any],
) -> None:
checking_block = self.client.get_checking_block()
gas_limit = self.proxy.estimate_gas(
checking_block,
"updateNonClosingBalanceProof",
channel_identifier=channel_identifier,
closing_participant=partner,
non_closing_participant=self.node_address,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
closing_signature=closing_signature,
non_closing_signature=non_closing_signature,
)
if gas_limit:
gas_limit = safe_gas_limit(
gas_limit,
self.metadata.gas_measurements["TokenNetwork.updateNonClosingBalanceProof"],
)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
"updateNonClosingBalanceProof",
gas_limit,
channel_identifier=channel_identifier,
closing_participant=partner,
non_closing_participant=self.node_address,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
closing_signature=closing_signature,
non_closing_signature=non_closing_signature,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
# Because the gas estimation succeeded it is known that:
# - The channel existed.
# - The channel was at the state closed.
# - The partner node was the closing address.
# - The account had enough balance to pay for the gas (however
# there is a race condition for multiple transactions #3890)
# These checks do not have problems with race conditions because
# `poll`ing waits for the transaction to be confirmed.
mining_block = failed_receipt["blockNumber"]
if failed_receipt["cumulativeGasUsed"] == gas_limit:
msg = (
"update transfer failed and all gas was used. Estimate gas "
"may have underestimated update transfer, or succeeded even "
"though an assert is triggered, or the smart contract code "
"has an conditional assert."
)
raise RaidenUnrecoverableError(msg)
# Query the current state to check for transaction races
channel_data = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=mining_block,
channel_identifier=channel_identifier,
)
# The channel identifier can be set to 0 if the channel is
# settled, or it could have a higher value if a new channel was
# opened. A lower value is an unrecoverable error.
was_channel_gone = (
channel_data.channel_identifier == 0
or channel_data.channel_identifier > channel_identifier
)
if was_channel_gone:
msg = (
f"The provided channel identifier does not match the value "
f"on-chain at the block the update transfer was mined ({mining_block}). "
f"provided_channel_identifier={channel_identifier}, "
f"onchain_channel_identifier={channel_data.channel_identifier}"
)
raise RaidenRecoverableError(msg)
if channel_data.state >= ChannelState.SETTLED:
# This should never happen if the settlement window and gas
# price estimation is done properly.
#
# This is a race condition that cannot be prevented,
# therefore it is a recoverable error.
msg = "Channel was already settled when update transfer was mined."
raise RaidenRecoverableError(msg)
if channel_data.settle_block_number < mining_block:
# The channel is cleared from the smart contract's storage
# on call to settle, this means that settle_block_number
# may be zero, therefore this check must be done after the
# channel's state check.
#
# This is a race condition that cannot be prevented,
# therefore it is a recoverable error.
msg = "update transfer was mined after the settlement window."
raise RaidenRecoverableError(msg)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=mining_block,
)
if partner_details.nonce != nonce:
# A higher value should be impossible because a signature
# from this node is necessary and this node should send the
# partner's balance proof with the highest nonce
#
# A lower value means some unexpected failure.
msg = (
f"update transfer failed, the on-chain nonce is higher then our expected "
f"value expected={nonce} actual={partner_details.nonce}"
)
raise RaidenUnrecoverableError(msg)
if channel_data.state < ChannelState.CLOSED:
msg = (
f"The channel state changed unexpectedly. "
f"block=({mining_block}) onchain_state={channel_data.state}"
)
raise RaidenUnrecoverableError(msg)
raise RaidenUnrecoverableError("update transfer failed for an unknown reason")
else:
# The transaction would have failed if sent, figure out why.
# The latest block can not be used reliably because of reorgs,
# therefore every call using this block has to handle pruned data.
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="updateNonClosingBalanceProof",
transaction_executed=False,
required_gas=self.metadata.gas_measurements[
"TokenNetwork.updateNonClosingBalanceProof"
],
block_identifier=failed_at_blocknumber,
)
detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
if detail.state < ChannelState.CLOSED:
msg = (
f"cannot call update_transfer channel has not been closed yet. "
f"current_state={detail.state}"
)
raise RaidenUnrecoverableError(msg)
if detail.state >= ChannelState.SETTLED:
msg = (
f"cannot call update_transfer channel has been settled already. "
f"current_state={detail.state}"
)
raise RaidenRecoverableError(msg)
if detail.settle_block_number < failed_at_blocknumber:
raise RaidenRecoverableError(
"update_transfer transation sent after settlement window"
)
# At this point it is known the channel is CLOSED on block
# `failed_at_blockhash`
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=failed_at_blockhash,
)
if not partner_details.is_closer:
raise RaidenUnrecoverableError(
"update_transfer cannot be sent if the partner did not close the channel"
)
raise RaidenUnrecoverableError(
f"update_transfer gas estimation failed for an unknown reason. "
f"Reference block {failed_at_blockhash} {failed_at_blocknumber}."
)
def unlock(
self,
channel_identifier: ChannelID,
sender: Address,
receiver: Address,
pending_locks: PendingLocksState,
given_block_identifier: BlockSpecification,
) -> None:
if not pending_locks:
raise ValueError("unlock cannot be done without pending locks")
# Check the preconditions for calling unlock at the time the event was
# emitted.
try:
channel_onchain_detail = self._detail_channel(
participant1=sender,
participant2=receiver,
block_identifier=given_block_identifier,
channel_identifier=channel_identifier,
)
sender_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=sender,
partner=receiver,
block_identifier=given_block_identifier,
)
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if channel_onchain_detail.state != ChannelState.SETTLED:
msg = (
f"The channel was not settled at the provided block "
f"({given_block_identifier}). This call should never have "
f"been attempted."
)
raise RaidenUnrecoverableError(msg)
local_locksroot = compute_locksroot(pending_locks)
if sender_details.locksroot != local_locksroot:
msg = (
f"The provided locksroot ({to_hex(local_locksroot)}) "
f"does correspond to the on-chain locksroot "
f"{to_hex(sender_details.locksroot)} for sender "
f"{to_checksum_address(sender)}."
)
raise RaidenUnrecoverableError(msg)
if sender_details.locked_amount == 0:
msg = (
f"The provided locked amount on-chain is 0. This should "
f"never happen because a lock with an amount 0 is forbidden"
f"{to_hex(sender_details.locksroot)} for sender "
f"{to_checksum_address(sender)}."
)
raise RaidenUnrecoverableError(msg)
log_details = {
"node": to_checksum_address(self.node_address),
"contract": to_checksum_address(self.address),
"sender": to_checksum_address(sender),
"receiver": to_checksum_address(receiver),
"pending_locks": pending_locks,
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "unlock", log_details):
self._unlock(
channel_identifier=channel_identifier,
sender=sender,
receiver=receiver,
pending_locks=pending_locks,
given_block_identifier=given_block_identifier,
log_details=log_details,
)
def _unlock(
self,
channel_identifier: ChannelID,
sender: Address,
receiver: Address,
pending_locks: PendingLocksState,
given_block_identifier: BlockSpecification,
log_details: Dict[Any, Any],
) -> None:
checking_block = self.client.get_checking_block()
leaves_packed = b"".join(pending_locks.locks)
gas_limit = self.proxy.estimate_gas(
checking_block,
"unlock",
channel_identifier=channel_identifier,
receiver=receiver,
sender=sender,
locks=encode_hex(leaves_packed),
)
if gas_limit:
gas_limit = safe_gas_limit(gas_limit, UNLOCK_TX_GAS_LIMIT)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
function_name="unlock",
startgas=gas_limit,
channel_identifier=channel_identifier,
receiver=receiver,
sender=sender,
locks=leaves_packed,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
# Because the gas estimation succeeded it is known that:
# - The channel was settled.
# - The channel had pending locks on-chain for that participant.
# - The account had enough balance to pay for the gas (however
# there is a race condition for multiple transactions #3890)
if failed_receipt["cumulativeGasUsed"] == gas_limit:
msg = (
f"Unlock failed and all gas was used "
f"({gas_limit}). Estimate gas may have underestimated "
f"unlock, or succeeded even though an assert is "
f"triggered, or the smart contract code has an "
f"conditional assert."
)
raise RaidenUnrecoverableError(msg)
# Query the current state to check for transaction races
sender_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=sender,
partner=receiver,
block_identifier=given_block_identifier,
)
is_unlock_done = sender_details.locksroot == LOCKSROOT_OF_NO_LOCKS
if is_unlock_done:
raise RaidenRecoverableError("The locks are already unlocked")
raise RaidenRecoverableError("Unlocked failed for an unknown reason")
else:
# The transaction has failed, figure out why.
# The latest block can not be used reliably because of reorgs,
# therefore every call using this block has to handle pruned data.
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="unlock",
transaction_executed=False,
required_gas=UNLOCK_TX_GAS_LIMIT,
block_identifier=failed_at_blocknumber,
)
detail = self._detail_channel(
participant1=sender,
participant2=receiver,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
sender_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=sender,
partner=receiver,
block_identifier=failed_at_blockhash,
)
if detail.state < ChannelState.SETTLED:
msg = (
f"cannot call unlock on a channel that has not been settled yet. "
f"current_state={detail.state}"
)
raise RaidenUnrecoverableError(msg)
is_unlock_done = sender_details.locksroot == LOCKSROOT_OF_NO_LOCKS
if is_unlock_done:
raise RaidenRecoverableError("The locks are already unlocked ")
raise RaidenUnrecoverableError(
f"unlock estimation failed for an unknown reason. Reference "
f"block {failed_at_blockhash} {failed_at_blocknumber}."
)
def settle(
self,
channel_identifier: ChannelID,
transferred_amount: TokenAmount,
locked_amount: LockedAmount,
locksroot: Locksroot,
partner: Address,
partner_transferred_amount: TokenAmount,
partner_locked_amount: LockedAmount,
partner_locksroot: Locksroot,
given_block_identifier: BlockSpecification,
) -> None:
# `channel_operations_lock` is used to serialize conflicting channel
# operations. E.g. this settle and a channel open.
with self.channel_operations_lock[partner]:
try:
channel_onchain_detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=given_block_identifier,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=given_block_identifier,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=given_block_identifier,
)
given_block_number = self.client.get_block(given_block_identifier)["number"]
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be
# performed.
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if channel_identifier != channel_onchain_detail.channel_identifier:
msg = (
f"The provided channel identifier {channel_identifier} "
f"does not match onchain channel_identifier "
f"{channel_onchain_detail.channel_identifier}."
)
raise BrokenPreconditionError(msg)
if given_block_number < channel_onchain_detail.settle_block_number:
msg = (
"settle cannot be called before the settlement "
"period ends, this call should never have been attempted."
)
raise BrokenPreconditionError(msg)
if channel_onchain_detail.state != ChannelState.CLOSED:
msg = (
f"The channel was not closed at the provided block "
f"({given_block_identifier}). This call should never have "
f"been attempted."
)
raise BrokenPreconditionError(msg)
our_balance_hash = hash_balance_data(
transferred_amount=transferred_amount,
locked_amount=locked_amount,
locksroot=locksroot,
)
partner_balance_hash = hash_balance_data(
transferred_amount=partner_transferred_amount,
locked_amount=partner_locked_amount,
locksroot=partner_locksroot,
)
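                # `hash_balance_data` must reproduce the balance hash stored
                # on-chain by closeChannel/updateNonClosingBalanceProof;
                # otherwise settleChannel is expected to revert.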
if our_details.balance_hash != our_balance_hash:
msg = "Our balance hash does not match the on-chain value"
raise BrokenPreconditionError(msg)
if partner_details.balance_hash != partner_balance_hash:
msg = "Partner balance hash does not match the on-chain value"
raise BrokenPreconditionError(msg)
log_details = {
"channel_identifier": channel_identifier,
"contract": to_checksum_address(self.address),
"node": to_checksum_address(self.node_address),
"our_address": to_checksum_address(self.node_address),
"transferred_amount": transferred_amount,
"locked_amount": locked_amount,
"locksroot": encode_hex(locksroot),
"partner": to_checksum_address(partner),
"partner_transferred_amount": partner_transferred_amount,
"partner_locked_amount": partner_locked_amount,
"partner_locksroot": encode_hex(partner_locksroot),
"given_block_identifier": format_block_id(given_block_identifier),
}
with log_transaction(log, "settle", log_details):
self._settle(
channel_identifier=channel_identifier,
transferred_amount=transferred_amount,
locked_amount=locked_amount,
locksroot=locksroot,
partner=partner,
partner_transferred_amount=partner_transferred_amount,
partner_locked_amount=partner_locked_amount,
partner_locksroot=partner_locksroot,
log_details=log_details,
)
def _settle(
self,
channel_identifier: ChannelID,
transferred_amount: TokenAmount,
locked_amount: LockedAmount,
locksroot: Locksroot,
partner: Address,
partner_transferred_amount: TokenAmount,
partner_locked_amount: LockedAmount,
partner_locksroot: Locksroot,
log_details: Dict[Any, Any],
) -> None:
checking_block = self.client.get_checking_block()
        # The smart contract requires the participant with the smaller
        # transferred + locked amount to be passed as participant1; the kwargs
        # below are ordered accordingly.
our_maximum = transferred_amount + locked_amount
partner_maximum = partner_transferred_amount + partner_locked_amount
our_bp_is_larger = our_maximum > partner_maximum
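        # Example: if our transferred + locked amount is 7 and the partner's
        # is 5, `our_bp_is_larger` is True and the partner is passed as
        # participant1 below, satisfying the contract's ordering requirement.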
if our_bp_is_larger:
kwargs = {
"participant1": partner,
"participant1_transferred_amount": partner_transferred_amount,
"participant1_locked_amount": partner_locked_amount,
"participant1_locksroot": partner_locksroot,
"participant2": self.node_address,
"participant2_transferred_amount": transferred_amount,
"participant2_locked_amount": locked_amount,
"participant2_locksroot": locksroot,
}
else:
kwargs = {
"participant1": self.node_address,
"participant1_transferred_amount": transferred_amount,
"participant1_locked_amount": locked_amount,
"participant1_locksroot": locksroot,
"participant2": partner,
"participant2_transferred_amount": partner_transferred_amount,
"participant2_locked_amount": partner_locked_amount,
"participant2_locksroot": partner_locksroot,
}
gas_limit = self.proxy.estimate_gas(
checking_block, "settleChannel", channel_identifier=channel_identifier, **kwargs
)
if gas_limit:
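# Gas estimation succeeded, so the settle transaction is expected to go
# through. If it still fails on-chain, the state at the failing block is
# queried below to raise a precise error.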
gas_limit = safe_gas_limit(
gas_limit, self.metadata.gas_measurements["TokenNetwork.settleChannel"]
)
log_details["gas_limit"] = gas_limit
transaction_hash = self.proxy.transact(
function_name="settleChannel",
startgas=gas_limit,
channel_identifier=channel_identifier,
**kwargs,
)
receipt = self.client.poll(transaction_hash)
failed_receipt = check_transaction_threw(receipt=receipt)
if failed_receipt:
failed_at_blockhash = encode_hex(failed_receipt["blockHash"])
failed_at_blocknumber = failed_receipt["blockNumber"]
self.proxy.rpc_client.check_for_insufficient_eth(
transaction_name="settleChannel",
transaction_executed=True,
required_gas=self.metadata.gas_measurements["TokenNetwork.settleChannel"],
block_identifier=failed_at_blocknumber,
)
channel_onchain_detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=failed_at_blockhash,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=failed_at_blockhash,
)
our_balance_hash = hash_balance_data(
transferred_amount=transferred_amount,
locked_amount=locked_amount,
locksroot=locksroot,
)
partner_balance_hash = hash_balance_data(
transferred_amount=partner_transferred_amount,
locked_amount=partner_locked_amount,
locksroot=partner_locksroot,
)
participants = get_channel_participants_from_open_event(
token_network=self,
channel_identifier=channel_identifier,
contract_manager=self.contract_manager,
from_block=self.metadata.filters_start_at,
)
if not participants:
msg = (
f"The provided channel identifier {channel_identifier} "
f"does not exist on-chain."
)
raise RaidenUnrecoverableError(msg)
if self.node_address not in participants:
msg = (
f"Settling a channel in which the current node is not a participant "
f"of is not allowed."
)
raise RaidenUnrecoverableError(msg)
if channel_onchain_detail.state in (ChannelState.SETTLED, ChannelState.REMOVED):
raise RaidenRecoverableError("Channel is already settled")
if channel_onchain_detail.state == ChannelState.OPENED:
raise RaidenUnrecoverableError("Channel is still open. It cannot be settled")
is_settle_window_over = (
channel_onchain_detail.state == ChannelState.CLOSED
and failed_at_blocknumber > channel_onchain_detail.settle_block_number
)
if not is_settle_window_over:
raise RaidenUnrecoverableError(
"Channel cannot be settled before settlement window is over"
)
# At this point, we are certain that the channel is still in
# CLOSED state. Therefore, on-chain balance_hashes being
# different than the provided ones should not happen.
if our_details.balance_hash != our_balance_hash:
msg = "Our balance hash does not match the on-chain value"
raise RaidenUnrecoverableError(msg)
if partner_details.balance_hash != partner_balance_hash:
msg = "Partner balance hash does not match the on-chain value"
raise RaidenUnrecoverableError(msg)
raise RaidenRecoverableError("Settle failed for an unknown reason")
else:
# The latest block can not be used reliably because of reorgs,
# therefore every call using this block has to handle pruned data.
failed_at = self.proxy.rpc_client.get_block("latest")
failed_at_blockhash = encode_hex(failed_at["hash"])
failed_at_blocknumber = failed_at["number"]
channel_onchain_detail = self._detail_channel(
participant1=self.node_address,
participant2=partner,
block_identifier=failed_at_blockhash,
channel_identifier=channel_identifier,
)
our_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=self.node_address,
partner=partner,
block_identifier=failed_at_blockhash,
)
partner_details = self._detail_participant(
channel_identifier=channel_identifier,
detail_for=partner,
partner=self.node_address,
block_identifier=failed_at_blockhash,
)
our_balance_hash = hash_balance_data(
transferred_amount=transferred_amount,
locked_amount=locked_amount,
locksroot=locksroot,
)
partner_balance_hash = hash_balance_data(
transferred_amount=partner_transferred_amount,
locked_amount=partner_locked_amount,
locksroot=partner_locksroot,
)
participants = get_channel_participants_from_open_event(
token_network=self,
channel_identifier=channel_identifier,
contract_manager=self.contract_manager,
from_block=self.metadata.filters_start_at,
)
if not participants:
msg = (
f"The provided channel identifier {channel_identifier} "
f"does not exist on-chain."
)
raise RaidenUnrecoverableError(msg)
if self.node_address not in participants:
msg = (
f"Settling a channel in which the current node is not a participant "
f"of is not allowed."
)
raise RaidenUnrecoverableError(msg)
if channel_onchain_detail.state in (ChannelState.SETTLED, ChannelState.REMOVED):
raise RaidenRecoverableError("Channel is already settled")
if channel_onchain_detail.state == ChannelState.OPENED:
raise RaidenUnrecoverableError("Channel is still open. It cannot be settled")
is_settle_window_over = (
channel_onchain_detail.state == ChannelState.CLOSED
and failed_at_blocknumber > channel_onchain_detail.settle_block_number
)
if not is_settle_window_over:
raise RaidenUnrecoverableError(
"Channel cannot be settled before settlement window is over"
)
# At this point, we are certain that the channel is still in
# CLOSED state. Therefore, on-chain balance_hashes being
# different than the provided ones should not happen.
if our_details.balance_hash != our_balance_hash:
msg = "Our balance hash does not match the on-chain value"
raise RaidenUnrecoverableError(msg)
if partner_details.balance_hash != partner_balance_hash:
msg = "Partner balance hash does not match the on-chain value"
raise RaidenUnrecoverableError(msg)
raise RaidenRecoverableError(
f"Settle gas estimation failed for an unknown reason. Reference "
f"block {failed_at_blockhash} {failed_at_blocknumber}."
)
|
import numpy as np
import torch
import os
import json
import pickle
import math
import pdb
import time
import argparse
from tqdm import tqdm
def get_input_frame(current_frame):
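# Map a representation frame index to the corresponding input frame:
# (current_frame - 1) * 2 + 11 - 2 * 5 simplifies to 2 * current_frame - 1.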
return (current_frame - 1)*2 + 11 - 2*5
def data_prepare(csv_path, file_info_path, data_path, rep_type, target_path):
with open(csv_path, 'r') as f:
ids = f.readlines()
ids = [x.strip().split(',') for x in ids]
#self.ids = ids
samp_rate = 16000
spec_stride = 0.01
window_size = 0.02
size = len(ids)
rep_path = os.path.join(data_path, rep_type)
#self.file_info_path = file_info_path
with open(file_info_path, 'rb') as j:
file_meta = pickle.load(j)
for i in tqdm(range(size)):
sample = ids[i]
file_name, accent_label = sample[0], sample[1]
#accent_label = 'test'
file_name = file_name.split('/')[-1].split('.')[0]
representation = np.load(os.path.join(rep_path, file_name + '_{}.npy'.format(rep_type)))
representation = torch.from_numpy(representation)
times = file_meta[file_name]['times']
rep_list = torch.unbind(representation, dim=1)
accent_path = target_path
if not os.path.exists(accent_path):
os.makedirs(accent_path)
valid_phone_list = ['aa', 'el', 'ch', 'ae', 'eh', 'ix', 'ah', 'ao', 'w', 'ih', 'tcl', 'en', 'ey', 'ay', 'ax', 'zh', 'er', 'gcl', 'ng', 'nx', 'iy', 'sh', 'pcl', 'uh', 'bcl', 'dcl', 'th', 'dh', 'kcl', 'v', 'hv', 'y', 'hh', 'jh', 'dx', 'em', 'ux', 'axr', 'b', 'd', 'g', 'f', 'k', 'm', 'l', 'n', 'q', 'p', 's', 'r', 't', 'oy', 'ow', 'z', 'uw']
count_dict = dict([(key, 0) for key in valid_phone_list])
count = 0
#samp
for i in range(len(rep_list)):
frame_idx = i
if(rep_type != 'spec'):
frame_idx = get_input_frame(frame_idx)
window_start = frame_idx*spec_stride*samp_rate
window_mid = window_start + (samp_rate*window_size/2)
#print(window_start, window_mid)
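# Align this frame to a phone: take the first phone whose boundary time in
# `times` lies beyond the midpoint of the frame's analysis window.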
alligned_phone = 'na'
for j in range(len(times)):
#print(window_mid, times[j])
if (window_mid < float(times[j])):
alligned_phone = file_meta[file_name]['phones'][j]
break
#print(alligned_phone)
if(alligned_phone == 'na'):
#print ("Oops error in allignment for ", file_name, "frame ",frame_idx )
pass
if(alligned_phone in valid_phone_list):
count_dict[alligned_phone] += 1
path = os.path.join(accent_path, file_name+'_'+rep_type+'_'+alligned_phone+'_'+str(count_dict[alligned_phone]))
#print(alligned_phone)
np.save(path, rep_list[i].numpy())
return
parser = argparse.ArgumentParser(description='Take command line arguments')
parser.add_argument('--csv_path',type=str)
parser.add_argument('--file_info_path',type=str)
parser.add_argument('--data_path',type=str)
parser.add_argument('--rep_type',type=str)
parser.add_argument('--target_path',type=str)
args = parser.parse_args()
if __name__ == '__main__':
data_prepare(args.csv_path, args.file_info_path, args.data_path, args.rep_type, args.target_path)
|
"""
Copyright 2019 Christos Diou
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import scipy
import pandas as pd
import json
import jwt
import requests
import time
from datetime import datetime, timedelta
def get_segments(signal, mask, only_true=True):
"""
Get the segments of a signal indicated by consecutive "True" values in a
binary mask
Parameters
----------
signal : numpy.array
1-D numpy array
mask : numpy.array
Boolean array with same shape as signal
only_true : bool
If True, only segments corresponding to True values of the mask are
returned. Otherwise, segments corresponding to both True and False
values are returned.
Returns
-------
segments : list
List of numpy.array elements, each containing a segment of the original
signal.
events : list
List with (start, stop) pairs indicating the start and end sample for each
segment
"""
if signal.shape[0] != mask.shape[0]:
raise ValueError("Signal and mask shape do not match")
# Vectorized way to identify segments. Not straightforward, but fast.
segments = []
events = []
idx = np.where(np.concatenate(([True], mask[:-1] != mask[1:], [True])))[0]
for i in range(len(idx[:-1])):
seg = signal[idx[i]:idx[i + 1]]
segments.append(seg)
events.append((idx[i], idx[i + 1]))
if only_true:
if mask[0]:
ret_segments = segments[::2]
ret_events = events[::2]
else:
ret_segments = segments[1::2]
ret_events = events[1::2]
else:
ret_segments = segments
ret_events = events
return ret_segments, ret_events
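# Illustrative usage (hypothetical values): for a signal `s` and a boolean
# mask `m = s > threshold`, `get_segments(s, m)` returns the above-threshold
# runs of `s` together with their (start, stop) sample indices.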
def get_normalized_data(data, nominal_voltage):
"""
Normalize power with voltage measurements, if available. See also Hart's
1985 paper.
Parameters
----------
data : pandas.DataFrame
Pandas dataframe with columns 'active', 'voltage' and, optionally,
'reactive'.
nominal_voltage : float
Nominal voltage V_0 used for the normalization.
Returns
-------
out : pandas.DataFrame
A normalized dataframe, where
.. math::
P_n(i) = P(i)\left(\frac{V_0}{V(i)}\right)^{1.5}\\
Q_n(i) = Q(i)\left(\frac{V_0}{V(i)}\right)^{2.5}
"""
if 'active' not in data.columns:
raise ValueError("No \'active\' column.")
# First, copy the df and make sure there are no NAs
r_data = data.dropna()
# Normalization: scale active power by (V0/V)^1.5 and reactive power by
# (V0/V)^2.5. See Hart's 1985 paper for an explanation. 1 mV is added to
# the measured voltage to avoid division by zero.
if 'voltage' in data.columns:
r_data.loc[:, 'active'] = data['active'] * \
np.power((nominal_voltage / (data['voltage'] + 0.001)), 1.5)
if 'reactive' in data.columns:
r_data.loc[:, 'reactive'] = data['reactive'] * \
np.power((nominal_voltage / (data['voltage'] + 0.001)), 2.5)
return r_data
def preprocess_data(data, subset=['active', 'reactive']):
"""
Drop duplicates and resample all data to 1 second sampling frequency.
Parameters
----------
data : pandas.DataFrame
Pandas dataframe with the original data.
subset : list
Subset of columns to consider for preprocessing and removal of NAs (see
also pandas.DataFrame.dropna())
Returns
-------
out : pandas.DataFrame
Preprocessed data
"""
# Make sure timestamps are in correct order
out = data.sort_index()
# Make sure there are no NAs
out = out.dropna(subset=subset)
# Round timestamps to 1s
out.index = out.index.round('1s')
out = out.reset_index()
# Remove possible entries with same timestamp. Keep last entry.
out = out.drop_duplicates(subset='index', keep='last')
out = out.set_index('index')
# TODO: Do we handle NAs? (dropna)
# Resample to 1s and fill-in the gaps
out = out.asfreq('1S', method='pad')
return out
def match_power(p1, p2, active_only=True, t=35.0, lp=1000, m=0.05):
"""
Match power consumption p1 against p2 according to Hart's algorithm.
Parameters
----------
p1, p2 : 1x2 Numpy arrays (active and reactive power).
active_only : Boolean indicating if match should take into account only
active power or both active and reactive power
t : Float used to determine whether there is a match or not. If the
difference is over t, then there is no match.
lp : Large power threshold. If the maximum active power of p1 and p2 is over
this value, then a percentage is used for matching, instead of t.
m : The matching percentage used for large active power values.
Returns
-------
match : Boolean for match (True) or no match (False)
distance : Distance between power consumptions. It is L1 distance in the
case of active_only=True or L2 distance in case active_only=False.
"""
# TODO: Check and enforce signature shapes
if p1[0] < 0.0 or p2[0] < 0.0:
raise ValueError('Active power must be positive')
if max((p1[0], p2[0])) >= lp:
t_active = m * p2[0]
else:
t_active = t
if max((np.fabs(p1[1]), np.fabs(p2[1]))) >= lp:
t_reactive = m * p2[1]
else:
t_reactive = t
T = np.fabs(np.array([t_active, t_reactive]))
if active_only:
d = np.fabs(p2[0] - p1[0])
if d < T[0]:
# Match
return True, d
else:
d = np.linalg.norm(p2 - p1)
if all(np.fabs(p2 - p1) < T):
# Match
return True, d
return False, d
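# Illustrative example: with the default threshold t=35,
# match_power(np.array([100.0, 10.0]), np.array([110.0, 12.0]))
# returns (True, 10.0), since |110 - 100| < 35.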
def power_curve_from_activations(appliances, start=None, end=None):
"""
Create a power curve corresponding to the joint power consumption of a list
of appliances.
Parameters
----------
appliances : List
List containing eeris_nilm.appliance.Appliance instances. Warning: This
function produces power consumption curves with 1 second period for the
union of the appliance usage duration without size limitations. It is the
caller's responsibility to ensure that no memory issues occur.
start : String or None
Start time for the power curve. The string must be in
a format understood by pandas.Timestamp(). Only activations taking place
after this time are considered. If None, the earliest start time of the data
is used.
end : String or None
End time for the power curve. The string must be in
a format understood by pandas.Timestamp(). Only activations taking place
before this time are considered. If None, the latest activation start
time in the data is used.
Returns
-------
curve : pandas.DataFrame
Dataframe with timestamp index (1 second period) of active and reactive
power consumption of the appliance.
"""
# Determine the size of the return dataframe
if start is None or end is None:
est_start = None
est_end = None
for i in range(len(appliances)):
app = appliances[i]
# sort_values returns a copy; keep the sorted result so the first and
# last rows really are the earliest and latest activation starts.
acts = app.activations.sort_values('start').reset_index(drop=True)
ap_start = acts.loc[0, 'start']
ap_end = acts['start'].iat[-1]
if est_start is None or ap_start < est_start:
est_start = ap_start
if est_end is None or ap_end > est_end:
est_end = ap_end
if start is None and end is not None:
start = est_start
end = pd.Timestamp(end)
elif start is not None and end is None:
start = pd.Timestamp(start)
end = est_end
else:
start = est_start
end = est_end
else:
start = pd.Timestamp(start)
end = pd.Timestamp(end)
idx = pd.date_range(start=start, end=end, freq='S')
# ncol = appliances[0].signature.shape[1]
ncol = 2 # Fixed number of columns.
data = np.zeros(np.array([idx.shape[0], ncol]))
power = pd.DataFrame(index=idx, data=data, columns=['active', 'reactive'])
for i in range(len(appliances)):
app = appliances[i]
s = appliances[i].signature[0]
for _, act in appliances[i].activations.iterrows():
power.loc[act['start']:act['end']] += s
return power
def activations_from_power_curve(data, states=False, threshold=35):
"""
Create a pandas dataframe of appliance activations from a single appliance's
power consumption curve (only active power is considered). This function can
take into account multiple appliance states (i.e., when an appliance changes
its state, it can lead to a different activation or not).
Parameters
----------
data : pandas.Dataframe
Pandas dataframe with a column 'active'. Other columns, if present, are
ignored.
states : bool
Whether to take states into account or not.
threshold: float
Threshold value for detecting activations
Returns
-------
activations : pandas.DataFrame
Dataframe with columns 'start', 'end', 'active'. When states is False, the
column active is always zero.
"""
if 'active' not in data.columns:
s = ("Expect \'active\' and, optionally,",
"\'reactive\' and \'voltage\' columns")
raise ValueError(s)
data_n = data['active']
# Pre-process data to a constant sampling rate, and fill-in missing
# data.
data_n = preprocess_data(data_n)
start_ts = data_n.index[0]
# Work with numpy data from now on.
npdata = data_n.values[:, 0]
if states:
# Apply a 5-th order derivative filter to detect edges
sobel = np.array([-2, -1, 0, 1, 2])
# Apply an edge threshold
edge = np.fabs(np.convolve(npdata, sobel, mode='same'))
mask = (edge < threshold) & (npdata > threshold)
segments, events = get_segments(npdata, mask)
# Get the start and end timestamp for each event
else:
# Hardcoded threshold
mask = npdata > threshold
segments, events = get_segments(npdata, mask)
seg_events = np.array([[start_ts + pd.Timedelta(event[0], unit='seconds'),
start_ts + pd.Timedelta(event[1], unit='seconds')]
for event in events])
df_list = []
for i in range(seg_events.shape[0]):
df = pd.DataFrame({'start': seg_events[i, 0],
'end': seg_events[i, 1],
'active': np.mean(segments[i])}, index=[0])
df_list.append(df)
if len(df_list) > 0:
activations = pd.concat(df_list)
else:
activations = None
return activations
def remove_overlapping_matches(matches):
"""
Remove overlapping matches. First, the function removes matches that
include other matches of the same appliance (start1 < start2 and end1 >
end2).
Parameters
----------
matches : pandas.DataFrame with columns 'start', 'end', as well as other
columns with match signature.
Returns
-------
out : pandas.DataFrame without matches that are supersets of other
matches
"""
out = matches.copy()
# Find all segments that enclose this one
for _, row in matches.iterrows():
start = row['start']
end = row['end']
idx = out.index[(out['start'] < start) & (out['end'] > end)]
out.drop(index=idx, inplace=True)
return out
# TODO: Merge partially overlapping segments?
def get_data_from_cenote_response(resp):
"""
Convert response from cenote system to pandas.DataFrame that can be used by
eeris_nilm.
"""
if not resp.ok:
return None
rd = json.loads(resp.text)
data = pd.DataFrame(rd['results'])
if data.shape[0] == 0:
return None
data = data.drop(['uuid', 'cenote$created_at', 'cenote$id'], axis=1)
data['cenote$timestamp'] = pd.to_datetime(data['cenote$timestamp'],
unit='ms', origin='unix')
data = data.rename(columns={'cenote$timestamp': 'timestamp'})
data = data.set_index('timestamp')
data.index.name = None
return data
def request_with_retry(url, params=None, data=None, json=None, request='get',
requests_limit=3600, token=None):
"""
Calls requests with parameters url and params, data or json (whichever is
not None). If it fails, it retries requests_limit times (with a sleep time
of 1s in-between).
"""
n_r = 0
f = None
if request == 'get':
f = requests.get
elif request == 'put':
f = requests.put
elif request == 'post':
f = requests.post
elif request == 'delete':
f = requests.delete
else:
raise ValueError("Current implementation does not handle %s requests",
request)
args = {}
if data is not None:
args['data'] = data
if json is not None:
args['json'] = json
while n_r < requests_limit:
try:
if token is not None:
r = f(url, params, **args, headers={'Authorization': 'jwt %s' %
(token)})
else:
r = f(url, params, **args)
break
except requests.exceptions.RequestException as e:
print("Request error: " + e)
n_r += 1
if n_r >= requests_limit:
# Nothing we can do.
raise SystemExit(e)
print("Retrying... %d / %d" % (n_r, requests_limit))
time.sleep(1.0)
return r
# Other utilities
def get_jwt(user, secret):
"""
Helper that generates a JWT given a username and a secret.
"""
now = datetime.utcnow()
payload = {
'user': user,
'iat': now,
'nbf': now,
'exp': now + timedelta(seconds=1*60*60)
}
# decode should be removed when upgrading to pyjwt 2.0.0 (currently with
# 1.7.0)
jwt_token = jwt.encode(payload, secret, algorithm='HS256').decode('utf-8')
return jwt_token
|
# -*- coding: utf-8 -*-
from app.airport.Airport import Airport
from app.common.http_methods_unittests import get_request
from app.common.target_urls import MY_AIRPORT
import unittest
class TestAirport(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.__html_page = get_request(MY_AIRPORT)
def test_str(self):
airport = Airport(country='Égypte',
money='123456',
kerosene_supply='',
kerosene_capacity='',
engines_supply='',
planes_capacity=84,
staff='',
airport_name='Aéroport égyptien')
self.assertEqual('Airport Aéroport égyptien H84 Égypte', str(airport))
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-09 08:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('about_me_plugin', '0004_auto_20180409_1506'),
]
operations = [
migrations.AddField(
model_name='statistic',
name='fa_icon',
field=models.CharField(default='', max_length=80),
),
]
|
import re
import sys
import json
import torch
import logging
import argparse
import mysql.connector
from dialogparser import get_intents
from connector import request_db
from deanonymization import anonymization
from telegram import Update
from telegram.ext import (
Updater, CommandHandler, MessageHandler, Filters, CallbackContext)
from transformers import GPT2Tokenizer, GPT2LMHeadModel
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
connection = {
"host":"remotemysql.com",
"user":"fcjRTVuTI0",
"password":"rTnUuTKbvQ",
"database":"fcjRTVuTI0",
}
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers "
"model on a causal language modeling task")
parser.add_argument("--checkpoint", type=str, default=None,
help="A path for initial model.")
parser.add_argument("--dialog_domain", type=str, default="consulta_saldo",
help="Domain of possible dialogs with chatbot.")
return parser.parse_args()
def initialize_table():
mydb = mysql.connector.connect(**connection)
create_table_dialogs = "CREATE TABLE IF NOT EXISTS dialogs (id BIGINT NOT NULL AUTO_INCREMENT, dialog_domain VARCHAR(256) NOT NULL, situation BOOLEAN NOT NULL, PRIMARY KEY (id))"
create_table_turns = "CREATE TABLE IF NOT EXISTS turns (turn_num INT NOT NULL, id_dialog BIGINT NOT NULL, speaker VARCHAR(256) NULL, utterance VARCHAR(2048) NOT NULL, utterance_delex VARCHAR(2048) NOT NULL, intent_action VARCHAR(256) NOT NULL, PRIMARY KEY (id_dialog, turn_num), FOREIGN KEY (id_dialog) REFERENCES dialogs(id))"
mycursor = mydb.cursor()
mycursor.execute(create_table_dialogs)
mydb.commit()
mycursor.execute(create_table_turns)
mydb.commit()
mydb.close()
def insert_dialog(dialog_domain):
mydb = mysql.connector.connect(**connection)
insert_query = "INSERT INTO dialogs (dialog_domain, situation) VALUES (%s, %s)"
values = (dialog_domain, 0)
mycursor = mydb.cursor()
mycursor.execute(insert_query, values)
mydb.commit()
id_dialog = mycursor.lastrowid
mydb.close()
return id_dialog
def insert_turn(id_dialog, speaker, utterance,
utterance_delex, intent_action, turn_num):
mydb = mysql.connector.connect(**connection)
insert_query = "INSERT INTO turns (id_dialog, turn_num, speaker, utterance, utterance_delex, intent_action) VALUES (%s, %s, %s, %s, %s, %s)"
values = (id_dialog, turn_num, speaker, utterance, utterance_delex, intent_action)
mycursor = mydb.cursor()
mycursor.execute(insert_query, values)
mydb.commit()
mydb.close()
def update_situation(id_dialog, situation):
mydb = mysql.connector.connect(**connection)
update_query = "UPDATE dialogs SET situation = "+str(situation)+" WHERE id = "+str(id_dialog)
mycursor = mydb.cursor()
mycursor.execute(update_query)
mydb.commit()
mydb.close()
def telegram_bot(args):
with open('telegram.json') as fin: api = json.load(fin)
with torch.no_grad():
tokenizer = GPT2Tokenizer.from_pretrained(args.checkpoint)
model = GPT2LMHeadModel.from_pretrained(args.checkpoint)
updater = Updater(token=api['token'])
dispatcher = updater.dispatcher
initialize_table()
def start(update, context):
response = ("Olá. Eu sou o Ze Carioca, como eu posso te ajudar? "
"Ao final avalie a nossa conversa, utilizando a tag /correct "
"quando eu me comporto adequadamente e /incorrect quando o meu "
"comportamento saiu do esperado. O domínio da nossa conversa é "
+args.dialog_domain+".")
context.bot.send_message(chat_id=update.effective_chat.id, text=response)
def restart(update, context):
response = ("Olá. Eu sou o Ze Carioca, como eu posso te ajudar? "
"Ao final avalie a nossa conversa, utilizando a tag /correct "
"quando eu me comporto adequadamente e /incorrect quando o meu "
"comportamento saiu do esperado. O domínio da nossa conversa é "
+args.dialog_domain+".")
context.bot.send_message(chat_id=update.effective_chat.id, text=response)
if 'id' in context.user_data: context.user_data.pop('id')
if 'variables' in context.user_data: context.user_data.pop('variables')
if 'turn' in context.user_data: context.user_data.pop('turn')
if 'msg' in context.user_data: context.user_data.pop('msg')
def correct(update, context):
if 'id' in context.user_data: update_situation(context.user_data['id'], 1)
context.bot.send_message(chat_id=update.effective_chat.id,
text="Diálogo correto adicionado com sucesso! Obrigada!")
def incorrect(update, context):
if 'id' in context.user_data: update_situation(context.user_data['id'], 0)
context.bot.send_message(chat_id=update.effective_chat.id,
text="Diálogo incorreto adicionado com sucesso! Obrigada!")
def reply(update, context):
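# Two-stage generation: first decode the belief state up to <eos_b>,
# query the database with it, then decode the system response up to
# <eos_r> conditioned on the database result. Delexicalized slots are
# filled back in afterwards using the `trans` substitutions.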
if 'msg' not in context.user_data: context.user_data['msg'] = ""
msg = '<sos_u>'+update.message.text.lower()+'<eos_u><sos_b>'
logging.info("[USER] " + context.user_data['msg'])
contextmsg = tokenizer.encode(context.user_data['msg']+msg)
context_length = len(contextmsg)
max_len=80
outputs = model.generate(input_ids=torch.LongTensor(
contextmsg).reshape(1,-1),
max_length=context_length+max_len,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.encode(['<eos_b>'])[0])
generated = outputs[0].numpy().tolist()
decoded_output = tokenizer.decode(generated)
action_db, trans = request_db(decoded_output.split('<eos_u>')[-1])
logging.info("[DATABASE] " + action_db + str(trans))
action_db = tokenizer.encode(action_db)
outputs = model.generate(input_ids=torch.LongTensor(
generated+action_db).reshape(1,-1),
max_length=context_length+max_len,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.encode(['<eos_r>'])[0])
generated = outputs[0].numpy().tolist()
decoded_output = tokenizer.decode(generated)
context.user_data['msg'] = decoded_output
for k,v in trans:
decoded_output = decoded_output.replace(k,v,1)
system_response = decoded_output.split('<sos_r>')[-1].split('<eos_r>')[0]
logging.info("[SYSTEM] "+decoded_output)
context.bot.send_message(chat_id=update.effective_chat.id,
text=system_response)
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
restart_handler = CommandHandler('restart', restart)
dispatcher.add_handler(restart_handler)
correct_handler = CommandHandler('correct', correct)
dispatcher.add_handler(correct_handler)
incorrect_handler = CommandHandler('incorrect', incorrect)
dispatcher.add_handler(incorrect_handler)
reply_handler = MessageHandler(Filters.text & (~Filters.command), reply)
dispatcher.add_handler(reply_handler)
updater.start_polling()
updater.idle()
if __name__ == "__main__":
args = parse_args()
telegram_bot(args)
|
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.utils.text import slugify
import subprocess
import datetime
import logging
class Command(BaseCommand):
args = ""
mei_data_locations = {
# 'st_gallen_390': "data_dumps/mei/csg-390",
# 'st_gallen_391': None,
"salzinnes": "data_dumps/mei/salz"
}
def handle(self, *args, **kwargs):
logging.basicConfig(
filename="logs/mei_changes/changelog.log",
level=logging.INFO,
format="%(asctime)s %(message)s",
)
# We use this log file to keep track of what happens
# log = open("logs/mei_changes/changelog.txt", 'wa')
logging.info("########### Begin session ################")
for manuscript in list(self.mei_data_locations.keys()):
# Open the log file
try:
manuscript_log_file = open(
"logs/mei_changes/{0}.txt".format(manuscript), "r+"
)
except IOError:
# If the file didn't already exist...
open(
"logs/mei_changes/{0}.txt".format(manuscript), "w"
).close()
manuscript_log_file = open(
"logs/mei_changes/{0}.txt".format(manuscript), "w+"
)
# Grab the console output
console_output = slugify(
"{0}".format(
subprocess.check_output(
["ls", "-l", self.mei_data_locations[manuscript]]
)
)
)
if console_output != manuscript_log_file.read():
# Overwrite the stored listing instead of appending to it, so that
# the next run compares against only the latest state.
manuscript_log_file.seek(0)
manuscript_log_file.truncate()
manuscript_log_file.write(console_output)
logging.info("{0} has changed!".format(manuscript))
# Flush the old version of that manuscript from Solr
call_command("remove_mei_data", manuscript)
logging.info("{0} removed from Solr.".format(manuscript))
# Reimport the manuscript
call_command("import_mei_data", "mei_to_solr", manuscript)
logging.info("New {0} imported into Solr.".format(manuscript))
else:
logging.info("{0} has not changed.".format(manuscript))
# We're done with this file
manuscript_log_file.close()
logging.info("########### End session ##################")
# print console_output
# # Call a command
# call_command("auto_manage_mei_data")
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import datetime as dt
# utils is assumed to be a project-local helper package providing
# utils.date.extract_date, which is used for date parsing below.
import utils.date
def plotly_plot(x=None, y=None, df=None, style='scatter',
title=None, filename='plotly_plot_test', xlabel=None, ylabel=None,
vlines=None, hlines=None, xlim=(None, None), ylim=(None, None),
dropna=True, dateextractX=False, dateextractY=False, figsize=(16, 9),
plotlyplot=True, saveAs=None, **kwargs):
'''Interactively plots a series or two in plotly.
Must set unique `title` or `filename` for plot to exist semi-permanently,
else overwritten on the next use of function.
Parameters
----------
x : array, series, or list OR column name in df
numpy array, pandas series, or list of primary values to plot
y : array, series, or list OR column name in df
numpy array, pandas series, or list of secondary values to plot
df : pandas DataFrame, optional
if given, uses dataframe to create x and y arrays using x and y column names given
style : string
argument to choose style of plot. currently implemented dist, hist, line, and scatter
title, xlabel, ylabel : string, optional
labels of plot. if filename not given, filename = title
vlines, hlines : int or list, optional
list of x/y points to make vertical/horizontal lines in the plot
xlim, ylim : tuple (min, max), optional
horizontal/vertical boundaries of the figure
dropna : boolean, optional (default is True)
drop nans from series
dateextractX, dateextractY : boolean, optional (default is False)
try to convert x and y to datetimes using utils.date.extract_date
plotlyplot: boolean, optional (default is True)
set to False for prototyping
filename: string, optional
file name on server for plotly plot. unique label ensures plot will not be overwritten
saveAs : string (default is None)
If given, save the figure using saveAs as the file name.
kwargs : dict
additional keyworded arguments passed to plotting functions
Returns
-------
iframe : iframe
Notes
-----
ToDo - Fix autolabeling; it broke when fixing dates.
'''
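# Illustrative usage (hypothetical dataframe/column names):
#   plotly_plot(x='date', y='price', df=prices_df, style='line',
#               title='Price over time', dateextractX=True)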
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
yisNone = False
if y is None:
yisNone = True
# if dataframe provided, create x (and y) array(s)
if df is not None:
x = df[x]
if not yisNone:
y = df[y]
# check whether x (and y) are pandas Series; if not, convert them
if not isinstance(x, pd.Series):
x = pd.Series(x)
else:
if xlabel is None:
xlabel = x.name
if not yisNone:
if not isinstance(y, pd.Series):
y = pd.Series(y)
else:
if ylabel is None:
ylabel = y.name
# if dropna, drop nan values from x (and accompanying values in y)
if dropna:
try:
nan_indices = pd.isnull(x)
if sum(nan_indices) > 0:
print 'nan values in series to be dropped:', sum(nan_indices)
x = x[~nan_indices].reset_index(drop=True)
if not yisNone:
y = y[~nan_indices].reset_index(drop=True)
except:
pass
# if y not provided: set y to x.index, swap x and y as we are interested
# in plotting x on the 'y' against the index
if yisNone:
y = x.index
x, y = y, x
# try to extract_date x and y
if dateextractX:
try:
x = utils.date.extract_date(x)
print 'date extracted x'
except:
pass
if dateextractY and not yisNone:
try:
y = utils.date.extract_date(y)
print 'date extracted y'
except:
pass
# dist or hist: distribution of x plot
if style == 'dist':
try:
sns.distplot(y, **kwargs)
except:
print "failed producing seaborn distribution plot.. trying hist"
plt.hist(y, **kwargs)
if ylabel is None:
ylabel = 'frequency'
elif style == 'hist':
plt.hist(y, **kwargs)
if ylabel is None:
ylabel = 'frequency'
# line or scatter: x vs y plot
elif style == 'line':
plt.plot(x, y, **kwargs)
elif style == 'scatter':
plt.scatter(x, y, **kwargs)
else:
print 'style currently not available'
return None
if ylim[0] is None:
y_min = plt.ylim()[0]
else:
y_min = ylim[0]
if ylim[1] is None:
y_max = plt.ylim()[1]
else:
y_max = ylim[1]
plt.ylim(y_min, y_max)
if xlim[0] is None:
x_min = plt.xlim()[0]
else:
x_min = xlim[0]
if xlim[1] is None:
x_max = plt.xlim()[1]
else:
x_max = xlim[1]
plt.xlim(x_min, x_max)
# vlines, hlines. should maybe export this to their own function for other
# uses
if vlines is not None:
if not isinstance(vlines, (list, pd.core.series.Series, np.ndarray)):
vlines = [vlines]
for vl in vlines:
plt.plot([vl, vl], [y_min, y_max])
if hlines is not None:
if xlim is None:
xlim = plt.xlim()
if not isinstance(hlines, (list, pd.core.series.Series, np.ndarray)):
hlines = [hlines]
for hl in hlines:
plt.plot([x_min, x_max], [hl, hl])
# title, filename handling
if (title is None) and (filename != 'plotly_plot_test'):
title = filename
if title is not None:
plt.title(title, size=20)
# if title is set and filename is default, set filename to title
if filename == 'plotly_plot_test':
filename = title
# x and y label handling. auto labeling hashed out for now
if xlabel is not None:
plt.xlabel(xlabel, size=18)
if ylabel is not None:
plt.ylabel(ylabel, size=18)
if saveAs is not None:
plt.savefig(saveAs, bbox_inches='tight', dpi=270)
# render in plotly or return nothing to output static chart
if plotlyplot:
iframe = py.iplot_mpl(fig, filename=filename, fileopt='overwrite')
print iframe.embed_code.split('src="')[1].split('.emb')[0]
return iframe
def plotly_date_frequency_plot(
df=None,
array=None,
series=None,
title=None,
filename='plotly_date_frequency_plot_test',
weekend_bool=True,
weekend_height=20,
xlabel=None,
ylabel=None,
vlines=None,
hlines=None,
xlim=None,
ylim=None,
**kwargs):
'''plotly_date_frequency_plot uses plotly to interactively plot a bar chart showing a date series or df[array] frequency
Requires df, array str or series
To write a permanent, linkable plot, change filename
Optional weekend indicators with adjustable height
.. todo:
Add hue parameter for stacked bar plot (see Jason's implementation)
Add resample argument for weekly, monthly views
'''
if series is None:
series = df[array]
else:
series = pd.Series(series)
day_vcs = series.value_counts().sort_index()
day_tuples = zip(day_vcs.keys(), day_vcs.values)
if weekend_bool:
# create list of weekend tuples to indicate weekends
start, end = utils.date.extract_date(
min(series)), utils.date.extract_date(
max(series))
running_day = start
weekend_days = []
while running_day <= end:
if running_day.weekday() in set([5, 6]):
weekend_days.append(running_day)
running_day += dt.timedelta(days=1)
if len(weekend_days) % 2 == 1:
weekend_days = weekend_days[:-1]
weekend_tuples = []
for i in range(len(weekend_days) / 2):
weekend_tuples.append(
(weekend_days[i * 2], weekend_days[i * 2 + 1]))
# plotly plot
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(1, 1, 1)
plt.bar(utils.date.extract_date([day[0] for day in day_tuples]), [
count[1] for count in day_tuples], **kwargs)
if weekend_bool:
for i in weekend_tuples:
plt.plot([i[0], i[1], i[0], i[1]], [.1, weekend_height,
weekend_height, .1], alpha=.6, color='grey')
if vlines is not None:
if ylim is None:
ylim = plt.ylim()
if not isinstance(vlines, (list, pd.core.series.Series, np.ndarray)):
vlines = [vlines]
for vl in vlines:
plt.plot([vl, vl], [ylim[0], ylim[1]])
if hlines is not None:
if xlim is None:
xlim = plt.xlim()
if not isinstance(hlines, (list, pd.core.series.Series, np.ndarray)):
hlines = [hlines]
for hl in hlines:
plt.plot([xlim[0], xlim[1]], [hl, hl], )
if title is not None:
plt.title(title, size=20)
if xlabel is not None:
plt.xlabel(xlabel, size=20)
if ylabel is not None:
plt.ylabel(ylabel, size=20)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
return py.iplot_mpl(fig, filename=filename, fileopt='overwrite')
|
# jitsi shortcuts for keybow 2040
# based on the hid-keys-simple.py example by Sandy Macdonald
# drop the keybow2040.py file into your `lib` folder on your `CIRCUITPY` drive.
# NOTE! requires the adafruit_hid CircuitPython library also!
import board
from keybow2040 import Keybow2040
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.keycode import Keycode
# setup Keybow
i2c = board.I2C()
keybow = Keybow2040(i2c)
keys = keybow.keys
# setup keyboard and layout
keyboard = Keyboard(usb_hid.devices)
layout = KeyboardLayoutUS(keyboard)
# the keycodes in order
keymap = [
Keycode.SPACE, # push to talk
Keycode.THREE, # focus on person 3
Keycode.ZERO, # focus on me
Keycode.M, # toggle mic
Keycode.A, # call quality
Keycode.FOUR, # focus on person 4
Keycode.ONE, # focus on person 1
Keycode.V, # toggle video
Keycode.T, # speaker stats
Keycode.FIVE, # focus on person 5
Keycode.TWO, # focus on person 2
Keycode.D, # screen sharing
Keycode.S, # full screen
Keycode.W, # tile view
Keycode.F, # video thumbnails
Keycode.R # raise hand
]
# the key colours in order
rgb = [
(255, 127, 0), # push to talk
(255, 63, 63), # focus on person 3
(255, 63, 63), # focus on me
(0, 255, 0), # toggle mic
(63, 255, 63), # call quality
(255, 63, 63), # focus on person 4
(255, 63, 63), # focus on person 1
(255, 0, 0), # toggle video
(63, 255, 63), # speaker stats
(255, 63, 63), # focus on person 5
(255, 63, 63), # focus on person 2
(0, 255, 255), # screen sharing
(0, 0, 255), # full screen
(255, 255, 0), # tile view
(255, 255, 0), # video thumbnails
(255, 255, 255) # raise hand
]
# set initial colours and attach handler functions to the keys
for key in keys:
# default colour
key.set_led(*rgb[key.number])
if key.number == 0:
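# key 0 is push-to-talk: keep SPACE held down for as long as the key is
# pressed, instead of sending a single keypress like the other keys.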
@keybow.on_press(key)
def press_handler(key):
keycode = keymap[key.number]
keyboard.press(keycode)
key.set_led(255, 255, 255)
# release handler
@keybow.on_release(key)
def release_handler(key):
keycode = keymap[key.number]
keyboard.release(keycode)
key.set_led(*rgb[key.number])
else:
# press handler
@keybow.on_press(key)
def press_handler(key):
keycode = keymap[key.number]
keyboard.send(keycode)
key.set_led(255, 255, 255)
# release handler
@keybow.on_release(key)
def release_handler(key):
key.set_led(*rgb[key.number])
while True:
# always remember to call keybow.update()!
keybow.update()
|
import torch.multiprocessing as mp
import torch
from . import embed_eval
from .embed_save import save_model
import timeit
import numpy as np
from tqdm import tqdm
from .logging_thread import write_tensorboard
from .graph_embedding_utils import manifold_dist_loss_relu_sum, metric_loss
from .manifold_initialization import initialize_manifold_tensor
from .manifolds import EuclideanManifold
from .manifold_tensors import ManifoldParameter
import random
def train(
device,
model,
manifold,
dimension,
data,
optimizer,
loss_params,
n_epochs,
eval_every,
sample_neighbors_every,
lr_scheduler,
shared_params,
thread_number,
feature_manifold,
conformal_loss_params,
tensorboard_watch={}
):
batch_num = 0
for epoch in range(1, n_epochs + 1):
batch_losses = []
if conformal_loss_params is not None:
batch_conf_losses = []
t_start = timeit.default_timer()
if (epoch - 1) % sample_neighbors_every == 0 and thread_number == 0:
optimizer.zero_grad()
inputs = None
graph_dists = None
conf_loss = None
loss = None
import gc; gc.collect()
torch.cuda.empty_cache()
with torch.no_grad():
model.to(device)
nns = data.refresh_manifold_nn(model.get_embedding_matrix(), manifold, return_nns=True)
if epoch > 1:
syn_acc, sem_acc = embed_eval.eval_analogy(model, manifold, nns)
write_tensorboard('add_scalar', ['syn_acc', syn_acc, epoch - 1])
write_tensorboard('add_scalar', ['sem_acc', sem_acc, epoch - 1])
import gc; gc.collect()
torch.cuda.empty_cache()
data_iterator = tqdm(data) if thread_number == 0 else data
for batch in data_iterator:
if batch_num % eval_every == 0 and thread_number == 0:
mean_loss = 0  # previously float(np.mean(batch_losses)); set to zero since it is only used for printed output
savable_model = model.get_savable_model()
save_data = {
'epoch': epoch
}
if data.features is not None:
save_data["features"] = data.features
if model.get_additional_embeddings() is not None:
save_data["additional_embeddings_state_dict"] = model.get_additional_embeddings().state_dict()
if hasattr(model, "main_deltas"):
save_data["main_deltas_state_dict"] = model.main_deltas.state_dict()
if hasattr(model, "additional_deltas"):
save_data["additional_deltas_state_dict"] = model.additional_deltas.state_dict()
save_data["deltas"] = model.deltas
save_data.update(shared_params)
path = save_model(savable_model, save_data)
elapsed = 0  # set to zero since it is only used for printed output
embed_eval.evaluate(batch_num, elapsed, mean_loss, path)
conf_loss = None
delta_loss = None
inputs, graph_dists = batch
inputs = inputs.to(device)
graph_dists = graph_dists.to(device)
optimizer.zero_grad()
rand_val = random.random()
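# When the model has additional embeddings, alternate what gets optimized:
# roughly 30% of batches (rand_val > 0.7) update only the additional
# embeddings, the rest update only the main model; the other parameter
# group is frozen via requires_grad.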
optimizing_model = False
# Make sure optimizing_deltas is always defined; it is checked further
# below even when the model has no additional embeddings.
optimizing_deltas = False
if hasattr(model, "get_additional_embeddings"):
if rand_val > 0.7:
optimizing_model = False
optimizing_deltas = False
model.deltas = False
for p in model.parameters():
p.requires_grad = False
for p in model.get_additional_embeddings().parameters():
p.requires_grad = True
if model.deltas:
for p in model.main_deltas.parameters():
p.requires_grad = False
if hasattr(model, "additional_deltas"):
for p in model.additional_deltas.parameters():
p.requires_grad = False
else:
optimizing_model = True
optimizing_deltas = False
model.deltas = False
for p in model.parameters():
p.requires_grad = True
for p in model.get_additional_embeddings().parameters():
p.requires_grad = False
if model.deltas:
for p in model.main_deltas.parameters():
p.requires_grad = False
if hasattr(model, "additional_deltas"):
for p in model.additional_deltas.parameters():
p.requires_grad = False
'''
else:
optimizing_model = False
optimizing_deltas = True
model.deltas = True
for p in model.parameters():
p.requires_grad = False
for p in model.get_additional_embeddings().parameters():
p.requires_grad = False
if model.deltas:
for p in model.main_deltas.parameters():
p.requires_grad = True
if hasattr(model, "additional_deltas"):
for p in model.additional_deltas.parameters():
p.requires_grad = True
'''
loss = 0.5 * manifold_dist_loss_relu_sum(model, inputs, graph_dists, manifold, **loss_params)
if optimizing_model and hasattr(model, 'embedding_model') and conformal_loss_params is not None and epoch % conformal_loss_params["update_every"] == 0:
main_inputs = inputs.narrow(1, 0, 1).squeeze(1).clone().detach()
perm = torch.randperm(main_inputs.size(0))
idx = perm[:conformal_loss_params["num_samples"]]
main_inputs = main_inputs[idx]
conf_loss = metric_loss(model, main_inputs, feature_manifold, manifold, dimension,
isometric=conformal_loss_params["isometric"], random_samples=conformal_loss_params["random_samples"],
random_init=conformal_loss_params["random_init"])
if hasattr(model, 'main_deltas') and optimizing_deltas:
main_inputs = inputs.view(inputs.shape[0], -1)
vals = model.main_deltas(model.index_map[main_inputs][model.index_map[main_inputs] >= 0])
mean_deltas = torch.mean(torch.norm(vals, dim=-1))
delta_loss = 800 * torch.mean(torch.norm(vals, dim=-1) ** 2)
if conformal_loss_params is not None and conf_loss is not None:
total_loss = (1 - conformal_loss_params["weight"]) * loss + conformal_loss_params["weight"] * conf_loss
if delta_loss is not None:
# total_loss += delta_loss
pass
total_loss.backward()
else:
if conformal_loss_params is not None:
scaled_loss = (1 - conformal_loss_params["weight"]) * loss
else:
scaled_loss = loss
if delta_loss is not None:
scaled_loss += delta_loss
scaled_loss.backward()
optimizer.step()
batch_losses.append(loss.cpu().detach().numpy())
if thread_number == 0:
write_tensorboard('add_scalar', ['minibatch_loss', batch_losses[-1], batch_num])
if delta_loss is not None:
write_tensorboard('add_scalar', ['minibatch_delta_loss', delta_loss.cpu().detach().numpy(), batch_num])
write_tensorboard('add_scalar', ['minibatch_delta_mean', mean_deltas.cpu().detach().numpy(), batch_num])
for name, value in tensorboard_watch.items():
write_tensorboard('add_scalar', [name, value.cpu().detach().numpy(), batch_num])
if conf_loss is not None:
batch_conf_losses.append(conf_loss.cpu().detach().numpy())
if thread_number == 0:
write_tensorboard('add_scalar', ['minibatch_conf_loss', batch_conf_losses[-1], batch_num])
elapsed = timeit.default_timer() - t_start
batch_num += 1
mean_loss = float(np.mean(batch_losses))
if thread_number == 0:
if conformal_loss_params is not None and len(batch_conf_losses) > 0:
mean_conf_loss = float(np.mean(batch_conf_losses))
metric_loss_type = "isometric" if conformal_loss_params["isometric"] else "conformal"
write_tensorboard('add_scalar', [f'batch_{metric_loss_type}_loss', mean_conf_loss, epoch])
write_tensorboard('add_scalar', ['batch_loss', mean_loss, epoch])
write_tensorboard('add_scalar', ['learning_rate', lr_scheduler.get_lr()[0], epoch])
lr_scheduler.step()
|
"""
Test for One Hot Design Generator Module
"""
import numpy as np
import pytest
from tagupy.design.generator import OneHot
@pytest.fixture
def correct_inputs():
return [1, 2, 3, 4, 5, 6, 7]
def test_init_invalid_input():
arg = [
["moge", None, np.ones((2, 3)), 3.4, 0, -22]
]
for el in arg:
with pytest.raises(AssertionError) as e:
OneHot(el)
assert f"{el}" in f"{e.value}", \
f"NoReasons: Inform the AssertionError reasons, got {e.value}"
def test_init_correct_input(correct_inputs):
exp = [1, 2, 3, 4, 5, 6, 7]
for i, el in enumerate(correct_inputs):
assert OneHot(el).n_rep == exp[i], \
f"self.n_rep expected {exp[i]}, got {OneHot(el).n_rep}"
def test_get_exmatrix_invalid_input_dtype():
arg = [
["moge", None, np.ones((2, 3)), 3.4, 0, -1]
]
model = OneHot(1)
for n_factor in arg:
with pytest.raises(AssertionError) as e:
model.get_exmatrix(n_factor)
assert f"{n_factor}" in f"{e.value}", \
f"NoReasons: Inform the AssertionError reasons, got {e.value}"
def test_get_exmatrix_output_dtype(correct_inputs):
model = OneHot(1)
for n_factor in correct_inputs:
ret = model.get_exmatrix(n_factor)
assert isinstance(ret, np.ndarray), \
f"dtype of exmatrix expected np.ndarray got {type(ret)}"
def test_get_exmatrix_output_shape(correct_inputs):
n_rep = 11
model = OneHot(n_rep)
for n_factor in correct_inputs:
exp = ((n_factor + 1) * n_rep, n_factor)
ret = model.get_exmatrix(n_factor)
assert ret.shape == exp, \
f"shape of exmatrix expected {exp}, got {ret.shape}"
def test_get_exmatrix_output_element(correct_inputs):
model = OneHot(11)
for n_factor in correct_inputs:
ret = model.get_exmatrix(n_factor)
assert ((ret == 1) | (ret == 0)).all(), \
f"all the elements in exmatrix should be either 0 or 1, got {ret}"
def test_get_exmatrix_sum(correct_inputs):
n_rep = 11
model = OneHot(n_rep)
for n_factor in correct_inputs:
ret = model.get_exmatrix(n_factor)
sum = (ret.sum(axis=0), ret.sum(axis=1))
assert ((sum[1] == 1) | (sum[1] == 0)).all(), \
f"sum of values in a row should be \
either 0 or 1, got {sum[1]}"
assert np.bincount(sum[1])[0] == n_rep, \
f"raws for negative control should be given as many as n_rep, \
got {np.bincount(sum[1])[0]}"
assert np.array_equal(sum[0], np.full((n_factor), n_rep)), \
f"sum of values in a col should be n_rep, got {sum[0]}"
|
import numpy as np
from mxnet import ndarray as nd
def calculate_indexes(current_index, shape, gradients_cumulative, coordinates):
if len(coordinates) == 1:
return gradients_cumulative[current_index] + coordinates[0]
if len(coordinates) == 2:
return gradients_cumulative[current_index] + (coordinates[0] * shape[1] + coordinates[1])
return None
def build_message_map(idx, gradients_cumulative, numpy_delta_positive, numpy_delta_negative,
zero_setter, one_setter):
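# Encode the +1 and -1 entries of the sparse delta tensors as flat indices
# into the concatenated gradient vector; one_setter presumably tags "up"
# updates (e.g. by setting a sign bit) and zero_setter tags "down" updates,
# so that decode_message_map can tell them apart.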
map_ups = np.where(numpy_delta_positive == 1)
ups = calculate_indexes(idx, numpy_delta_positive.shape, gradients_cumulative, map_ups)
ups = one_setter(ups)
map_downs = np.where(numpy_delta_negative == 1)
downs = calculate_indexes(idx, numpy_delta_negative.shape, gradients_cumulative, map_downs)
downs = zero_setter(downs)
return np.concatenate((ups, downs))
def get_indices(ups_or_downs, gradients_cumulative, gradients_blueprint):
first_indices = [
0 if t == 0 else len(gradients_cumulative) - 1 if gradients_cumulative[-1] <= t else \
list(map(lambda x: x > t, gradients_cumulative)).index(True) - 1 for t in ups_or_downs]
offsets = [(t - gradients_cumulative[first_indices[i]]) for i, t in enumerate(ups_or_downs)]
all_indices = []
for idx, t in enumerate(ups_or_downs):
if len(gradients_blueprint[first_indices[idx]]) == 1:
all_indices.append((first_indices[idx], offsets[idx],))
elif len(gradients_blueprint[first_indices[idx]]) == 2:
all_indices.append(
(first_indices[idx], offsets[idx] // gradients_blueprint[first_indices[idx]][1],
offsets[idx] % gradients_blueprint[first_indices[idx]][1]))
return all_indices
def decode_message_map(ctx, weight_indices, gradients_blueprint, gradients_cumulative, tau, zero_setter):
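# Reverse of build_message_map: entries with bit 31 set are "+tau" updates
# (the bit is cleared with zero_setter to recover the flat index), the rest
# are "-tau" updates; flat indices are mapped back to per-tensor coordinates.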
message_map = np.array(weight_indices)
sign_detector = np.vectorize(lambda int_type: int_type & (1 << 31) > 0, otypes=[bool])
signs = sign_detector(message_map)
ups = zero_setter(message_map[signs])
downs = message_map[np.logical_not(signs)]
peer_gradients = [nd.zeros(shape=blueprint, ctx=ctx) for blueprint in gradients_blueprint]
up_indices = get_indices(ups, gradients_cumulative, gradients_blueprint)
down_indices = get_indices(downs, gradients_cumulative, gradients_blueprint)
for up in up_indices:
if len(up) == 2:
peer_gradients[up[0]][up[1]] = tau
elif len(up) == 3:
peer_gradients[up[0]][up[1]][up[2]] = tau
for down in down_indices:
if len(down) == 2:
peer_gradients[down[0]][down[1]] = -tau
elif len(down) == 3:
peer_gradients[down[0]][down[1]][down[2]] = -tau
return peer_gradients
|
import logging
from django.conf import settings
from django.db import transaction
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from api import serializers
from api.common import constants
from api.resources.decorators import tenant_user_api
from core import models
logger = logging.getLogger(__name__)
class TenantListView(APIView):
@transaction.atomic
def post(self, request):
serializer = serializers.TenantSerializer(
data=request.data, user=request.user)
serializer.is_valid(raise_exception=True)
tenant = serializer.save()
ret = serializer.data
data = {
'user_id': request.user.id,
'role_type': constants.TENANT_USER_ROLE_TYPE.ADMIN.value,
}
serializer = serializers.TenantUserSerializer(
data=data, user=request.user, tenant=tenant,
extra_request=dict(tenant_id=tenant.id))
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(ret, status=status.HTTP_200_OK)
class TenantView(APIView):
@tenant_user_api
def get(self, request, tenant_user, domain):
tenant = models.Tenant.objects.get(domain=domain)
serializer = serializers.TenantSerializer(tenant)
return Response(serializer.data, status=status.HTTP_200_OK)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
from PyQt4.QtGui import *
# Create a PyQt4 application object.
a = QApplication(sys.argv)
# The QWidget widget is the base class of all user interface objects in PyQt4.
w = QWidget()
# Set window size.
w.resize(320, 240)
# Set window title
w.setWindowTitle("Hello World!")
# Show window
w.show()
sys.exit(a.exec_())
|
"""Testing for Word ExtrAction for time SEries cLassification."""
import numpy as np
import pytest
import re
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2
from pyts.approximation import SymbolicFourierApproximation
from pyts.transformation import WEASEL
n_samples, n_timestamps, n_classes = 8, 200, 2
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_timestamps)
y = rng.randint(n_classes, size=n_samples)
@pytest.mark.parametrize(
'params, error, err_msg',
[({'word_size': "3"}, TypeError, "'word_size' must be an integer."),
({'window_sizes': {}}, TypeError, "'window_sizes' must be array-like."),
({'window_steps': "3"}, TypeError,
"'window_steps' must be None or array-like."),
({'chi2_threshold': "3"}, TypeError,
"'chi2_threshold' must be a float or an integer."),
({'word_size': 0}, ValueError, "'word_size' must be a positive integer."),
({'window_sizes': np.ones((2, 4))}, ValueError,
"'window_sizes' must be one-dimensional."),
({'window_sizes': ['a', 'b', 'c']}, ValueError,
"The elements of 'window_sizes' must be integers or floats."),
({'window_sizes': [0.5, 2.]}, ValueError,
"If the elements of 'window_sizes' are floats, they all must be greater "
"than 0 and lower than or equal to 1."),
({'window_sizes': [300]}, ValueError,
"All the elements in 'window_sizes' must be lower than or equal to "
"n_timestamps."),
({'word_size': 4, 'window_sizes': [4, 6], 'drop_sum': True}, ValueError,
"If 'drop_sum=True', 'word_size' must be lower than the minimum value "
"in 'window_sizes'."),
({'word_size': 5, 'window_sizes': [4, 6], 'drop_sum': False}, ValueError,
"If 'drop_sum=False', 'word_size' must be lower than or equal to the "
"minimum value in 'window_sizes'."),
({'window_steps': np.ones((2, 4))}, ValueError,
"'window_steps' must be one-dimensional."),
({'window_sizes': [8, 10], 'window_steps': [1, 2, 3]}, ValueError,
"If 'window_steps' is not None, it must have the same size as "
"'window_sizes'."),
({'window_sizes': [8, 10], 'window_steps': ['a', 'b']}, ValueError,
"If 'window_steps' is not None, the elements of 'window_steps' must be "
"integers or floats."),
({'window_sizes': [8, 10], 'window_steps': [0.5, 2.]}, ValueError,
"If the elements of 'window_steps' are floats, they all must be greater "
"than 0 and lower than or equal to 1."),
({'window_sizes': [8], 'window_steps': [0]}, ValueError,
"All the elements in 'window_steps' must be greater than or equal to 1 "
"and lower than or equal to n_timestamps."),
({'window_sizes': [8], 'window_steps': [250]}, ValueError,
"All the elements in 'window_steps' must be greater than or equal to 1 "
"and lower than or equal to n_timestamps."),
({'chi2_threshold': -1}, ValueError,
"'chi2_threshold' must be positive.")]
)
def test_parameter_check(params, error, err_msg):
"""Test parameter validation."""
weasel = WEASEL(**params)
with pytest.raises(error, match=re.escape(err_msg)):
weasel.fit(X, y)
@pytest.mark.parametrize(
'sparse, instance', [(True, csr_matrix), (False, np.ndarray)])
def test_sparse_dense(sparse, instance):
"""Test that the expected type is returned."""
weasel = WEASEL(strategy='quantile', sparse=sparse)
assert isinstance(weasel.fit(X, y).transform(X), instance)
assert isinstance(weasel.fit_transform(X, y), instance)
def test_accurate_results():
"""Test that the actual results are the expected ones."""
X_features = csr_matrix((n_samples, 0), dtype=np.int64)
vocabulary_ = {}
weasel = WEASEL(
word_size=4, n_bins=3, window_sizes=[5, 10],
window_steps=None, anova=True, drop_sum=True, norm_mean=True,
norm_std=True, strategy='entropy', chi2_threshold=2, alphabet=None
)
for window_size, n_windows in zip([5, 10], [40, 20]):
X_windowed = X.reshape(n_samples, n_windows, window_size)
X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)
sfa = SymbolicFourierApproximation(
n_coefs=4, drop_sum=True, anova=True, norm_mean=True,
norm_std=True, n_bins=3, strategy='entropy', alphabet=None
)
y_repeated = np.repeat(y, n_windows)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i])
for i in range((n_samples * n_windows))])
X_word = X_word.reshape(n_samples, n_windows)
X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])
vectorizer = CountVectorizer(ngram_range=(1, 2))
X_counts = vectorizer.fit_transform(X_bow)
chi2_statistics, _ = chi2(X_counts, y)
relevant_features = np.where(
chi2_statistics > 2)[0]
X_features = hstack([X_features, X_counts[:, relevant_features]])
old_length_vocab = len(vocabulary_)
vocabulary = {value: key
for (key, value) in vectorizer.vocabulary_.items()}
for i, idx in enumerate(relevant_features):
vocabulary_[i + old_length_vocab] = \
str(window_size) + " " + vocabulary[idx]
arr_desired = X_features.toarray()
    # Accurate results for fit_transform
arr_actual_1 = weasel.fit_transform(X, y).toarray()
np.testing.assert_allclose(arr_actual_1, arr_desired, atol=1e-5, rtol=0)
assert weasel.vocabulary_ == vocabulary_
    # Accurate results for fit followed by transform
arr_actual_2 = weasel.fit(X, y).transform(X).toarray()
np.testing.assert_allclose(arr_actual_2, arr_desired, atol=1e-5, rtol=0)
assert weasel.vocabulary_ == vocabulary_
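def test_basic_usage_sketch():
    """Illustrative usage sketch (not an exhaustive test)."""
    # Mirrors the parameters already exercised in test_sparse_dense and only
    # checks the sample axis of the output; the toy X, y defined above are reused.
    weasel = WEASEL(strategy='quantile')
    X_weasel = weasel.fit_transform(X, y)
    assert X_weasel.shape[0] == n_samples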
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\complex\single_sim_visitor_situation.py
# Compiled at: 2018-07-22 23:25:27
# Size of source mod 2**32: 5333 bytes
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import Tunable
from sims4.utils import classproperty
from situations.ambient.walkby_limiting_tags_mixin import WalkbyLimitingTagsMixin
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation import Situation
from situations.situation_complex import CommonInteractionCompletedSituationState, SituationState, SituationComplexCommon, TunableSituationJobAndRoleState, SituationStateData, CommonSituationState
from situations.situation_types import SituationCreationUIOption
import services, situations
class _HasFrontDoorArrivalState(CommonInteractionCompletedSituationState):
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner.visit_state())
def timer_expired(self):
self._change_state(self.owner.visit_state())
class _HasNoFrontDoorArrivalState(CommonInteractionCompletedSituationState):
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner.visit_state())
def timer_expired(self):
self._change_state(self.owner.visit_state())
class _VisitState(CommonInteractionCompletedSituationState):
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner.leave_state())
def timer_expired(self):
self._change_state(self.owner.leave_state())
class _LeaveState(CommonSituationState):
def timer_expired(self):
self.owner._self_destruct()
class SingleSimVisitorSituation(WalkbyLimitingTagsMixin, SituationComplexCommon):
INSTANCE_TUNABLES = {'visitor_job_and_role':TunableSituationJobAndRoleState(description='\n The job and role state for the visitor.\n '),
'has_front_door_arrival_state':_HasFrontDoorArrivalState.TunableFactory(description='\n The arrival state for the visitor if the lot has a front door.\n ',
display_name='1. Has Front Door Arrival State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
    'has_no_front_door_arrival_state':_HasNoFrontDoorArrivalState.TunableFactory(description='\n The arrival state for the visitor if the lot does not have a front door.\n ',
display_name='1. Has No Front Door Arrival State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
    'visit_state':_VisitState.TunableFactory(description="\n The state for the visitor to interact with the lot and its owner.\n ",
display_name='2. Visit State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'leave_state':_LeaveState.TunableFactory(description='\n The state for the visitor to leave the lot.\n ',
display_name='3. Leave State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP)}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
@classmethod
def _states(cls):
return (SituationStateData(1, _HasFrontDoorArrivalState, factory=(cls.has_front_door_arrival_state)),
SituationStateData(2, _HasNoFrontDoorArrivalState, factory=(cls.has_no_front_door_arrival_state)),
SituationStateData(3, _VisitState, factory=(cls.visit_state)),
SituationStateData(4, _LeaveState, factory=(cls.leave_state)))
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [
(cls.visitor_job_and_role.job,
cls.visitor_job_and_role.role_state)]
@classmethod
def default_job(cls):
pass
def start_situation(self):
super().start_situation()
if services.get_door_service().has_front_door():
self._change_state(self.has_front_door_arrival_state())
else:
self._change_state(self.has_no_front_door_arrival_state())
@classmethod
def get_sims_expected_to_be_in_situation(cls):
return 1
@property
def _should_cancel_leave_interaction_on_premature_removal(self):
return True
@classproperty
def situation_serialization_option(cls):
return situations.situation_types.SituationSerializationOption.OPEN_STREETS
lock_instance_tunables(SingleSimVisitorSituation, exclusivity=(BouncerExclusivityCategory.WALKBY),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE))
|
from django.db import models
# Create your models here.
class itemlost(models.Model):
product_title = models.CharField(max_length=100)
place = models.TextField(default='Lost this item near ..')
date = models.DateField()
time=models.TimeField()
description = models.TextField()
contactme = models.CharField(max_length=150,default='email')
username=models.CharField(max_length=100,blank=True,default='NULL')
def __str__(self):
return f'{self.username} lost {self.product_title}'
class itemfound(models.Model):
product_title = models.CharField(max_length=100)
place = models.TextField(default='Found this item near ..')
date = models.DateField()
time=models.TimeField()
description = models.TextField()
contactme = models.CharField(max_length=150,default='email')
username=models.CharField(max_length=100,blank=True,default='NULL')
def __str__(self):
return f'{self.username} found {self.product_title}'
'''class RegistrationData(models.Model):
username=models.CharField(max_length=100)
password=models.CharField(max_length=100)
rollnumber=models.CharField(max_length=100)
email=models.CharField(max_length=100)
def __str__(self):
return self.username'''
|
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer, DateTimeField
from modules.kanban.models.pipe_line import PipeLine
class PipeLineSerializer(ModelSerializer):
updated_at = DateTimeField(format="%Y-%m-%d %H:%M:%S", required=False, read_only=True)
card_set = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='card-detail'
)
class Meta:
model = PipeLine
fields = ('board', 'name', 'updated_at', 'card_set')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: v1.0
@author: Evan
@time: 2019/12/11 9:33
"""
class User(object):
def __init__(self, account, password):
self.account = account
self.password = password
|
#!/usr/bin/env python3
s = input()
# The string matches its reverse if, at every position, the characters are equal
# or at least one of them is the '*' wildcard.
print("YES" if all(a == b or "*" in (a, b) for a, b in zip(s, s[::-1])) else "NO")
|
import random, math, os, pylab
output_dir = 'direct_disks_box_movie'
def direct_disks_box(N, sigma):
    """Rejection sampling: draw N non-overlapping disks of radius sigma inside the unit box."""
    condition = False
    while not condition:
L = [(random.uniform(sigma, 1.0 - sigma), random.uniform(sigma, 1.0 - sigma))]
for k in range(1, N):
a = (random.uniform(sigma, 1.0 - sigma), random.uniform(sigma, 1.0 - sigma))
min_dist = min(math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) for b in L)
if min_dist < 2.0 * sigma:
condition = False
break
else:
L.append(a)
condition = True
return L
img = 0
if not os.path.exists(output_dir): os.makedirs(output_dir)
def snapshot(pos, colors):
global img
pylab.subplots_adjust(left=0.10, right=0.90, top=0.90, bottom=0.10)
pylab.gcf().set_size_inches(6, 6)
pylab.axis([0, 1, 0, 1])
pylab.setp(pylab.gca(), xticks=[0, 1], yticks=[0, 1])
for (x, y), c in zip(pos, colors):
circle = pylab.Circle((x, y), radius=sigma, fc=c)
pylab.gca().add_patch(circle)
pylab.savefig(os.path.join(output_dir, '%d.png' % img), transparent=True)
pylab.close()
img += 1
N = 4
colors = ['r', 'b', 'g', 'orange']
sigma = 0.2
n_runs = 8
for run in range(n_runs):
pos = direct_disks_box(N, sigma)
snapshot(pos, colors)
|
# -*- coding: utf-8 -*-
import re
import bisect
import datetime
import numpy as np
import pandas as pd
from collections import OrderedDict, defaultdict
from copy import copy
from utils.adjustment import *
from utils.datetime import (
get_end_date,
get_previous_trading_date,
)
from . asset_service import AssetType, AssetService
from .. instrument.universe_service import UniverseService, Universe
from .. core.enums import SecuritiesType
from .. data.database_api import (
load_futures_daily_data,
load_futures_minute_data,
load_futures_rt_minute_data
)
from .. const import (
CONTINUOUS_FUTURES_PATTERN,
FUTURES_DAILY_FIELDS,
FUTURES_MINUTE_FIELDS,
TRADE_ESSENTIAL_MINUTE_BAR_FIELDS,
REAL_TIME_MINUTE_BAR_FIELDS,
MULTI_FREQ_PATTERN
)
CURRENT_BAR_FIELDS_MAP = {
'barTime': 'barTime',
'closePrice': 'closePrice',
'highPrice': 'highPrice',
'lowPrice': 'lowPrice',
'openPrice': 'openPrice',
'totalValue': 'turnoverValue',
'totalVolume': 'turnoverVol'
}
def _ast_stylish(raw_data_dict, symbols, time_bars, fields, style, rtype='frame'):
"""
    Convert raw_data_dict (a dict in 'ast' layout) into the requested style and rtype.
    Args:
        raw_data_dict(dict of ndarray, 'ast' style): raw data to convert, typically the data returned by MarketData.slice
        symbols(list of str): symbols contained in raw_data_dict, matching the ndarray columns
        time_bars(list of str): time bars contained in raw_data_dict, matching the ndarray index
        fields(list of str): attributes contained in raw_data_dict, matching its keys
        style('tas'/'ast'/'sat'): target layout to convert to
        rtype('frame'/'array'): target container type, DataFrame or dict of array
    Returns:
        dict or frame: the converted data in the requested style and rtype
"""
if style == 'ast':
history_data = {}
for attribute in fields:
if rtype == 'frame':
history_data[attribute] = pd.DataFrame(data=raw_data_dict[attribute], columns=symbols, index=time_bars)
if rtype == 'array':
history_data[attribute] = {s: raw_data_dict[attribute][:, i] for (i, s) in enumerate(symbols)}
history_data['time'] = {s: np.array(time_bars) for s in symbols}
elif style == 'sat':
history_data = {}
for idx, symbol in enumerate(symbols):
if rtype == 'frame':
history_data[symbol] = pd.DataFrame(data={a: raw_data_dict[a][:, idx] for a in fields}, index=time_bars, columns=fields)
if rtype == 'array':
history_data[symbol] = {a: raw_data_dict[a][:, idx] for a in fields}
history_data[symbol]['time'] = np.array(time_bars)
elif style == 'tas':
history_data = {}
for idx, trade_date in enumerate(time_bars):
if rtype == 'frame':
history_data[trade_date] = pd.DataFrame(data={a: raw_data_dict[a][idx, :] for a in fields},
index=symbols, columns=fields)
if rtype == 'array':
history_data[trade_date] = {a: raw_data_dict[a][idx, :] for a in fields}
history_data[trade_date]['symbol'] = np.array(symbols)
else:
        raise ValueError('Exception in "MarketService._ast_stylish": '
                         'history style \'%s\' is not supported here, please refer to document for details' % style)
return history_data
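def _demo_ast_stylish():
    # Never-called usage sketch for _ast_stylish (illustrative only). The symbols,
    # dates and prices are made-up placeholders; the snippet assumes the same
    # legacy Python/pandas environment as the rest of this module.
    raw = {'closePrice': np.array([[10.0, 20.0], [11.0, 21.0]])}
    frames = _ast_stylish(raw, symbols=['AAAA', 'BBBB'],
                          time_bars=['2016-01-04', '2016-01-05'],
                          fields=['closePrice'], style='sat', rtype='frame')
    # frames['AAAA'] is a DataFrame indexed by time bar with a 'closePrice' column.
    return frames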
def _ast_slice(raw_data_dict, symbols, end_time_str, fields, start_time_str=None, check_attribute='closePrice',
time_range=None, cached_trading_days=None, **options):
"""
    Slice raw_data_dict, a dict in 'ast' layout; one of time_range and start_time_str must be provided.
    Args:
        raw_data_dict(dict of DataFrame, ast style): raw data to slice
        symbols(list of str): symbols to slice out
        end_time_str(date formatted str): end time of the slice, 'YYYYmmdd' for daily data, 'YYYYmmdd HH:MM' for minute data
        fields(list of str): fields to slice out
        start_time_str(date formatted str or None): start time of the slice, 'YYYYmmdd' for daily data, 'YYYYmmdd HH:MM' for minute data; either this or time_range must be set
        check_attribute(str): field used to check whether raw_data_dict is empty; if this field is missing, the data is treated as empty
        time_range(int or None): number of bars to slice backwards from end_time_str
"""
time_index = raw_data_dict[check_attribute].index
end_time_str = end_time_str if end_time_str in time_index \
else cached_trading_days[min(bisect.bisect_right(cached_trading_days, end_time_str), len(cached_trading_days) - 1)]
end_time_str = min(end_time_str, time_index[-1])
end_pos = time_index.get_loc(end_time_str) + 1 if end_time_str in time_index else 0
if time_range is None:
start_pos = time_index.get_loc(start_time_str) if start_time_str in time_index \
else bisect.bisect_left(time_index, start_time_str)
else:
start_pos = max(end_pos - time_range, 0)
time_bars = time_index.tolist()[start_pos:end_pos]
result = {}
for attribute in fields:
df_matrix = raw_data_dict[attribute].as_matrix()
symbol_idxs = [raw_data_dict[attribute].columns.get_loc(s) for s in symbols]
result[attribute] = df_matrix[start_pos: end_pos, symbol_idxs]
return result, time_bars
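def _demo_ast_slice():
    # Never-called usage sketch for _ast_slice (illustrative only). The dates and
    # symbol are made-up placeholders; _ast_slice relies on DataFrame.as_matrix,
    # so the legacy pandas version assumed by this module is required.
    index = ['2016-01-04', '2016-01-05', '2016-01-06']
    raw = {'closePrice': pd.DataFrame([[1.0], [2.0], [3.0]], index=index, columns=['AAAA'])}
    data, time_bars = _ast_slice(raw, ['AAAA'], end_time_str='2016-01-06',
                                 fields=['closePrice'], time_range=2,
                                 cached_trading_days=index)
    # data['closePrice'] is a 2x1 ndarray; time_bars is ['2016-01-05', '2016-01-06'].
    return data, time_bars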
def _rolling_load_data(data, trading_days, universe, max_cache_days, data_load_func, fields):
"""
    Load market data for trading_days; days already present in data are not reloaded.
    Args:
        data(dict of dict): existing data; trading days already contained in it are not reloaded
        trading_days(list of datetime): trading days to load on a rolling basis
        universe(list of str): universe to load; must be consistent with the universe in data
        max_cache_days(int or None): number of existing days in data to keep; None keeps all existing data
        data_load_func(func: universe, trading_days, fields => dict of dict): data loading function
        fields(list of str): data fields to load
    Returns:
        dict of DataFrame, ast style: the data after the rolling load
"""
data = copy(data)
if len(data) == 0:
trading_days_in_loaded = []
else:
trading_days_in_loaded = [datetime.datetime.strptime(t, '%Y-%m-%d') for t in data.values()[0].index]
target_days = sorted(set(trading_days_in_loaded) | set(trading_days))
if max_cache_days is not None:
target_days = target_days[-max_cache_days:]
to_reserve_days = sorted(set(trading_days_in_loaded) & set(target_days))
to_load_trading_days = sorted(set(target_days) - set(to_reserve_days))
if len(to_load_trading_days) == 0:
return data
new_data = data_load_func(universe, to_load_trading_days, fields)
if len(data) == 0 or len(to_reserve_days) == 0:
for var in fields:
if var in new_data:
data[var] = new_data[var].sort_index()
else:
to_reserve_tdays = [t.strftime('%Y-%m-%d') for t in to_reserve_days]
for var in fields:
if var in new_data and isinstance(new_data[var], pd.DataFrame):
data[var] = pd.concat([data[var].loc[to_reserve_tdays], new_data[var]], axis=0).sort_index()
else:
data[var] = data[var].loc[to_reserve_tdays].sort_index()
return data
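def _demo_rolling_load_data():
    # Never-called usage sketch for _rolling_load_data (illustrative only). The
    # loader below is a stand-in stub rather than a real database call; it returns
    # one 'ast' style DataFrame per requested field.
    def _fake_loader(universe, trading_days, fields):
        index = [td.strftime('%Y-%m-%d') for td in trading_days]
        return {f: pd.DataFrame(1.0, index=index, columns=universe) for f in fields}
    days = [datetime.datetime(2016, 1, 4), datetime.datetime(2016, 1, 5)]
    return _rolling_load_data({}, days, ['AAAA'], None, _fake_loader, ['closePrice'])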
def _uncompress_minute_bars(minute_bars, columns, index_field, index_series=None):
"""
    Expand compressed minute bar data; the minute bars must be alignable in time.
"""
result_dict = {}
for column in columns:
result_dict[column] = np.concatenate(minute_bars[column].as_matrix())
result_df = pd.DataFrame(result_dict)
if index_series is not None:
result_df[index_field] = pd.Series(index_series, name=index_field)
return result_df.set_index(index_field)
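def _demo_uncompress_minute_bars():
    # Never-called usage sketch for _uncompress_minute_bars (illustrative only):
    # two clearing days, each holding two compressed minute bars per field.
    compressed = pd.DataFrame({
        'closePrice': [np.array([1.0, 2.0]), np.array([3.0, 4.0])],
        'barTime': [np.array(['09:31', '09:32']), np.array(['09:31', '09:32'])],
    }, index=['2016-01-04', '2016-01-05'])
    trade_times = ['2016-01-04 09:31', '2016-01-04 09:32',
                   '2016-01-05 09:31', '2016-01-05 09:32']
    # Returns a flat DataFrame indexed by the provided tradeTime series.
    return _uncompress_minute_bars(compressed, ['closePrice', 'barTime'],
                                   'tradeTime', index_series=trade_times)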
def _concat_data(data_list, rtype='frame', axis=0):
"""
    Common helper for concatenating dict or DataFrame data.
    Args:
        data_list(list of dict of dict): data to concatenate
        rtype(dict or frame): type of the raw data
        axis(0 or 1): 0 concatenates along rows, 1 along columns
    Returns:
        dict of dict or dict of DataFrame
"""
data_list = [d for d in data_list if d is not None]
if rtype == 'frame':
return pd.concat(data_list, axis=axis)
elif rtype == 'array':
result = {}
if axis == 1:
for data in data_list:
result.update(data)
if axis == 0:
for data in data_list:
for key, value in data.iteritems():
result.setdefault(key, [])
result[key].append(value)
for key, value in result.iteritems():
# if key != 'symbol':
# result[key] = np.concatenate(value)
result[key] = np.concatenate(value)
return result
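def _demo_concat_data():
    # Never-called usage sketch for _concat_data (illustrative only): row-wise
    # concatenation of two 'array' style dicts (the rtype='array' branch relies on
    # dict.iteritems, i.e. the Python 2 environment assumed by this module).
    left = {'closePrice': np.array([1.0, 2.0])}
    right = {'closePrice': np.array([3.0])}
    merged = _concat_data([left, right], rtype='array', axis=0)
    # merged['closePrice'] is array([1., 2., 3.])
    return merged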
def _append_data(raw_data, sliced_data, style, rtype='frame'):
"""
    Combine data that has already been sliced and stylished.
    Args:
        raw_data(dict of DataFrame or dict of dict): existing data, in the layout implied by style and rtype
        sliced_data(dict of DataFrame or dict of dict): new data to append; its layout must match raw_data
        style(ast or sat or tas): layout of the data being combined
        rtype(frame or dict): container type of the data being combined
    Returns:
        dict of dict or dict of DataFrame
"""
result = {}
if style == 'ast':
for attribute in set(raw_data.keys()) | set(sliced_data.keys()):
a_data = _concat_data([raw_data.get(attribute, None), sliced_data.get(attribute, None)], axis=1, rtype=rtype)
result[attribute] = a_data
if style == 'sat':
result.update(raw_data)
result.update(sliced_data)
if style == 'tas':
for tdays in set(raw_data.keys()) | set(sliced_data.keys()):
t_data = _concat_data([raw_data.get(tdays), sliced_data.get(tdays)], axis=0, rtype=rtype)
result[tdays] = t_data
return result
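def _demo_append_data():
    # Never-called usage sketch for _append_data (illustrative only): merge two
    # 'sat' style frame dicts that cover different symbols.
    raw = {'AAAA': pd.DataFrame({'closePrice': [1.0, 2.0]},
                                index=['2016-01-04', '2016-01-05'])}
    sliced = {'BBBB': pd.DataFrame({'closePrice': [3.0, 4.0]},
                                   index=['2016-01-04', '2016-01-05'])}
    # The 'sat' branch simply merges the two dicts keyed by symbol.
    return _append_data(raw, sliced, style='sat', rtype='frame')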
def _load_current_bars_by_now(current_trading_day, universe, fields, load_func):
"""
    Load the minute bars generated so far on the current trading day and compress them into 'ast' arrays.
    Args:
        current_trading_day(datetime.datetime): current trading day of the simulated trading session
        universe(list or set): equity symbol list or set
        fields(list of string): attribute list
        load_func(func): loader for the bars already aggregated on the current trading day
    Returns(dict):
        attribute: df
"""
sta_data = load_func(current_trading_day, universe)
a_df_dict = {}
current_str = current_trading_day.strftime('%Y-%m-%d')
    # TODO: open interest minute bar data is not loaded for futures yet
for field in REAL_TIME_MINUTE_BAR_FIELDS:
s_array_dict = {}
default_array = np.array([], dtype='S') if field in ['barTime', 'tradeTime'] else np.array([])
for symbol in universe:
s_array_dict.update(
{symbol: np.array([e[field] for e in sta_data[symbol]] if symbol in sta_data else default_array)})
a_df_dict.update({
CURRENT_BAR_FIELDS_MAP[field]: pd.DataFrame.from_dict({current_str: s_array_dict}, orient='index')})
a_df_dict['barTime'] = a_df_dict['barTime'].applymap(lambda x: np.char.encode(x))
def _map_to_date(bar_time, prev_trading_day, prev_next_date, curr_trading_day):
"""
        Map bar_time to the date it belongs to and return the concatenated '<date> <bar_time>' string.
"""
if bar_time.startswith('2'):
date = prev_trading_day
elif bar_time[:2] < '09':
date = prev_next_date
else:
date = curr_trading_day
return date + bar_time
vmap_date = np.vectorize(_map_to_date, otypes=[basestring])
if 'tradeTime' in fields or 'tradeDate' in fields:
prev_trading_day = get_previous_trading_date(current_trading_day)
prev_trading_str = prev_trading_day.strftime('%Y-%m-%d ')
prev_next_day = prev_trading_day + datetime.timedelta(days=1)
prev_next_str = prev_next_day.strftime('%Y-%m-%d ')
curr_trading_str = current_trading_day.strftime('%Y-%m-%d ')
trade_time_df = a_df_dict['barTime'].applymap(lambda x:
vmap_date(x, prev_trading_str, prev_next_str, curr_trading_str))
if 'tradeTime' in fields:
a_df_dict['tradeTime'] = trade_time_df
if 'tradeDate' in fields:
a_df_dict['tradeDate'] = trade_time_df.applymap(lambda x:
np.core.defchararray.rjust(x.astype(str), width=10))
if 'clearingDate' in fields:
a_df_dict['clearingDate'] = a_df_dict['barTime'].applymap(lambda x:
np.full_like(x, fill_value=current_str))
return a_df_dict
class MarketService(object):
"""
    Market data service.
    * asset_service: AssetService
    * market_data_list: collection of MarketData objects, one per AssetType
    * minute_bar_map: bar times of each trading day when minute bars are loaded
    * universe_service: UniverseService
"""
def __init__(self):
self.stock_market_data = None
self.futures_market_data = None
self.fund_market_data = None
self.otc_fund_market_data = None
self.index_market_data = None
self.market_data_list = list()
self.asset_service = None
self.universe_service = None
self.minute_bar_map = dict()
self.calendar_service = None
self.daily_bars_loaded_days = None
self.minute_bars_loaded_days = None
self._available_daily_fields = None
self._available_minute_fields = None
def batch_load_data(self, universe=None,
calendar_service=None,
universe_service=None,
asset_service=None,
**kwargs):
"""
Batch load market data.
Args:
universe(list of universe): universe list
calendar_service(obj): calendar service
universe_service(obj): universe service
asset_service(obj): asset service
**kwargs: key-value parameters
Returns:
MarketService(obj): market service
"""
self.create_with(universe,
market_service=self,
universe_service=universe_service,
asset_service=asset_service,
calendar_service=calendar_service)
self.rolling_load_daily_data(calendar_service.all_trading_days)
return self
def subset(self, *args, **kwargs):
"""
Subset the market service
"""
return self
@classmethod
def create_with_service(cls,
asset_service=None,
universe_service=None,
calendar_service=None,
market_service=None):
"""
        Create a MarketService instance; market_data_list holds one MarketData per asset type in asset_service.
Args:
asset_service: AssetService
universe_service: UniverseService
calendar_service: CalendarService
market_service: MarketService
Returns:
MarketService
"""
market_service = market_service or cls()
market_service.asset_service = asset_service
market_service.universe_service = universe_service
market_service.calendar_service = calendar_service
futures_universe = asset_service.filter_symbols(asset_type=AssetType.FUTURES,
symbols=universe_service.full_universe)
if len(futures_universe) > 0:
market_service.futures_market_data = FuturesMarketData(futures_universe)
market_service.market_data_list.append(market_service.futures_market_data)
return market_service
@classmethod
def create_with(cls,
universe='A',
market_service=None,
asset_service=None,
universe_service=None,
calendar_service=None):
"""
        Create a MarketService from a universe.
        Args:
            universe(list of str or str): universe to be covered by the MarketService
market_service(obj): market service
asset_service(obj): asset service
universe_service(obj): universe service
calendar_service(obj): calendar service
Returns:
MarketService(obj): market service
"""
prev_trading_day = get_previous_trading_date(get_end_date().today())
if universe_service is None:
if isinstance(universe, Universe):
pass
elif isinstance(universe, list):
universe = Universe(*universe)
else:
universe = Universe(universe)
universe_service = UniverseService(universe, [prev_trading_day])
asset_service = asset_service or AssetService.from_symbols(universe_service.full_universe)
return cls.create_with_service(asset_service=asset_service,
universe_service=universe_service,
market_service=market_service,
calendar_service=calendar_service)
def slice(self, symbols, fields, end_date, freq='d', time_range=1, style='ast', rtype='frame',
f_adj=None, prepare_dates=None, **options):
"""
        Slice each MarketData in market_data_list in turn.
        Args:
            symbols(list of symbol): specific symbols from the universe to slice
            fields(list of str): fields to select from the ***_bars market data
            end_date: end date of the slice
            freq: 'd' or 'm'
            time_range(int): number of trading days before end_date to include
            style: 'ast', 'sat' or 'tas', default 'ast'
            rtype: default 'frame' (dict of DataFrame) or 'array' (dict of array)
            f_adj(string): futures adjustment type
            prepare_dates(list of datetime): dates whose minute bars must already be loaded and expanded for the slice
        Returns:
            dict of dict: layout depends on the style and rtype arguments
"""
result = {}
if symbols == 'all':
symbols = list(self.universe_service.full_universe)
symbols = symbols if isinstance(symbols, list) else [symbols]
for market_data in self.market_data_list:
selected_universe = self.asset_service.filter_symbols(asset_type=market_data.asset_type, symbols=symbols)
if len(selected_universe) != 0:
result = _append_data(result,
market_data.slice(selected_universe, fields, end_date, freq=freq,
style=style, time_range=time_range, rtype=rtype,
f_adj=f_adj, prepare_dates=prepare_dates,
cached_trading_days=self.calendar_service.cache_all_trading_days,
**options),
style, rtype=rtype)
return result
def batch_load_daily_data(self, trading_days):
"""
        Batch load daily bar data.
        Args:
            trading_days(list of datetime): trading days for which daily bars are loaded
"""
self.rolling_load_daily_data(trading_days)
def rolling_load_daily_data(self, trading_days, max_cache_days=None):
"""
        Load daily bars for each MarketData in market_data_list in turn.
        Args:
            trading_days(list of datetime): trading days passed in by the backtest, already including max_window_history
            max_cache_days(int): maximum number of days of daily_bars to keep loaded in market_data_list
"""
for market_data in self.market_data_list:
if market_data is not None:
market_data.rolling_load_daily_data(trading_days, max_cache_days, asset_service=self.asset_service)
self.daily_bars_loaded_days = market_data.daily_bars_loaded_days or self.daily_bars_loaded_days
def rolling_load_minute_data(self, trading_days, max_cache_days=5):
"""
        Load minute bars for each MarketData in market_data_list in turn.
        Args:
            trading_days(list of datetime.datetime): trading days for which minute bars are loaded
            max_cache_days(int): maximum number of days of minute bars to keep
"""
for market_data in self.market_data_list:
if market_data is not None:
market_data.rolling_load_minute_data(trading_days, max_cache_days)
self.minute_bars_loaded_days = market_data.minute_bars_loaded_days or self.minute_bars_loaded_days
def available_fields(self, freq='d'):
"""
        Return the attributes available for daily or minute market data.
        Args:
            freq('d' or 'm'): default 'd'
Returns:
list of str
"""
if freq == 'd':
if self._available_daily_fields is None:
self._available_daily_fields = set()
for market_data in self.market_data_list:
self._available_daily_fields |= set(market_data.daily_fields)
return self._available_daily_fields
elif freq == 'm':
if self._available_minute_fields is None:
self._available_minute_fields = set()
for market_data in self.market_data_list:
self._available_minute_fields |= set(market_data.minute_fields)
return self._available_minute_fields
def get_market_data(self, account_type):
"""
Get market data by account type.
Args:
account_type(string): account type
"""
if account_type == SecuritiesType.futures:
return self.futures_market_data
def load_current_trading_day_bars(self, current_trading_day, debug=False):
"""
Load current trading day bars
Args:
current_trading_day(datetime.datetime): current trading day
debug(boolean): debug or not
"""
normalized_today_minutes = set()
for market_data in self.market_data_list:
if market_data is not None:
market_bar_time = market_data.load_current_trading_day_bars(current_trading_day, debug=debug)
normalized_today_minutes |= market_bar_time
self.minute_bars_loaded_days = market_data.minute_bars_loaded_days or self.minute_bars_loaded_days
today_minutes = sorted(normalized_today_minutes)
if '21:00' in today_minutes:
begin_index = today_minutes.index('21:00')
today_minutes = today_minutes[begin_index:] + today_minutes[:begin_index]
current_minute_bar_map = {current_trading_day.strftime('%Y-%m-%d'): today_minutes}
self.minute_bar_map.update(current_minute_bar_map)
def back_fill_rt_bar_times(self, date, bar_time_list):
"""
        Backfill real-time minute bars in a simulated trading session.
Args:
date(datetime.datetime): date
bar_time_list(list): list of barTime
"""
date_string = date.strftime('%Y-%m-%d')
if date_string not in self.minute_bar_map:
return
if bar_time_list:
self.minute_bar_map[date_string].extend(bar_time_list)
def prepare_daily_cache(self, symbols, end_date, time_range, fields=TRADE_ESSENTIAL_MINUTE_BAR_FIELDS):
"""
        Prepare daily bar cache data.
Args:
symbols(list): symbol list
end_date(string): end date
time_range(int): time range
fields(list): field list
"""
daily_cache_data = {e: {} for e in ['tas', 'sat', 'ast']}
for market_data in self.market_data_list:
selected_universe = self.asset_service.filter_symbols(asset_type=market_data.asset_type, symbols=symbols)
selected_universe = list(selected_universe)
if not len(selected_universe):
continue
ast_array, time_bars = market_data.slice(selected_universe, fields, end_date, freq='d',
time_range=time_range, rtype='array',
cached_trading_days=self.calendar_service.cache_all_trading_days,
no_stylish=True)
adj_data_dict, time_bars = \
market_data.adjust(ast_array, selected_universe, time_bars, f_adj=None, s_adj='pre_adj', freq='d')
for k, cache_item in daily_cache_data.iteritems():
raw_data_dict = ast_array if k == 'tas' else adj_data_dict
daily_cache_data[k] = \
_append_data(cache_item, _ast_stylish(raw_data_dict, selected_universe, time_bars,
fields, k, rtype='array'), k, rtype='array')
return daily_cache_data
def prepare_minute_cache(self, symbols, end_date, time_range, fields=TRADE_ESSENTIAL_MINUTE_BAR_FIELDS):
"""
        Prepare minute bar cache data.
Args:
symbols(list): symbol list
end_date(string): end date
time_range(int): time range
fields(list): field list
"""
# minute_cache_data = {e: {} for e in ['tas', 'sat', 'ast']}
minute_cache_data = {e: {} for e in ['tas', 'sat']}
for market_data in self.market_data_list:
selected_universe = self.asset_service.filter_symbols(asset_type=market_data.asset_type, symbols=symbols)
selected_universe = list(selected_universe)
selected_universe = market_data._valid_symbols(selected_universe)
if not len(selected_universe):
continue
ast_array, time_bars = market_data.slice(selected_universe, fields, end_date, freq='m',
time_range=time_range, rtype='array',
cached_trading_days=self.calendar_service.cache_all_trading_days,
no_stylish=True)
adj_data_dict, time_bars = \
market_data.adjust(ast_array, selected_universe, time_bars, f_adj=None, s_adj='pre_adj', freq='m')
for k, cache_item in minute_cache_data.iteritems():
raw_data_dict = ast_array if k == 'tas' else adj_data_dict
minute_cache_data[k] = \
_append_data(cache_item, _ast_stylish(raw_data_dict, selected_universe, time_bars,
fields, k, rtype='array'), k, rtype='array')
return minute_cache_data
class MarketData(object):
"""
    Wrapper class for market data content.
    Attributes:
        * daily_bars: daily bar data, by default a dict of DataFrame in 'ast' layout
        * minute_bars: minute bar data, by default a dict of dict of ndarray in 'ast' layout
"""
def __init__(self, universe, daily_fields, minute_fields, daily_bars_loader, minute_bars_loader,
daily_bars_check_field='closePrice', minute_bars_check_field='closePrice', asset_type=None):
self.universe = universe
self.asset_type = asset_type
self.factor_bars = dict()
self.daily_bars = dict()
self.daily_fields = daily_fields
self.dividends = None
self.allot = None
self._daily_bar_check_field = daily_bars_check_field
self._daily_bars_loader = daily_bars_loader
self._daily_bars_loaded_days = list()
self.minute_bars = dict()
self.minute_fields = minute_fields
self._minute_bars_check_field = minute_bars_check_field
self._minute_bars_loader = minute_bars_loader
self._minute_bars_expanded = dict()
self._minute_bars_loaded_days = list()
self._load_multi_freq_data = None
@property
def daily_bars_loaded_days(self):
"""
Daily bars loaded days.
"""
return self._daily_bars_loaded_days
@property
def minute_bars_loaded_days(self):
"""
Minute bars loaded days.
"""
return self._minute_bars_loaded_days
def rolling_load_daily_data(self, trading_days, max_cache_days=None, asset_service=None):
"""
        Daily bar loading method of MarketData.
        Args:
            trading_days(list of datetime): trading days to load, already including max_window_history in a backtest
            max_cache_days(int): maximum number of trading days kept in daily_bars; by default all trading days are loaded
            asset_service(AssetService): asset service
"""
self.daily_bars = _rolling_load_data(self.daily_bars, trading_days, self.universe, max_cache_days,
self._daily_bars_loader, self.daily_fields)
self._daily_bars_loaded_days = [datetime.datetime.strptime(td, '%Y-%m-%d')
for td in self.daily_bars[self._daily_bar_check_field].index]
self._load_dividends(trading_days)
self._load_allots(trading_days)
def rolling_load_minute_data(self, trading_days, max_cache_days):
"""
        Rolling load of minute bar data for MarketData; if cache_minute_bars is set, the minute bars are expanded.
        Args:
            trading_days(list of datetime): trading days whose minute bars are to be loaded
            max_cache_days: maximum number of trading days kept in minute_bars
        Returns:
            dict of DataFrame ('ast' layout): minute bar data after the incremental load
"""
if self._daily_bar_check_field not in self.daily_bars:
raise AttributeError('Exception in "MarketData.rolling_load_minute_data": '
'daily data must be loaded before rolling load minute data')
if not set(trading_days) <= set(self._daily_bars_loaded_days):
raise AttributeError('Exception in "MarketData.rolling_load_minute_data": '
'minute increment load data must be in scope of daily trading data')
self.minute_bars = _rolling_load_data(self.minute_bars, trading_days, self.universe, max_cache_days,
self._minute_bars_loader, self.minute_fields)
self._minute_bars_loaded_days = [datetime.datetime.strptime(td, '%Y-%m-%d')
for td in self.minute_bars[self._minute_bars_check_field].index]
return self.minute_bars
def load_current_trading_day_bars(self, current_trading_day, debug=False):
"""
        Load the minute bars already aggregated for the current trading day.
        Args:
            current_trading_day(datetime.datetime): trading day
            debug(boolean): debug or not
        Returns(set):
            minute bar times of this MarketData
"""
if not debug:
current_bars = _load_current_bars_by_now(current_trading_day, self.universe,
fields=self.minute_fields,
load_func=self._current_trading_day_bars_loader)
else:
# using default empty values
current_bars = dict()
for field in self.minute_fields:
default_array = np.array([], dtype='S') if field in ['barTime', 'tradeTime'] else np.array([])
values = {symbol: default_array for symbol in self.universe}
frame = pd.DataFrame.from_dict({current_trading_day.strftime('%Y-%m-%d'): values}, orient='index')
current_bars[field] = frame
for field in current_bars:
self.minute_bars[field] = self.minute_bars[field].append(current_bars[field])
self._minute_bars_loaded_days = [datetime.datetime.strptime(td, '%Y-%m-%d')
for td in self.minute_bars[self._minute_bars_check_field].index]
normalized_bar_time = set()
if self.asset_type == AssetType.FUTURES:
for bar_array in current_bars['barTime'].loc[current_trading_day.strftime('%Y-%m-%d')].tolist():
normalized_bar_time |= set(bar_array)
else:
universal_last_minute = max(current_bars['barTime'].loc[current_trading_day.strftime('%Y-%m-%d')].apply(
lambda x: x[-1] if x.size > 0 else ''))
if universal_last_minute:
raise NotImplementedError
return normalized_bar_time
def adjust(self, raw_data_dict, symbols, time_bars, **kwargs):
"""
Adjust market prices
Args:
raw_data_dict(dict): original data
symbols(list): symbol list
time_bars(list): time_bar list
**kwargs: key-value parameters
"""
return raw_data_dict, time_bars
def slice(self, symbols, fields, end_date=None, freq='d', style='ast', time_range=1, rtype='frame',
f_adj=None, s_adj=None, cached_trading_days=None, prepare_dates=None, **options):
"""
        Filter the market data panel.
        Args:
            symbols(list of symbol): symbols to filter
            fields(list of str): fields to filter
            end_date(datetime.datetime): end time of the requested market data
            freq('d' or 'm'): frequency of the requested data, 'd' for daily bars, 'm' for minute bars
            style('ast', 'sat' or 'tas'): hierarchy of the returned panel (field->column->index), where 'a' is attribute, 's' is symbol and 't' is time; default 'ast'
            time_range(int): slice the time_range trading days before end_date
            rtype('frame' or 'array'): format of the returned panel, 'frame' for dict of DataFrame, 'array' for dict of array
            f_adj(string): futures adj type
            s_adj(string): stock adj type
            cached_trading_days(list of str time): cache of all trading days
            prepare_dates(list of datetime): dates whose minute bars must already be loaded and expanded for the slice
        Returns:
            dict: layout depends on the style and rtype arguments
"""
end_time_str = end_date.strftime('%Y-%m-%d')
self._check_time_range(end_date, freq)
symbols = self._valid_symbols(symbols)
fields = self._valid_fields(fields, freq)
check_attribute = self._daily_bar_check_field if freq == 'd' else self._minute_bars_check_field
if freq == 'd':
data = self.daily_bars
elif freq == 'm':
data = self.minute_bars
else:
raise AttributeError('Exception in "MarketData.slice": unknown data slice query')
raw_data_dict, time_bars = _ast_slice(data, symbols, end_time_str=end_time_str, fields=fields,
check_attribute=check_attribute, time_range=time_range,
cached_trading_days=cached_trading_days, **options)
if options.get('no_stylish'):
return raw_data_dict, time_bars
adj_data_dict, time_bars = self.adjust(raw_data_dict, symbols, time_bars, f_adj=f_adj, s_adj=s_adj, freq=freq)
return _ast_stylish(adj_data_dict, symbols, time_bars, fields, style, rtype=rtype)
def _valid_symbols(self, symbols):
"""
        Helper for slice: keep only symbols present in the loaded data.
"""
valid_symbols = self.daily_bars[self._daily_bar_check_field].columns
return [symbol for symbol in symbols if symbol in valid_symbols]
def _valid_fields(self, fields, freq='d'):
"""
        Helper for slice: keep only fields available for the given frequency.
"""
fields = fields if isinstance(fields, list) else [fields]
if freq == 'd':
return list(set(fields) & set(self.daily_fields))
elif freq == 'm' or MULTI_FREQ_PATTERN.match(freq):
return list(set(fields) & set(self.minute_fields))
def _load_dividends(self, *args, **kwargs):
"""
Load dividends.
Args:
*args: list parameters
**kwargs: key-value parameters
"""
return
def _load_allots(self, *args, **kwargs):
"""
Load allots.
Args:
*args: list parameters
**kwargs: key-value parameters
"""
return
def _check_time_range(self, end_date, freq):
"""
        Check whether end_date and freq are valid for slicing.
"""
valid_trading_days = []
if freq == 'm' or MULTI_FREQ_PATTERN.match(freq):
valid_trading_days = self._minute_bars_loaded_days
elif freq == 'd':
valid_trading_days = self._daily_bars_loaded_days
return valid_trading_days[0] <= end_date <= valid_trading_days[-1]
def _current_trading_day_bars_loader(self, *args, **kwargs):
raise NotImplementedError
class FuturesMarketData(MarketData):
"""
Futures market data.
"""
def __init__(self, futures_universe):
"""
Args:
            futures_universe: set of futures symbols, e.g. set(['IFM0', 'HCM0'])
"""
super(FuturesMarketData, self).__init__(futures_universe,
FUTURES_DAILY_FIELDS,
FUTURES_MINUTE_FIELDS,
self._daily_data_loader,
self._minute_data_loader,
asset_type=AssetType.FUTURES)
self._prev_clearing_date_map = dict()
self.continuous_fq_factors = {}
def adjust(self, raw_data_dict, symbols, time_bars, f_adj=None, freq='d', **kwargs):
"""
Futures adjustment.
Args:
raw_data_dict(dict): raw data dict
symbols(string): symbol
time_bars(list): time bar list
f_adj(string): f_adj
freq(string): frequency
"""
data_dict = raw_data_dict
adj_info = self.continuous_fq_factors.get(f_adj, None)
adj_columns = {x: symbols.index(x) for x in symbols if CONTINUOUS_FUTURES_PATTERN.match(x)}
valid_keys = ['closePrice', 'openPrice', 'highPrice', 'lowPrice', 'settlementPrice', 'preSettlementPrice']
adj_keys = list(set(valid_keys) & set(raw_data_dict.keys()))
adj_func = adj_func_choose(f_adj)
if adj_info and adj_func and adj_columns and adj_keys:
adj_matrix = adj_matrix_choose(f_adj, (len(time_bars), len(symbols)))
for column, column_index in adj_columns.iteritems():
adj_matrix[:, column_index] = adj_func(time_bars, adj_info[column])
for key in adj_keys:
data_dict[key] = adj_operator(data_dict[key], adj_matrix, f_adj)
return data_dict, time_bars
def rolling_load_daily_data(self, trading_days, max_cache_days=None, asset_service=None):
"""
        rolling_load_daily_data for FuturesMarketData; loads the full set of trading_days in one pass.
        Args:
            trading_days(list of datetime): trading days to load, already including max_window_history in a backtest
            max_cache_days(int): maximum number of trading days kept in daily_bars; by default all trading days are loaded
"""
if len(trading_days) == 0:
return
MarketData.rolling_load_daily_data(self, trading_days, max_cache_days)
self._prev_clearing_date_map = dict(zip(
self._daily_bars_loaded_days,
[get_previous_trading_date(trading_days[0])] + self._daily_bars_loaded_days[:-1]))
self._prev_clearing_date_map = {key.strftime('%Y-%m-%d'): value.strftime('%Y-%m-%d')
for key, value in self._prev_clearing_date_map.iteritems()}
def rolling_load_minute_data(self, trading_days, max_cache_days):
"""
        Load compressed minute bar data for FuturesMarketData.
        Args:
            trading_days(list of datetime): trading days whose minute bars are to be loaded
            max_cache_days(int): maximum number of minute bar trading days kept in minute_bars
        Returns:
            dict: compressed minute bars for each field
"""
minute_data_compressed = MarketData.rolling_load_minute_data(self, trading_days, max_cache_days)
return minute_data_compressed
def calc_continuous_fq_factors(self, continuous_futures=None, artificial_switch_info=None):
"""
        Compute the price-gap shift factors and pre-adjustment factors for continuous contracts.
        Args:
            continuous_futures(list of str): names of the continuous contracts
            artificial_switch_info(Series): switch information of the continuous contracts
"""
if continuous_futures is None:
return
fq_add, fq_multiple = defaultdict(OrderedDict), defaultdict(OrderedDict)
self.continuous_fq_factors.update({'add': fq_add, 'mul': fq_multiple})
daily_close = self.daily_bars['closePrice']
for date in artificial_switch_info.index:
q_date = date.strftime('%Y-%m-%d')
if q_date not in daily_close.index:
for continuous, switch in artificial_switch_info[date].iteritems():
self.continuous_fq_factors['add'][continuous][q_date] = 0
self.continuous_fq_factors['mul'][continuous][q_date] = 1
continue
index = daily_close.index.get_loc(q_date) - 1
for continuous, switch in artificial_switch_info[date].iteritems():
if not all(switch) or filter(lambda x: not isinstance(x, (str, unicode)), switch):
                    # one of the dominant-contract switch entries is None
continue
symbol_from, symbol_to = switch
column_from = daily_close.columns.get_loc(symbol_from)
column_to = daily_close.columns.get_loc(symbol_to)
data_from, data_to = daily_close.iat[index, column_from], daily_close.iat[index, column_to]
if filter(lambda x: not x or np.isnan(x), (data_from, data_to)):
continue
add = data_to - data_from
multiple = data_to / data_from
self.continuous_fq_factors['add'][continuous][q_date] = 0 if np.isnan(add) else add
self.continuous_fq_factors['mul'][continuous][q_date] = 1 if np.isnan(multiple) else multiple
def get_trade_time(self, clearing_date, minute_bar):
"""
        Get the trade time for a clearing date and minute bar, mainly used as end_time_str when querying expand_slice.
        Args:
            clearing_date(datetime): clearing date
            minute_bar(str): minute bar, in 'HH:mm' format
"""
prev_trading_day = self._prev_clearing_date_map.get(clearing_date, None)
if prev_trading_day is None:
raise AttributeError('Exception in "FuturesMarketData.get_trade_time": '
'unknown clearing date {}'.format(clearing_date))
if minute_bar > '16:00':
return '{} {}'.format(prev_trading_day, minute_bar)
else:
return '{} {}'.format(clearing_date, minute_bar)
@staticmethod
def _daily_data_loader(universe, trading_days, fields):
daily_data = load_futures_daily_data(universe, trading_days, FUTURES_DAILY_FIELDS)
if 'turnoverVol' in fields:
daily_data['turnoverVol'] = daily_data.get('turnoverVol', daily_data.get('volume'))
return daily_data
@staticmethod
def _minute_data_loader(universe, trading_days, fields, freq='m'):
"""
        Concrete loader for FuturesMarketData.minute_bars.
"""
minute_data = load_futures_minute_data(universe, trading_days, FUTURES_MINUTE_FIELDS, freq=freq)
if 'turnoverVol' in fields:
minute_data['turnoverVol'] = minute_data['volume']
return minute_data
@staticmethod
def _current_trading_day_bars_loader(current_trading_day, universe, **kwargs):
"""
Load real-time 1 minute bars.
Args:
            current_trading_day(datetime.datetime): current trading day
            universe(list or set): futures symbol universe
            fields(list): attribute list
Returns:
dict: future real-time data
"""
# customize start_time, end_time, fields
current_date = datetime.datetime.today()
if (current_trading_day > current_date) and (20 <= current_date.hour <= 21):
sta_data = dict()
else:
sta_data = load_futures_rt_minute_data(universe)
return sta_data
|