content
stringlengths 5
1.05M
|
|---|
import arcpy, os
class ToolValidator(object):
    """Class for validating a tool's parameter values and controlling
    the behavior of the tool's dialog.

    Parameter layout (as used below):
      params[0]  - incident picklist ("- - I'll enter..." sentinel for new)
      params[1]  - target geodatabase path (.gdb file GDB or runtime GDB)
      params[2..10] - metadata fields filled from the defaults table
      params[11] - text field reset to "None"
      params[12] - boolean, only enabled for file geodatabases
    """
    def __init__(self):
        """Setup arcpy and the list of tool parameters."""
        self.params = arcpy.GetParameterInfo()
    def initializeParameters(self):
        """Refine the properties of a tool's parameters. This method is
        called when the tool is opened."""
        # Seed the picklist with the "new incident" sentinel entry.
        self.params[0].value = "- - I'll enter metadata values for a new incident - -"
        return
    def updateParameters(self):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # Get or set the values of params[2] - params[10] based on the value in params[0].
        if self.params[0].value:  ## If params[0] is not NULL
            if not self.params[0].hasBeenValidated:  ## If the value of params[0] has changed from the prior value of params[0]
                # Defaults table lives next to this script inside the template GDB.
                inTable = os.path.join(os.path.dirname(__file__),
                                       "EventMetadataTemplate.gdb\\MetadataDefaults")  ## Specify the table containing metadata defaults
                incidentList = ["- - I'll enter metadata values for a new incident - -"]  ## Initial value of incidentList
                with arcpy.da.SearchCursor(inTable, "IncidentName") as irows:  ## Set up a search cursor to get a list of incidents
                    for irow in irows:  ## For each row of the metadata defaults table
                        incidentList.append(irow[0])  ## Append each value of IncidentName to incidentList
                incidentList.sort()  ## Sort incidentList
                self.params[0].filter.list = incidentList  ## Use incidentList as a picklist for params[0]
                # The sentinel entry is the only list item containing "- - ".
                if "- - " in self.params[0].value:  ## If the user elected to enter metadata values for a new incident
                    for i in range(2, 11):
                        self.params[i].value = None  ## Set params[2] - params[10] to NULL
                    self.params[11].value = "None"  ## Set params[11] to "None"
                    self.params[12].value = False  ## Set params[12] to False
                else:  ## If the user elected to use an existing metadata collection
                    with arcpy.da.SearchCursor(inTable, ["IncidentName", "UnitID", "LocalIncidentID", "IRWINID", "IMTName",
                                                         "GACC", "ContactName", "ContactEmail", "ContactPhone"]) as rows:
                        for row in rows:
                            if row[0] == self.params[0].value:
                                self.params[ 2].value = row[0]  ## Update params[2] - params[10] based on the value of params[0]
                                self.params[ 3].value = row[1]
                                self.params[ 4].value = row[2]
                                self.params[ 5].value = row[3]
                                self.params[ 6].value = row[4]
                                self.params[ 7].value = row[5]
                                self.params[ 8].value = row[6]
                                self.params[ 9].value = row[7]
                                self.params[10].value = row[8]
                    self.params[11].value = "None"  ## Set params[11] to "None"
                    self.params[12].value = False  ## Set params[12] to False
            else:  ## If the value of params[0] has not changed from the prior value of params[0]
                pass  ## Leave params[2] - params[12] as is
        else:  ## If params[0] is NULL
            for i in range(1, 11):  ## For params[1] - params[10]
                self.params[i].value = None  ## Set params[1] - params[10] to NULL
            self.params[11].value = "None"  ## Set params[11] to "None"
            self.params[12].value = False  ## Set params[12] to False
        # Set the value and enabled status of params[12] based on whether a fGDB or a runtime GDB was specified in params[1].
        if self.params[1].value:  ## If params[1] is not NULL
            if not self.params[1].hasBeenValidated:  ## If the value of params[1] has changed from the prior value of params[1]
                if self.params[1].valueAsText.endswith(".gdb"):  ## If params[1] represents a file geodatabase
                    self.params[12].enabled = True  ## Enable params[12] and set its value to False
                    self.params[12].value = False
                else:  ## If params[1] represents a runtime geodatabase
                    self.params[12].enabled = False  ## Disable params[12] and set its value to False
                    self.params[12].value = False
            else:  ## If the value of params[1] has not changed from the prior value of params[1]
                pass  ## Leave params[12] as is
        else:  ## If params[1] is NULL
            self.params[12].enabled = False  ## Disable params[12] and set its value to False
            self.params[12].value = False
        return
    def updateMessages(self):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return
|
"""Provides Type structures.
"""
# Author: Pearu Peterson
# Created: February 2019
import re
import ctypes
import _ctypes
import inspect
from llvmlite import ir
import warnings
from .targetinfo import TargetInfo
from .utils import check_returns_none
import numba as nb
import numpy as np
from numba.core import typing, datamodel, extending, typeconv
from numba.core.imputils import lower_cast
class TypeSystemManager:
    """Context manager that temporarily installs extra Type aliases.

    Usage::

        with Type.alias(A='Array', bool='bool8', ...):
            # Type.fromstring('A a') behaves like Type.fromstring('Array a')
    """
    def alias(self, aliases):
        # Stash the aliases to be installed on __enter__; returning self
        # lets the instance be used directly as a context manager.
        self.aliases = aliases
        return self
    def __enter__(self):
        # Remember the current alias table, then overlay the new aliases
        # in place so existing references to Type.aliases see the change.
        self.old_aliases = Type.aliases.copy()
        merged = dict(self.old_aliases)
        merged.update(self.aliases)
        Type.aliases.clear()
        Type.aliases.update(merged)
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the alias table captured on entry.
        Type.aliases.clear()
        Type.aliases.update(self.old_aliases)
        if exc_type is None:
            return True
class TypeParseError(Exception):
    """Raised when a type definition string cannot be parsed."""
def _findparen(s):
"""Find the index of left parenthesis that matches with the one at the
end of a string.
Used internally.
"""
j = s.find(')')
assert j >= 0, repr((j, s))
if j == len(s) - 1:
i = s.find('(')
if i < 0:
raise TypeParseError('failed to find lparen index in `%s`' % s)
return i
i = s.rfind('(', 0, j)
if i < 0:
raise TypeParseError('failed to find lparen index in `%s`' % s)
t = s[:i] + '_'*(j-i+1) + s[j+1:]
assert len(t) == len(s), repr((t, s))
return _findparen(t)
def _commasplit(s):
"""Split a comma-separated items taking into account parenthesis
and brackets.
Used internally.
"""
lst = s.split(',')
ac = ''
p1, p2, p3, p4 = 0, 0, 0, 0
rlst = []
for i in lst:
p1 += i.count('(') - i.count(')')
p2 += i.count('{') - i.count('}')
p3 += i.count('[') - i.count(']')
p4 += i.count('<') - i.count('>')
if p1 == p2 == p3 == p4 == 0:
rlst.append((ac + ',' + i if ac else i).strip())
ac = ''
else:
ac = ac + ',' + i if ac else i
if p1 == p2 == p3 == p4 == 0:
return rlst
raise TypeParseError('failed to comma-split `%s`' % s)
# Matchers used by the normalizer to recognize atomic type names.
# Width-suffixed spellings (e.g. ``int32``, ``f64``) come first:
_booln_match = re.compile(r'\A(boolean|bool|b)(1|8)\Z').match
_charn_match = re.compile(r'\A(char)(32|16|8)(_t|)\Z').match
_intn_match = re.compile(r'\A(signed\s*int|int|i)(\d+)(_t|)\Z').match
_uintn_match = re.compile(r'\A(unsigned\s*int|uint|u)(\d+)(_t|)\Z').match
_floatn_match = re.compile(r'\A(float|f)(16|32|64|128|256)(_t|)\Z').match
_complexn_match = re.compile(
    r'\A(complex|c)(16|32|64|128|256|512)(_t|)\Z').match
# Width-less C/C++-style spellings whose actual size is system dependent:
_bool_match = re.compile(r'\A(boolean|bool|_Bool|b)\Z').match
_string_match = re.compile(r'\A(string|str)\Z').match
_char_match = re.compile(r'\A(char)\Z').match
_schar_match = re.compile(r'\A(signed\s*char)\Z').match
_uchar_match = re.compile(r'\A(unsigned\s*char|uchar)\Z').match
_byte_match = re.compile(r'\A(signed\s*byte|byte)\Z').match
_ubyte_match = re.compile(r'\A(unsigned\s*byte|ubyte)\Z').match
_wchar_match = re.compile(r'\A(wchar)(_t|)\Z').match
_short_match = re.compile(
    r'\A(signed\s*short\s*int|signed\s*short|short\s*int|short)\Z').match
_ushort_match = re.compile(
    r'\A(unsigned\s*short\s*int|unsigned\s*short|ushort)\Z').match
_int_match = re.compile(r'\A(signed\s*int|signed|int|i)\Z').match
_uint_match = re.compile(r'\A(unsigned\s*int|unsigned|uint|u)\Z').match
_long_match = re.compile(
    r'\A(signed\s*long\s*int|signed\s*long|long\s*int|long|l)\Z').match
_ulong_match = re.compile(
    r'\A(unsigned\s*long\s*int|unsigned\s*long|ulong)\Z').match
_longlong_match = re.compile(
    r'\A(signed\s*long\s*long\s*int|signed\s*long\s*long|long\s*long'
    r'\s*int|long\s*long)\Z').match
_ulonglong_match = re.compile(
    r'\A(unsigned\s*long\s*long\s*int|unsigned\s*long\s*long)\Z'
).match
_size_t_match = re.compile(r'\A(std::|c_|)size_t\Z').match
_ssize_t_match = re.compile(r'\A(std::|c_|)ssize_t\Z').match
_float_match = re.compile(r'\A(float|f)\Z').match
_double_match = re.compile(r'\A(double|d)\Z').match
_longdouble_match = re.compile(r'\A(long\s*double)\Z').match
_complex_match = re.compile(r'\A(complex|c)\Z').match
# For `Type.fromstring('<typespec> <name>')` support:
_type_name_match = re.compile(r'\A(.*)\s(\w+)\Z').match
# bad names are the names of types that can have modifiers such as
# `signed`, `unsigned`, `long`, etc.:
_bad_names_match = re.compile(r'\A(char|byte|short|int|long|double)\Z').match
# For the custom type support:
_custom_type_name_params_match = re.compile(r'\A(\w+)\s*[<](.*)[>]\Z').match
class Complex64(ctypes.Structure):
    """ctypes analogue of a 64-bit complex value (two 32-bit floats)."""
    _fields_ = [("real", ctypes.c_float), ("imag", ctypes.c_float)]
    @classmethod
    def from_param(cls, obj):
        """Convert a Python number to a Complex64 instance.

        Raises NotImplementedError for unsupported operand types.
        """
        if isinstance(obj, complex):
            return cls(obj.real, obj.imag)
        if isinstance(obj, (int, float)):
            return cls(obj.real, 0.0)
        raise NotImplementedError(repr(type(obj)))
    def topython(self):
        """Return the value as a builtin Python complex.

        Added for consistency with Complex128, which already provides
        this accessor.
        """
        return complex(self.real, self.imag)
class Complex128(ctypes.Structure):
    """ctypes analogue of a 128-bit complex value (two 64-bit floats)."""
    _fields_ = [("real", ctypes.c_double), ("imag", ctypes.c_double)]
    @classmethod
    def from_param(cls, obj):
        """Convert a Python number to a Complex128 instance.

        Raises NotImplementedError for unsupported operand types.
        """
        if isinstance(obj, complex):
            re_part, im_part = obj.real, obj.imag
        elif isinstance(obj, (int, float)):
            re_part, im_part = obj.real, 0.0
        else:
            raise NotImplementedError(repr(type(obj)))
        return cls(re_part, im_part)
    def topython(self):
        """Return the value as a builtin Python complex."""
        return complex(self.real, self.imag)
# Initialize type maps.
# _ctypes_imap: ctypes type -> normalized type name string.
_ctypes_imap = {
    ctypes.c_void_p: 'void*', None: 'void', ctypes.c_bool: 'bool',
    ctypes.c_char_p: 'char%s*' % (ctypes.sizeof(ctypes.c_char()) * 8),
    ctypes.c_wchar_p: 'char%s*' % (ctypes.sizeof(ctypes.c_wchar()) * 8),
}
# Per-kind maps from bit width to the ctypes type of that width.
_ctypes_char_map = {}
_ctypes_int_map = {}
_ctypes_uint_map = {}
_ctypes_float_map = {}
_ctypes_complex_map = {}
for _k, _m, _lst in [
        ('char', _ctypes_char_map, ['c_char', 'c_wchar']),
        ('int', _ctypes_int_map,
         ['c_int8', 'c_int16', 'c_int32', 'c_int64', 'c_int',
          'c_long', 'c_longlong', 'c_byte', 'c_short', 'c_ssize_t']),
        ('uint', _ctypes_uint_map,
         ['c_uint8', 'c_uint16', 'c_uint32', 'c_uint64', 'c_uint',
          'c_ulong', 'c_ulonglong', 'c_ubyte', 'c_ushort', 'c_size_t']),
        ('float', _ctypes_float_map,
         ['c_float', 'c_double', 'c_longdouble']),
        ('complex', _ctypes_complex_map,
         [Complex64, Complex128])
]:
    for _n in _lst:
        if isinstance(_n, str):
            # Some fixed-width aliases may be absent on a platform.
            _t = getattr(ctypes, _n, None)
        else:
            _t = _n
        if _t is not None:
            _b = ctypes.sizeof(_t) * 8
            # First type seen for a given width wins; later aliases of
            # the same width still get an entry in the inverse map.
            if _b not in _m:
                _m[_b] = _t
            _ctypes_imap[_t] = _k + str(_b)
# _numba_imap: numba type -> normalized type name string.
_numba_imap = {nb.void: 'void', nb.boolean: 'bool'}
# Per-kind maps from bit width to the numba type of that width.
_numba_char_map = {}
_numba_bool_map = {}
_numba_int_map = {}
_numba_uint_map = {}
_numba_float_map = {}
_numba_complex_map = {}
for _k, _m, _lst in [
        ('int', _numba_int_map,
         ['int8', 'int16', 'int32', 'int64', 'intc', 'int_', 'intp',
          'long_', 'longlong', 'short', 'char']),
        ('uint', _numba_uint_map,
         ['uint8', 'uint16', 'uint32', 'uint64', 'uintc', 'uint',
          'uintp', 'ulong', 'ulonglong', 'ushort']),
        ('float', _numba_float_map,
         ['float32', 'float64', 'float_', 'double']),
        ('complex', _numba_complex_map, ['complex64', 'complex128']),
]:
    for _n in _lst:
        # Skip names not exposed by the installed numba version.
        _t = getattr(nb, _n, None)
        if _t is not None:
            _b = _t.bitwidth
            # First type seen for a given width wins.
            if _b not in _m:
                _m[_b] = _t
            _numba_imap[_t] = _k + str(_b)
# numpy mapping
_numpy_imap = {}
for v in set(np.typeDict.values()):
name = np.dtype(v).name
_numpy_imap[v] = name
# python_imap values must be processed with Type.fromstring
_python_imap = {int: 'int64', float: 'float64', complex: 'complex128',
str: 'string', bytes: 'char*'}
# Data for the mangling algorithm, see mangle/demangle methods.
#
# Characters reserved as mangling suffix/prefix operators.
_mangling_suffices = '_VW'
_mangling_prefixes = 'PKaAMrR'
# Normalized type name -> single-character mangling code.
_mangling_map = dict(
    void='v', bool='b',
    char8='c', char16='z', char32='w',
    int8='B', int16='s', int32='i', int64='l', int128='q',
    uint8='U', uint16='S', uint32='I', uint64='L', uint128='Q',
    float16='h', float32='f', float64='d', float128='x',
    complex32='H', complex64='F', complex128='D', complex256='X',
    string='t',
)
# Inverse map: mangling code -> type name.  The asserts enforce that
# every code is a unique single character.
_mangling_imap = {}
for _k, _v in _mangling_map.items():
    assert _v not in _mangling_imap, repr((_k, _v))
    assert len(_v) == 1, repr((_k, _v))
    _mangling_imap[_v] = _k
# make sure that mangling keys will not conflict with mangling
# operators:
_i = set(_mangling_imap).intersection(_mangling_suffices+_mangling_prefixes)
assert not _i, repr(_i)
class MetaType(type):
    """Metaclass of Type: registers custom subclasses and holds shared tables."""
    # Registry of user-defined custom Type subclasses, keyed by class name.
    custom_types = dict()
    # Currently active name aliases (see ``alias``).
    aliases = dict()
    # ctypes generated types need to be cached to be usable
    ctypes_types = dict()
    def __new__(mcls, name, bases, dct):
        new_cls = super().__new__(mcls, name, bases, dct)
        # Every class built with this metaclass, except the base ``Type``
        # itself, is recorded as a custom type.
        if name != 'Type':
            new_cls.custom_types[name] = new_cls
        return new_cls
    def alias(cls, **aliases):
        """
        Define type aliases context. For instance,

            with Type.alias(myint='int64'):
                ...

        will ensure under the with block we have
        Type.fromstring('myint') == Type.fromstring('int64')
        """
        return TypeSystemManager().alias(aliases)
class Type(tuple, metaclass=MetaType):
"""Represents a type.
There are six kinds of a types:
========== ============================== ==============================
Type Description Internal structure
========== ============================== ==============================
void a "no type" Type()
atomic e.g. ``int32`` Type(<str>,)
pointer e.g. ``int32*`` Type(<Type instance>, '*')
struct e.g. ``{int32, int32}`` Type(<Type instances>, <Type instances>, ...)
function e.g. ``int32(int32, int32)`` Type(<Type instance>,
(<Type instances>, ...), name='')
custom e.g. ``MyClass<int32, int32>`` Type((<object>,))
undefined e.g. fromcallable(foo) Type(None)
========== ============================== ==============================
Atomic types are types with names (Type contains a single
string). All other types (except "no type") are certain
implementations of atomic types.
The name content of an atomic type is arbitrary but it cannot be
empty. For instance, Type('a') and Type('a long name') are atomic
types.
Parsing types from a string is not fixed to any type system, the
names of types can be arbitrary. However, converting the Type
instances to concrete types such as provided in numpy or numba,
the following atomic types are defined (the first name corresponds
to normalized name)::
no type: void, none
bool: bool, boolean, _Bool, b
1-bit bool: bool1, boolean1, b1
8-bit bool: bool8, boolean8, b8
8-bit char: char8, char
16-bit char: char16
32-bit char: char32, wchar
8-bit signed integer: int8, i8, byte, signed char
16-bit signed integer: int16, i16, int16_t
32-bit signed integer: int32, i32, int32_t
64-bit signed integer: int64, i64, int64_t
128-bit signed integer: int128, i128, int128_t
8-bit unsigned integer: uint8, u8, ubyte, unsigned char
16-bit unsigned integer: uint16, u16, uint16_t
32-bit unsigned integer: uint32, u32, uint32_t
64-bit unsigned integer: uint64, u64, uint64_t
128-bit unsigned integer: uint128, u128, uint64_t
16-bit float: float16, f16
32-bit float: float32, f32, float
64-bit float: float64, f64, double
128-bit float: float128, f128, long double
32-bit complex: complex32, c32, complex
64-bit complex: complex64, c64
128-bit complex: complex128, c128
256-bit complex: complex256, c256
string: string, str
with the following extensions::
N-bit signed integer: int<N>, i<N> for instance: int5, i31
N-bit unsigned integer: uint<N>, u<N>
N-bit float: float<N>
N-bit complex: complex<N>
Also ``byte, short, int, long, long long, signed int, size_t,
ssize_t``, etc are supported but their normalized names are system
dependent.
Custom types
------------
The typesystem supports processing custom types that are usually
named structures or C++ template specifications, but not only.
Custom type can have arbitrary number of (hashable) parameters. A
custom type is concrete when the parameters with type Type are
concrete.
Custom types must implement tonumba method if needed. For that,
one must derive MyClass from Type.
Internally, a custom type has two possible representations: if
MyClass is derived from Type then `MyClass<a, b>` is represented
as `MyClass(('a', 'b'))`, otherwise, it is represented as
`Type(('MyClass', 'a', 'b'))`. If the parameters of a custom types
need to be Type instances, use `preprocess_args` for required
conversion of arguments.
"""
_mangling = None  # lazily computed mangling-string cache, see mangling()
def __new__(cls, *args, **params):
    """Construct a Type from its tuple content and validate its shape.

    ``args`` become the tuple elements; ``params`` are stored as the
    instance's ``_params`` dict (e.g. 'name', 'annotation').
    Raises ValueError when the content is not a valid type shape.
    """
    args = cls.preprocess_args(args)
    obj = tuple.__new__(cls, args)
    obj._params = params
    if not obj._is_ok:
        if obj._is_function:
            # Function-shaped content that is nevertheless invalid —
            # presumably a missing 'name' param or an empty argument
            # tuple (see _is_ok); TODO confirm intended message.
            raise ValueError('cannot create named function type from `%s`' % (args,))
        raise ValueError(
            'attempt to create an invalid Type object from `%s`' % (args,))
    return obj.postprocess_type()
@classmethod
def preprocess_args(cls, args):
    """Hook for massaging constructor arguments.

    Custom Type subclasses may override this to convert their
    parameters; the default implementation passes them through.
    """
    return args
def postprocess_type(self):
    """Hook for finalizing construction; must return a Type instance.

    The default implementation returns the instance unchanged.
    """
    return self
def annotation(self, **annotations):
    """Merge ``annotations`` into this type's annotation dict and return it."""
    current = self._params.get('annotation')
    if current is None:
        current = self._params['annotation'] = {}
    current.update(annotations)
    return current
def __or__(self, other):
    """Return a copy of this type with ``other`` merged into its annotations.

    ``other`` may be a non-empty string (recorded as a flag with an
    empty value) or a dict of annotation key/values; any other operand
    type is silently ignored.
    """
    self.annotation()  # make sure the 'annotation' key exists in _params
    new_params = dict(self._params)
    merged = new_params['annotation'] = dict(new_params['annotation'])
    if isinstance(other, str):
        if other:
            merged[other] = ''
    elif isinstance(other, dict):
        merged.update(other)
    return type(self)(*self, **new_params)
def inherit_annotations(self, other):
    """Recursively copy annotations from ``other`` into this type.

    ``other`` is expected to mirror this type's tuple structure;
    elements of ``other`` that are None are skipped.  Raises
    NotImplementedError for unexpected element kinds.
    """
    if isinstance(other, Type):
        self.annotation().update(other.annotation())
    for a, b in zip(self, other):
        if b is None:
            continue
        if isinstance(a, str):
            pass  # atomic name or '*' marker: nothing to inherit
        elif isinstance(a, Type):
            a.inherit_annotations(b)
        elif isinstance(a, tuple):
            # function argument tuple: recurse pairwise
            for x, y in zip(a, b):
                if isinstance(x, str):
                    pass
                else:
                    x.inherit_annotations(y)
        else:
            raise NotImplementedError('inherit_annotations: %s'
                                      % ((a, type(a)),))
def params(self, other=None, **params):
    """In-place update of parameters from other or/and dictionary and return self.
    """
    if other is not None:
        # Apply other's params first, then the explicit keyword overrides.
        return self.params(None, **other._params).params(None, **params)
    for k, v in params.items():
        if k == 'annotation':
            # Annotations are merged rather than replaced.
            self.annotation().update(v)
        else:
            orig_v = self._params.get(k)
            if orig_v is not None and orig_v != v:
                # Overwriting a different existing value is suspicious;
                # warn but proceed.
                warnings.warn(
                    f'{type(self).__name__}.params: overwriting '
                    f'existing parameter {k} with {v} (original value is {orig_v})')
            self._params[k] = v
    return self
def set_mangling(self, mangling):
    """Install a precomputed mangling string for this type."""
    self._mangling = mangling
def mangling(self):
    """Return the mangling string, computing and caching it on first use."""
    cached = self._mangling
    if cached is None:
        cached = self.mangle()
        self._mangling = cached
    return cached
def _atomic_startswith(self, prefix):
    """Internal helper: atomic type whose name begins with ``prefix``."""
    return self.is_atomic and self[0].startswith(prefix)
@property
def is_void(self):
    """A "no type": empty content."""
    return not len(self)
@property
def is_atomic(self):
    """A named type: a single string element."""
    return len(self) == 1 and isinstance(self[0], str)
@property
def is_undefined(self):
    """Placeholder type: a single None element."""
    return len(self) == 1 and self[0] is None
@property
def is_int(self):
    return self._atomic_startswith('int')
@property
def is_uint(self):
    return self._atomic_startswith('uint')
@property
def is_float(self):
    return self._atomic_startswith('float')
@property
def is_complex(self):
    return self._atomic_startswith('complex')
@property
def is_string(self):
    return self.is_atomic and self[0] == 'string'
@property
def is_bool(self):
    return self._atomic_startswith('bool')
@property
def is_char(self):
    return self._atomic_startswith('char')
@property
def is_aggregate(self):
    # ref: https://llvm.org/docs/LangRef.html#aggregate-types
    return self.is_struct
@property
def is_pointer(self):
    """Pointer: (pointee Type, '*')."""
    if len(self) != 2:
        return False
    return (isinstance(self[0], Type) and isinstance(self[1], str)
            and self[1] == '*')
@property
def is_struct(self):
    """Struct: a non-empty tuple whose members are all Type instances."""
    return bool(self) and all(isinstance(member, Type) for member in self)
@property
def _is_function(self):
    """Function-shaped: (rtype Type, plain tuple of argument Types)."""
    if len(self) != 2 or not isinstance(self[0], Type):
        return False
    atypes = self[1]
    return (isinstance(atypes, tuple) and not isinstance(atypes, Type)
            and all(isinstance(p, Type) for p in atypes))
@property
def is_function(self):
    """Function type proper: function-shaped plus a 'name' parameter."""
    return self._is_function and 'name' in self._params
@property
def is_custom(self):
    """Custom type: a single element that is a plain tuple, not a Type."""
    return (len(self) == 1 and not isinstance(self[0], Type)
            and isinstance(self[0], tuple))
@property
def is_signed(self):
    # https://en.cppreference.com/w/cpp/types/numeric_limits/is_signed
    return self.is_atomic and (self.is_char or self.is_int or self.is_float)
@property
def is_unsigned(self):
    # https://en.cppreference.com/w/cpp/types/numeric_limits/is_signed
    return self.is_atomic and not self.is_signed
@property
def is_complete(self):
    """Return True when the Type instance does not contain unknown types.
    """
    if self.is_atomic:
        # '<type of ...>' names are placeholders for not-yet-known types.
        return not self[0].startswith('<type of')
    elif self.is_pointer:
        return self[0].is_complete
    elif self.is_struct:
        for m in self:
            if not m.is_complete:
                return False
    elif self.is_function:
        if not self[0].is_complete:
            return False
        for a in self[1]:
            if not a.is_complete:
                return False
    elif self.is_void:
        pass
    elif self.is_custom:
        # Only Type-valued custom parameters are inspected.
        for a in self[0]:
            if isinstance(a, Type) and not a.is_complete:
                return False
    elif self.is_undefined:
        return False
    else:
        raise NotImplementedError(repr(self))
    # Branches that did not return early fall through to True.
    return True
@property
def is_concrete(self):
    """Return True when the Type instance is concrete.

    A type is concrete when every atomic leaf is one of the known
    fundamental kinds.  Raises NotImplementedError for unrecognized
    structures.
    """
    if self.is_atomic:
        return (self.is_int or self.is_uint or self.is_float
                or self.is_complex or self.is_bool
                or self.is_string or self.is_char)
    if self.is_pointer:
        return self[0].is_concrete
    if self.is_struct:
        return all(member.is_concrete for member in self)
    if self.is_function:
        return (self[0].is_concrete
                and all(a.is_concrete for a in self[1]))
    if self.is_void:
        return True
    if self.is_custom:
        # Only Type-valued custom parameters need to be concrete.
        return all(a.is_concrete for a in self[0] if isinstance(a, Type))
    if self.is_undefined:
        return False
    raise NotImplementedError(repr(self))
@property
def _is_ok(self):
    """Internal validity check used by the constructor."""
    if self.is_void or self.is_atomic or self.is_pointer or self.is_struct:
        return True
    if self.is_function and len(self[1]) > 0:
        return True
    return self.is_custom or self.is_undefined
def __repr__(self):
    """Tuple repr, followed by any truthy parameters after a '|'."""
    shown = {k: v for k, v in self._params.items() if v}
    base = '%s%s' % (type(self).__name__, tuple.__repr__(self))
    if shown:
        return '%s|%s' % (base, shown)
    return base
@property
def consumes_nargs(self):
    """Number of function arguments this type consumes in a call."""
    return len(self.as_consumed_args)
@property
def as_consumed_args(self):
    """The argument types this type expands to when consumed."""
    return [self]
@property
def arity(self):
    """Return the arity of the function type.

    Some argument types may consume several function arguments (e.g. a
    data pointer plus a data size).  The arity is the number of
    function arguments consumed when constructing a call; arguments
    carrying a 'default' annotation (and everything after them) are
    not counted.
    """
    assert self.is_function
    total = 0
    for atype in self[1]:
        if atype.annotation().get('default'):
            break
        total += atype.consumes_nargs
    return total
@property
def argument_types(self):
    """Flattened list of consumed argument types of a function type."""
    assert self.is_function
    return [consumed
            for atype in self[1]
            for consumed in atype.as_consumed_args]
@property
def name(self):
    """Return declarator name.

    Function types always carry a name (possibly the empty string);
    other types return None when no name was specified.
    """
    return self._params.get('name')
def get_field_position(self, name):
    """Return the index of the struct member called ``name``.

    Returns None when no member matches.
    """
    assert self.is_struct
    return next((index for index, member in enumerate(self)
                 if member.name == name), None)
def __str__(self):
    """Readable spelling; falls back to tuple str for invalid content."""
    return self.tostring() if self._is_ok else tuple.__str__(self)
def tostring(self, use_typename=False, use_annotation=True):
    """Return string representation of a type.

    use_typename: prefer the 'typename' parameter, when present, over
    the structural spelling.
    use_annotation: append ' | key=value' (or ' | key') items for each
    annotation entry.
    """
    if use_annotation:
        # Render the bare type first, then append the annotations.
        s = self.tostring(use_typename=use_typename, use_annotation=False)
        annotation = self.annotation()
        for name, value in annotation.items():
            if value:
                s = '%s | %s=%s' % (s, name, value)
            else:
                s = '%s | %s' % (s, name)
        return s
    if self.is_void:
        return 'void'
    name = self._params.get('name')
    if self.is_function:
        if use_typename:
            typename = self._params.get('typename')
            if typename is not None:
                return typename
        # Function declarator: '<rtype> <name>(<atypes>)'.
        if name:
            name = ' ' + name
        return (self[0].tostring(use_typename=use_typename)
                + name + '(' + ', '.join(
                    a.tostring(use_typename=use_typename)
                    for a in self[1]) + ')')
    # Non-function types append ' <name>' when a name is present.
    if name is not None:
        suffix = ' ' + name
    else:
        suffix = ''
    if use_typename:
        typename = self._params.get('typename')
        if typename is not None:
            return typename + suffix
    if self.is_atomic:
        return self[0] + suffix
    if self.is_pointer:
        return self[0].tostring(use_typename=use_typename) + '*' + suffix
    if self.is_struct:
        # A struct carrying 'clsname' renders as ClsName<member, ...>,
        # otherwise as {member, ...}.
        clsname = self._params.get('clsname')
        if clsname is not None:
            return clsname + '<' + ', '.join(
                [t.tostring(use_typename=use_typename)
                 for t in self]) + '>' + suffix
        return '{' + ', '.join([t.tostring(use_typename=use_typename)
                                for t in self]) + '}' + suffix
    if self.is_custom:
        params = self[0]
        if type(self) is Type:
            # Plain Type custom: first element is the custom type name.
            name = params[0]
            params = params[1:]
        else:
            # Subclass custom: the subclass name is the type name.
            name = type(self).__name__
        new_params = []
        for a in params:
            if isinstance(a, Type):
                s = a.tostring(use_typename=use_typename)
            else:
                s = str(a)
            new_params.append(s)
        return (name + '<' + ', '.join(new_params) + '>' + suffix)
    raise NotImplementedError(repr(self))
def toprototype(self):
    """Return a C-prototype-like string representation of the type."""
    if self.is_void:
        return 'void'
    typename = self._params.get('typename')
    if typename is not None:
        return typename
    if self.is_atomic:
        atom = self[0]
        if self.is_int or self.is_uint:
            # e.g. int32 -> int32_t
            return atom + '_t'
        if self.is_float:
            width = self.bits
            if width == 32:
                return 'float'
            if width == 64:
                return 'double'
        # Other atomics (and unusual float widths) keep their name.
        return atom
    if self.is_pointer:
        return '%s*' % self[0].toprototype()
    if self.is_struct:
        return '{%s}' % ', '.join(member.toprototype() for member in self)
    if self.is_function:
        rendered_args = ', '.join(a.toprototype() for a in self[1])
        return '%s(%s)' % (self[0].toprototype(), rendered_args)
    if self.is_custom:
        raise NotImplementedError(f'{type(self).__name__}.toprototype()')
    raise NotImplementedError(repr(self))
def tonumba(self, bool_is_int8=None):
    """Convert Type instance to numba type object.

    Parameters
    ----------
    bool_is_int8: {bool, None}
        If true, boolean data and values are mapped to LLVM `i8`,
        otherwise to `i1`. Note that numba boolean maps data to `i8`
        and value to `i1`. To get numba convention, specify
        `bool_is_int8` as `None`.
    """
    # An explicitly provided numba type takes precedence.
    numba_type = self._params.get('tonumba')
    if numba_type is not None:
        return numba_type
    if self.is_void:
        return nb.void
    if self.is_int:
        # e.g. 'int32'[3:] == '32'
        return _numba_int_map.get(int(self[0][3:]))
    if self.is_uint:
        return _numba_uint_map.get(int(self[0][4:]))
    if self.is_float:
        return _numba_float_map.get(int(self[0][5:]))
    if self.is_complex:
        return _numba_complex_map.get(int(self[0][7:]))
    if self.is_bool:
        if bool_is_int8 is None:
            return _numba_bool_map.get(self.bits, nb.boolean)
        # boolean8/boolean1 are defined elsewhere in this module.
        return boolean8 if bool_is_int8 else boolean1
    if self.is_pointer:
        if self[0].is_void:
            return nb.types.voidptr
        if self[0].is_struct:
            ptr_type = self._params.get('NumbaType', structure_type.StructureNumbaPointerType)
        else:
            ptr_type = self._params.get('NumbaType', nb.types.CPointer)
        return ptr_type(self[0].tonumba(bool_is_int8=bool_is_int8))
    if self.is_struct:
        struct_name = self._params.get('name')
        if struct_name is None:
            # Derive a deterministic name from the mangling string.
            struct_name = 'STRUCT'+self.mangling()
        members = []
        for i, member in enumerate(self):
            # Unnamed members get positional names _1, _2, ...
            name = member._params.get('name', '_%s' % (i+1))
            members.append((name,
                            member.tonumba(bool_is_int8=bool_is_int8)))
        return structure_type.make_numba_struct(struct_name, members, origin=self)
    if self.is_function:
        rtype = self[0].tonumba(bool_is_int8=bool_is_int8)
        # void arguments are dropped from the numba signature.
        atypes = [t.tonumba(bool_is_int8=bool_is_int8)
                  for t in self[1] if not t.is_void]
        return rtype(*atypes)
    if self.is_string:
        return nb.types.string
    if self.is_char:
        # in numba, char==int8
        # NOTE(review): a bare 'char' atom (no width suffix) would make
        # int(self[0][4:]) raise ValueError — presumably normalization
        # always yields a width-suffixed name like 'char8'; confirm.
        return _numba_int_map.get(int(self[0][4:]))
    if self.is_atomic:
        # Unknown atomic names become opaque numba types.
        return nb.types.Type(self[0])
    if self.is_custom:
        return self.__typesystem_type__.tonumba(bool_is_int8=bool_is_int8)
    raise NotImplementedError(repr(self))
def toctypes(self):
    """Convert Type instance to ctypes type object.

    Struct and function types are built once and cached in
    ``ctypes_types`` (keyed by mangled name), since ctypes types must
    be reused to stay usable.
    """
    if self.is_void:
        return None
    if self.is_int:
        return _ctypes_int_map[int(self[0][3:])]
    if self.is_uint:
        return _ctypes_uint_map[int(self[0][4:])]
    if self.is_float:
        return _ctypes_float_map[int(self[0][5:])]
    if self.is_complex:
        return _ctypes_complex_map[int(self[0][7:])]
    if self.is_bool:
        return ctypes.c_bool
    if self.is_char:
        return _ctypes_char_map[int(self[0][4:])]
    if self.is_pointer:
        if self[0].is_void:
            return ctypes.c_void_p
        if self[0].is_char:
            # Map charN* to the matching ctypes pointer type, e.g.
            # c_char -> c_char_p.  self[0][0] is the pointee's name
            # string (e.g. 'char8'), so [4:] extracts the bit width.
            return getattr(ctypes,
                           _ctypes_char_map.get(
                               int(self[0][0][4:])).__name__ + '_p')
        return ctypes.POINTER(self[0].toctypes())
    if self.is_struct:
        ctypes_type_name = 'rbc_typesystem_struct_%s' % (self.mangle())
        typ = type(self).ctypes_types.get(ctypes_type_name)
        if typ is None:
            # Unnamed fields get positional names f0, f1, ...
            fields = [((t.name if t.name else 'f%s' % i), t.toctypes())
                      for i, t in enumerate(self)]
            typ = type(ctypes_type_name,
                       (ctypes.Structure, ),
                       dict(_fields_=fields))
            type(self).ctypes_types[ctypes_type_name] = typ
        return typ
    if self.is_function:
        ctypes_type_name = 'rbc_typesystem_function_%s' % (self.mangle())
        typ = type(self).ctypes_types.get(ctypes_type_name)
        if typ is None:
            rtype = self[0].toctypes()
            atypes = []
            for t in self[1]:
                if t.is_struct:
                    # LLVM struct argument (as an aggregate type)
                    # is mapped to struct member arguments:
                    atypes.extend([m.toctypes() for m in t])
                elif t.is_void:
                    pass
                else:
                    atypes.append(t.toctypes())
            typ = ctypes.CFUNCTYPE(rtype, *atypes)
            type(self).ctypes_types[ctypes_type_name] = typ
        return typ
    if self.is_string:
        return ctypes.c_wchar_p
    if self.is_custom:
        raise NotImplementedError(f'{type(self).__name__}.toctypes()|self={self}')
    raise NotImplementedError(repr((self, self.is_string)))
def tollvmir(self, bool_is_int8=None):
    """Convert the type to an llvmlite IR type.

    bool_is_int8: when true, booleans map to ``i8``; otherwise to an
    integer type of the boolean's own bit width.
    """
    if self.is_int:
        return ir.IntType(self.bits)
    if self.is_float:
        # Only 32- and 64-bit floats are representable here; any other
        # width raises KeyError.
        return {32: ir.FloatType, 64: ir.DoubleType}[self.bits]()
    if self.is_bool:
        width = 8 if bool_is_int8 else self.bits
        return ir.IntType(width)
    if self.is_pointer:
        pointee = self[0]
        if pointee.is_void:
            # void* is modelled as int8*.
            return ir.IntType(8).as_pointer()
        return pointee.tollvmir(bool_is_int8=bool_is_int8).as_pointer()
    if self.is_void:
        # Used only as the return type of a function without a return value.
        # TODO: ensure that void is used only as function return type or a pointer dtype.
        return ir.VoidType()
    if self.is_struct:
        return ir.LiteralStructType(
            [member.tollvmir(bool_is_int8=bool_is_int8) for member in self])
    raise NotImplementedError(f'{type(self).__name__}.tollvmir()|self={self}')
@classmethod
def _fromstring(cls, s):
    """Recursive-descent parser for type specification strings.

    Dispatches on the last character: '*' => pointer, '}' => struct,
    ')' => function, '>' => custom; then annotations ('|'), named
    declarators, and finally atomic names.
    """
    s = s.strip()
    if len(s) > 1 and s.endswith('*'):  # pointer
        return cls(cls._fromstring(s[:-1]), '*')
    if s.endswith('}'):  # struct
        if not s.startswith('{'):
            raise TypeParseError(
                'mismatching curly parenthesis in `%s`' % (s))
        return cls(*map(cls._fromstring,
                        _commasplit(s[1:-1].strip())))
    if s.endswith(')'):  # function
        i = _findparen(s)
        if i < 0:
            raise TypeParseError('mismatching parenthesis in `%s`' % (s))
        atypes = tuple(map(cls._fromstring,
                           _commasplit(s[i+1:-1].strip())))
        # r is everything before the argument list: return type and
        # possibly a declarator.
        r = s[:i].strip()
        if r.endswith(')'):
            # Parenthesized declarator, e.g. 'int (*name)(...)'.
            j = _findparen(r)
            if j < 0:
                raise TypeParseError('mismatching parenthesis in `%s`' % (r))
            rtype = cls._fromstring(r[:j])
            d = r[j+1:-1].strip()
            if d.startswith('*'):
                # Strip pointer stars of a function-pointer declarator.
                while d.startswith('*'):
                    d = d[1:].lstrip()
                if d.endswith(')'):
                    # Function returning a function pointer:
                    # the outer atypes belong to the inner function.
                    k = _findparen(d)
                    name = d[:k]
                    rtype = cls(rtype, atypes, name='')
                    atypes = tuple(map(cls._fromstring,
                                       _commasplit(d[k+1:-1].strip())))
                    return cls(rtype, atypes, name=name)
                name = d
                return cls(rtype, atypes, name=name)
        rtype = cls._fromstring(r)
        if rtype.is_function:
            # Move the declarator name from the return type onto the
            # outer function.
            name = rtype._params['name']
            rtype._params['name'] = ''
        else:
            name = rtype._params.pop('name', '')
        return cls(rtype, atypes, name=name)
    if s.endswith('>') and not s.startswith('<'):  # custom
        i = s.index('<')
        name = s[:i]
        params = _commasplit(s[i+1:-1].strip())
        params = tuple(map(cls._fromstring, params)) if params else ()
        name = cls.aliases.get(name, name)
        if name in cls.custom_types:
            # Registered subclass: construct via the subclass.
            cls = cls.custom_types[name]
            r = cls(params)
        else:
            # Unregistered custom: name is carried as the first element.
            r = cls((name,) + params)
        return r
    if s == 'void' or s == 'none' or not s:  # void
        return cls()
    if '|' in s:
        # Trailing annotation: '<type> | key[=value]'.
        s, a = s.rsplit('|', 1)
        t = cls._fromstring(s.rstrip())
        if '=' in a:
            n, v = a.split('=', 1)
        else:
            n, v = a, ''
        n = n.strip()
        v = v.strip()
        if n or v:
            t.annotation(**{n: v})
        return t
    m = _type_name_match(s)
    if m is not None:
        name = m.group(2)
        if not _bad_names_match(name):
            # `<typespec> <name>`
            t = cls._fromstring(m.group(1))
            t._params['name'] = name
            return t
    # atomic
    if s in cls.custom_types:
        # Registered custom type used without parameters.
        return cls.custom_types[s](())
    return cls(s)
@classmethod
def fromstring(cls, s):
"""Return new Type instance from a string.
Parameters
----------
s : str
"""
try:
return cls._fromstring(s)._normalize()
except TypeParseError as msg:
raise ValueError('failed to parse `%s`: %s' % (s, msg))
@classmethod
def fromnumpy(cls, t):
"""Return new Type instance from numpy type object.
"""
n = _numpy_imap.get(t)
if n is not None:
return cls.fromstring(n)
raise NotImplementedError(repr(t))
    @classmethod
    def fromnumba(cls, t):
        """Return new Type instance from numba type object.
        """
        # Objects (or their classes) may advertise their typesystem type
        # directly via the `__typesystem_type__` protocol.
        if hasattr(t, "__typesystem_type__") or hasattr(type(t), "__typesystem_type__"):
            return t.__typesystem_type__
        n = _numba_imap.get(t)
        if n is not None:
            return cls.fromstring(n)
        if isinstance(t, typing.templates.Signature):
            # Function signature; an empty argument list is represented
            # as a single void argument `(Type(),)`.
            atypes = (cls.fromnumba(a) for a in t.args)
            rtype = cls.fromnumba(t.return_type)
            return cls(rtype, tuple(atypes) or (Type(),), name='')
        if isinstance(t, nb.types.misc.CPointer):
            return cls(cls.fromnumba(t.dtype), '*')
        if isinstance(t, nb.types.NumberClass):
            # Number *class* objects wrap a concrete instance type.
            return cls.fromnumba(t.instance_type)
        if isinstance(t, nb.types.Boolean):
            # boolean1 and boolean8 map both to bool
            return cls.fromstring('bool')
        if isinstance(t, nb.types.misc.RawPointer):
            if t == nb.types.voidptr:
                # void* is modeled as a pointer to void.
                return cls(cls(), '*')
        raise NotImplementedError(repr((t, type(t).__bases__)))
@classmethod
def fromctypes(cls, t):
"""Return new Type instance from ctypes type object.
"""
n = _ctypes_imap.get(t)
if n is not None:
return cls.fromstring(n)
if issubclass(t, ctypes.Structure):
return cls(*(cls.fromctypes(_t)
for _f, _t in t._fields_))
if issubclass(t, ctypes._Pointer):
return cls(cls.fromctypes(t._type_), '*')
if issubclass(t, ctypes._CFuncPtr):
atypes = tuple(cls.fromctypes(a) for a in t._argtypes_)
return cls(cls.fromctypes(t._restype_), atypes or (Type(),), name='')
raise NotImplementedError(repr(t))
    @classmethod
    def fromcallable(cls, func):
        """Return new Type instance from a callable object.
        The callable object must use annotations for specifying the
        types of arguments and return value.

        Raises
        ------
        ValueError
            When `func` has no `__name__`, is a lambda, or has
            non-positional parameters.
        """
        if not hasattr(func, '__name__'):
            raise ValueError(
                'constructing Type instance from a callable without `__name__`'
                f' is not supported, got {func}|{type(func).__bases__}')
        if func.__name__ == '<lambda>':
            # lambda function cannot carry annotations, hence:
            raise ValueError('constructing Type instance from '
                             'a lambda function is not supported')
        sig = get_signature(func)
        annot = sig.return_annotation
        if isinstance(annot, dict):
            # A dict annotation carries metadata; attach it to void.
            rtype = cls() | annot  # void
        elif annot == sig.empty:
            if check_returns_none(func):
                rtype = cls()  # void
            else:
                rtype = cls(None)  # cannot deterimine return value type
        else:
            rtype = cls.fromobject(annot)
        atypes = []
        for n, param in sig.parameters.items():
            annot = param.annotation
            if param.kind not in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
                                  inspect.Parameter.POSITIONAL_ONLY]:
                raise ValueError(
                    'callable argument kind must be positional,'
                    ' `%s` has kind %s' % (param, param.kind))
            if param.default != sig.empty:
                # Record the default value as annotation metadata.
                # NOTE(review): when the user supplied a dict annotation,
                # this mutates that dict in place — confirm intended.
                if annot == sig.empty:
                    annot = {}
                annot['default'] = param.default
            if isinstance(annot, dict):
                # Unannotated (metadata-only) argument gets a placeholder.
                atypes.append(cls('<type of %s>' % n) | annot)
            elif annot == sig.empty:
                atypes.append(cls('<type of %s>' % n))
            else:
                atypes.append(cls.fromobject(annot))
        return cls(rtype, tuple(atypes) or (Type(),), name=func.__name__)
@classmethod
def fromvalue(cls, obj):
"""Return Type instance that corresponds to given value.
"""
for mapping in [_python_imap, _numpy_imap]:
n = mapping.get(type(obj))
if n is not None:
return cls.fromstring(n)
if isinstance(obj, _ctypes._Pointer):
return cls.fromctypes(obj._type_).pointer()
if isinstance(obj, ctypes.c_void_p):
return cls(cls(), '*')
if hasattr(obj, '__typesystem_type__'):
return cls.fromobject(obj.__typesystem_type__)
raise NotImplementedError('%s.fromvalue(%r|%s)'
% (cls.__name__, obj, type(obj)))
    @classmethod
    def fromobject(cls, obj):
        """Return new Type instance from any object.

        Dispatches on the object's kind: Type instances pass through,
        strings are parsed, and numba/ctypes/numpy objects are routed to
        their dedicated constructors.

        Parameters
        ----------
        obj : object
        """
        if isinstance(obj, cls):
            return obj
        if isinstance(obj, str):
            return cls.fromstring(obj)
        n = _python_imap.get(obj)
        if n is not None:
            return cls.fromstring(n)
        # The `__typesystem_type__` protocol takes precedence over
        # module-based dispatch below.
        if hasattr(obj, "__typesystem_type__") or hasattr(type(obj), "__typesystem_type__"):
            return obj.__typesystem_type__
        if hasattr(obj, '__module__'):
            # Dispatch on the defining module of the object.
            if obj.__module__.startswith('numba'):
                return cls.fromnumba(obj)
            if obj.__module__.startswith('ctypes'):
                return cls.fromctypes(obj)
            if obj.__module__.startswith('numpy'):
                return cls.fromnumpy(obj)
        if inspect.isclass(obj):
            if obj is int:
                # Plain Python `int` maps to a fixed 64-bit integer.
                return cls('int64')
            return cls.fromstring(obj.__name__)
        if callable(obj):
            return cls.fromcallable(obj)
        raise NotImplementedError(repr((type(obj))))
    def _normalize(self):
        """Return new Type instance with atomic types normalized.

        Normalization resolves registered aliases and maps C-style type
        names (``int``, ``long``, ``double``, ...) to bit-sized names
        (``int32``, ``float64``, ...), using TargetInfo for the
        platform-dependent sizes.
        """
        params = self._params
        if self.is_void:
            return self
        if self.is_atomic:
            s = self[0]
            # Resolve user-registered aliases first.
            a = type(self).aliases.get(s)
            if a is not None:
                return Type.fromobject(a).params(self)._normalize()
            m = _string_match(s)
            if m is not None:
                return Type('string', **params)
            # Names already carrying an explicit bit count, e.g. `int32`.
            for match, ntype in [
                    (_booln_match, 'bool'),
                    (_charn_match, 'char'),
                    (_intn_match, 'int'),
                    (_uintn_match, 'uint'),
                    (_floatn_match, 'float'),
                    (_complexn_match, 'complex'),
            ]:
                m = match(s)
                if m is not None:
                    bits = m.group(2)
                    return Type(ntype + bits, **params)
            if s.endswith('[]'):
                # C-style array notation maps to the Array custom type.
                return Type.fromstring(f'Array<{s[:-2]}>')._normalize()
            m = _bool_match(s)
            if m is not None:
                return Type('bool', **params)
            if _char_match(s):
                return Type('char8', **params)  # IEC 9899 defines sizeof(char)==1
            if _float_match(s):
                return Type('float32', **params)  # IEC 60559 defines sizeof(float)==4
            if _double_match(s):
                return Type('float64', **params)  # IEC 60559 defines sizeof(double)==8
            # Remaining C names have target-dependent sizes.
            target_info = TargetInfo()
            for match, otype, ntype in [
                    (_wchar_match, 'wchar', 'char'),
                    (_schar_match, 'char', 'int'),
                    (_uchar_match, 'uchar', 'uint'),
                    (_byte_match, 'byte', 'int'),
                    (_ubyte_match, 'ubyte', 'uint'),
                    (_short_match, 'short', 'int'),
                    (_ushort_match, 'ushort', 'uint'),
                    (_int_match, 'int', 'int'),
                    (_uint_match, 'uint', 'uint'),
                    (_long_match, 'long', 'int'),
                    (_ulong_match, 'ulong', 'uint'),
                    (_longlong_match, 'longlong', 'int'),
                    (_ulonglong_match, 'ulonglong', 'uint'),
                    (_size_t_match, 'size_t', 'uint'),
                    (_ssize_t_match, 'ssize_t', 'int'),
                    (_longdouble_match, 'longdouble', 'float'),
                    (_complex_match, 'complex', 'complex'),
            ]:
                if match(s) is not None:
                    sz = target_info.sizeof(otype)
                    if sz is not None:
                        bits = str(sz * 8)
                        return Type(ntype + bits, **params)
                    # Matched but the target does not know the size.
                    break
            if target_info.strict:
                raise ValueError('%s is not concrete' % (self))
            return self
        # Composite types normalize their components recursively.
        if self.is_pointer:
            return Type(
                self[0]._normalize(), self[1], **params)
        if self.is_struct:
            return Type(
                *(t._normalize() for t in self), **params)
        if self.is_function:
            return Type(
                self[0]._normalize(),
                tuple(t._normalize() for t in self[1]),
                **params)
        if self.is_custom:
            # Custom type parameters may be non-Type objects; only Type
            # instances are normalized.
            params = []
            for a in self[0]:
                if isinstance(a, Type):
                    a = a._normalize()
                params.append(a)
            return type(self)(tuple(params))
        raise NotImplementedError(repr(self))
    def mangle(self):
        """Return mangled type string.
        Mangled type string is a string representation of the type
        that can be used for extending the function name.

        Encoding summary (see also `_demangle`): ``v`` is void,
        ``_<item>P`` a pointer, ``_<items>K`` a struct,
        ``_<rtype>a<atypes>A`` a function, ``V<n>V<name>`` a custom or
        unknown atomic name, and a trailing ``W<n>W<name>`` encodes the
        type's name, if any.
        """
        name_suffix = ''
        name = self.name
        if name:
            name_suffix = 'W' + str(len(name)) + 'W' + name
        if self.is_void:
            # Void types never carry a name.
            assert name_suffix == '', name_suffix
            return 'v'
        if self.is_pointer:
            return '_' + self[0].mangle() + 'P' + name_suffix
        if self.is_struct:
            return '_' + ''.join(m.mangle()
                                 for m in self) + 'K' + name_suffix
        if self.is_function:
            r = self[0].mangle()
            a = ''.join([a.mangle() for a in self[1]])
            return '_' + r + 'a' + a + 'A' + name_suffix
        if self.is_custom:
            params = self[0]
            if type(self) is Type:
                # Generic custom type: first parameter is the type name.
                name = params[0]
                params = params[1:]
            else:
                # Subclassed custom type: the class name is the type name.
                name = type(self).__name__
            n = _mangling_map.get(name)
            n = name if n is None else n
            r = 'V' + str(len(n)) + 'V' + n
            a = ''.join([a.mangle() for a in params])
            return '_' + r + 'r' + a + 'R' + name_suffix
        if self.is_atomic:
            # Known atomics have a compact single-character code.
            n = _mangling_map.get(self[0])
            if n is not None:
                return n + name_suffix
            n = self[0]
            return 'V' + str(len(n)) + 'V' + n + name_suffix
        raise NotImplementedError(repr(self))
@classmethod
def demangle(cls, s):
block, rest = _demangle(s)
assert not rest, repr(rest)
assert len(block) == 1, repr(block)
return block[0]
    @property
    def bits(self):
        """Bit size of the type, or NotImplemented when unknown.

        Atomic widths are parsed from the normalized type name by
        slicing off the kind prefix, e.g. ``'int32'[3:] -> '32'``.
        """
        if self.is_void:
            return 0
        if self.is_bool:
            # Plain 'bool' (no width suffix) counts as 1 bit.
            return int(self[0][4:] or 1)
        if self.is_int:
            return int(self[0][3:])
        if self.is_uint or self.is_char:
            return int(self[0][4:])
        if self.is_float:
            return int(self[0][5:])
        if self.is_complex:
            return int(self[0][7:])
        if self.is_struct:
            # Sum of member sizes; padding is not modeled here.
            return sum([m.bits for m in self])
        if self.is_pointer:
            # Pointer width from the target; fall back to 64 when the
            # target reports 0.
            return TargetInfo().sizeof('voidptr')*8 or 64
        return NotImplemented
    def match(self, other):
        """Return match penalty when other can be converted to self.
        Otherwise, return None.

        Parameters
        ----------
        other : {Type, tuple}
          Specify other signature. If other is a tuple of signatures,
          then it is interpreted as argument types of a function
          signature.

        Returns
        -------
        penalty : {int, None}
          Penalty of a match. For a perfect match, penalty is 0.
          If match is impossible, return None
        """
        if isinstance(other, Type):
            if self == other:
                return 0
            if other.is_void:
                return (0 if self.is_void else None)
            elif other.is_pointer:
                if not self.is_pointer:
                    return
                # A void pointer on either side matches at no cost.
                if self[0].is_void or other[0].is_void:
                    return 0
                penalty = self[0].match(other)
                if penalty is None:
                    if self[0].is_void:
                        penalty = 1
                return penalty
            elif self.is_pointer:
                return
            elif other.is_struct:
                if not self.is_struct:
                    return
                if len(self) != len(other):
                    return
                # Sum member-wise penalties; any member mismatch kills
                # the whole match.
                penalty = 0
                for a, b in zip(self, other):
                    p = a.match(b)
                    if p is None:
                        return
                    penalty = penalty + p
                return penalty
            elif self.is_struct:
                return
            elif other.is_function:
                if not self.is_function:
                    return
                if len(self[1]) != len(other[1]):
                    return
                penalty = self[0].match(other[0])
                if penalty is None:
                    return
                for a, b in zip(self[1], other[1]):
                    p = a.match(b)
                    if p is None:
                        return
                    penalty = penalty + p
                return penalty
            elif self.is_function:
                return
            elif (
                    (other.is_int and self.is_int)
                    or (other.is_bool and self.is_bool)
                    or (other.is_float and self.is_float)
                    or (other.is_uint and self.is_uint)
                    or (other.is_char and self.is_char)
                    or (other.is_complex and self.is_complex)):
                # Same kind: narrowing costs the number of lost bits.
                if self.bits >= other.bits:
                    return 0
                return other.bits - self.bits
            elif (
                    (other.is_int and self.is_uint)
                    or (other.is_uint and self.is_int)):
                # Signedness change costs a flat 10 plus any lost bits.
                if self.bits >= other.bits:
                    return 10
                return 10 + other.bits - self.bits
            elif self.is_complex and (other.is_float
                                      or other.is_int or other.is_uint):
                return 1000
            elif self.is_float and (other.is_int or other.is_uint):
                return 1000
            elif (self.is_float or self.is_complex) and other.is_bool:
                return
            elif (self.is_int or self.is_uint) and other.is_bool:
                if self.bits >= other.bits:
                    return 0
                return other.bits - self.bits
            elif self.is_bool and (other.is_int or other.is_uint):
                if self.bits >= other.bits:
                    return 0
                return other.bits - self.bits
            elif ((self.is_int or self.is_uint or self.is_bool)
                  and (other.is_float or other.is_complex)):
                return
            elif self.is_float and other.is_complex:
                return
            elif self.is_pointer and (other.is_int or other.is_uint):
                # An integer of exactly pointer width may act as pointer.
                if self.bits == other.bits:
                    return 1
                return
            # TODO: lots of
            raise NotImplementedError(repr((self, other)))
        elif isinstance(other, tuple):
            # Tuple form: match against this function type's arguments.
            if not self.is_function:
                return
            atypes = self[1]
            # A sole void argument matches an empty argument tuple.
            if len(other) == 0 and len(atypes) == 1 and atypes[0].is_void:
                return 0
            if len(atypes) != len(other):
                return
            penalty = 0
            for a, b in zip(atypes, other):
                p = a.match(b)
                if p is None:
                    return
                penalty = penalty + p
            return penalty
        raise NotImplementedError(repr(type(other)))
    def __call__(self, *atypes, **params):
        # Calling a type constructs a function type: self is the return
        # type and `atypes` are the argument types.
        return Type(self, atypes, **params)
def pointer(self):
numba_ptr_type = self._params.get('NumbaPointerType')
if numba_ptr_type is not None:
return Type(self, '*').params(NumbaType=numba_ptr_type)
return Type(self, '*')
    def apply_templates(self, templates):
        """Iterator of concrete types derived from applying templates mapping
        to self.
        The caller must allow templates dictionary to be changed in-situ.

        Parameters
        ----------
        templates : dict
          Maps template parameter names to lists of candidate types.
        """
        if self.is_concrete:
            yield self
        elif self.is_atomic:
            # A non-concrete atomic name is a template parameter.
            type_list = templates.get(self[0])
            if type_list:
                for i, t in enumerate(type_list):
                    t = Type.fromobject(t)
                    t._params.update(self._params)
                    if t.is_concrete:
                        # templates is changed in-situ! This ensures that
                        # `T(T)` produces `i4(i4)`, `i8(i8)` for `T in
                        # [i4, i8]`. To produce all possible combinations
                        # `i4(i4)`, `i8(i8)`, `i8(i4)`, `i4(i8)`, use
                        # `T1(T2)` where `T1 in [i4, i8]` and `T2 in [i4,
                        # i8]`
                        templates[self[0]] = [t]
                        # assert not self._params, (t, self, self._params)
                        yield t
                        # restore templates
                        templates[self[0]] = type_list
                    else:
                        # this will avoid infinite recursion when
                        # templates is `T in [U]` and `U in [T]`
                        templates[self[0]] = []
                        for ct in t.apply_templates(templates):
                            templates[self[0]] = [ct]
                            yield ct
                            templates[self[0]] = []
                        templates[self[0]] = type_list
            else:
                raise TypeError(f'cannot make {self} concrete using template mapping {templates}')
        elif self.is_pointer:
            for t in self[0].apply_templates(templates):
                yield Type(t, self[1], **self._params)
        elif self.is_struct:
            # Substitute the first non-concrete member, then recurse on
            # the resulting struct for the remaining members.
            for i, t in enumerate(self):
                if not t.is_concrete:
                    for ct in t.apply_templates(templates):
                        yield from Type(
                            *(self[:i] + (ct,) + self[i+1:]),
                            **self._params).apply_templates(templates)
                    return
            # Unreachable: is_concrete was False but all members concrete.
            assert 0
        elif self.is_function:
            rtype, atypes = self
            # First make the return type concrete, then the arguments.
            if not rtype.is_concrete:
                for rt in rtype.apply_templates(templates):
                    yield from Type(rt, atypes, **self._params).apply_templates(templates)
                return
            for i, t in enumerate(atypes):
                if not t.is_concrete:
                    for ct in t.apply_templates(templates):
                        yield from Type(
                            rtype, atypes[:i] + (ct,) + atypes[i+1:],
                            **self._params).apply_templates(templates)
                    return
            assert 0
        elif self.is_custom:
            params = self[0]
            cls = type(self)
            if cls is Type:
                name = params[0]
                params = params[1:]
            else:
                name = cls.__name__
            if name in templates:
                # The custom type name itself is templated: substitute
                # each candidate (alias-resolved) custom type class.
                for cname in templates[name]:
                    cname = Type.aliases.get(cname, cname)
                    if isinstance(cname, str):
                        cls = Type.custom_types.get(cname, Type)
                    else:
                        assert issubclass(cname, Type), cname
                        cls = cname
                    cname = cls.__name__
                    if cls is Type:
                        typ = cls((cname,) + params)
                    else:
                        typ = cls(params)
                    yield from typ.params(self).apply_templates(templates)
                return
            # Otherwise make the first non-concrete Type parameter
            # concrete and recurse; non-Type parameters are left alone.
            for i, t in enumerate(params):
                if not isinstance(t, Type):
                    continue
                if not t.is_concrete:
                    for ct in t.apply_templates(templates):
                        if cls is Type:
                            yield from cls(
                                (name,) + params[:i] + (ct,) + params[i+1:],
                                **self._params).apply_templates(templates)
                        else:
                            yield from cls(
                                params[:i] + (ct,) + params[i+1:],
                                **self._params).apply_templates(templates)
                    return
            assert 0, repr(self)
        else:
            raise NotImplementedError(f'apply_templates: {self} {templates}')
def _demangle_name(s, suffix='W'):
if s and s[0] == suffix:
i = s.find(suffix, 1)
assert i != -1, repr(s)
ln = int(s[1:i])
rest = s[i+ln+1:]
return s[i+1:i+ln+1], rest
return None, s
def _demangle(s):
    """Helper function to demangle the string of mangled Type.
    Used internally.

    Returns a pair `(types, rest)` where `types` is a tuple of Type
    instances decoded from the head of `s` and `rest` is the unparsed
    tail of `s`.

    Algorithm invented by Pearu Peterson, February 2019
    """
    if not s:
        return (Type(),), ''
    if s[0] == 'V':
        # Custom/unknown atomic name: V<len>V<name>.
        name, rest = _demangle_name(s, suffix='V')
        typ = Type(name)
    elif s[0] == '_':
        # Composite: decode the inner block, then read its kind marker.
        block, rest = _demangle(s[1:])
        kind, rest = rest[0], rest[1:]
        assert kind in '_'+_mangling_suffices+_mangling_prefixes, repr(kind)
        if kind == 'P':
            # Pointer around a single inner type.
            assert len(block) == 1, repr(block)
            typ = Type(block[0], '*')
        elif kind == 'K':
            # Struct of the decoded members.
            typ = Type(*block)
        elif kind == 'a':  # function
            assert len(block) == 1, repr(block)
            rtype = block[0]
            atypes, rest = _demangle('_' + rest)
            typ = Type(rtype, atypes, name='')
        elif kind in 'AR':
            # End-of-arguments / end-of-parameters markers bubble up to
            # the enclosing function/custom decoder.
            return block, rest
        elif kind == 'r':  # custom
            assert len(block) == 1, repr(block)
            name = block[0]
            assert name.is_atomic, name
            name = name[0]
            if rest and rest[0] == 'R':
                # Custom type without parameters.
                atypes = ()
                rest = rest[1:]
            else:
                atypes, rest = _demangle('_' + rest)
            if name in Type.custom_types:
                typ = Type.custom_types[name](atypes)
            else:
                typ = Type((name,)+atypes)
        else:
            raise NotImplementedError(repr((kind, s)))
    else:
        # Single-character atomic code.
        rest = s[1:]
        t = _mangling_imap[s[0]]
        if t == 'void':
            typ = Type()
        else:
            typ = Type(t)
    # An optional W<len>W<name> suffix names the decoded type.
    vname, rest = _demangle_name(rest)
    if vname is not None:
        typ._params['name'] = vname
    result = [typ]
    # Keep decoding siblings until a prefix marker terminates the list.
    if rest and rest[0] not in _mangling_prefixes:
        r, rest = _demangle(rest)
        result.extend(r)
    return tuple(result), rest
# TODO: move numba boolean support to rbc/boolean_type.py
class Boolean1(nb.types.Boolean):
    """Numba type for a 1-bit boolean."""
    def can_convert_from(self, typingctx, other):
        # Accept implicit conversion from any numba boolean type.
        return isinstance(other, nb.types.Boolean)
@datamodel.register_default(Boolean1)
class Boolean1Model(datamodel.models.BooleanModel):
    """Data model for Boolean1: the data representation stays 1-bit."""
    def get_data_type(self):
        # Use the bit type also as the data type (BooleanModel widens
        # data to a byte by default).
        return self._bit_type
class Boolean8(nb.types.Boolean):
    """Numba type for an 8-bit boolean."""
    # Override the default boolean bit width.
    bitwidth = 8
    def can_convert_to(self, typingctx, other):
        return isinstance(other, nb.types.Boolean)
    def can_convert_from(self, typingctx, other):
        return isinstance(other, nb.types.Boolean)
@datamodel.register_default(Boolean8)
class Boolean8Model(datamodel.models.BooleanModel):
    """Data model for Boolean8: the value representation is one byte."""
    def get_value_type(self):
        return self._byte_type
# Singleton instances of the sized numba boolean types.
boolean1 = Boolean1('boolean1')
boolean8 = Boolean8('boolean8')
@lower_cast(Boolean1, nb.types.Boolean)
@lower_cast(Boolean8, nb.types.Boolean)
def literal_booleanN_to_boolean(context, builder, fromty, toty, val):
    # Any nonzero bit pattern converts to True.
    return builder.icmp_signed('!=', val, val.type(0))
@lower_cast(nb.types.Boolean, Boolean1)
@lower_cast(nb.types.Boolean, Boolean8)
def literal_boolean_to_booleanN(context, builder, fromty, toty, val):
    # Zero-extend the boolean into the target representation.
    llty = context.get_value_type(toty)
    return builder.zext(val, llty)
@extending.lower_builtin(bool, Boolean8)
def boolean8_to_bool(context, builder, sig, args):
    # Implements `bool(x)` for Boolean8: compare the byte against zero.
    [val] = args
    return builder.icmp_signed('!=', val, val.type(0))
# Register the sized booleans in the width lookup and in the reverse
# map used by Type.fromnumba.
_numba_bool_map[1] = boolean1
_numba_bool_map[8] = boolean8
_numba_imap[boolean1] = 'bool1'
_numba_imap[boolean8] = 'bool8'
boolean8ptr = nb.types.CPointer(boolean8)
boolean8ptr2 = nb.types.CPointer(boolean8ptr)
# Teach numba's type converter that these pointer-like types are
# mutually convertible (safe casts in both directions; the _i > _j
# guard avoids registering the same pair twice).
_pointer_types = [boolean8ptr, boolean8ptr2, nb.types.intp, nb.types.voidptr]
for _i, _p1 in enumerate(_pointer_types[:2]):
    for _j, _p2 in enumerate(_pointer_types):
        if _p1 == _p2 or _i > _j:
            continue
        typeconv.rules.default_type_manager.set_compatible(
            _p1, _p2, typeconv.Conversion.safe)
        typeconv.rules.default_type_manager.set_compatible(
            _p2, _p1, typeconv.Conversion.safe)
# Matches the leading `name(pos_args[, /rest])` part of a ufunc
# docstring signature line.
_ufunc_pos_args_match = re.compile(
    r'(?P<name>\w[\w\d_]*)\s*[(](?P<pos_args>[^/)]*)[/]?(?P<rest>.*)[)]').match
# Splits positional arguments into the required part and the trailing
# `[optional]` part.
_req_opt_args_match = re.compile(r'(?P<req_args>[^[]*)(?P<opt_args>.*)').match
# Matches `name : annotation` lines in numpydoc sections.
_annot_match = re.compile(
    r'\s*(?P<name>\w[\w\d_]*)\s*[:](?P<annotation>.*)').match
def get_signature(obj):
    """Return an inspect.Signature for a function or numpy ufunc.

    For ufuncs the signature is reconstructed by parsing the first line
    of the docstring (positional arguments) and the numpydoc
    Parameters/Returns sections (annotations).
    """
    if inspect.isfunction(obj):
        return inspect.signature(obj)
    if isinstance(obj, np.ufunc):
        parameters = dict()
        returns = dict()
        # The first docstring line looks like `name(x1, x2[, out], ...)`.
        sigline = obj.__doc__.lstrip().splitlines(1)[0]
        m = _ufunc_pos_args_match(sigline)
        name = m['name']
        assert name == obj.__name__, (name, obj.__name__)
        # positional arguments
        m = _req_opt_args_match(m['pos_args'])
        req_pos_names, opt_pos_names = [], []
        for n in m.group('req_args').split(','):
            n = n.strip()
            if n:
                req_pos_names.append(n)
                parameters[n] = inspect.Parameter(
                    n, inspect.Parameter.POSITIONAL_ONLY)
        # Optional names are wrapped in brackets; strip and default None.
        for n in (m.group('opt_args').replace('[', '')
                  .replace(']', '').split(',')):
            n = n.strip()
            if n:
                opt_pos_names.append(n)
                parameters[n] = inspect.Parameter(
                    n, inspect.Parameter.POSITIONAL_ONLY, default=None)
        # TODO: process non-positional arguments in `m['rest']`
        # scan for annotations and determine returns
        mode = 'none'
        for line in obj.__doc__.splitlines():
            if line in ['Parameters', 'Returns', 'Notes', 'See Also',
                        'Examples']:
                mode = line
                continue
            if mode == 'Parameters':
                m = _annot_match(line)
                if m is not None:
                    n = m['name']
                    if n in parameters:
                        annot = m['annotation'].strip()
                        parameters[n] = parameters[n].replace(
                            annotation=annot)
            if mode == 'Returns':
                m = _annot_match(line)
                if m is not None:
                    n = m['name']
                    annot = m['annotation'].strip()
                    returns[n] = annot
        sig = inspect.Signature(parameters=parameters.values())
        # Single return -> plain annotation; multiple -> dict of them.
        if len(returns) == 1:
            sig = sig.replace(return_annotation=list(returns.values())[0])
        elif returns:
            sig = sig.replace(return_annotation=returns)
        return sig
    raise NotImplementedError(obj)
# Import numba support
if 1: # to avoid flake E402
from . import structure_type
|
from builtins import print
import argparse
from f0cal.farm.client import entities
import f0cal.core
from progress.bar import IncrementalBar
from progress.spinner import MoonSpinner
import re
import json
from time import sleep
import os
from f0cal.farm.client.api_client import DeviceFarmApi, ConnectionError, ClientError, ServerError
import wrapt
import sys
import rich.console
import rich.table
from collections.abc import Iterable
from conans.model.ref import ConanName
from conans.errors import InvalidNameException
# Matches `name/version` with an optional `#revision` suffix, e.g.
# `pkg/1#42`. Raw-string form of the original escaped literal (the
# value is byte-identical).
REFERENCE_REGEX = r'([\w]*)\/([\w]*)(#[\d]*)?$'
class JsonFileParser:
    """Minimal dict-like persistence layer over a JSON file.

    Loads existing JSON content on construction; otherwise creates the
    parent directory (if any) and starts empty. Call `write()` to
    persist changes back to disk.
    """
    def __init__(self, json_file):
        '''Parser for current device config'''
        self.json_file = json_file
        if os.path.exists(json_file):
            # Use a context manager so the handle is closed promptly
            # (the original left the file object unclosed).
            with open(json_file) as f:
                self.data = json.load(f)
        else:
            parent = os.path.dirname(json_file)
            # os.makedirs('') raises; only create a directory when the
            # path actually contains one.
            if parent:
                os.makedirs(parent, exist_ok=True)
            self.data = {}
    def __getitem__(self, item):
        return self.data[item]
    def __setitem__(self, key, value):
        self.data[key] = value
    def __iter__(self):
        return self.data.__iter__()
    def write(self):
        """Persist the current data to the backing JSON file."""
        with open(self.json_file, 'w') as f:
            json.dump(self.data, f)
def resolve_remote_url(remote_name):
    """Look up `remote_name` in the configured remotes file.

    Exits the process with an error message when the remote is unknown.
    """
    remotes_file = JsonFileParser(f0cal.core.CORE.config['api']['remotes_file'])
    if remote_name in remotes_file:
        return remotes_file[remote_name]
    print(f'Remote {remote_name} not found. Please configure the remote first using: f0cal remote add')
    exit(1)
def create_class(class_name, noun, remote=False, entities=entities):
    """Build a client-bound subclass of `entities.<class_name>`.

    When `remote` is truthy, the API url is resolved from the
    `--remote` command-line argument; otherwise from the core config.
    """
    api_key = f0cal.core.CORE.config["api"].get("api_key")
    if remote:
        # TODO THIS A HACKY WORKAROUND FOR THE PLUGPARSE RUNNING ALL ARG SETTERS
        parser = argparse.ArgumentParser()
        parser.add_argument("--remote", "-r", type=lambda remote_name: resolve_remote_url(remote_name), required=True)
        ns, _ = parser.parse_known_args()
        api_url = ns.remote
    else:
        api_url = f0cal.core.CORE.config["api"]["api_url"]
    client = DeviceFarmApi(api_url, api_key)
    # The dynamic subclass carries the configured client and the CLI
    # noun as class attributes.
    cls = type(
        class_name, (getattr(entities, class_name),), {"CLIENT": client, "NOUN": noun}
    )
    return cls
def _resolve_reference_type(ref):
# match = re.match(REFERENCE_REGEX, ref)
# if match:
# return 'reference'
if ref.startswith(':'):
return 'id'
return 'name'
def query(class_name, noun, ref, remote=None, entities=entities):
    """Resolve `ref` (':<id>' or a local name) to an API entity.

    Prints an error and exits the process on lookup or API failure.
    """
    cls = create_class(class_name, noun, remote, entities)
    ref_type = _resolve_reference_type(ref)
    if ref_type == 'reference':
        # Currently unreachable: _resolve_reference_type never returns
        # 'reference' while the regex branch is commented out.
        print('Referencing objects is only supported from ids currently. Check back soon for full namespace resolution')
        exit(1)
    try:
        if ref_type == 'name':
            # Instance names are resolved locally
            if noun == 'instance':
                device_config = JsonFileParser(f0cal.core.CORE.config['api']['device_file'])
                if ref not in device_config:
                    print(
                        f'Name {ref} not found. If you created in a different env try querying '
                        'all instances: \n f0cal farm instance query and then referencing it via id \n :<id> ')
                    exit(1)
                return cls.from_id(device_config[ref]['id'])
            inst = cls.from_name(ref)
        else:
            # Strip the leading ':' marker from id references.
            _id = ref.replace(':', '')
            inst = cls.from_id(_id)
        return inst
    except entities.NoSuchItemException as e:
        print(e.args[0])
        exit(1)
    except (ConnectionError, ClientError, ServerError) as e:
        print(e.args[0])
        exit(1)
def parse_query_string(query_string):
    """Parse a 'key==value,key2==value2' query string into a dict.

    On a malformed pair an error message is printed and the pairs
    parsed so far are returned (matching the original behaviour).
    """
    ret = {}
    try:
        for pair in query_string.split(","):
            # Exactly one '==' separator is required per pair.
            key, val = (part.strip() for part in pair.split("=="))
            ret[key] = val
    except ValueError:
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        print("Error parsing query string. Please make sure it is formatted correctly")
    return ret
def parse_update_string(update_string):
    """Parse a 'key=value,key2=value2' update string into a dict.

    On a malformed pair an error message is printed and the pairs
    parsed so far are returned (matching the original behaviour).
    """
    ret = {}
    try:
        for pair in update_string.split(","):
            # Exactly one '=' separator is required per pair.
            key, val = (part.strip() for part in pair.split("="))
            ret[key] = val
    except ValueError:
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        print("Error parsing update string. Please make sure it is formatted correctly")
    return ret
@wrapt.decorator
def api_key_required(wrapped, instance, args, kwargs):
    """Decorator: abort with a help message unless an API key is set."""
    api_key = f0cal.core.CORE.config['api'].get('api_key')
    if api_key is None:
        print(
            'An API KEY is required for this action please set one with\n$f0cal farm config update "api_key=$YOU_API_KEY"\n'
            'You can obtain one at f0cal.com')
        exit(1)
    return wrapped(*args, **kwargs)
class Printer:
    """Render API objects (or lists of them) as rich tables or as raw
    blobs on stdout."""
    @classmethod
    def _force_string(cls, blob):
        """Coerce a cell value to a string; lists join with newlines."""
        if isinstance(blob, list):
            return "\n".join(blob)
        return str(blob)
    @classmethod
    def _blob_to_table(cls, rows, cols):
        """Build a rich table from row tuples and column names."""
        table = rich.table.Table(show_header=True)
        [table.add_column(col_name) for col_name in cols]
        [table.add_row(*r) for r in rows]
        return table
    @classmethod
    def blob_to_table(cls, blob):
        """Convert a dict (or list of dicts) into a rich table."""
        if not isinstance(blob, list):
            blob = [blob]
        _coerce = lambda _row: [cls._force_string(_c) for _c in _row.values()]
        _capitalize = lambda _col: _col.replace("_", " ").title()
        rows = [tuple(_coerce(row)) for row in blob]
        # Take column names from the last row without mutating the
        # caller's list (the original used blob.pop()).
        cols = [_capitalize(col) for col in blob[-1].keys()]
        return cls._blob_to_table(rows, cols)
    @classmethod
    def unk_to_blob(cls, unk):
        """Normalize an unknown value into plain dicts/lists of dicts."""
        if isinstance(unk, list):
            return [cls.unk_to_blob(u) for u in unk]
        elif isinstance(unk, dict):
            return unk
        return cls.obj_to_blob(unk)
    @classmethod
    def obj_to_blob(cls, obj):
        """Extract an object's public attributes as a dict."""
        if hasattr(obj, 'printable_json'):
            return obj.printable_json
        return {k: v for k, v in obj.__dict__.items() if not k.startswith("_")}
    @classmethod
    def print_table(cls, unk):
        # NOTE(review): len() on a non-sized Iterable (e.g. a generator)
        # would raise here — confirm callers always pass lists.
        if isinstance(unk, Iterable) and len(unk) == 0:
            print("EMPTY")
            return
        blob = cls.unk_to_blob(unk)
        rich.console.Console().print(cls.blob_to_table(blob))
    @classmethod
    def print_json(cls, unk):
        # NOTE(review): this prints the Python repr of the blob, not
        # strict JSON (single quotes); kept for CLI compatibility.
        blob = cls.unk_to_blob(unk)
        print(blob)
@wrapt.decorator
def printer(wrapped, instance, args, kwargs):
    """Decorator: render the wrapped function's return value.

    A `json=True` keyword (consumed here, not forwarded) selects blob
    output over the table renderer. API errors print and exit.
    """
    # Shadows the `json` module locally on purpose: it is a flag here.
    json = kwargs.pop("json", False)
    try:
        out = wrapped(*args, **kwargs)
    except (ConnectionError, ClientError, ServerError) as e:
        print(e.args[0])
        exit(1)
    if json:
        Printer.print_json(out)
    else:
        Printer.print_table(out)
    return out
class QueueingBar(IncrementalBar):
    """IncrementalBar whose suffix cycles through moon-spinner phases."""
    suffix = '%(spinner)s'
    spinner_pos = 0
    spinner_phases = MoonSpinner.phases
    @property
    def spinner(self):
        """Advance to and return the next spinner phase character."""
        next_pos = (self.spinner_pos + 1) % len(self.spinner_phases)
        self.spinner_pos = next_pos
        return self.spinner_phases[next_pos]
class InstanceStatusPrinter:
    """Blocks the CLI with progress bars while an instance is queued
    and then provisioning."""
    def __init__(self, instance):
        self.instance = instance
    def block(self):
        """Wait through the 'queued' then 'provisioning' states."""
        self._wait_queued()
        self._wait_provisioning()
    def _wait_queued(self):
        # Only show the queue bar when we actually have to wait.
        if self.instance.status == 'queued':
            print(f"No hardware immediately available")
            original_queue_position = self.instance.queue_position
            queue_position = original_queue_position
            with QueueingBar(message=f'Current queue length: {queue_position}', max=original_queue_position) as bar:
                while self.instance.status == 'queued':
                    self._refresh_instance()
                    if queue_position != self.instance.queue_position:
                        # Advance the bar one step per queue position gained.
                        queue_position = self.instance.queue_position
                        bar.message=f'Current queue length {queue_position}'
                        bar.next()
                    bar.update()
                    sleep(.25)
                bar.finish()
    def _refresh_instance(self):
        # Tolerate up to five transient API failures before giving up.
        error_count = 0
        while error_count < 5:
            try:
                self.instance.refresh()
                return
            except (ConnectionError, ServerError) as e:
                error_count += 1
        # Retries exhausted: one final unguarded refresh so the
        # underlying error propagates to the caller.
        self.instance.refresh()
    def _wait_provisioning(self):
        # The bar is sized for a nominal 6-minute provisioning window.
        with IncrementalBar('Loading your device image', suffix=' [ %(elapsed_td)s/ 00:06:000 ]', max=360) as bar:
            elapsed_time = 0
            while self.instance.status == 'provisioning':
                bar.next()
                sleep(1)
                elapsed_time += 1
                # Poll the API every 5 seconds.
                if elapsed_time % 5 == 0:
                    self._refresh_instance()
            bar.finish()
        if self.instance.status == 'error':
            print(f'There was an error starting instance {self.instance.id} please contact F0cal')
            exit(1)
        if self.instance.status == 'ready':
            print('Your instance is ready to be used')
# TODO REFACTOR TO PULL OUT BASE CLASS
class ImageStatusPrinter:
    """Blocks the CLI with a spinner while a device image is saving."""
    def __init__(self, image):
        self.image = image
    def block(self):
        """Wait until the image leaves the 'saving' state, then report."""
        self._wait_saving()
    def _wait_saving(self):
        with MoonSpinner('Saving your device image: ') as bar:
            elapsed_time = 0
            while self.image.status == 'saving':
                bar.next()
                sleep(.25)
                elapsed_time += .25
                # Poll the API every 3 seconds. The 0.25 steps are exact
                # in binary floating point, so the modulo test is
                # reliable at whole multiples of 3.
                if elapsed_time % 3 == 0:
                    self.image.refresh()
            bar.finish()
        if self.image.status == 'ready':
            print('Your image has been saved')
            return
        if 'error' in self.image.status:
            print("There was an error saving you instance please contact F0cal")
            exit(1)
def verify_conan_name(name):
    """Validate `name` against conan naming rules; exit on failure.

    Returns the name unchanged when valid, so it can be used as an
    argparse `type=` callable.
    """
    try:
        ConanName.validate_name(name)
    except InvalidNameException as e:
        print(e.args[0])
        exit(1)
    return name
|
import CoreMedia
def CMTIMERANGE_IS_VALID(range):
    """Return True when *range* has valid start/duration times and a
    non-negative epoch-0 duration."""
    if not CMTIME_IS_VALID(range.start):
        return False
    if not CMTIME_IS_VALID(range.duration):
        return False
    # Durations must live in epoch 0 and be non-negative.
    return range.duration.epoch == 0 and range.duration.value >= 0
def CMTIMERANGE_IS_INVALID(range):
    """Return True when *range* is not a valid CMTimeRange."""
    return not CMTIMERANGE_IS_VALID(range)
def CMTIMERANGE_IS_INDEFINITE(range):
    """Return True for a valid range whose start or duration is
    indefinite."""
    if not CMTIMERANGE_IS_VALID(range):
        return False
    return CMTIME_IS_INDEFINITE(range.start) or CMTIME_IS_INDEFINITE(range.duration)
def CMTIMERANGE_IS_EMPTY(range):
    """Return True for a valid range whose duration is kCMTimeZero."""
    return CMTIMERANGE_IS_VALID(range) and range.duration == kCMTimeZero
def CMTIMEMAPPING_IS_VALID(mapping):
    """Return True when the mapping's target range is valid.

    Only `target` is checked here, mirroring the original macro; the
    source range's validity is not required.
    """
    return CMTIMERANGE_IS_VALID(mapping.target)
def CMTIMEMAPPING_IS_INVALID(mapping):
    """Return True when the mapping's target range is not valid."""
    return not CMTIMEMAPPING_IS_VALID(mapping)
def CMTIMEMAPPING_IS_EMPTY(mapping):
    """Return True for a mapping with a non-numeric source start but a
    valid target range."""
    source_is_numeric = CMTIME_IS_NUMERIC(mapping.source.start)
    return (not source_is_numeric) and CMTIMERANGE_IS_VALID(mapping.target)
def CMSimpleQueueGetFullness(queue):
    """Return count/capacity of the queue, or 0.0 when the capacity is
    zero (avoiding division by zero)."""
    capacity = CMSimpleQueueGetCapacity(queue)
    if not capacity:
        return 0.0
    return CMSimpleQueueGetCount(queue) / capacity
def CMTIME_IS_VALID(time):
    """Return True when the Valid flag is set on *time*."""
    return bool(time.flags & CoreMedia.kCMTimeFlags_Valid)
def CMTIME_IS_INVALID(time):
    """Return True when the Valid flag is not set on *time*."""
    return not CMTIME_IS_VALID(time)
def CMTIME_IS_POSITIVE_INFINITY(time):
    """Return True for a valid time flagged as +infinity."""
    if not CMTIME_IS_VALID(time):
        return False
    return (time.flags & CoreMedia.kCMTimeFlags_PositiveInfinity) != 0
def CMTIME_IS_NEGATIVE_INFINITY(time):
    """Return True for a valid time flagged as -infinity."""
    if not CMTIME_IS_VALID(time):
        return False
    return (time.flags & CoreMedia.kCMTimeFlags_NegativeInfinity) != 0
def CMTIME_IS_INDEFINITE(time):
    """Return True for a valid time flagged as indefinite."""
    if not CMTIME_IS_VALID(time):
        return False
    return (time.flags & CoreMedia.kCMTimeFlags_Indefinite) != 0
def CMTIME_IS_NUMERIC(time):
    """Return True when *time* is valid with no implied-value flags,
    i.e. it represents an actual numeric timestamp."""
    mask = CoreMedia.kCMTimeFlags_Valid | CoreMedia.kCMTimeFlags_ImpliedValueFlagsMask
    return (time.flags & mask) == CoreMedia.kCMTimeFlags_Valid
def CMTIME_HAS_BEEN_ROUNDED(time):
    """Return True for a numeric time flagged as having been rounded."""
    if not CMTIME_IS_NUMERIC(time):
        return False
    return (time.flags & CoreMedia.kCMTimeFlags_HasBeenRounded) != 0
|
#!/bin/python3
import sys


def simulate(n, k, c):
    """Simulate the cloud-jumping game and return the remaining energy.

    Starting at cloud 0 with 100 energy, each jump advances k clouds
    (mod n) and costs 1 energy, plus 2 extra on a thundercloud
    (c[i] == 1). Stops on returning to cloud 0 or on running out of
    energy.

    Parameters
    ----------
    n : int
        Number of clouds.
    k : int
        Jump length.
    c : list of int
        Cloud types: 0 (cumulus) or 1 (thundercloud).
    """
    energy = 100
    i = 0
    while energy > 0:
        i = (i + k) % n
        energy -= 1
        if c[i] == 1:
            energy -= 2
        if i == 0:
            break
    return energy


if __name__ == '__main__':
    # Guarding the I/O makes the simulation importable and testable.
    n, k = (int(v) for v in input().strip().split(' '))
    c = [int(c_temp) for c_temp in input().strip().split(' ')]
    print(simulate(n, k, c))
|
from django.db import models
class Person(models.Model):
    """Site owner / CV subject: contact details and a short bio."""
    class Meta:
        verbose_name_plural = "People"
    nick = models.CharField(max_length=56)
    name = models.CharField(max_length=128)
    email = models.EmailField()
    # NOTE(review): null=True on a CharField lets both NULL and ''
    # represent "no phone"; Django convention prefers blank-only.
    phone = models.CharField(max_length=24, null=True, blank=True)
    about = models.TextField()
    title = models.CharField(max_length=56, default="Software Engineer")
    def __str__(self):
        return self.name
class Project(models.Model):
    """A portfolio project with its tech-stack M2M relations."""
    class Meta:
        verbose_name_plural = "Projects"
    person = models.ForeignKey(Person, on_delete=models.DO_NOTHING)
    name = models.CharField(max_length=56)
    url = models.CharField(max_length=256, blank=True)
    desc = models.TextField(null=True, blank=True)
    languages = models.ManyToManyField('Language', blank=True)
    frameworks = models.ManyToManyField('Framework', blank=True)
    technologies = models.ManyToManyField('Technology', blank=True)
    protocols = models.ManyToManyField('Protocol', blank=True)
    open_source = models.BooleanField(default=True)
    def __str__(self):
        return self.name
    def link_display(self):
        """Label for the project link: source repo vs. website."""
        if self.open_source:
            return "Source Code"
        return "Website"
    def foss_status(self):
        """Human-readable licensing status."""
        if self.open_source:
            return "Open Source"
        return "Proprietary"
class Language(models.Model):
    """Programming language, related back to projects.

    NOTE(review): `through="Project_languages"` names the implicit
    through model of Project.languages — confirm migrations accept
    this double-sided M2M declaration.
    """
    class Meta:
        verbose_name_plural = "Languages"
        ordering = ["name"]
    person = models.ForeignKey(Person, on_delete=models.DO_NOTHING)
    projects = models.ManyToManyField(
        Project, through="Project_languages", blank=True)
    name = models.CharField(max_length=56, unique=True)
    def __str__(self):
        return self.name
class Framework(models.Model):
    """Framework tied to a Language; related back to projects via the
    implicit through table of Project.frameworks."""
    class Meta:
        verbose_name_plural = "Frameworks"
    language = models.ForeignKey(Language, on_delete=models.CASCADE)
    projects = models.ManyToManyField(
        Project, through="Project_frameworks", blank=True)
    # NOTE(review): unlike the sibling models, `name` is not unique here.
    name = models.CharField(max_length=56)
    def __str__(self):
        return self.name
class Technology(models.Model):
    """Technology tag; related back to projects via the implicit
    through table of Project.technologies."""
    class Meta:
        verbose_name_plural = "Technologies"
    person = models.ForeignKey(Person, on_delete=models.DO_NOTHING)
    projects = models.ManyToManyField(
        Project, through="Project_technologies", blank=True)
    name = models.CharField(max_length=56, unique=True)
    def __str__(self):
        return self.name
class Protocol(models.Model):
    """A protocol tag with a reference URL (e.g. to its specification)."""
    class Meta:
        verbose_name_plural = "Protocols"
    person = models.ForeignKey(Person, on_delete=models.DO_NOTHING)
    projects = models.ManyToManyField(
        Project, through="Project_protocols", blank=True)
    name = models.CharField(max_length=56, unique=True)
    url = models.URLField()
    def __str__(self):
        return self.name
|
import pandas as pd
import glob
import random
import os
import cv2
import numpy as np
import tables
# Pack 1024x1024 RGB PNG tiles plus their labels/ids into a single HDF5 file.
xmltile_path = '/path/to/xml_tiles/'
hdf5_path = 'path/to/save/xml2hdf.hdf5'
csv_path = 'path/to/xml2tile.csv'  # renamed from `csv` to avoid shadowing the stdlib module
df = pd.read_csv(csv_path)
# First 23 characters of the tile name identify the source slide/XML.
# Bug fix: the original assigned to `df_xml`, which was never defined.
df['ids'] = df['tile_name'].str[:23]
df = df.rename(columns={'tile_name': 'filename'})
# Bug fix: the original globbed `xml2tile_path`, which was never defined.
tiles = glob.glob(os.path.join(xmltile_path, '*', '*.png'))
labels = []
ids = []
# Look up each tile's label and slide id from the CSV by file name.
for tile_path in tiles:
    fname = os.path.split(tile_path)[1]
    row = df[df.filename == fname]  # single lookup instead of two filters
    labels.append(row.label.values[0])
    ids.append(row.ids.values[0])
img_dtype = tables.UInt8Atom()
# Extendable along axis 0; each appended image is (1024, 1024, 3) uint8.
data_shape = (0, 1024, 1024, 3)
hdf5_file = tables.open_file(hdf5_path, mode='w')
storage = hdf5_file.create_earray(hdf5_file.root, 'img', img_dtype, shape=data_shape)
hdf5_file.create_array(hdf5_file.root, 'labels', labels)
hdf5_file.create_array(hdf5_file.root, 'ids', ids)
for i, tile in enumerate(tiles):
    if i % 1000 == 0 and i > 1:
        # Bug fix: progress message referenced undefined `test_addrs`.
        print('data: {}/{}'.format(i, len(tiles)))
    try:
        img = cv2.imread(tile)
        if img.shape[0] != 1024 or img.shape[1] != 1024:
            img = cv2.resize(img, (1024, 1024), interpolation=cv2.INTER_CUBIC)
        # OpenCV loads BGR; store RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        storage.append(img[None])
    except Exception as e:
        # Bug fix: the original `try` had no handler (SyntaxError). Skip
        # unreadable tiles (cv2.imread returns None -> AttributeError above).
        print('skipping {}: {}'.format(tile, e))
hdf5_file.close()
|
from spacy.tokens import Token
class QuestionWord:
    """Token-like stand-in for an interrogative word.

    Mimics the spaCy ``Token`` attribute surface (``text``, ``lemma_``,
    ``pos_``, ``dep_``, ``is_space``, ``children``) while mapping the
    surface form to an abstract question category (e.g. 'where' -> PLACE).
    """
    FORM = 'FORM'
    CHOICE = 'CHOICE'
    OBJECT = 'OBJECT'
    REASON = 'REASON'
    TIME = 'TIME'
    PERSON = 'PERSON'
    PLACE = 'PLACE'
    # POS tag assigned to every question word.
    question_pos = 'QUESTION'
    # Lower-cased surface form -> abstract category.
    question_words = {
        'where': PLACE,
        'who': PERSON,
        'when': TIME,
        'why': REASON,
        'what': OBJECT,
        'which': CHOICE,
        'how': FORM
    }
    def __init__(self, token: Token):
        # Resolve the category once; an unknown word raises KeyError, as before.
        category = self.question_words[token.text.lower()]
        self.text = category
        self.lemma_ = category
        self.pos_ = self.question_pos
        self.dep_ = token.dep_
        self.is_space = False
        self.children = []
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from flask import Flask
from loguru import logger
import pydicom
from pymedphys._dicom.connect.listen import DicomListener
class Algorithm:
    """A named algorithm registered with the service, plus its default settings."""
    def __init__(self, name, function, default_settings):
        """Store the algorithm's name, callable and default settings dict."""
        self.name = name
        self.function = function
        self.default_settings = default_settings
    def settings_to_json(self):
        """Serialize the default settings as pretty-printed (indent=4) JSON."""
        return json.dumps(self.default_settings, indent=4)
class FlaskApp(Flask):
    """
    Custom Flask App that also manages a background DICOM listener and a
    registry of named algorithms (populated via the `register` decorator).
    """
    # name -> Algorithm; class-level, so all instances share the registry.
    algorithms = {}
    # Flags presumably toggled elsewhere when Celery worker/beat start —
    # they are never modified in this chunk; confirm against the rest of
    # the package.
    celery_started = False
    beat_started = False
    # DICOM listener defaults; overwritten by run().
    dicom_listener_port = 7777
    dicom_listener_aetitle = "PLATIPY_SERVICE"
    api = None  # Holds reference to api for extensibility
    def register(self, name, default_settings=None):
        """Decorator factory: register the wrapped function as algorithm *name*.

        The function itself is returned unchanged; *default_settings* is an
        optional dict of its default configuration.
        """
        def decorator(f):
            self.algorithms.update({name: Algorithm(name, f, default_settings)})
            return f
        return decorator
    def run(
        self,
        host=None,
        port=None,
        debug=None,
        dicom_listener_port=7777,
        dicom_listener_aetitle="PLATIPY_SERVICE",
        load_dotenv=True,
        **options
    ):
        """Start the DICOM listener, then the Flask development server.

        use_reloader is forced off — presumably so the reloader's second
        process does not start a duplicate listener; confirm before changing.
        """
        logger.info("Starting APP!")
        self.dicom_listener_port = dicom_listener_port
        self.dicom_listener_aetitle = dicom_listener_aetitle
        self.run_dicom_listener(dicom_listener_port, dicom_listener_aetitle)
        super().run(
            host=host,
            port=port,
            debug=debug,
            load_dotenv=load_dotenv,
            use_reloader=False,
            **options
        )
    def run_dicom_listener(self, listen_port, listen_ae_title):
        """
        Background task that listens at a specific port for incoming dicom series
        """
        # Imported here rather than at module level — presumably to avoid a
        # circular import with the package's models/db; confirm.
        from .models import Dataset, DataObject
        from . import db
        logger.info(
            "Starting Dicom Listener on port: {0} with AE Title: {1}",
            listen_port,
            listen_ae_title,
        )
        def series_recieved(dicom_path):  # (sic — name kept as-is)
            """Callback run when a complete series has been written to *dicom_path*."""
            logger.info("Series Recieved at path: {0}".format(dicom_path))
            # Get the SeriesUID — last readable file in the directory wins.
            series_uid = None
            for f in os.listdir(dicom_path):
                f = os.path.join(dicom_path, f)
                try:
                    # NOTE(review): pydicom.read_file is a deprecated alias
                    # of pydicom.dcmread.
                    d = pydicom.read_file(f)
                    series_uid = d.SeriesInstanceUID
                except Exception as e:
                    logger.debug("No Series UID in: {0}".format(f))
                    logger.debug(e)
            if series_uid:
                logger.info("Image Series UID: {0}".format(series_uid))
            else:
                logger.error("Series UID could not be determined... Stopping")
                return
            # Find the data objects with the given series UID and update them
            dos = DataObject.query.filter_by(series_instance_uid=series_uid).all()
            if len(dos) == 0:
                logger.error(
                    "No Data Object found with Series UID: {0} ... Stopping".format(series_uid)
                )
                return
            # Mark each object fetched; commit runs once per object (a single
            # commit after the loop would batch the writes).
            for do in dos:
                do.is_fetched = True
                do.path = dicom_path
                db.session.commit()
        try:
            dicom_listener = DicomListener(
                port=listen_port, ae_title=listen_ae_title, on_released_callback=series_recieved
            )
            dicom_listener.start()
        except Exception as e:
            logger.error("Listener Error: " + str(e))
|
import sys
import os
import shutil
import sys
# Driver script: run DeepPM feature prediction then SCOP evaluation on one test set.
GLOBAL_PATH = '/Users/heitorsampaio/Google_Drive/Projetos/Protein_DeepLearning/DeepPM'
sys.path.insert(0, GLOBAL_PATH + '/lib')
# Expect exactly six positional arguments after the script name.
if len(sys.argv) != 7:
    print('Usage: predict_single.py test_file modelfile weightfile data_dir CV_dir ktop_node')
    sys.exit(1)
test_file = sys.argv[1]
modelfile = sys.argv[2]
weightfile = sys.argv[3]
data_dir = sys.argv[4]
CV_dir = sys.argv[5]
ktop_node = sys.argv[6]
feature_dir_global = data_dir + '/Feature_aa_ss_sa/'
pssm_dir_global = data_dir + '/PSSM_Fea/'
# Validate required inputs up front; messages had "Cuoldn't" typos.
if not os.path.exists(feature_dir_global):
    print("Couldn't find folder ", feature_dir_global, " please set it in the script ./predict_single.py")
    exit(-1)
if not os.path.exists(pssm_dir_global):
    print("Couldn't find folder ", pssm_dir_global, " please set it in the script ./predict_single.py")
    exit(-1)
results_file = CV_dir + '/DCNN_results.txt'
if not os.path.exists(CV_dir):
    os.makedirs(CV_dir)
if not os.path.exists(modelfile):
    print("Couldn't find file ", modelfile)
    exit(-1)
if not os.path.exists(weightfile):
    # Bug fix: this message previously printed `modelfile`.
    print("Couldn't find file ", weightfile)
    exit(-1)
resultdir = CV_dir + '/DCNN_results'
if not os.path.exists(resultdir):
    os.makedirs(resultdir)
print("###### Evaluating data")
# NOTE(review): os.system with concatenated strings breaks on paths containing
# spaces; subprocess.run([...]) would be safer.
cmd1 = ('python3 ' + GLOBAL_PATH + '/lib/DLS2F_predict_fea.py ' + test_file + ' ' + modelfile + ' '
        + weightfile + ' ' + feature_dir_global + ' ' + pssm_dir_global + ' ' + resultdir + ' ' + str(ktop_node))
print("Running ", cmd1, "\n\n")
os.system(cmd1)
cmd2 = ('python3 ' + GLOBAL_PATH + '/lib/DLS2F_evaluate_SCOP.py ' + test_file + ' ' + GLOBAL_PATH
        + '/datasets/D1_SimilarityReduction_dataset/fold_label_relation2.txt ' + resultdir + ' ' + results_file)
print("Running ", cmd2, "\n\n")
os.system(cmd2)
## clean intermediate per-protein result files
shutil.rmtree(resultdir)
|
# Game of X/O (tic-tac-toe).
# Data-structure choice: a plain list is used because it is mutable and
# allows positional access; a tuple is immutable and a dict's keys add no
# value for a fixed 3x3 grid.
# The board is a flat list of 9 cells in row-major order; " " means empty.
board = [" " for i in range(9)]
def print_board():
    """Render the current 3x3 board to stdout, framed by blank lines."""
    print()
    for start in range(0, 9, 3):
        print("| {} | {} | {} |".format(*board[start:start + 3]))
    print()
def player_move(icon):
    """Prompt the player owning *icon* ("X" or "O") for a move and apply it.

    Invalid input (non-numeric, out of 1-9, or an occupied square) leaves
    the board unchanged; the caller's loop simply moves on.

    Raises:
        ValueError: if *icon* is neither "X" nor "O" (previously this
            crashed later with NameError on the undefined `number`).
    """
    if icon == "X":
        number = 1
    elif icon == "O":
        number = 2
    else:
        raise ValueError("icon must be 'X' or 'O'")
    print("Your turn player {}".format(number))
    try:
        choice = int(input("Enter your move (1-9): ").strip())
    except ValueError:
        # Bug fix: int() on non-numeric input previously crashed the game.
        print()
        print("That is not a number!")
        return
    if not 1 <= choice <= 9:
        # Bug fix: choice 0 silently wrote board[-1]; >9 raised IndexError.
        print()
        print("Please choose a number from 1 to 9!")
        return
    if board[choice - 1] == " ":
        board[choice - 1] = icon
    else:
        print()
        print("That space is taken!")
def is_draw():
    """Return True when no empty cell remains on the board."""
    return " " not in board
# Main game loop: X and O alternate turns forever. There is no win
# detection yet, so the game only ends when the process is interrupted
# (or, implicitly, when every square is filled and moves are rejected).
while True:
    print_board()
    player_move("X")
    print_board()
    player_move("O")
    #no winning function
|
from oeqa.runtime.case import OERuntimeTestCase
class MpiTest(OERuntimeTestCase):
    """On-target MPI smoke test: runs a rank-counting binary at several widths."""
    def test_mpi(self):
        """Run count_mpi_ranks under mpirun with 1, 2, 4 and 8 processes.

        For each width the command must exit 0 and report exactly that
        many ranks on stdout (exact string match).
        """
        processors = [1, 2, 4, 8]
        # mpirun binary is in the form "aarch64-poky-linux-mpirun"
        prefix = self.tc.td["TARGET_PREFIX"]
        for p in processors:
            cmd = (prefix + 'mpirun -np ' + str(p) +
                   ' /opt/mpi-test/count_mpi_ranks')
            (status, output) = self.target.run(cmd)
            # One message reused for both assertions so failures show the
            # full command context.
            msg = ('Failed to test MPI\ncmd: %s\nstatus: %s\noutput: %s'
                   % (cmd, status, output))
            self.assertEqual(status, 0, msg=msg)
            expected = "Number of ranks = " + str(p)
            self.assertEqual(output, expected, msg=msg)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import tensorflow as tf
from absl import flags
import graph.graph_utils
from graph.block_ops import DepthwiseConv2D, Conv2D, MixConv, MyNetComponents
from graph.blockargs import BlockArgsDecoder
from graph.op_collections import SE, build_conv_bn_act
from graph.searchableops import SearchableDwiseConv, SearchableBlock, SearchableMixConv
from util.math_utils import argmax
from util.utils import is_iterable
FLAGS = flags.FLAGS
class MBConvBlock(tf.keras.layers.Layer, MyNetComponents):
    """Mobile inverted-bottleneck conv block.

    Pipeline: 1x1 expand conv -> depthwise conv -> optional
    squeeze-and-excitation -> 1x1 projection, with an identity skip
    (plus optional drop-connect) when `can_add_residcon` holds.
    """
    def __init__(self, block_args, global_params):
        """
        Args:
            block_args: per-block hyperparameters (input/output filters,
                kernel_size, strides, expand_ratio, se_ratio, id_skip, ...).
                Accessed both by attribute and via .get() — TODO confirm
                the concrete container type.
            global_params: network-wide settings (act_fn, data format,
                SE activation/gating fns, ...).
        """
        self._block_args = block_args
        self._global_params = global_params
        # A per-block act_fn overrides the global default when present.
        self.act_fn = BlockArgsDecoder.get_act_fn_from_string(
            block_args.get("act_fn") if block_args.get("act_fn") else global_params.act_fn)
        # Named tensors surfaced on tensorboard (see tensordict_to_write_on_tensorboard).
        self.tensordict = {}
        self._set_data_format_related_stuffs(self._global_params)
        self._has_se = self._block_args.se_ratio is not None and self._block_args.se_ratio > 0
        # NOTE(review): sublayers are created before Layer.__init__ runs.
        self._build()
        super(MBConvBlock, self).__init__()
    def _build(self):
        """Create the expand / depthwise / (SE) / projection sublayer stacks."""
        expand_filters = int(self._block_args.input_filters * self._block_args.expand_ratio)
        self._build_expand(output_filters=expand_filters)
        self._build_dwise(output_filters=expand_filters)
        if self._has_se:
            self._build_se(output_filters=expand_filters)
        output_filters = self._block_args.output_filters
        self._build_proj(output_filters=output_filters)
    def _build_expand(self, output_filters):
        """1x1 expansion conv + BN + act.

        With expand_ratio == 1, kwargs is None — build_conv_bn_act
        presumably yields a pass-through stack then; confirm in
        op_collections.
        """
        if self._block_args.expand_ratio != 1:
            kwargs = dict(
                cls=Conv2D.__name__,
                output_filters=output_filters,
                kernel_size=[1, 1],
                strides=[1, 1])
        else:
            kwargs = None
        self._expand_layers = build_conv_bn_act(self._global_params, kwargs, add_bn=True, act_fn=self.act_fn)
    def _build_dwise(self, output_filters):
        """Depthwise conv + BN + act; this stage carries the block's stride."""
        kernel_size = self._block_args.kernel_size
        kwargs = dict(
            cls=DepthwiseConv2D.__name__,
            kernel_size=[kernel_size, kernel_size],
            strides=self._block_args.strides)
        self._dwise_layers = build_conv_bn_act(self._global_params, kwargs, add_bn=True, act_fn=self.act_fn)
    def _build_proj(self, output_filters):
        """1x1 projection conv + BN with no activation (linear bottleneck)."""
        kwargs = dict(
            cls=Conv2D.__name__,
            output_filters=output_filters,
            kernel_size=[1, 1],
            strides=[1, 1])
        self._proj_layers = build_conv_bn_act(self._global_params, kwargs, add_bn=True, act_fn=None)
    def _build_se(self, output_filters):
        """Squeeze-and-excitation; squeeze width is se_ratio * input filters (min 1)."""
        se_ratio = self._block_args.se_ratio
        num_reduced_filters = max(
            1, int(self._block_args.input_filters * se_ratio))
        se_inner_act_fn = BlockArgsDecoder.get_se_inner_act_fn_from_string(self._global_params.se_inner_act_fn)
        se_gating_fn = BlockArgsDecoder.get_se_gating_fn_from_string(self._global_params.se_gating_fn)
        self._se = SE(self._global_params, num_reduced_filters, output_filters, se_inner_act_fn, se_gating_fn)
    def call(self, inputs, training=True, drop_connect_rate=None):
        """Forward pass (TF1-style tf.logging / tf.variable_scope).

        drop-connect is applied to the residual branch only when the skip
        connection is active and *drop_connect_rate* is truthy.
        """
        x = self._expand_layers(inputs, training=training)
        tf.logging.info('Expand: %s shape: %s' % (x.name, x.shape))
        x = self._dwise_layers(x, training=training)
        tf.logging.info('DWConv: %s shape: %s' % (x.name, x.shape))
        if self._has_se:
            with tf.variable_scope('se'):
                x = self._call_se(x)
            tf.logging.info('SE: %s shape: %s' % (x.name, x.shape))
        x = self._proj_layers(x, training=training)
        if self.can_add_residcon:
            if drop_connect_rate:
                x = graph.graph_utils.drop_connect(x, training, drop_connect_rate)
            x = tf.add(x, inputs)
        tf.logging.info('Project: %s shape: %s' % (x.name, x.shape))
        return x
    def _call_se(self, x):
        """Apply SE; stash excitation tensors whose sanitized names match
        FLAGS.log_excitation_names_containing ('all' selects every one)."""
        outputs = self._se(x)
        log_excitation_names = FLAGS.log_excitation_names_containing
        if log_excitation_names:
            activations = self._se.activations
            this_excitation_name = activations.name.replace(":", "_")
            if 'all' in log_excitation_names or any(
                    [log_name in this_excitation_name for log_name in log_excitation_names]):
                self.tensordict[this_excitation_name] = activations
        return outputs
    def get_output_shape(self, input_shape=(224, 224, 3)):
        """Thread *input_shape* through expand -> dwise -> proj; SE is not
        consulted here (it does not alter the shape per this accounting)."""
        for layers in [self._expand_layers, self._dwise_layers, self._proj_layers]:
            input_shape = layers.get_output_shape(input_shape)
        return input_shape
    def _get_(self, what, input_shape=(224, 224, 3)):
        """Sum a sublayer-reported quantity *what* over every stage
        (including SE when present), threading the shape stage to stage."""
        block_layers = [self._expand_layers, self._dwise_layers, self._proj_layers]
        if self._has_se:
            block_layers.insert(2, self._se)
        result = 0
        for layers in block_layers:
            result += layers._get_(what, input_shape)
            input_shape = layers.get_output_shape(input_shape)
        return result
    @property
    def can_add_residcon(self):
        """Identity skip is valid only for stride-1 blocks whose input and
        output channel counts match, and whose args enable id_skip."""
        if self._block_args.id_skip and all(s == 1 for s in self._block_args.strides) and \
                self._block_args.input_filters == self._block_args.output_filters:
            return True
        else:
            return False
    def tensordict_to_write_on_tensorboard(self):
        """
        TF Tensors to write on tensorboard.
        If you return {name, tensor},
        On tensorboard, the tensor will be written under net/name
        """
        return self.tensordict
class MixConvBlock(MBConvBlock):
    """MBConv variant whose depthwise stage is a MixConv: several kernel
    sizes applied over channel splits. Here expand_ratio is a per-branch
    sequence rather than a scalar."""
    def _build(self):
        """Like MBConvBlock._build, but the expansion width is the sum of
        the per-branch expand ratios."""
        input_filters = self._block_args.input_filters
        expand_ratios = self._block_args.expand_ratio
        total_expand_ratio = functools.reduce(lambda x, y: x + y, expand_ratios)
        self.expand_filters = int(input_filters * total_expand_ratio)
        self._build_expand(output_filters=self.expand_filters)
        self._build_dwise(output_filters=self.expand_filters)
        if self._has_se:
            self._build_se(output_filters=self.expand_filters)
        output_filters = self._block_args.output_filters
        self._build_proj(output_filters=output_filters)
    def _build_dwise(self, output_filters):
        """MixConv depthwise stage: branch i covers input_filters *
        expand_ratios[i] channels with kernel_sizes[i]."""
        kernel_sizes = self._block_args.kernel_size
        input_filters = self._block_args.input_filters
        expand_ratios = self._block_args.expand_ratio
        filter_splits = [int(input_filters * er) for er in expand_ratios]
        # Splits must exactly tile the expanded tensor.
        assert self.expand_filters == sum(filter_splits)
        kwargs = dict(
            cls=MixConv.__name__,
            kernel_sizes=kernel_sizes,
            filter_splits=filter_splits,
            strides=self._block_args.strides)
        self._dwise_layers = build_conv_bn_act(self._global_params, kwargs, add_bn=True, act_fn=self.act_fn)
class SearchableMBConvBlock(MBConvBlock, SearchableBlock):
    """MBConv block whose depthwise conv becomes a searchable op: kernel
    size and expand ratio turn into architecture-search choices."""
    def __init__(self, block_args, global_params):
        super(SearchableMBConvBlock, self).__init__(block_args, global_params)
        self.is_supergraph_training_tensor = global_params.is_supergraph_training_tensor
        self._change_to_searchableblock(global_params)
    # Op class that replaces the plain depthwise conv during search.
    searchable_op_cls = SearchableDwiseConv
    @property
    def core_op(self):
        """The searchable depthwise conv inside the dwise stack."""
        return self._dwise_layers['conv']
    @core_op.setter
    def core_op(self, value):
        self._dwise_layers['conv'] = value
    def conv_type_for(self, kernel_size, expand_ratio):
        """A non-mix block always resolves to a plain MBConvBlock."""
        return MBConvBlock.__name__
    @property
    def _k_sel_list(self):
        """Candidate kernel sizes: odd values 3, 5, ... up to the block's
        configured kernel size."""
        kernel_size = self._block_args.kernel_size
        k_split_unit = 2
        k_min_size = 3
        return list(range(k_min_size, kernel_size + 1, k_split_unit))
    @property
    def _er_sel_list_with_0_maybe(self):
        """Candidate expand ratios 2, 4, ... up to the configured ratio.
        0 (block zeroed out into a skip op) is offered only when a residual
        connection exists."""
        er_sel_list = []
        can_zeroout_to_make_skipop = self.can_add_residcon
        if can_zeroout_to_make_skipop:
            er_sel_list.append(0)
        expand_ratio = self._block_args.expand_ratio
        assert int(expand_ratio) == expand_ratio, "We only support int expand_ratios"
        er_split_unit = 2
        er_sel_list.extend(range(er_split_unit, int(expand_ratio) + 1, er_split_unit))
        return er_sel_list
    @property
    def core_C_sel_list(self):
        """Expand-ratio candidates converted to channel counts."""
        input_filters = self._block_args.input_filters
        return [input_filters * er for er in self._er_sel_list_with_0_maybe]
    def useconds_for_ksize_expandratio(self, kernel_size, expand_ratio):
        """Latency lookup, delegated to the core op (ratio -> channels)."""
        channels = self._block_args.input_filters * expand_ratio
        return self.core_op.useconds_for_ksize_channels(kernel_size, channels)
    def ksize_expandratio_of_useconds(self, useconds_kern, useconds_chan):
        """Inverse lookup: latency terms back to (kernel size, expand ratio)."""
        input_filters = self._block_args.input_filters
        k, C = self.core_op.ksize_channels_of_useconds(useconds_kern, useconds_chan)
        er = C // input_filters
        return k, er
    def tensordict_to_write_on_tensorboard(self):
        """Because of multiple inheritance..."""
        # Merge the core op's tensorboard tensors into the block's own dict.
        self.tensordict.update(self.core_op.tensordict_to_write_on_tensorboard())
        return self.tensordict
class SearchableMixConvBlock(MixConvBlock, SearchableBlock):
    """Searchable MixConv block: per-branch kernel sizes and expand ratios
    are architecture-search choices; branches can be zeroed out and the
    whole op simplified (possibly down to a plain MBConv)."""
    def __init__(self, block_args, global_params):
        super(SearchableMixConvBlock, self).__init__(block_args, global_params)
        self.is_supergraph_training_tensor = global_params.is_supergraph_training_tensor
        self._change_to_searchableblock(global_params)
    # Op class that replaces the plain MixConv during search.
    searchable_op_cls = SearchableMixConv
    @property
    def core_op(self):
        """The searchable mixed depthwise conv inside the dwise stack."""
        return self._dwise_layers['conv']
    @core_op.setter
    def core_op(self, value):
        self._dwise_layers['conv'] = value
    def conv_type_for(self, kernel_size, expand_ratio):
        """After simplification, a single surviving branch (scalar kernel)
        degenerates to MBConvBlock; otherwise it stays a MixConvBlock."""
        simplified_ksize, simplified_er = self.simplify_kern_expandratio(kernel_size, expand_ratio)
        if isinstance(simplified_ksize, int):
            return MBConvBlock.__name__
        else:
            return MixConvBlock.__name__
    @property
    def _k_sel_list(self):
        """Per-branch candidate kernel sizes (odd, 3 .. branch maximum)."""
        k_list_per_branches = []
        k_split_unit = 2
        k_min_size = 3
        for ksize in self._block_args.kernel_size:
            k_list_per_branches.append(list(range(k_min_size, ksize + 1, k_split_unit)))
        return k_list_per_branches
    @property
    def _er_sel_list_with_0_maybe(self):
        """Per-branch candidate expand ratios; 0 (branch removed) is allowed
        except for the largest-kernel branch when no residual exists."""
        er_split_unit = 2
        def build_er_sel_list(expand_ratio, can_zeroout):
            er_sel_list = []
            if can_zeroout:
                er_sel_list.append(0)
            assert int(expand_ratio) == expand_ratio, "We only support int expand_ratios"
            er_sel_list.extend(range(er_split_unit, int(expand_ratio) + 1, er_split_unit))
            return er_sel_list
        largest_kernel_index = argmax(self._block_args.kernel_size)
        er_list_per_branches = []
        for i, er in enumerate(self._block_args.expand_ratio):
            unable_to_zeroout = (i == largest_kernel_index) and not self.can_add_residcon
            er_list_per_branches.append(build_er_sel_list(er, can_zeroout=(not unable_to_zeroout)))
        return er_list_per_branches
    @property
    def core_C_sel_list(self):
        """Per-branch expand-ratio candidates converted to channel counts."""
        input_filters = self._block_args.input_filters
        C_list_per_branches = []
        for er_list in self._er_sel_list_with_0_maybe:
            C_list_per_branches.append([input_filters * er for er in er_list])
        return C_list_per_branches
    def get_listof_possible_ksize_expandratio_with_0_maybe(self):
        """Enumerate the (kernel sizes, expand ratios) pairs the core op allows."""
        result = []
        for (k, C) in self.core_op.get_listof_possible_ksize_channels():
            result.append((k, self._convert_to_er(C)))
        return result
    def useconds_for_ksize_expandratio(self, kernel_size, expand_ratio):
        """Latency lookup after simplification (ratios converted to channels)."""
        k, er = self.simplify_kern_expandratio(kernel_size, expand_ratio)
        return self.core_op.useconds_for_ksize_channels(k, self._convert_to_channels(er))
    def simplify_kern_expandratio(self, kernel_size, expand_ratio):
        """Delegate branch simplification to the core op, in channel space."""
        k, c = self.core_op.simplify_k_C(kernel_size, self._convert_to_channels(expand_ratio))
        return k, self._convert_to_er(c)
    def ksize_expandratio_of_useconds(self, useconds_kern, useconds_chan):
        """returns simplified k_sizes and expand_ratios"""
        k_list, C_list = self.core_op.ksize_channels_of_useconds(useconds_kern, useconds_chan)
        er_list = self._convert_to_er(C_list)
        return k_list, er_list
    def tensordict_to_write_on_tensorboard(self):
        """Because of multiple inheritance..."""
        self.tensordict.update(self.core_op.tensordict_to_write_on_tensorboard())
        return self.tensordict
    def _convert_to_er(self, channels):
        """Channels -> expand ratios (elementwise for iterables; // division)."""
        input_filters = self._block_args.input_filters
        if is_iterable(channels):
            channels = tuple([c // input_filters for c in channels])
        else:
            channels //= input_filters
        return channels
    def _convert_to_channels(self, expand_ratio):
        """Expand ratios -> channels (elementwise for iterables)."""
        input_filters = self._block_args.input_filters
        if is_iterable(expand_ratio):
            channels = tuple([er * input_filters for er in expand_ratio])
        else:
            channels = expand_ratio * input_filters
        return channels
    @classmethod
    def get_summary_prefix(cls, branch_idx):
        """Delegate per-branch summary naming to the searchable op class."""
        return cls.searchable_op_cls.get_summary_prefix(branch_idx)
    @classmethod
    def get_label(cls, branch_idx, summary_name):
        """Full tensorboard label for one branch's summary."""
        label = cls.searchable_op_cls.get_summary_prefix(branch_idx) + summary_name
        return label
    @property
    def num_branches(self):
        return self.core_op.num_branches
|
#!/usr/bin/env python3
"""Generate badges for repos under an user/org - here we're going to do it for python/ pypi
but this can be easily adapted
"""
from __future__ import annotations
import argparse
import re
from typing import Any
import requests
from lib import github_graph
from lib.utils import getUsername, printf
parser = argparse.ArgumentParser("Generate badges for repos under an user/org")
# With action="append" + nargs="+", args.orgs is a list of lists (flattened
# later) and is None when -o is never passed.
parser.add_argument(
    "-o",
    "--orgs",
    action="append",
    nargs="+",
    help="add an org to get traffic for",
)
parser.add_argument(
    "-u",
    "--user",
    action="store_true",
    help="return the list of user owned repos?",
)
args = parser.parse_args()
username = getUsername()
if not args.orgs and not args.user:
    printf.logPrint("Pass at least 1 org or 1 user see --help for more info")
    # Bug fix: previously execution fell through and crashed below on
    # sum(None, []) — exit after reporting the usage error.
    raise SystemExit(1)
sourceRepos = []
# Bug fix: guard the org loop — args.orgs is None when only --user is given.
if args.orgs:
    for organization in sum(args.orgs, []):
        sourceRepos += github_graph.getListOfRepos(organization, organization=True)
if args.user:
    sourceRepos += github_graph.getListOfRepos(username)
# Build (display name, owner login, repo name) tuples; archived repos get a
# "(Archived) " prefix on the display name.
sortRepos = [
    (
        ("(Archived) " if repoData["isArchived"] else "") + repoData["name"],
        repoData["owner"]["login"],
        repoData["name"],
    )
    for repoData in sourceRepos
]
def getKey(item: list[Any]):
    """Return the sort key: the repo's display name (tuple element 0)."""
    return item[0]
sortedRepos = sorted(sortRepos, key=getKey, reverse=True)
# For each repo, fetch its PyPI downloads badge, rank the download count,
# and print a markdown section.
for repoData in sortedRepos:
    badge = f"https://img.shields.io/pypi/dm/{repoData[2].lower()}.svg?style=for-the-badge"
    ret = requests.get(badge)
    downRank = "UNKNOWN"
    try:
        # The badge SVG's <title> looks like "downloads: N/month"; strip the
        # label and suffix to leave the bare number.
        downloads = int(
            re.findall(b"<title>(.*?)</title>", ret.content)[0]
            .lower()
            .replace(b"downloads: ", b"")
            .replace(b"/month", b"")
        )
        downRank = "LOW" if downloads < 65 else "HIGH"
    except (ValueError, IndexError):
        # Bug fix: a response without a <title> tag made findall return an
        # empty list, and the resulting IndexError was previously uncaught.
        pass
    print(
        f"""## {repoData[0]}
![PyPI Downloads](https://img.shields.io/pypi/dm/{repoData[2].lower()}.svg?style=for-the-badge)
![PyPI Total Downloads](https://img.shields.io/badge/dynamic/json?style=for-the-badge&label=total%20downloads&query=total_downloads&url=https%3A%2F%2Fapi.pepy.tech%2Fapi%2Fprojects%2F{repoData[2].lower()})
![PyPI Version](https://img.shields.io/pypi/v/{repoData[2].lower()}.svg?style=for-the-badge)
goto: https://github.com/{repoData[1]}/{repoData[2]}
downloads: {downRank}
"""
    )
|
from datetime import date
def is_payday_leap_year(year, payday, frequency='biweekly'):
    """Determine if a given year is a payday leap year.

    Determine if a given year is a payday leap year (one containing an
    extra payday — 27 biweekly or 53 weekly), based on the given payday
    on a weekly or biweekly pay calendar (specified by `frequency`).
    Assumes paychecks are allowed to be disbursed on holidays.

    Args:
        year (int): The year we're testing.
        payday (date): A payday from the specified pay calendar.
            Does not need to be in the same year as `year`.
        frequency (str): Pay frequency. Valid values are 'weekly'
            or 'biweekly'. Default is 'biweekly'.

    Returns:
        True if the year is a payday leap year, False if not.
    """
    new_years_day = date(year, 1, 1)
    jan_2 = date(year, 1, 2)
    freq_in_days = 7 if frequency == 'weekly' else 14
    # Full Gregorian leap-year rule. Bug fix: the original `year % 4 == 0`
    # wrongly treated century years such as 1900 and 2100 as leap years.
    is_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
    # If new year's day is a payday, the year always gets the extra payday.
    if abs((payday - new_years_day).days) % freq_in_days == 0:
        return True
    # In a leap year the year is one day longer, so a Jan. 2 payday also
    # yields the extra payday.
    if is_leap and abs((payday - jan_2).days) % freq_in_days == 0:
        return True
    return False
def get_payday_leap_years(
    payday,
    frequency='biweekly',
    count=5,
    starting_year=None
):
    """Get the next n payday leap years.

    Return a list of the next n payday leap years, where n is specified
    by `count`.

    Args:
        payday (date): A payday from the specified pay calendar.
        frequency (str): Pay frequency. Valid values are 'weekly'
            or 'biweekly'. Default is 'biweekly'.
        count (int): The number of payday leap years to return. Default is 5.
        starting_year (int): The year to start counting from.
            Defaults to the current year.

    Returns:
        A list of ints.
    """
    # Bug fix: the original default `date.today().year` was evaluated once
    # at import time, so a long-running process would keep using a stale
    # year; resolve "today" at call time instead.
    if starting_year is None:
        starting_year = date.today().year
    results = []
    year = starting_year
    while len(results) < count:
        if is_payday_leap_year(year, payday, frequency):
            results.append(year)
        year += 1
    return results
if __name__ == '__main__':
    # Demo: report payday-leap-year status for 2018-2068 against a biweekly
    # calendar anchored on the January 11, 2018 payday.
    payday = date(2018, 1, 11)
    for year in range(2018, 2069):
        print("{0} is a payday leap year: {1}".format(
            year, is_payday_leap_year(year, payday)
        ))
|
import datetime
from unittest.mock import ANY
from contextlib import contextmanager
import pytest
from dateutil.tz import tzutc
from rhub.lab import model
from rhub.api.vault import Vault
from rhub.auth.keycloak import KeycloakClient
# All endpoints exercised by these tests are rooted at API version v0.
API_BASE = '/v0'
def _db_add_row_side_effect(data_added):
def side_effect(row):
for k, v in data_added.items():
setattr(row, k, v)
return side_effect
def test_location_list(client):
    """Listing locations returns the paginated rows plus a total count."""
    model.Location.query.limit.return_value.offset.return_value = [
        model.Location(id=1, name='RDU', description=''),
    ]
    model.Location.query.count.return_value = 1
    response = client.get(
        f'{API_BASE}/lab/location',
        headers={'Authorization': 'Bearer foobar'},
    )
    assert response.status_code == 200
    expected_item = {'id': 1, 'name': 'RDU', 'description': '', '_href': ANY}
    assert response.json == {'data': [expected_item], 'total': 1}
def test_location_get(client):
    """Fetching a single location by id returns its serialized form."""
    model.Location.query.get.return_value = model.Location(
        id=1, name='RDU', description='',
    )
    response = client.get(
        f'{API_BASE}/lab/location/1',
        headers={'Authorization': 'Bearer foobar'},
    )
    model.Location.query.get.assert_called_with(1)
    assert response.status_code == 200
    assert response.json == {'id': 1, 'name': 'RDU', 'description': '', '_href': ANY}
def test_location_create(client, db_session_mock, mocker):
    """Creating a location adds a DB row and echoes it back with its new id."""
    location_data = {'name': 'RDU', 'description': 'Raleigh'}
    # No pre-existing location with this name.
    model.Location.query.filter.return_value.count.return_value = 0
    # Simulate the DB assigning id=1 on insert.
    db_session_mock.add.side_effect = _db_add_row_side_effect({'id': 1})
    response = client.post(
        f'{API_BASE}/lab/location',
        headers={'Authorization': 'Bearer foobar'},
        json=location_data,
    )
    db_session_mock.add.assert_called()
    created_row = db_session_mock.add.call_args.args[0]
    for field, value in location_data.items():
        assert getattr(created_row, field) == value
    assert response.status_code == 200
    assert response.json == {'id': 1, 'name': 'RDU', 'description': 'Raleigh', '_href': ANY}
def test_location_update(client):
    """PATCHing a location mutates the row and returns the updated form."""
    row = model.Location(id=1, name='RDU', description='')
    model.Location.query.get.return_value = row
    response = client.patch(
        f'{API_BASE}/lab/location/1',
        headers={'Authorization': 'Bearer foobar'},
        json={'description': 'Raleigh'},
    )
    model.Location.query.get.assert_called_with(1)
    assert row.description == 'Raleigh'
    assert response.status_code == 200
    assert response.json == {'id': 1, 'name': 'RDU', 'description': 'Raleigh', '_href': ANY}
def test_location_delete(client, db_session_mock):
    """DELETE removes the row via the session and returns 204 No Content."""
    row = model.Location(id=1, name='RDU', description='')
    model.Location.query.get.return_value = row
    response = client.delete(
        f'{API_BASE}/lab/location/1',
        headers={'Authorization': 'Bearer foobar'},
    )
    model.Location.query.get.assert_called_with(1)
    db_session_mock.delete.assert_called_with(row)
    assert response.status_code == 204
|
import config
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def preprocessing(df):
    """Clean the hotel-bookings frame and label-encode its categoricals.

    Mutates *df* in place (fills missing guest counts, adds 'total_guest')
    and returns a reduced, fully numeric copy ready for modelling.

    Args:
        df (pd.DataFrame): raw hotel bookings data.

    Returns:
        pd.DataFrame: selected feature columns with object columns
        label-encoded and 'hotel' mapped to {Resort Hotel: 1, City Hotel: 2}.
    """
    # Column assignment instead of `inplace=True` on a selected column:
    # chained inplace fillna is deprecated and unreliable under pandas
    # copy-on-write; assignment has the same visible effect on df.
    df['adults'] = df['adults'].fillna(0)
    df['children'] = df['children'].fillna(0)
    df['babies'] = df['babies'].fillna(0)
    df['children'] = df['children'].astype(int)
    df['total_guest'] = df['adults'] + df['children'] + df['babies']
    df_hotel = df[['hotel', 'lead_time', 'adults', 'children', 'babies', 'country', 'market_segment', 'distribution_channel', 'is_repeated_guest',
                   'previous_cancellations', 'reserved_room_type', 'assigned_room_type', 'booking_changes', 'deposit_type', 'agent',
                   'days_in_waiting_list', 'required_car_parking_spaces', 'total_of_special_requests', 'is_canceled', 'adr', 'total_guest']].copy()
    # Unknown agent -> 0. Bug fix: the original attribute-access
    # `df_hotel.agent.fillna(..., inplace=True)` is a SettingWithCopy hazard.
    df_hotel['agent'] = df_hotel['agent'].fillna(0).astype(int)
    df_hotel['hotel'] = df_hotel['hotel'].replace({"Resort Hotel": 1, "City Hotel": 2})
    # Columns with dtype object are treated as categoricals.
    categorical_features = list(df_hotel.select_dtypes(include=['object']).columns)
    # One LabelEncoder per categorical column. NOTE: the encoders are kept
    # only locally and never returned, so inverse transforms are lost.
    label_encoder_feat = {}
    for feature in categorical_features:  # enumerate() index was unused
        df_hotel[feature] = df_hotel[feature].astype(str)
        label_encoder_feat[feature] = LabelEncoder()
        df_hotel[feature] = label_encoder_feat[feature].fit_transform(df_hotel[feature])
    return df_hotel
|
#!/cygdrive/c/Python25/python
########################################################################
### at-spi.py -- load TIText program onto ATMega128[L] via SPI
###
__doc__ = """
usage: at-spi.py titext_file
"""
import sys
sys.path.insert(0, "build/lib.win32-2.5")
from d2xx import *
from spi import *
from time import sleep, time
class AT128SPI:
    """Bit-banged SPI in-system programmer for the ATmega128[L], driven
    through an FTDI interface in async bit-bang mode (via the d2xx/spi
    wrappers). The constants are the FTDI data-bus pin masks; the trailing
    markers denote direction (O = output, I = input).
    """
    MOSI = 0x80  ### Ox
    MISO = 0x40  ### I
    SCK = 0x10  ### Ox
    SPI_EN = 0x08  ### O1
    RST = 0x04  ### Ox
    def __init__(self):
        """Open the FTDI port, set pin directions and baud rate, and drive
        the bus to its idle state (SPI buffer enabled, target in reset)."""
        p = self.p = D2XX()
        s = self.s = SPI(self.SPI_EN, self.RST,
                         self.SCK, self.MOSI, self.MISO,
                         self._read, self._write)
        p.mode(self.MOSI | self.SCK | self.SPI_EN | self.RST, FT_MODE_ASYNC)
        p.speed(FT_BAUD_115200)
        s.spien(1)
        s.rst(1)
        s.sck(0)
        s.mosi(0)
        s.update(True)
        # Bind the bound methods onto the instance — presumably a lookup
        # micro-optimization for the byte-at-a-time programming loop.
        self.spiByte = self.spiByte
        self.spi = self.spi
    def _read(self):
        """Sample the current input pin states from the FTDI port."""
        return self.p.bits()
    def _write(self, s):
        """Write raw waveform bytes, looping until all are accepted."""
        while s:
            nw = self.p.write(s)
            s = s[nw:]
    def milliSleep(self, ms):
        """Flush the queued waveform, then sleep for *ms* milliseconds."""
        self.s.flush()
        sleep(ms * 0.001)
    def cpuAttn(self):
        """Reset sequence that readies the target for serial programming
        (RST/SCK low, a positive RST pulse, then the 20 ms settling wait)."""
        ### RST and SCK low
        self.s.rst(0)
        self.s.sck(0)
        self.s.update(True)
        ### Positive pulse on RST
        self.s.rst(1)
        self.s.update()
        ### Brief pause
        self.milliSleep(1)
        ### RST low
        self.s.rst(0)
        self.s.update()
        ### Wait 20ms plus one
        self.milliSleep(21)
    def cpuRlse(self):
        """Release the target: final RST pulse, then disable the SPI buffer
        so the chip runs the freshly programmed code."""
        ### RST low
        self.s.rst(0)
        self.s.update()
        self.milliSleep(1)
        ### RST high
        self.s.rst(1)
        self.s.update(True)
        ### Disable SPI
        self.s.spien(0)
        self.s.update(True)
    def spiByte(self, byte, doRead=False):
        """Clock one byte out; optionally return the byte clocked in."""
        return self.s.byte(byte, doRead)
    def spi(self, bytes, doRead=False):
        """Clock a sequence of bytes; returns the per-byte read results."""
        return [self.spiByte(x, doRead) for x in bytes]
    def close(self):
        """Flush any queued waveform and close the FTDI port."""
        self.s.flush()
        self.p.close()
    __del__ = close  # best-effort cleanup if the caller forgets close()
    def progMode(self):
        """Enter serial programming mode, resyncing (RST pulse + retry) up
        to 16 times; an echoed 0x53 in the third byte confirms sync.
        Raises RuntimeError if the target never synchronizes."""
        for i in xrange(16):
            ### send enter-programming-mode instruction
            self.spiByte(0xac)
            self.spiByte(0x53)
            b = self.spiByte(0x00, True)
            self.spiByte(0x00)
            ### done, if in sync
            if b == 0x53:
                return
            ### not in sync, try again...
            ### high RST pulse
            self.s.rst(1)
            self.s.update()
            self.milliSleep(1)
            ### RST low
            self.s.rst(0)
            self.s.update()
            ### Wait 20ms plus one
            self.milliSleep(21)
        raise RuntimeError, "Cannot enter programming mode"
    def chipErase(self):
        """Issue the chip-erase instruction (0xAC 0x80), wait out the erase
        time (~11 ms here), then pulse RST to end the erase cycle."""
        ### erase the chip
        self.spiByte(0xac)
        self.spiByte(0x80)
        self.spiByte(0x00)
        self.spiByte(0x00)
        self.milliSleep(11)
        ### end CE mode
        self.s.rst(1)
        self.s.update()
        self.milliSleep(1)
        self.s.rst(0)
        self.s.update(True)
def m(fmt, *rest):
    """Print a status message immediately (percent-formats *fmt* with
    *rest* when given), flushing stdout so progress is visible live."""
    if rest:
        fmt = fmt % rest
    print fmt.rstrip()
    sys.stdout.flush()
def load(items):
    """Program *items* (a flat, even-length list of byte values starting at
    address 0) into the target's flash, then release it and report timing.

    Flash is written in 256-byte (128-word) page-buffer chunks: per word, a
    low-byte (0x40) and high-byte (0x48) load, then one write-page (0x4C)
    command at the running word address.
    """
    ### get into programming mode
    T = time()
    p = AT128SPI()
    m("Enter program mode...")
    p.cpuAttn()
    p.progMode()
    ### dump fuses
    if 0:  # debugging aid: dump lock/signature/fuse/calibration bytes
        print "Lock", [hex(x) for x in p.spi([0x58, 0x00, 0x00, 0x00], True)]
        print "Sig0", [hex(x) for x in p.spi([0x30, 0x00, 0x00, 0x00], True)]
        print "Sig1", [hex(x) for x in p.spi([0x30, 0x00, 0x01, 0x00], True)]
        print "Sig2", [hex(x) for x in p.spi([0x30, 0x00, 0x02, 0x00], True)]
        print "Sig3", [hex(x) for x in p.spi([0x30, 0x00, 0x03, 0x00], True)]
        print "Fuse", [hex(x) for x in p.spi([0x50, 0x00, 0x00, 0x00], True)]
        print "Xtnd", [hex(x) for x in p.spi([0x50, 0x08, 0x00, 0x00], True)]
        print "High", [hex(x) for x in p.spi([0x58, 0x08, 0x00, 0x00], True)]
        print "Cal0", [hex(x) for x in p.spi([0x38, 0x00, 0x00, 0x00], True)]
        print "Cal1", [hex(x) for x in p.spi([0x38, 0x00, 0x01, 0x00], True)]
        print "Cal2", [hex(x) for x in p.spi([0x38, 0x00, 0x02, 0x00], True)]
        print "Cal3", [hex(x) for x in p.spi([0x38, 0x00, 0x03, 0x00], True)]
        m("Fuse dump done")
    ### erase eeprom and flash
    m("Mass erase...")
    p.chipErase()
    p.cpuAttn()
    p.progMode()
    ### program up the chip
    m("Programming...")
    todo = len(items)
    assert not (todo & 1)  # must be an even byte count (whole words)
    word = 0
    next = iter(items).next  # NOTE: shadows builtin next; py2 iterator protocol
    while todo:
        # Next chunk: a full 256-byte page, or whatever remains.
        blk = (todo > 255) and 256 or todo
        wds = blk >> 1
        assert not (blk & 1)
        assert ((todo > 255) and (wds == 0x80)) or \
               ((todo < 256) and (todo == (wds << 1)))
        for b in xrange(wds):
            p.spi([0x40, 0x00, b, next()])  # low byte of word b
            p.spi([0x48, 0x00, b, next()])  # high byte of word b
        ### data sheet is w r o n g
        p.spi([0x4c, word >> 8, (word & 0xff), 0x00])  # write page at word addr
        p.milliSleep(6)  # wait out the page-write time
        todo -= blk
        word += wds
        m(" %3.0f%% done", (100 * (float(len(items) - todo) / len(items))))
    ### release the interface
    m("Reset...")
    p.cpuRlse()
    p.close()
    ### dump stats
    n = len(items)
    T = time() - T
    m("Programmed %d bytes in %.3f sec, %.3f kb/s", n, T, n / (128. * T))
    m("Done.")
def go(data):
    """Parse an ``@0``-prefixed whitespace-separated hex byte dump and
    program it into the chip via load().

    Only a single image starting at address 0 is supported.
    """
    # Normalize all line endings to \n before checking the header.
    data = data.replace("\r\n", "\n").replace("\r", "\n")
    assert data.startswith("@0\n")
    data = data[3:]
    # A second "@" would mean another load address, which we don't handle.
    assert "@" not in data
    # Renamed from `bytes`, which shadowed the builtin.
    payload = [int(tok, 16) for tok in data.split()]
    load(payload)
def usage():
    """Print the module docstring as usage help and exit.

    Uses the call form of SystemExit; the original `raise SystemExit, x`
    statement form is Python-2-only syntax.
    """
    raise SystemExit(__doc__)
def main(args):
    """Entry point: expects exactly one argument, the hex-dump file name."""
    if len(args) != 1:
        usage()
    # Context manager replaces the manual try/finally close.
    with open(args[0]) as f:
        d = f.read()
    go(d)
# Script entry point: forward the command-line arguments (hex dump file name).
if __name__ == "__main__":
    main(sys.argv[1:])
### EOF at-spi.py
|
from .core import AdvancedBrowser
# Advanced Browser modules
from . import custom_fields
from . import note_fields
from . import config
|
from common.MessageInterface import MessageInterface
from django.conf import settings
import logging,sys,multiprocessing,time,os
logger = logging.getLogger(__name__)
# NOTE(review): these module-level placeholders are not read anywhere in this
# module -- presumably set/consumed by importers; verify before removing.
QUEUE_NAME = None
ROUTING_KEY = None
class MessageReceiver(multiprocessing.Process):
    ''' subscribes to a queue and executes the given callback'''

    # this method should be re-defined by the user via inheritance
    def consume_msg(self, channel, method_frame, header_frame, body):
        '''Per-message callback; the default implementation drops the message.'''
        pass

    def __init__(self,
                 msg_queue,
                 msg_routing_key,
                 msg_host,
                 msg_port,
                 msg_exchange_name,
                 msg_ssl_cert,
                 msg_ssl_key,
                 msg_ssl_ca_certs,
                 ):
        '''Configure the underlying MessageInterface; no connection is opened here.'''
        # execute super constructor
        super(MessageReceiver, self).__init__()
        #self.exit = multiprocessing.Event()
        self.messageInterface = MessageInterface()
        self.messageInterface.host = msg_host
        self.messageInterface.port = msg_port
        self.messageInterface.exchange_name = msg_exchange_name
        self.messageInterface.ssl_cert = msg_ssl_cert
        self.messageInterface.ssl_key = msg_ssl_key
        self.messageInterface.ssl_ca_certs = msg_ssl_ca_certs
        self.message_queue = msg_queue
        self.message_routing_key = msg_routing_key

    def run(self):
        '''Process body: declare the queue, then consume messages until stopped.'''
        logger.debug(' in run ')
        # setup receiving queue and exchange
        logger.debug( ' open blocking connection to setup queue ' )
        self.messageInterface.open_blocking_connection()
        self.messageInterface.create_queue(self.message_queue, self.message_routing_key)
        self.messageInterface.close()
        logger.debug( ' open select connection ' )
        # start consuming incoming messages
        try:
            self.messageInterface.open_select_connection(self.on_connection_open)
        # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
        # and SystemExit); the exception is still logged and re-raised.
        except Exception:
            logger.exception(' Received exception while opening select connection: ' + str(sys.exc_info()[1]))
            raise
        logger.debug( ' start message consumer ' )
        try:
            self.messageInterface.connection.ioloop.start()
        except Exception:
            logger.exception(' Received exception while starting ioloop for message consumer: ' + str(sys.exc_info()[1]))
            raise

    # not working... connection is None for some reason
    def shutdown(self):
        '''Stop the consumer's ioloop (known-broken: see note above).'''
        logger.debug(' stopping message consumer ')
        try:
            logger.debug(' message connection: ' + str(self.messageInterface.connection) )
            logger.debug(' message ioloop: ' + str(self.messageInterface.connection.ioloop) )
            self.messageInterface.connection.ioloop.stop()
            logger.debug( ' after stopping message consumer ')
        except Exception:
            logger.exception(' Received exception while stopping ioloop for the message consumer: ' + str(sys.exc_info()[1]))
            raise
        #self.exit.set()

    def on_connection_open(self, connection):
        '''Async callback: connection established, open a channel.'''
        logger.debug(' in on_connection_open')
        try:
            connection.channel(self.on_channel_open)
        except Exception:
            logger.exception(' Received exception while opening connection to message server: ' + str(sys.exc_info()[1]))
            raise

    def on_channel_open(self, channel):
        '''Async callback: channel open, start consuming from the queue.'''
        logger.debug(' in on_channel_open')
        try:
            channel.basic_consume(self.consume_msg, self.message_queue)
        except Exception:
            logger.exception(' Received exception while creating message consumer: ' + str(sys.exc_info()[1]))
            raise
|
# coding: utf-8
from operator import attrgetter
from requests_futures.sessions import FuturesSession
from .data import Band
def get_top_bands(pages_to_fetch, lastfm_username, lastfm_api_key):
    """Fetch the user's all-time top artists from the Last.fm API.

    Issues *pages_to_fetch* page requests concurrently and returns a list
    of Band objects sorted by playcount, highest first. Artists with a
    missing name or a zero playcount are skipped.
    """
    session = FuturesSession(max_workers=10)
    url = 'http://ws.audioscrobbler.com/2.0/'
    pending = []
    for page in range(1, pages_to_fetch + 1):
        query = {
            'method': 'user.gettopartists',
            'user': lastfm_username,
            'api_key': lastfm_api_key,
            'format': 'json',
            'period': 'overall',
            'page': page,
        }
        pending.append(session.get(url, params=query))
    bands = []
    for future in pending:
        payload = future.result().json()
        for artist in payload.get('topartists', {}).get('artist', []):
            name = artist.get('name', None)
            playcount = int(artist.get('playcount', 0))
            if name and playcount:
                bands.append(Band(name=name, playcount=playcount))
    return sorted(bands, key=attrgetter('playcount'), reverse=True)
|
#!/usr/bin/env python
#
# This example shows two easy ways to create a dasboard: using a view as a
# templeate, and copying another dashboard.
# In both cases, a filter is used to define what entities the new dashboard
# will monitor.
#
import getopt
import sys
from sdcclient import SdMonitorClient
#
# Parse arguments
#
def usage():
    """Print command-line help for this example and exit with status 1."""
    print('usage: %s [-d|--dashboard <name>] <sysdig-token>' % sys.argv[0])
    print('-d|--dashboard: Set name of dashboard to create')
    print('You can find your token at https://app.sysdigcloud.com/#/settings/user')
    sys.exit(1)
# Parse the command line: optional -d/--dashboard name plus the API token.
try:
    opts, args = getopt.getopt(sys.argv[1:], "d:", ["dashboard="])
except getopt.GetoptError:
    usage()
# Name for the dashboard to create
dashboardName = "Overview by Process"
for opt, arg in opts:
    if opt in ("-d", "--dashboard"):
        dashboardName = arg
if len(args) != 1:
    usage()
sdc_token = args[0]
#
# Instantiate the SDC client
#
sdclient = SdMonitorClient(sdc_token)
#
# Create the new dashboard, applying to cassandra in production
#
# Name of the view to copy
viewName = "Overview by Process"
# Filter to apply to the new dashboard.
# Remember that you can use combinations of any segmentation criteria you find
# in Sysdig Cloud Explore page.
# You can also refer to AWS tags by using "cloudProvider.tag.*" metadata or
# agent tags by using "agent.tag.*" metadata
dashboardFilter = 'proc.name = "cassandra"'
print('Creating dashboard from view')
# The client returns an (ok, result) pair rather than raising on failure.
ok, res = sdclient.create_dashboard_from_view(dashboardName, viewName, dashboardFilter)
#
# Check the result
#
if ok:
    print('Dashboard created successfully')
else:
    print(res)
    sys.exit(1)
#
# Make a Copy the just created dasboard, this time applying it to cassandra in
# the dev namespace
#
# Name of the dashboard to copy
dashboardCopy = "Copy of {}".format(dashboardName)
# Filter to apply to the new dashboard. Same as above.
dashboardFilter = 'proc.name != "cassandra"'
print('Creating dashboard from dashboard')
ok, res = sdclient.create_dashboard_from_dashboard(dashboardCopy, dashboardName, dashboardFilter)
#
# Check the result
#
if ok:
    print('Dashboard copied successfully')
else:
    print(res)
    sys.exit(1)
|
############################################################
## 01/22/2018 by Lord.Doubi. Revised from OpenCV tutorial ##
############################################################
import cv2
import os
import numpy as np
import Cameracontrol as cc
class Bug():
    """Capture background/fluorescence images for a named bug, subtract
    them and extract contours with OpenCV.

    NOTE(review): the methods navigate with os.chdir and deliberately leave
    the working directory inside data/<name> between realimage() and
    findcountours(); call order matters.
    """
    def __init__(self, name):
        self.name = name
        self.background = name + "_Background"
        os.chdir('./data')
        files = os.listdir('.')
        if self.name in files:
            print("Already in database")
        else:
            os.system('mkdir {}'.format(self.name))
        os.chdir('..')
        # create bug name and folder (if new) and go back to main folder

    def getimage(self, i):
        """
        i = 0 get background image
        i = 1 get fluorescence image
        """
        os.chdir('./data/{}'.format(self.name))
        try:
            # BUG FIX: the original used two independent `if`s, so i == 0 fell
            # through to the `else`, returned 'Error' AND left the working
            # directory inside data/<name>.
            if i == 0:
                cc.Cameratakepicture(self.background)
            elif i == 1:
                cc.Cameratakepicture(self.name)
            else:
                return 'Error'
        finally:
            # Always restore the main folder, even on the error path.
            os.chdir('..')
            os.chdir('..')

    def realimage(self):
        """Subtract the background image from the fluorescence image.

        Intentionally leaves the working directory inside data/<name>;
        findcountours() restores it.
        """
        os.chdir('./data/{}'.format(self.name))
        self.profile = cv2.imread(self.name, 0)
        # Read into a local so self.background keeps holding the file name
        # (the original overwrote it with the image array).
        background_img = cv2.imread(self.background, 0)
        # BUG FIX: the original assigned self.realimage, clobbering this very
        # method on the instance; store under a distinct attribute instead.
        self.subtracted = self.profile - background_img
        # store subtracted image in data/<bug>; the original formatted the
        # image array into an invalid absolute path, so imwrite failed.
        cv2.imwrite("{}{}.png".format(self.name, '_subtracted'), self.subtracted)

    def findcountours(self):
        """Threshold the subtracted image, draw mid-sized contours, save an
        annotated image and restore the main folder."""
        self.blur = cv2.GaussianBlur(self.subtracted, (17, 17), 0)
        # BUG FIX: the original thresholded the undefined name `blur`.
        self.ret, self.th = cv2.threshold(self.blur, 0, 255,
                                          cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        self.im2, self.contours, self.hierarchy = cv2.findContours(self.th, 1, 2)
        n = len(self.contours)
        i = 0
        r = 0
        approx = [None] * n
        perimeter = [None] * n
        epsilon = [None] * n
        self.thco = cv2.cvtColor(self.th, cv2.COLOR_GRAY2BGR)
        print('Total Contours found{}'.format(n))
        while i < n:
            M = cv2.moments(self.contours[i])
            area = cv2.contourArea(self.contours[i])
            # Keep only mid-sized contours (filters noise and huge blobs).
            if 100 < area < 10000:
                perimeter[i] = cv2.arcLength(self.contours[i], True)
                epsilon[i] = 0.01 * perimeter[i]
                approx[i] = cv2.approxPolyDP(self.contours[i], epsilon[i], True)
                cv2.drawContours(self.thco, [approx[i]], -1, (0, 0, 255), 4)
                r = r + 1
                print('Intensity for Filtered Contours {} is {}\n'.format(r, M))
            i = i + 1
        print('Filtered Contours found{}'.format(r))
        cv2.imwrite("{}{}.png".format(self.name, '_processed'), self.thco)
        os.chdir('..')
        os.chdir('..')
        # go back to main folder
|
"""
API operations for samples in the Galaxy sample tracking system.
"""
import logging
from galaxy import util
from galaxy.util.bunch import Bunch
from galaxy.web import url_for
from galaxy.web.base.controller import BaseAPIController, web
log = logging.getLogger(__name__)
class SamplesAPIController(BaseAPIController):
    """API operations on samples in the Galaxy sample tracking system."""
    # Valid values for the 'update_type' payload field, grouped by target kind.
    update_types = Bunch(SAMPLE=['sample_state', 'run_details'],
                         SAMPLE_DATASET=['sample_dataset_transfer_status'])
    update_type_values = []
    for k, v in update_types.items():
        update_type_values.extend(v)

    @web.expose_api
    def index(self, trans, **kwd):
        """
        GET /api/requests/{encoded_request_id}/samples
        Displays a collection (list) of sample of a sequencing request.
        """
        try:
            request_id = trans.security.decode_id(kwd['request_id'])
        except TypeError:
            trans.response.status = 400
            return "Malformed request id ( %s ) specified, unable to decode." % str(kwd['request_id'])
        try:
            request = trans.sa_session.query(trans.app.model.Request).get(request_id)
        # Narrowed from a bare except; any lookup failure is treated as "not found".
        except Exception:
            request = None
        if not request or not (trans.user_is_admin() or request.user.id == trans.user.id):
            trans.response.status = 400
            return "Invalid request id ( %s ) specified." % str(request_id)
        rval = []
        for sample in request.samples:
            item = sample.to_dict()
            item['url'] = url_for('samples',
                                  request_id=trans.security.encode_id(request_id),
                                  id=trans.security.encode_id(sample.id))
            item['id'] = trans.security.encode_id(item['id'])
            rval.append(item)
        return rval

    @web.expose_api
    def update(self, trans, id, payload, **kwd):
        """
        PUT /api/samples/{encoded_sample_id}
        Updates a sample or objects related ( mapped ) to a sample.
        """
        update_type = None
        if 'update_type' not in payload:
            trans.response.status = 400
            return "Missing required 'update_type' parameter, consult the API documentation for help."
        else:
            update_type = payload.pop('update_type')
        if update_type not in self.update_type_values:
            trans.response.status = 400
            return "Invalid value for 'update_type' parameter (%s) specified, consult the API documentation for help." % update_type
        sample_id = util.restore_text(id)
        try:
            decoded_sample_id = trans.security.decode_id(sample_id)
        except TypeError:
            trans.response.status = 400
            return "Malformed sample_id (%s) specified, unable to decode." % str(sample_id)
        try:
            sample = trans.sa_session.query(trans.app.model.Sample).get(decoded_sample_id)
        except Exception:
            sample = None
        if not sample:
            trans.response.status = 400
            return "Invalid sample id ( %s ) specified." % str(sample_id)
        if not trans.user_is_admin():
            trans.response.status = 403
            return "You are not authorized to update samples."
        requests_admin_controller = trans.webapp.controllers['requests_admin']
        if update_type == 'run_details':
            deferred_plugin = payload.pop('deferred_plugin', None)
            if deferred_plugin:
                # Best effort: a failed deferred job is logged but does not
                # abort the template-info update below.
                try:
                    trans.app.job_manager.deferred_job_queue.plugins[deferred_plugin].create_job(trans, sample=sample, **payload)
                except Exception:
                    log.exception('update() called with a deferred job plugin (%s) but creating the deferred job failed:' % deferred_plugin)
            status, output = requests_admin_controller.edit_template_info(trans,
                                                                          cntrller='api',
                                                                          item_type='sample',
                                                                          form_type=trans.model.FormDefinition.types.RUN_DETAILS_TEMPLATE,
                                                                          sample_id=sample_id,
                                                                          **payload)
            return status, output
        elif update_type == 'sample_state':
            return self.__update_sample_state(trans, sample, sample_id, **payload)
        elif update_type == 'sample_dataset_transfer_status':
            # update sample_dataset transfer status
            return self.__update_sample_dataset_status(trans, **payload)

    def __update_sample_state(self, trans, sample, encoded_sample_id, **payload):
        # only admin user may update sample state in Galaxy sample tracking
        if not trans.user_is_admin():
            trans.response.status = 403
            return "only an admin user may update sample state in Galaxy sample tracking."
        if 'new_state' not in payload:
            trans.response.status = 400
            return "Missing required parameter: 'new_state'."
        new_state_name = payload.pop('new_state')
        comment = payload.get('comment', '')
        # check if the new state is a valid sample state
        possible_states = sample.request.type.states
        new_state = None
        for state in possible_states:
            if state.name == new_state_name:
                new_state = state
        if not new_state:
            trans.response.status = 400
            return "Invalid sample state requested ( %s )." % new_state_name
        requests_common_cntrller = trans.webapp.controllers['requests_common']
        status, output = requests_common_cntrller.update_sample_state(trans=trans,
                                                                      cntrller='api',
                                                                      sample_ids=[encoded_sample_id],
                                                                      new_state=new_state,
                                                                      comment=comment)
        return status, output

    def __update_sample_dataset_status(self, trans, **payload):
        # only admin user may transfer sample datasets in Galaxy sample tracking
        if not trans.user_is_admin():
            trans.response.status = 403
            return "Only an admin user may transfer sample datasets in Galaxy sample tracking and thus update transfer status."
        if 'sample_dataset_ids' not in payload or 'new_status' not in payload:
            trans.response.status = 400
            return "Missing one or more required parameters: 'sample_dataset_ids' and 'new_status'."
        sample_dataset_ids = payload.pop('sample_dataset_ids')
        new_status = payload.pop('new_status')
        error_msg = payload.get('error_msg', '')
        requests_admin_cntrller = trans.webapp.controllers['requests_admin']
        status, output = requests_admin_cntrller.update_sample_dataset_status(trans=trans,
                                                                              cntrller='api',
                                                                              sample_dataset_ids=sample_dataset_ids,
                                                                              new_status=new_status,
                                                                              error_msg=error_msg)
        return status, output
|
import datetime
import uuid
from django.conf import settings
from django.db import models
from django_countries.fields import CountryField
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django_freeradius.models import RadiusCheck
# Validity window (in days) for identity radius tokens; can be overridden
# via settings.IDENTITY_TOKEN_EXPIRATION_DAYS (defaults to 5).
IDENTITY_TOKEN_EXPIRATION_DAYS = getattr(settings,
                            'IDENTITY_TOKEN_EXPIRATION_DAYS', 5)
class TimeStampedModel(models.Model):
    """
    Abstract base that adds self-updating creation/modification timestamps.
    """
    # Set once when the row is inserted.
    created = models.DateTimeField(_('created'), editable=False,
                                   auto_now_add=True)
    # Refreshed on every save().
    modified = models.DateTimeField(_('modified'), editable=False,
                                    auto_now=True)

    class Meta:
        abstract = True
class IdentityGenericManyToOne(models.Model):
    """Abstract base for simple named many-to-one relations to Identity
    (see IdentityRole / IdentityAffilitation)."""
    name = models.CharField(max_length=256, blank=False, null=False)
    class Meta:
        abstract = True
class AbstractIdentityAddress(models.Model):
    """Abstract postal-address record (Italian field labels)."""
    address = models.CharField(_('Indirizzo'),max_length=150, blank=True, null=True)
    locality_name = models.CharField(_('Località'),max_length=135, blank=True, null=True)
    state = models.CharField(_('Comune'), max_length=60, blank=True, null=True)
    postal_code = models.CharField(_('Cap'),max_length=60, blank=True, null=True)
    country_name = CountryField(_('Nazione'),blank=True, null=True)
    note = models.TextField(max_length=768, blank=True, null=True)
    primary = models.BooleanField(_('Recapito principale'),default=False)
    class Meta:
        ordering = ['primary',]
        verbose_name_plural = _("Address book")
    def __str__(self):
        # BUG FIX: the original returned '%s' % (self.persona), but no model in
        # this hierarchy defines `persona`, so str() always raised
        # AttributeError. Fall back to the address field.
        return '%s' % (self.address)
class AttributeProvider(TimeStampedModel):
    """
    Attribute Providers
    json_config let us to never mind about changements in future
    """
    # Human-readable provider name.
    name = models.CharField(max_length=256, blank=False, null=False, help_text=_(''))
    # Endpoint of the external provider.
    url = models.CharField(max_length=512, blank=False, null=False, help_text=_(''))
    # Free-form JSON blob so new provider options need no schema change.
    json_config = models.TextField(blank=True, null=True, help_text='')
class Identity(TimeStampedModel):
    """
    Provides registry
    """
    personal_title = models.CharField(max_length=12, blank=True, null=True)
    name = models.CharField(max_length=256, blank=False, null=False,
                            help_text=_('Nome o ragione sociale'))
    surname = models.CharField(max_length=135, blank=False, null=False)
    email = models.EmailField()
    telephone = models.CharField(max_length=135, blank=True, null=True)
    common_name = models.CharField(max_length=256, blank=True, null=True,
                                   help_text=_('Nome o ragione sociale'))
    country = CountryField(blank=True, help_text=_('nazionalità, cittadinanza'))
    city = models.CharField(max_length=128, blank=True, null=True,
                            help_text=_('residenza'))
    codice_fiscale = models.CharField(max_length=16, blank=True,
                                      null=True, help_text='')
    date_of_birth = models.DateField(blank=True, null=True)
    place_of_birth = models.CharField(max_length=128, blank=True,
                                      null=True, help_text='')
    description = models.TextField(max_length=1024, blank=True,
                                   null=True)

    class Meta:
        ordering = ['created',]
        verbose_name_plural = _("Identità digitali")

    def create_token(self, radcheck):
        """Return an active renewal token for *radcheck*: reuse and extend a
        still-valid one, otherwise create a fresh token."""
        validity_days = IDENTITY_TOKEN_EXPIRATION_DAYS
        _time_delta = datetime.timedelta(days=validity_days)
        identity_token = IdentityRadiusAccount.objects.filter(identity=self,
                                                              radius_account=radcheck,
                                                              is_active=True,
                                                              valid_until__gte=timezone.now()).last()
        if not identity_token:
            identity_token = IdentityRadiusAccount.objects.create(identity=self,
                                                                  radius_account=radcheck,
                                                                  is_active=True,
                                                                  valid_until=timezone.now()+_time_delta)
        else:
            # Extend the existing token instead of issuing a second one.
            identity_token.valid_until = identity_token.valid_until + _time_delta
            identity_token.save()
        return identity_token

    def get_tokens(self):
        """All renewal tokens ever issued for this identity."""
        return IdentityRadiusAccount.objects.filter(identity=self)

    def get_active_tokens(self):
        """Tokens that are active and not yet expired."""
        return self.get_tokens().filter(is_active=True,
                                        valid_until__gte=timezone.now())

    def get_used_tokens(self):
        """Tokens that have been deactivated (consumed)."""
        # get_tokens() already filters on identity=self; the original repeated
        # that condition redundantly.
        return self.get_tokens().filter(is_active=False)

    def get_radchecks(self):
        """RadiusCheck accounts referenced by this identity's tokens."""
        usernames = self.get_tokens().values_list('radius_account__username',
                                                  flat=True)
        if not usernames:
            return RadiusCheck.objects.none()
        return RadiusCheck.objects.filter(username__in=list(usernames))

    def get_active_radchecks(self):
        return self.get_radchecks().filter(is_active=True,
                                           valid_until__gte=timezone.now())

    def get_expired_radchecks(self):
        return self.get_radchecks().filter(is_active=True,
                                           valid_until__lte=timezone.now())

    def __str__(self):
        return '{} {}'.format(self.name, self.surname)
class Affiliation(models.Model):
    """eduPerson-style affiliation of an Identity with an institution."""
    # Closed set of eduPersonAffiliation values.
    AFFILIATION = (
        ('faculty', 'faculty'),
        ('student', 'student'),
        ('staff', 'staff'),
        ('alum', 'alum'),
        ('member', 'member'),
        ('affiliate', 'affiliate'),
        ('employee', 'employee'),
        ('library-walk-in', 'library-walk-in'),
    )
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE,
                                 blank=False, null=True)
    name = models.CharField(choices=AFFILIATION,
                            max_length=32)
    origin = models.CharField(max_length=254, blank=True, default='unical',
                              help_text='istitution of orgin, where the guest came from')
    description = models.TextField(blank=True, default='')
    def __str__(self):
        return self.name
class IdentityThirdPartiesAttribute(TimeStampedModel):
    """
    Which Provider contains other attributes related to that identity
    smart generalization for future implementation
    """
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE)
    attribute_provider = models.ForeignKey(AttributeProvider, on_delete=models.CASCADE)
class IdentityAddress(AbstractIdentityAddress, TimeStampedModel):
    """
    many to one, many addresses to one identity
    """
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE)
    class Meta:
        ordering = ['primary',]
        verbose_name_plural = _("Addresses")
    def __str__(self):
        return '%s %s' % (self.identity, self.primary)
class AddressType(models.Model):
    """Lookup table of contact-channel kinds (email, telephone, ...)."""
    name = models.CharField(max_length=12, blank=False, null=False,
                            help_text=_('tecnologia usata se email, telefono...'), unique=True)
    description = models.CharField(max_length=256, blank=True)
    def __str__(self):
        return '%s %s' % (self.name, self.description)
class IdentityDelivery(TimeStampedModel):
    """
    Generalized contacts classification
    email, telephone, facebook, twitter
    """
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE)
    # Channel kind; interpretation of `value` depends on it.
    type = models.ForeignKey(AddressType,
                             blank=False, null=False,
                             on_delete=models.CASCADE)
    value = models.CharField(max_length=135, blank=False, null=False,
                             help_text=_('mario.rossi@yahoo.it oppure 02 3467457, in base al tipo'))
class IdentityRole(IdentityGenericManyToOne):
    """Named role attached to an Identity (many roles per identity)."""
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE)
# NOTE(review): class name is a typo of "Affiliation", but renaming would
# require a migration and break existing references -- left as is.
class IdentityAffilitation(IdentityGenericManyToOne):
    """Named affiliation attached to an Identity."""
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE)
class IdentityRadiusAccount(models.Model):
    """One-time token linking an Identity to a RadiusCheck account, used to
    build radius secret renewal links."""
    identity = models.ForeignKey(Identity, on_delete=models.CASCADE)
    radius_account = models.ForeignKey(RadiusCheck, on_delete=models.CASCADE)
    token = models.UUIDField(unique=True, default=uuid.uuid4,
                             blank=True, null=True,
                             help_text="{}/identity/radius_renew/$token".format(settings.BASE_URL))
    sent = models.BooleanField(default=False)
    valid_until = models.DateTimeField(blank=True, null=True)
    used = models.DateTimeField(blank=True, null=True)
    is_active = models.BooleanField(default=True)
    created = models.DateTimeField(_('created'), auto_now_add=True)
    def save(self, *args, **kwargs):
        # Default expiry: now + IDENTITY_TOKEN_EXPIRATION_DAYS, applied only
        # when the caller did not supply valid_until explicitly.
        if not self.valid_until:
            validity_days = IDENTITY_TOKEN_EXPIRATION_DAYS
            _time_delta = datetime.timedelta(days=validity_days)
            self.valid_until = timezone.now() + _time_delta
        super(IdentityRadiusAccount, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _('radius secret reset token')
        verbose_name_plural = _('radius secret reset tokens')
    def __str__(self):
        return '{} {}'.format(self.radius_account, self.is_active)
|
"""Test the old numpy pickler, compatibility version."""
import shutil
import os
import random
import nose
from tempfile import mkdtemp
# numpy_pickle is not a drop-in replacement of pickle, as it takes
# filenames instead of open files as arguments.
from joblib import numpy_pickle_compat
###############################################################################
# Test fixtures
# Shared fixture state: 'dir' is a scratch directory created by
# setup_module(), 'filename' a pickle path inside it.
env = dict()
def setup_module():
    """Create the scratch directory and the pickle path used by the tests."""
    tmpdir = mkdtemp()
    env['dir'] = tmpdir
    env['filename'] = os.path.join(tmpdir, 'test.pkl')
def teardown_module():
    """Delete the scratch directory and everything inside it."""
    scratch = env['dir']
    shutil.rmtree(scratch)
def test_z_file():
    # Round-trip a small byte payload through write_zfile/read_zfile and
    # check it comes back unchanged.
    target = env['filename'] + str(random.randint(0, 1000))
    payload = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar')
    with open(target, 'wb') as fh:
        numpy_pickle_compat.write_zfile(fh, payload)
    with open(target, 'rb') as fh:
        roundtripped = numpy_pickle_compat.read_zfile(fh)
    nose.tools.assert_equal(payload, roundtripped)
|
import config
from subprocess import Popen, PIPE
from util import gform
import math
import os
import shutil
import numpy as num
import scipy
import tempfile
import sys
from pyrocko import io, util
pjoin = os.path.join
class Gfdb:
    """Wrapper around a kiwi Green's-function database, driven through the
    external gfdb_info/gfdb_extract/gfdb_build programs over pipes."""

    def __init__(self, gfdbpath):
        """Read grid metadata (dt, dx, dz, firstx, firstz, nchunks, nx, nz, ng)
        by running the gfdb_info program on *gfdbpath*."""
        self.path = gfdbpath
        self.extractor = None
        self.builder = None
        self.tempdir = None
        self.tempfilebase = None
        gfdb_infos_str = {}
        cmd = [ config.gfdb_info_prog, gfdbpath ]
        gfdb_info_process = Popen( cmd, stdout=PIPE)
        self.string = gfdb_info_process.communicate()[0]
        if gfdb_info_process.poll():
            sys.exit('fatal: could not get gfdb information')
        for line in self.string.splitlines():
            # Split on the first '=' only, so values containing '=' don't crash.
            k, v = line.split('=', 1)
            gfdb_infos_str[k] = v.strip()
        for k in [ 'dt', 'dx', 'dz', 'firstx', 'firstz' ]:
            setattr(self, k, float( gfdb_infos_str[k] ))
        for k in [ 'nchunks', 'nx', 'nz', 'ng' ]:
            setattr(self, k, int( gfdb_infos_str[k] ))

    def dump_traces(self, x, z, format='table'):
        """Ask gfdb_extract to dump the ng traces at (x, z) into temp files.

        Returns a list of file names, with None for traces the extractor
        could not produce.
        """
        if not self.extractor:
            # Lazily start a long-lived extractor process plus a temp dir.
            self.extractor = Popen( [config.gfdb_extract_prog, self.path], stdin=PIPE, stdout=PIPE, close_fds=True)
            self.tempdir = tempfile.mkdtemp("","gfdb_extract-")
            self.tempfilebase = pjoin( self.tempdir, 'trace' )
        fns = []
        for ig in range(self.ng):
            fn = '%s-%i.%s' % (self.tempfilebase, ig, format)
            self.extractor.stdin.write("%f %f %i '%s'\n" % (x,z,ig+1,fn))
            self.extractor.stdin.flush()
            answer = self.extractor.stdout.readline()
            if answer.strip() == 'ok':
                fns.append(fn)
            else:
                fns.append(None)
        return fns

    def get_traces_slow( self, x, z ):
        """Load the traces at (x, z) as (time, data) array pairs (None where
        extraction failed)."""
        fns = self.dump_traces(x,z)
        traces = []
        for fn in fns:
            if fn:
                # `num.float` was removed from NumPy; the builtin float is the
                # documented equivalent (float64).
                tab = num.loadtxt(fn, dtype=float).transpose()
                if tab.ndim == 2:
                    time = tab[0]
                    data = tab[1]
                else:
                    # Single-sample file: loadtxt returns a 1-D array.
                    time = num.array([tab[0].copy()])
                    data = num.array([tab[1].copy()])
                traces.append( (time,data) )
            else:
                traces.append( None )
        return traces

    def get_traces_pyrocko(self, x,z):
        """Load the traces at (x, z) as pyrocko trace objects, with grid
        indices encoded into network/station/channel codes."""
        fns = self.dump_traces(x,z, format='mseed')
        traces = []
        for igm, fn in enumerate(fns):
            ig = igm+1
            if fn:
                for tr in io.load(fn):
                    # Snap (x, z) to the nearest grid node.
                    ix = 1 + int(round((x-self.firstx)/self.dx))
                    iz = 1 + int(round((z-self.firstz)/self.dz))
                    gridx = self.firstx + (ix-1)*self.dx
                    gridz = self.firstz + (iz-1)*self.dz
                    sx = util.base36encode(ix)
                    sz = util.base36encode(iz)
                    tr.meta = {'x':gridx, 'z':gridz, 'ig':ig}
                    tr.set_codes(network=sz, station=sx, channel='%i' % ig)
                    traces.append(tr)
        return traces

    def put_traces_slow( self, x,z, traces):
        """Feed ng (time, data) pairs at (x, z) into the database via
        gfdb_build. Entries that are None are skipped."""
        assert len(traces) == self.ng
        if not self.builder:
            self.builder = Popen( [config.gfdb_build_prog, self.path], stdin=PIPE, stdout=PIPE, close_fds=True)
            self.tempdir = tempfile.mkdtemp('','gfdb_build-')
            self.tempfilebase = pjoin(self.tempdir, 'trace')
        for ig, xx in enumerate(traces):
            if xx is not None:
                (time, data) = xx
                tab = num.array((time,data)).transpose()
                if len(tab) > 0:
                    fn = '%s-%i.table' % (self.tempfilebase, ig)
                    num.savetxt(fn, tab)
                    self.builder.stdin.write("%f %f %i '%s'\n" % (x,z,ig+1,fn))
                    answer = self.builder.stdout.readline()
                    # The builder echoes the file name on success.
                    assert answer.strip() == fn

    def terminate(self):
        """Shut down helper processes and remove the temp directory."""
        if self.extractor:
            self.extractor.stdin.close()
            self.extractor.stdout.close()
            self.extractor.wait()
            self.extractor = None
        if self.builder:
            self.builder.stdin.close()
            self.builder.stdout.close()
            self.builder.wait()
            self.builder = None
        if self.tempdir:
            shutil.rmtree(self.tempdir)
            self.tempdir = None
            self.tempfilebase = None

    def __del__(self):
        self.terminate()

    def __str__(self):
        return '''
GFDB: %s
dt [s]: %s
dx [m]: %s
dz [m]: %s
firstx [m]: %s
firstz [m]: %s
nx : %6i
nz : %6i
ng : %6i
'''.strip() % tuple([self.path] + [ gform(x,5) for x in (self.dt, self.dx, self.dz,
                                 self.firstx, self.firstz) ] + [ self.nx, self.nz, self.ng ])
|
# Copyright 2010-2019 Dan Elliott, Russell Valentine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bottle import response, request, HTTPError, HTTPResponse, static_file
import util
import logging
import pytz
from datetime import timedelta, datetime
import time
def confCache():
    '''Checks cache request headers. Sets cache response headers. Browser checks etag every request.'''
    def decorator(func):
        def wrapper(*args, **kwargs):
            # (version, last_modified_datetime) pair for the current config.
            confVersion = util.getConfVersion()
            ifmatch = request.environ.get('HTTP_IF_NONE_MATCH')
            # Client already holds the current version: short-circuit with 304.
            if(ifmatch == str(confVersion[0])):
                header = {}
                header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
                return HTTPResponse(status=304, header=header)
            lastModified = confVersion[1].astimezone(pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT")
            response.headers['Cache-Control'] = 'private, no-cache, must-revalidate'
            #, max-age=' + str(td.total_seconds())
            # Reuse confVersion instead of calling util.getConfVersion() again,
            # so ETag and Last-Modified always describe the same version.
            response.headers['ETag'] = confVersion[0]
            response.headers['Last-Modified'] = lastModified
            td = timedelta(days=30)
            response.headers['Expires'] = (datetime.utcnow() + td).strftime("%a, %d %b %Y %H:%M:%S GMT")
            return func(*args, **kwargs)
        return wrapper
    return decorator
def noCache():
    '''Sets response headers to make the browser not cache.'''
    def decorator(func):
        def wrapper(*args, **kwargs):
            hdrs = response.headers
            # A date in the past plus Pragma disables caching everywhere.
            hdrs['Pragma'] = 'no-cache'
            hdrs['Expires'] = 'Fri, 30 Oct 1998 14:19:41 GMT'
            return func(*args, **kwargs)
        return wrapper
    return decorator
def static_file_expires_monday(filename, root, mimetype='auto', guessmime=True, download=False):
    '''Service a static file that expires next Monday at 00:00.

    BUG FIX: this module does `from datetime import timedelta, datetime`,
    so the module-level name `datetime` is the *class*; the original calls
    `datetime.date.today()`, `datetime.timedelta(...)` and
    `datetime.datetime(...)` all raised AttributeError. Import the module
    locally under a distinct name instead.
    '''
    import datetime as dt
    now = util.getDateFromParam('now')
    today = dt.date.today()
    # Next Monday: back up to this week's Monday, then add one week.
    next_monday = today + dt.timedelta(days=-today.weekday(), weeks=1)
    cacheTime = dt.datetime(next_monday.year, next_monday.month, next_monday.day, 00, 00, tzinfo=now.tzinfo)
    delta = cacheTime - now
    seconds = delta.days*86400 + delta.seconds
    response.headers['Cache-Control'] = 'private, max-age='+str(seconds)
    response.headers['Expires'] = cacheTime.astimezone(pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT")
    return static_file(filename, root, mimetype, guessmime, download)
# Local Variables:
# indent-tabs-mode: t
# python-indent: 4
# tab-width: 4
# End:
|
# Generated by Django 2.0.4 on 2018-10-13 18:17
from django.db import migrations
class Migration(migrations.Migration):
    """Rename user.school_yearZ -> school_year and user.departmentZ -> department."""
    dependencies = [
        ('api', '0055_auto_20181014_0310'),
    ]
    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='school_yearZ',
            new_name='school_year',
        ),
        migrations.RenameField(
            model_name='user',
            old_name='departmentZ',
            new_name='department',
        ),
    ]
|
from typing import Any, Iterator
import psycopg2.errors
from patabase import Postgres
from qucom.exceptions import *
def _error_handler(func):
    """Decorator for Qucom methods: translate psycopg2 errors into the
    package's exception types.

    Assumes the wrapped callable is a Qucom method whose first positional
    argument after self is the table name; an int second positional argument
    (or the 'pk' keyword) is used as the record id in error messages.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped method's name/docstring
    def wrapper(*args, **kwargs):
        assert isinstance(args[0], Qucom)
        if 'table' in kwargs:
            table = kwargs['table']
        elif len(args) > 1:
            table = args[1]
        else:
            # Robustness: the original unconditionally read args[1] and could
            # itself raise IndexError on a kwargs-only call.
            table = None
        if 'pk' in kwargs:
            pk = kwargs['pk']
        elif len(args) > 2 and isinstance(args[2], int):
            pk = args[2]
        else:
            pk = -1
        try:
            return func(*args, **kwargs)
        except psycopg2.errors.InvalidTextRepresentation:
            raise InvalidValue(f'Invalid value type ({str(kwargs)})') from None
        except psycopg2.errors.UndefinedTable:
            raise UndefinedTable(f'Table not found (table={table})') from None
        except psycopg2.errors.UndefinedColumn:
            raise UndefinedColumn(f'Column not found ({str(kwargs)})') from None
        except psycopg2.errors.UniqueViolation:
            raise DuplicateRecord(f'Duplicate record ({str(kwargs)})') from None
        except psycopg2.errors.NotNullViolation:
            raise NotNull(f'Not NULL violation ({str(kwargs)})') from None
        except psycopg2.errors.RaiseException as e:
            # The SQL emitted by edit()/delete() raises these custom messages.
            if 'Nothing updated' in str(e):
                raise NothingUpdated(f'Record not found (id={pk})') from None
            if 'Nothing deleted' in str(e):
                raise NothingDeleted(f'Record not found (id={pk})') from None
            raise e
    return wrapper
class Qucom(object):
def __init__(self, user: str, password: str, database: str, host: str = 'localhost', port=5432):
try:
self._db = Postgres(
host=host,
port=port,
user=user,
password=password,
database=database)
except psycopg2.OperationalError as e:
if 'Connection refused' in str(e):
raise ConnectionRefused('Could not connect to server') from None
raise e
    @_error_handler
    def add(self, table: str, **parameters: Any) -> int:
        """Insert a row into *table* and return the new row's id.

        NOTE(review): *table* and the parameter names are interpolated into
        the SQL unescaped -- only values go through placeholders. Callers
        must not pass untrusted identifiers.
        """
        if not parameters:
            raise RequiredArgument('Parameters can not be empty')
        # One %s placeholder per value; keys become the column list.
        placeholders = ['%s' for _ in parameters]
        sql = f'''
            insert into {table} ({', '.join(parameters)})
            values ({', '.join(placeholders)})
            returning id
        '''
        rows = self._db.select(sql, *parameters.values())
        row = next(rows)
        return row['id']
@_error_handler
def edit(self, table: str, pk: int, **parameters: Any) -> int:
if not parameters:
raise RequiredArgument('Parameters can not be empty')
fields = [f'{key} = %s' for key in parameters if parameters[key]]
values = [parameters[key] for key in parameters if parameters[key]]
sql = f'''
do $$
begin
update {table}
set {', '.join(fields)}
where id = %s;
if not found then
raise exception 'Nothing updated';
end if;
end
$$
'''
return self._db.perform(sql, *values, pk)
    @_error_handler
    def delete(self, table: str, pk: int) -> None:
        """Delete row *pk* from *table*; raises NothingDeleted if it doesn't exist.

        NOTE(review): *table* is interpolated into the SQL unescaped --
        callers must not pass untrusted identifiers.
        """
        sql = f'''
            do $$
            begin
                if exists(select from {table} where id = %s) then
                    delete
                    from {table}
                    where id = %s;
                else
                    raise exception 'Nothing deleted';
                end if;
            end
            $$
        '''
        # pk is passed twice: once for the exists() check, once for the delete.
        self._db.perform(sql, pk, pk)
    @_error_handler
    def list(self, table: str, user_id: int = None, limit: int = 10, offset: int = 0) -> list:
        """Return a page of rows from the {table}_facade view.

        When *user_id* is given, only rows whose user_ids array contains it.
        NOTE(review): table/user_id/limit/offset are interpolated into the SQL
        unescaped; the int annotations are the only guard -- don't pass
        untrusted values.
        """
        sql = f'''
            select *
            from {table}_facade
        '''
        if user_id:
            sql += f' where {user_id} = any(user_ids)'
        sql += f' limit {limit}'
        sql += f' offset {offset}'
        return list(self._db.select(sql))
@_error_handler
def get(self, table: str, pk: int, user_id: int = None) -> dict:
sql = f'''
select *
from {table}_facade
where id = %s
'''
if user_id:
sql += f' and {user_id} = any(user_ids)'
return next(self._db.select(sql, pk), dict())
@_error_handler
def query(self, table: str, q: str, fields: list, user_id: int = None, limit: int = 10, offset: int = 0) -> list:
filters = [f'{key}::varchar like %s' for key in fields]
values = [f'%{q}%' for _ in fields]
sql = f'''
select *
from {table}_facade
where {' or '.join(filters)}
'''
if user_id:
sql += f' and {user_id} = any(user_ids)'
sql += f' limit {limit}'
sql += f' offset {offset}'
return list(self._db.select(sql, *values))
@_error_handler
def calendar(self, table: str) -> list:
sql = f'''
select *
from {table}_calendar
'''
return list(self._db.select(sql))
@_error_handler
def columns(self, table: str, exclusions: list = None):
sql = f'''
select column_name, is_nullable, data_type
from information_schema.columns
where table_schema = 'public'
and table_name = %s
'''
rows = []
for row in self._db.select(sql, table):
if exclusions and row['column_name'] in exclusions:
continue
rows.append(row)
return rows
@_error_handler
def count(self, table: str) -> int:
sql = f'''
select count(*)
from {table}_facade
'''
rows = self._db.select(sql)
row = next(rows)
return row['count']
def perform(self, sql: str, *args: Any) -> int:
return self._db.perform(sql, *args)
def select(self, sql: str, *args: Any) -> Iterator[dict]:
return self._db.select(sql, *args)
def procedure(self, func_name: str, **parameters: Any) -> int:
return self._db.procedure(func_name, **parameters)
def function(self, func_name: str, **parameters: Any) -> Iterator[dict]:
return self._db.function(func_name, **parameters)
|
import torch
import torch.distributed as dist
def get_world_size():
    """Number of processes in the distributed group, or 1 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of this process in the distributed group, or 0 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def reduce_loss_dict(loss_dict):
    """
    Average the per-process loss dict onto rank 0.

    In a single-process run the input dict is returned unchanged; otherwise
    the losses are stacked, summed across ranks via dist.reduce, and divided
    by world size on rank 0 only (the other ranks receive unscaled values).
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        # Sort keys so every rank stacks the tensors in the same order.
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[name] for name in names], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            stacked /= world_size
        return dict(zip(names, stacked))
|
"""
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import netaddr
import time
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.networking.networks.common.behaviors \
import NetworkingBaseBehaviors, NetworkingResponse
from cloudcafe.networking.networks.common.constants \
import NeutronResponseCodes
from cloudcafe.networking.networks.common.exceptions \
import NetworkIDMissingException, ResourceBuildException,\
ResourceDeleteException, ResourceGetException, ResourceListException,\
ResourceUpdateException
class PortsBehaviors(NetworkingBaseBehaviors):
    """CRUD behaviors for Neutron ports with retry and over-limit handling."""
    def __init__(self, ports_client, ports_config):
        super(PortsBehaviors, self).__init__()
        self.config = ports_config
        self.client = ports_client
    def get_subnet_ids_from_fixed_ips(self, fixed_ips):
        """
        @summary: gets the subnet ids from the port fixed IPs attribute
        @param fixed_ips: list of fixed_ips
        @type fixed_ips: list(dict)
        @return: subnet ids and errors lists from fixed IPs
        @rtype: dict
        """
        # Errors list will contain unexpected fixed IPs if any
        results = {'subnet_ids': [], 'errors': []}
        for fixed_ip in fixed_ips:
            if 'subnet_id' not in fixed_ip or fixed_ip['subnet_id'] is None:
                results['errors'].append(fixed_ip)
            else:
                results['subnet_ids'].append(fixed_ip['subnet_id'])
        return results
    def format_fixed_ips(self, fixed_ips):
        """
        @summary: formats fixed ips for assertions removing zeros on
            IPv6 addresses
        @param fixed_ips: list of fixed_ips
        @type fixed_ips: list(dict)
        @return: formated fixed_ips
        @rtype: list(dict)
        """
        result = [dict(subnet_id=fixed_ip['subnet_id'], ip_address=str(
                  netaddr.IPAddress(fixed_ip['ip_address'])))
                  for fixed_ip in fixed_ips]
        return result
    def create_port(self, network_id, name=None, admin_state_up=None,
                    mac_address=None, fixed_ips=None, device_id=None,
                    device_owner=None, tenant_id=None, security_groups=None,
                    resource_build_attempts=None, raise_exception=True,
                    use_exact_name=False, poll_interval=None,
                    timeout=None, use_over_limit_retry=None):
        """
        @summary: Creates and verifies a Port is created as expected
        @param network_id: network port is associated with (CRUD: CR)
        @type network_id: string
        @param name: human readable name for the port, may not be unique.
            (CRUD: CRU)
        @type name: string
        @param admin_state_up: true or false (default true), the admin state
            of the port. If down, the port does not forward packets (CRUD: CRU)
        @type admin_state_up: bool
        @param mac_address: mac address to use on the port (CRUD: CR)
        @type mac_address: string
        @param fixed_ips: ip addresses for the port associating the
            port with the subnets where the IPs come from (CRUD: CRU)
        @type fixed_ips: list(dict)
        @param device_id: id of device using this port (CRUD: CRUD)
        @type device_id: string
        @param device_owner: entity using this port (ex. dhcp agent,CRUD: CRUD)
        @type device_owner: string
        @param tenant_id: owner of the port (CRUD: CR)
        @type tenant_id: string
        @param security_groups: ids of any security groups associated with the
            port (CRUD: CRUD)
        @type security_groups: list(dict)
        @param resource_build_attempts: number of API retries
        @type resource_build_attempts: int
        @param raise_exception: flag to raise an exception if the Port was not
            created or to return None
        @type raise_exception: bool
        @param use_exact_name: flag if the exact name given should be used
        @type use_exact_name: bool
        @param poll_interval: sleep time interval between API retries
        @type poll_interval: int
        @param timeout: port update timeout for over limit retries
        @type timeout: int
        @param use_over_limit_retry: flag to enable/disable the port update
            over limits retries
        @type use_over_limit_retry: bool
        @return: NetworkingResponse object with api response and failure list
        @rtype: common.behaviors.NetworkingResponse
        """
        if not network_id:
            raise NetworkIDMissingException
        if name is None:
            name = rand_name(self.config.starts_with_name)
        elif not use_exact_name:
            name = rand_name(name)
        poll_interval = poll_interval or self.config.api_poll_interval
        resource_build_attempts = (resource_build_attempts or
                                   self.config.api_retries)
        # Fall back to the config default only when no value was passed, so
        # an explicit False actually disables the over-limit retries.
        if use_over_limit_retry is None:
            use_over_limit_retry = self.config.use_over_limit_retry
        timeout = timeout or self.config.resource_create_timeout
        result = NetworkingResponse()
        err_msg = 'Port Create failure'
        for attempt in range(resource_build_attempts):
            self._log.debug('Attempt {0} of {1} building port {2}'.format(
                attempt + 1, resource_build_attempts, name))
            resp = self.client.create_port(
                network_id=network_id, name=name,
                admin_state_up=admin_state_up, mac_address=mac_address,
                fixed_ips=fixed_ips, device_id=device_id,
                device_owner=device_owner, tenant_id=tenant_id,
                security_groups=security_groups)
            if use_over_limit_retry:
                endtime = time.time() + int(timeout)
                retry_msg = ('OverLimit retry with a {0}s timeout creating a '
                             'port on network {1}').format(timeout, network_id)
                self._log.info(retry_msg)
                while (resp.status_code ==
                        NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and
                        time.time() < endtime):
                    resp = self.client.create_port(
                        network_id=network_id, name=name,
                        admin_state_up=admin_state_up, mac_address=mac_address,
                        fixed_ips=fixed_ips, device_id=device_id,
                        device_owner=device_owner, tenant_id=tenant_id,
                        security_groups=security_groups)
                    time.sleep(poll_interval)
            resp_check = self.check_response(resp=resp,
                status_code=NeutronResponseCodes.CREATE_PORT, label=name,
                message=err_msg, network_id=network_id)
            result.response = resp
            if not resp_check:
                return result
            # Failures will be an empty list if the create was successful the
            # first time
            result.failures.append(resp_check)
            time.sleep(poll_interval)
        else:
            err_msg = (
                'Unable to create {0} port after {1} attempts: '
                '{2}').format(name, resource_build_attempts, result.failures)
            self._log.error(err_msg)
            if raise_exception:
                raise ResourceBuildException(err_msg)
            return result
    def update_port(self, port_id, name=None, admin_state_up=None,
                    fixed_ips=None, device_id=None, device_owner=None,
                    security_groups=None, resource_update_attempts=None,
                    raise_exception=False, poll_interval=None,
                    timeout=None, use_over_limit_retry=None):
        """
        @summary: Updates and verifies a specified Port
        @param port_id: The UUID for the port
        @type port_id: string
        @param name: human readable name for the port, may not be unique
            (CRUD: CRU)
        @type name: string
        @param admin_state_up: true or false (default true), the admin state
            of the port. If down, the port does not forward packets (CRUD: CRU)
        @type admin_state_up: bool
        @param fixed_ips: ip addresses for the port associating the port with
            the subnets where the IPs come from (CRUD: CRU)
        @type fixed_ips: list(dict)
        @param device_id: id of device using this port (CRUD: CRUD)
        @type device_id: string
        @param string device_owner: entity using this port (ex. dhcp agent,
            CRUD: CRUD)
        @type device_owner: string
        @param security_groups: ids of any security groups associated with the
            port (CRUD: CRUD)
        @type security_groups: list(dict)
        @param resource_update_attempts: number of API retries
        @type resource_update_attempts: int
        @param raise_exception: flag to raise an exception if the
            Port was not updated or to return None
        @type raise_exception: bool
        @param poll_interval: sleep time interval between API retries
        @type poll_interval: int
        @param timeout: port update timeout for over limit retries
        @type timeout: int
        @param use_over_limit_retry: flag to enable/disable the port update
            over limits retries
        @type use_over_limit_retry: bool
        @return: NetworkingResponse object with api response and failure list
        @rtype: common.behaviors.NetworkingResponse
        """
        poll_interval = poll_interval or self.config.api_poll_interval
        resource_update_attempts = (resource_update_attempts or
                                    self.config.api_retries)
        # Honor an explicit False; only None falls back to the config value.
        if use_over_limit_retry is None:
            use_over_limit_retry = self.config.use_over_limit_retry
        timeout = timeout or self.config.resource_update_timeout
        result = NetworkingResponse()
        err_msg = 'Port Update failure'
        for attempt in range(resource_update_attempts):
            self._log.debug('Attempt {0} of {1} updating port {2}'.format(
                attempt + 1, resource_update_attempts, port_id))
            resp = self.client.update_port(
                port_id=port_id, name=name, admin_state_up=admin_state_up,
                fixed_ips=fixed_ips, device_id=device_id,
                device_owner=device_owner, security_groups=security_groups)
            if use_over_limit_retry:
                endtime = time.time() + int(timeout)
                retry_msg = ('OverLimit retry with a {0}s timeout updating '
                             'port {1}').format(timeout, port_id)
                self._log.info(retry_msg)
                while (resp.status_code ==
                        NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and
                        time.time() < endtime):
                    resp = self.client.update_port(
                        port_id=port_id, name=name,
                        admin_state_up=admin_state_up,
                        fixed_ips=fixed_ips, device_id=device_id,
                        device_owner=device_owner,
                        security_groups=security_groups)
                    time.sleep(poll_interval)
            resp_check = self.check_response(resp=resp,
                status_code=NeutronResponseCodes.UPDATE_PORT,
                label=port_id, message=err_msg)
            result.response = resp
            if not resp_check:
                return result
            # Failures will be an empty list if the update was successful the
            # first time
            result.failures.append(resp_check)
            time.sleep(poll_interval)
        else:
            err_msg = (
                'Unable to update {0} port after {1} attempts: '
                '{2}').format(port_id, resource_update_attempts,
                              result.failures)
            self._log.error(err_msg)
            if raise_exception:
                raise ResourceUpdateException(err_msg)
            return result
    def get_port(self, port_id, resource_get_attempts=None,
                 raise_exception=False, poll_interval=None,
                 timeout=None, use_over_limit_retry=None):
        """
        @summary: Shows and verifies a specified port
        @param port_id: The UUID for the port
        @type port_id: string
        @param resource_get_attempts: number of API retries
        @type resource_get_attempts: int
        @param raise_exception: flag to raise an exception if the get
            Port was not as expected or to return None
        @type raise_exception: bool
        @param poll_interval: sleep time interval between API retries
        @type poll_interval: int
        @param timeout: port get timeout for over limit retries
        @type timeout: int
        @param use_over_limit_retry: flag to enable/disable the port update
            over limits retries
        @type use_over_limit_retry: bool
        @return: NetworkingResponse object with api response and failure list
        @rtype: common.behaviors.NetworkingResponse
        """
        poll_interval = poll_interval or self.config.api_poll_interval
        resource_get_attempts = (resource_get_attempts or
                                 self.config.api_retries)
        # Honor an explicit False; only None falls back to the config value.
        if use_over_limit_retry is None:
            use_over_limit_retry = self.config.use_over_limit_retry
        timeout = timeout or self.config.resource_get_timeout
        result = NetworkingResponse()
        err_msg = 'Port Get failure'
        for attempt in range(resource_get_attempts):
            self._log.debug('Attempt {0} of {1} getting port {2}'.format(
                attempt + 1, resource_get_attempts, port_id))
            resp = self.client.get_port(port_id=port_id)
            if use_over_limit_retry:
                endtime = time.time() + int(timeout)
                retry_msg = ('OverLimit retry with a {0}s timeout getting '
                             'port {1}').format(timeout, port_id)
                self._log.info(retry_msg)
                while (resp.status_code ==
                        NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and
                        time.time() < endtime):
                    resp = self.client.get_port(port_id=port_id)
                    time.sleep(poll_interval)
            resp_check = self.check_response(resp=resp,
                status_code=NeutronResponseCodes.GET_PORT,
                label=port_id, message=err_msg)
            result.response = resp
            if not resp_check:
                return result
            # Failures will be an empty list if the get was successful the
            # first time
            result.failures.append(resp_check)
            time.sleep(poll_interval)
        else:
            err_msg = (
                'Unable to GET {0} port after {1} attempts: '
                '{2}').format(port_id, resource_get_attempts, result.failures)
            self._log.error(err_msg)
            if raise_exception:
                raise ResourceGetException(err_msg)
            return result
    def list_ports(self, port_id=None, network_id=None, name=None, status=None,
                   admin_state_up=None, device_id=None, tenant_id=None,
                   device_owner=None, mac_address=None, limit=None,
                   marker=None, page_reverse=None, resource_list_attempts=None,
                   raise_exception=False, poll_interval=None, timeout=None,
                   use_over_limit_retry=None):
        """
        @summary: Lists ports and verifies the response is the expected
        @param port_id: The UUID for the port to filter by
        @type port_id: string
        @param network_id: network ID to filter by
        @type network_id: string
        @param name: port name to filter by
        @type name: string
        @param status: port status to filter by
        @type status: string
        @param admin_state_up: Admin state of the port to filter by
        @type admin_state_up: bool
        @param device_id: id of device to filter by
        @type device_id: string
        @param tenant_id: owner of the port to filter by
        @type tenant_id: string
        @param device_owner: device owner to filter by
        @type device_owner: string
        @param mac_address: mac address to filter by
        @type mac_address: string
        @param limit: page size
        @type limit: int
        @param marker: Id of the last item of the previous page
        @type marker: string
        @param page_reverse: direction of the page
        @type page_reverse: bool
        @param resource_list_attempts: number of API retries
        @type resource_list_attempts: int
        @param raise_exception: flag to raise an exception if the list
            Port was not as expected or to return None
        @type raise_exception: bool
        @param poll_interval: sleep time interval between API retries
        @type poll_interval: int
        @param timeout: port get timeout for over limit retries
        @type timeout: int
        @param use_over_limit_retry: flag to enable/disable the port update
            over limits retries
        @type use_over_limit_retry: bool
        @return: NetworkingResponse object with api response and failure list
        @rtype: common.behaviors.NetworkingResponse
        """
        poll_interval = poll_interval or self.config.api_poll_interval
        resource_list_attempts = (resource_list_attempts or
                                  self.config.api_retries)
        # Honor an explicit False; only None falls back to the config value.
        if use_over_limit_retry is None:
            use_over_limit_retry = self.config.use_over_limit_retry
        timeout = timeout or self.config.resource_get_timeout
        result = NetworkingResponse()
        err_msg = 'Port List failure'
        for attempt in range(resource_list_attempts):
            self._log.debug('Attempt {0} of {1} with port list'.format(
                attempt + 1, resource_list_attempts))
            resp = self.client.list_ports(
                port_id=port_id, network_id=network_id, name=name,
                status=status, admin_state_up=admin_state_up,
                device_id=device_id, tenant_id=tenant_id,
                device_owner=device_owner, mac_address=mac_address,
                limit=limit, marker=marker, page_reverse=page_reverse)
            if use_over_limit_retry:
                endtime = time.time() + int(timeout)
                retry_msg = ('OverLimit retry with a {0}s timeout listing '
                             'ports').format(timeout)
                self._log.info(retry_msg)
                while (resp.status_code ==
                        NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and
                        time.time() < endtime):
                    resp = self.client.list_ports(
                        port_id=port_id, network_id=network_id, name=name,
                        status=status, admin_state_up=admin_state_up,
                        device_id=device_id, tenant_id=tenant_id,
                        device_owner=device_owner, mac_address=mac_address,
                        limit=limit, marker=marker, page_reverse=page_reverse)
                    time.sleep(poll_interval)
            resp_check = self.check_response(resp=resp,
                status_code=NeutronResponseCodes.LIST_PORTS,
                label='', message=err_msg)
            result.response = resp
            if not resp_check:
                return result
            # Failures will be an empty list if the list was successful the
            # first time
            result.failures.append(resp_check)
            time.sleep(poll_interval)
        else:
            err_msg = (
                'Unable to LIST ports after {0} attempts: '
                '{1}').format(resource_list_attempts, result.failures)
            self._log.error(err_msg)
            if raise_exception:
                raise ResourceListException(err_msg)
            return result
    def delete_port(self, port_id, resource_delete_attempts=None,
                    raise_exception=False, poll_interval=None,
                    timeout=None, use_over_limit_retry=None):
        """
        @summary: Deletes and verifies a specified port is deleted
        @param string port_id: The UUID for the port
        @type port_id: string
        @param resource_delete_attempts: number of API retries
        @type resource_delete_attempts: int
        @param raise_exception: flag to raise an exception if the deleted
            Port was not as expected or to return None
        @type raise_exception: bool
        @param poll_interval: sleep time interval between API retries
        @type poll_interval: int
        @param timeout: port delete timeout for over limit retries
        @type timeout: int
        @param use_over_limit_retry: flag to enable/disable the port delete
            over limits retries
        @type use_over_limit_retry: bool
        @return: NetworkingResponse object with api response and failure list
        @rtype: common.behaviors.NetworkingResponse
        """
        poll_interval = poll_interval or self.config.api_poll_interval
        resource_delete_attempts = (resource_delete_attempts or
                                    self.config.api_retries)
        # Honor an explicit False; only None falls back to the config value.
        if use_over_limit_retry is None:
            use_over_limit_retry = self.config.use_over_limit_retry
        timeout = timeout or self.config.resource_delete_timeout
        result = NetworkingResponse()
        for attempt in range(resource_delete_attempts):
            self._log.debug('Attempt {0} of {1} deleting port {2}'.format(
                attempt + 1, resource_delete_attempts, port_id))
            resp = self.client.delete_port(port_id=port_id)
            if use_over_limit_retry:
                endtime = time.time() + int(timeout)
                retry_msg = ('OverLimit retry with a {0}s timeout deleting '
                             'port {1}').format(timeout, port_id)
                self._log.info(retry_msg)
                while (resp.status_code ==
                        NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and
                        time.time() < endtime):
                    resp = self.client.delete_port(port_id=port_id)
                    time.sleep(poll_interval)
            result.response = resp
            # Delete response is without entity so resp_check can not be used
            if (resp.ok and
                    resp.status_code == NeutronResponseCodes.DELETE_PORT):
                return result
            err_msg = ('{port} Port Delete failure, expected status '
                'code: {expected_status}. Response: {status} {reason} '
                '{content}').format(
                port=port_id,
                expected_status=NeutronResponseCodes.DELETE_PORT,
                status=resp.status_code, reason=resp.reason,
                content=resp.content)
            self._log.error(err_msg)
            result.failures.append(err_msg)
            time.sleep(poll_interval)
        else:
            err_msg = (
                'Unable to DELETE {0} port after {1} attempts: '
                '{2}').format(port_id, resource_delete_attempts,
                              result.failures)
            self._log.error(err_msg)
            if raise_exception:
                raise ResourceDeleteException(err_msg)
            return result
    def clean_port(self, port_id, timeout=None, poll_interval=None):
        """
        @summary: deletes a port within a time out
        @param string port_id: The UUID for the port
        @type port_id: string
        @param timeout: seconds to wait for the port to be deleted
        @type timeout: int
        @param poll_interval: sleep time interval between API delete/get calls
        @type poll_interval: int
        @return: None if delete was successful or the undeleted port_id
        @rtype: None or string
        """
        timeout = timeout or self.config.resource_delete_timeout
        poll_interval = poll_interval or self.config.api_poll_interval
        endtime = time.time() + int(timeout)
        log_msg = 'Deleting {0} port within a {1}s timeout '.format(
            port_id, timeout)
        self._log.info(log_msg)
        resp = None
        while time.time() < endtime:
            try:
                self.client.delete_port(port_id=port_id)
                resp = self.client.get_port(port_id=port_id)
            except Exception as err:
                err_msg = ('Encountered an exception deleting a port with '
                    'the clean_port method. Exception: {0}').format(err)
                self._log.error(err_msg)
            if (resp is not None and
                    resp.status_code == NeutronResponseCodes.NOT_FOUND):
                return None
            time.sleep(poll_interval)
        err_msg = 'Unable to delete {0} port within a {1}s timeout'.format(
            port_id, timeout)
        self._log.error(err_msg)
        return port_id
    def clean_ports(self, ports_list, timeout=None, poll_interval=None):
        """
        @summary: deletes each port from a list calling clean_port
        @param ports_list: list of ports UUIDs
        @type ports_list: list(str)
        @param timeout: seconds to wait for the port to be deleted
        @type timeout: int
        @param poll_interval: sleep time interval between API delete/get calls
        @type poll_interval: int
        @return: list of undeleted ports UUIDs
        @rtype: list(str)
        """
        log_msg = 'Deleting ports: {0}'.format(ports_list)
        self._log.info(log_msg)
        undeleted_ports = []
        for port in ports_list:
            result = self.clean_port(port_id=port, timeout=timeout,
                                     poll_interval=poll_interval)
            if result:
                undeleted_ports.append(result)
        if undeleted_ports:
            err_msg = 'Unable to delete ports: {0}'.format(
                undeleted_ports)
            self._log.error(err_msg)
        return undeleted_ports
|
import importlib
import logging
import sys

# Configure the root logger: INFO level, single console handler with a
# timestamp/process/function formatter.
logger = logging.getLogger('root')
logger.setLevel(logging.INFO)
logFormatter = logging.Formatter('%(asctime)s %(process)d [%(funcName)s] %(levelname)s: %(message)s')
console_handler = logging.StreamHandler()
console_handler.setFormatter(logFormatter)
logger.addHandler(console_handler)

# First CLI argument names the config module to import (e.g. "myconfig").
config_module = sys.argv[1]
# Use lazy %-style logging arguments so formatting only happens when the
# record is actually emitted.
logger.info('Using config: %s', config_module)
config = importlib.import_module(config_module)
logger.info('Myvar = %s', config.myvar)
|
class ProgressBar:
    def __init__(self, max_value, modulo_threshold):
        """
        Console progress bar for iterations of a known, fixed length.
        :param max_value: int final iteration value, usually the last index.
        :param modulo_threshold: int modulo controlling how often the bar is redrawn.
        """
        self.max_value = max_value
        self.modulo_threshold = modulo_threshold
    def update(self, i):
        """
        Redraw the progress bar for the current iteration value when due.
        :param i: int current iteration value (<= max_value), usually an index.
        :return: None
        """
        # Force a redraw near the end even if i is off the modulo grid.
        is_final = (i + self.modulo_threshold - 1) > self.max_value
        if is_final or i % self.modulo_threshold == 0:
            bar_width = 35
            i += 1
            # Keep the exact float expression order so int() truncation
            # matches the original rendering.
            percent = int((i / self.max_value) * 100)
            filled = int(bar_width * (percent / 100))
            padding = "-" * (bar_width - filled)
            if (i + 1) == self.max_value:
                i = self.max_value
            label = "{0}/{1}|".format(i, self.max_value)
            print("\r", "|" + "=" * filled + padding + "| |" + str(percent) + " %| |" + label, end=" ")
|
# NOTE(review): Python 2 script (uses the print statement and raw_input);
# it also relies on sklearn.cross_validation / sklearn.grid_search, which
# were removed in modern scikit-learn -- pinned old versions are required.
import os
import numpy as np
import pandas as pd
import subprocess
import optparse
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn import grid_search
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import grid_search
from sklearn.cross_validation import train_test_split
from stacking_create_training_set import stacking_create_training_set
import xml.etree.ElementTree as ET
###################################################
# Testing the model on pure test set of 0.5 size ##
###################################################
########## OUTPUT: p,r,f1 on test set #############
###################################################
#defining the options of the script
#INPUTS: -i duke_config.xml, -N number_of_configurations, -a amplitude_of_perturbation, -g gold_standard_name
parser = optparse.OptionParser()
parser.add_option('-i','--input', dest = 'file_name', help = 'file_name')
parser.add_option('-N','--number', dest = 'N', help = 'number of classifiers',type = int)
parser.add_option('-a','--amplitude', dest = 'a', help = 'amplitude of perturbation',type = float)
parser.add_option('-g','--gold', dest = 'gold_standard_name', help = 'gold_standard_name')
(options, args) = parser.parse_args()
# Prompt interactively for any option not supplied on the command line.
if options.file_name is None:
    options.file_name = raw_input('Enter file name:')
if options.N is None:
    options.N = raw_input('Enter number of classifiers:')
if options.a is None:
    options.a = 0.05 #default to 0.05
if options.gold_standard_name is None:
    options.gold_standard_name = raw_input('Enter gold standard file name:')
file_name = options.file_name #define the variables
gold_standard_name = options.gold_standard_name
N = int(options.N)
a = float(options.a)
#open files for writing
output_file_raw = open('ensemble_duke_output_raw_T2_n%d.txt' %N,'w')
#output_file = open('ensemble_duke_stacking_output_T2_n%d.txt' %N,'w')
gold_standard_read = open(gold_standard_name,'rU')
#iterate for each tweaked configuration
#read actual threshold
tree = ET.parse(file_name)
root = tree.getroot()
for thresh in root.iter('threshold'):
    central_thresh = float(thresh.text) #central value of the threshold
# Build N thresholds spread symmetrically (width a) around the central value.
thresholds = np.linspace(central_thresh - a/2, central_thresh + a/2, N)
for threshold in thresholds:
    # Rewrite every <threshold> element with the perturbed value, then run
    # Duke on the modified copy of the config.
    for thresh in root.iter('threshold'):
        thresh.text = str(threshold)
        thresh.set('updated','yes')
    tree.write('../../../config/FEIII2016/copy_T2.xml')
    java_command = ["java","-Xmx5000m", "-cp", "../../../lib/Duke/duke-core/target/*:../../../lib/Duke/duke-dist/target/*:../../../lib/Duke/duke-es/target/*:../../../lib/Duke/duke-json/target/*:../../../lib/Duke/duke-lucene/target/*:../../../lib/Duke/duke-mapdb/target/*:../../../lib/Duke/duke-mongodb/target/*:../../../lib/Duke/duke-server/target/*:../../../lib/Duke/lucene_jar/*", "no.priv.garshol.duke.Duke", "--showmatches","--batchsize=100000", "--threads=4", "../../../config/FEIII2016/copy_T2.xml"]
    output_file_raw.write(subprocess.check_output(java_command)) #call duke on the copy.xml file and write the raw output on file
    output_file_raw.write('\n')
    output_file_raw.write('End of run\n')
output_file_raw.close()
#duke_output_parser('ensemble_duke_output_raw_T2_n%d.txt' %N, 'ensemble_duke_output_union_T2_n%d.txt' %N,'FFIEC','SEC')
#create the training set, named training_set_T1_n%d.csv
stacking_create_training_set('ensemble_duke_output_raw_T2_n%d.txt' %N,'training_set_T2_n%d.csv' %N, gold_standard_name, N)
#read it and make machine learning on it
data = pd.read_csv('training_set_T2_n%d.csv' %N)
#turn data into arrays
X = data.values[:,2:(N+2)] #x variables
y = np.array(data['y']) #class variables
#p_scores = []
#r_scores = []
#f1_scores = []
#T = 5
#repeat the split many times and average the results in order to cancel random fluctuations
#for i in range(T):
#stratified split in train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.50, stratify = y, random_state = 20)
# fit an SVM with rbf kernel
clf = SVC( kernel = 'rbf',cache_size = 1000)
#hyper-parameter optimization through grid-search cross validation
parameters = {'gamma' : np.logspace(-9,3,30),'C': np.logspace(-2,10,30)}
gs_rbf = grid_search.GridSearchCV(clf,param_grid=parameters,cv = 4)
gs_rbf.fit(X_train,y_train)
#select the best hyper-parameters
clf = gs_rbf.best_estimator_
#save the output
y_predict = np.reshape(clf.predict(X_test),(len(X_test),1))
#p_scores.append(precision_score(y_test,y_predict,average = 'binary'))
#r_scores.append(recall_score(y_test,y_predict,average = 'binary'))
#f1_scores.append(f1_score(y_test,y_predict,average = 'binary'))
#p = np.mean(p_scores)
#r = np.mean(r_scores)
#f1 = np.mean(f1_scores)
p = precision_score(y_test,y_predict,average = 'binary')
r = recall_score(y_test,y_predict,average = 'binary')
f1 = f1_score(y_test,y_predict,average = 'binary')
print "%.3f,%.3f,%.3f" %(p,r,f1)
|
"""
Created by catzoo
Description: Discord.py role checks
"""
import os
import asqlite
import env_config
class NoDatabase(Exception):
    """Raised when Checks is used before its database connection exists."""
# noinspection PyRedundantParentheses
class Checks:
    """
    This is used for discord.py checks
    Use:
        - developer_check(ctx)
            - Only checks for guild owner / debug_id(s)
        - manager_check(ctx)
            - Checks for level 3 roles / user check
        - moderator_check(ctx)
            - Checks for level 2 roles / user check
        - user_check(ctx)
            - Checks for level 1 roles / user check
    This will store roles in SQLite databases (location depending on env_config)
    """
    def __init__(self):
        # Set asynchronously by create(); every query goes through it.
        self.connection = None
    @classmethod
    async def create(cls):
        """Creates the connection for the class
        Not doing this in __init__ since its async"""
        self = Checks()
        location = f'{env_config.data_folder}/mod.db'
        if not os.path.exists(location):
            # First run: create the database file and the roles table.
            conn = await asqlite.connect(location)
            c = await conn.cursor()
            await c.execute("CREATE TABLE roles (role_id integer NOT NULL, level integer)")
        else:
            conn = await asqlite.connect(location)
        self.connection = conn
        return self
    async def get_cursor(self):
        """Created this for use for most functions
        But can be used to execute commands to the database if needed

        :raises NoDatabase: when the class was built without create()"""
        if self.connection is None:
            raise NoDatabase('Checks is not created!')
        return await self.connection.cursor()
    async def add_role(self, role_id, level):
        """Adds the role to the database."""
        c = await self.get_cursor()
        await c.execute("INSERT INTO roles VALUES (?,?)", (role_id, level))
    async def remove_role(self, role_id):
        """Removes the role from the database."""
        c = await self.get_cursor()
        # role_id must be wrapped in a one-element tuple; a bare int is not a
        # valid sqlite parameter sequence.
        await c.execute("DELETE FROM roles WHERE role_id=?", (role_id,))
    async def get_role(self, role_id):
        """Returns the role from the database.
        Might return None if it doesn't exist"""
        c = await self.get_cursor()
        # Same one-element-tuple fix as remove_role.
        await c.execute("SELECT * FROM roles WHERE role_id=?", (role_id,))
        return await c.fetchone()
    async def get_all_roles(self):
        """Returns all the roles from the database
        Might return None if there aren't any"""
        c = await self.get_cursor()
        await c.execute("SELECT * FROM roles")
        return await c.fetchall()
    async def _role_check(self, role_id, level):
        """Checks if the role is in the database with correct level"""
        been_check = False
        role = await self.get_role(role_id)
        if role:
            # role[1] is the stored level; any level >= requested passes.
            if role[1] >= level:
                been_check = True
        return been_check
    async def _user_check(self, ctx):
        """See if its the guild's owner or the developer"""
        been_check = False
        if ctx.author.id in env_config.debug_id:
            been_check = True
        elif ctx.author == ctx.guild.owner:
            been_check = True
        return been_check
    async def _main_check(self, ctx, level):
        """Uses both _role_check and _user_check"""
        allow = False  # saying if the check passed or not
        # Verify the connection exists up front (raises NoDatabase) without
        # running the previous throwaway "SELECT * FROM roles" query.
        await self.get_cursor()
        if await self._user_check(ctx):
            allow = True
        else:
            for r in ctx.author.roles:
                if await self._role_check(r.id, level):
                    allow = True
        return allow
    @staticmethod
    async def developer_check(ctx):
        """Highest level check.
        Only checks for the developer or guild owner"""
        self = await Checks.create()
        return await self._user_check(ctx)
    @staticmethod
    async def manager_check(ctx):
        """Level 3 of role / user checking"""
        self = await Checks.create()
        return await self._main_check(ctx, 3)
    @staticmethod
    async def moderator_check(ctx):
        """Level 2 of role / user checking"""
        self = await Checks.create()
        return await self._main_check(ctx, 2)
    @staticmethod
    async def user_check(ctx):
        """Level 1 of role / user checking"""
        self = await Checks.create()
        return await self._main_check(ctx, 1)
|
"""Rollease Acmeda Automate Pulse asyncio protocol implementation."""
import logging
from aiopulse.hub import Hub
from aiopulse.elements import Roller, Room, Scene
from aiopulse.errors import (
CannotConnectException,
NotConnectedException,
NotRunningException,
InvalidResponseException,
)
from aiopulse.const import UpdateType
__all__ = [
"Hub",
"Roller",
"Room",
"Scene",
"CannotConnectException",
"NotConnectedException",
"NotRunningException",
"InvalidResponseException",
"UpdateType",
]
__version__ = "0.4.0"
__author__ = "Alan Murray"
_LOGGER = logging.getLogger(__name__)
|
"""
API for the cards project
"""
from dataclasses import asdict
from dataclasses import dataclass
from dataclasses import field
from typing import List
from typing import Optional

from .db import DB
__all__ = [
"Card",
"CardsDB",
"CardsException",
"MissingSummary",
"InvalidCardId"
]
@dataclass
class Card:
    """A single card (task) record.

    ``id`` is excluded from equality comparison so cards with the same
    content compare equal regardless of their database identity.
    """

    summary: Optional[str] = None  # short description of the task
    owner: Optional[str] = None  # who the card is assigned to
    state: str = "todo"  # workflow state: "todo", "in prog", or "done"
    id: Optional[int] = field(default=None, compare=False)  # database key

    @classmethod
    def from_dict(cls, d):
        """Build a card from a plain dict (inverse of ``to_dict``)."""
        return cls(**d)

    def to_dict(self):
        """Return the card as a plain dict suitable for storage."""
        return asdict(self)
class CardsException(Exception):
    """Base class for all cards-specific errors."""
    pass
class MissingSummary(CardsException):
    """Raised when adding a card whose summary is empty or missing."""
    pass
class InvalidCardId(CardsException):
    """Raised when a card id does not exist in the database."""
    pass
class CardsDB:
    """CRUD wrapper around the underlying key-value DB for Card objects."""

    def __init__(self, db_path):
        self._db_path = db_path
        self._db = DB(db_path, ".cards_db")

    def add_card(self, card: Card) -> int:
        """Add a card, return the id of card.

        Raises MissingSummary if the card has no summary.
        """
        if not card.summary:
            raise MissingSummary
        if card.owner is None:
            card.owner = ""
        # Renamed from ``id`` to avoid shadowing the builtin.
        new_id = self._db.create(card.to_dict())
        # The db assigns the key on create; write it back into the stored
        # record so the item knows its own id.
        self._db.update(new_id, {'id': new_id})
        return new_id

    def get_card(self, card_id: int) -> Card:
        """Return a card with a matching id.

        Raises InvalidCardId if no such card exists.
        """
        db_item = self._db.read(card_id)
        if db_item is None:
            raise InvalidCardId(card_id)
        return Card.from_dict(db_item)

    def list_cards(self, owner=None, state=None) -> List[Card]:
        """Return a list of cards, optionally filtered by owner and/or state."""
        # One comprehension covers all four filter combinations; a None
        # filter matches everything.
        return [
            Card.from_dict(t)
            for t in self._db.read_all()
            if (owner is None or t['owner'] == owner)
            and (state is None or t['state'] == state)
        ]

    def count(self) -> int:
        """Return the number of cards in db."""
        return self._db.count()

    def update_card(self, card_id: int, card_mods: Card) -> None:
        """Update a card with modifications.

        Raises InvalidCardId if no such card exists.
        """
        try:
            self._db.update(card_id, card_mods.to_dict())
        except KeyError as exc:
            raise InvalidCardId(card_id) from exc

    def start(self, card_id: int):
        """Set a card state to 'in prog'."""
        self.update_card(card_id, Card(state="in prog"))

    def finish(self, card_id: int):
        """Set a card state to 'done'."""
        self.update_card(card_id, Card(state="done"))

    def delete_card(self, card_id: int) -> None:
        """Remove a card from db with given card_id.

        Raises InvalidCardId if no such card exists.
        """
        try:
            self._db.delete(card_id)
        except KeyError as exc:
            raise InvalidCardId(card_id) from exc

    def delete_all(self) -> None:
        """Remove all cards from db."""
        self._db.delete_all()

    def close(self):
        """Close the underlying database."""
        self._db.close()

    def path(self):
        """Return the filesystem path the db was opened with."""
        return self._db_path
|
# Generated by Django 2.2.3 on 2019-07-24 06:50
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add ``Tweet.created_date`` and give ``Tweet.tweet`` a default value."""
    dependencies = [
        ('tweet', '0002_tweet'),
    ]
    operations = [
        # New timestamp column; existing rows default to "now".
        migrations.AddField(
            model_name='tweet',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        # Add a default to the tweet body so blank saves still validate.
        migrations.AlterField(
            model_name='tweet',
            name='tweet',
            field=models.TextField(default='tweet me !!', max_length=200),
        ),
    ]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from parlai.core.params import ParlaiParser
from parlai.mturk.core.mturk_manager import MTurkManager
from worlds import TalkTheWalkWorld, InstructionWorld
from task_config import task_config
"""
This task consists of two local human agents and two MTurk agents,
chatting with each other in a free-form format.
You can end the conversation by sending a message ending with
`[DONE]` from human_1.
"""
def main():
    """Set up and run the Talk The Walk MTurk task.

    Parses task arguments, creates the MTurk HITs, onboards each worker
    with an InstructionWorld, pairs a 'Tourist' and a 'Guide' into a
    TalkTheWalkWorld, and tears the MTurk server down when finished.
    """
    argparser = ParlaiParser(False, False)
    argparser.add_parlai_data_path()
    argparser.add_mturk_args()
    argparser.add_argument(
        '--replay', action='store_true', help='Set to replay old interactions'
    )
    argparser.add_argument(
        '--replay-log-file',
        type=str,
        default='',
        help='location of log to use if replay',
    )
    argparser.add_argument(
        '--real-time', action='store_true', help='Set to replay in real time '
    )
    argparser.add_argument(
        '--replay-bot',
        action='store_true',
        help='Set to replay bot actions instead of human',
    )
    argparser.add_argument(
        '--model-file', type=str, default='', help='language generator model file'
    )
    argparser.add_argument(
        '--world-idx', type=int, default=-1, help='specify world to load'
    )
    argparser.add_argument(
        '--start-idx',
        type=int,
        default=0,
        help='where to start replay, if replaying actions',
    )
    argparser.add_argument(
        '--bot-type',
        type=str,
        default='discrete',
        choices=['discrete', 'natural'],
        help='which bot log to use',
    )
    opt = argparser.parse_args()
    opt.update(task_config)
    mturk_agent_1_id = 'Tourist'
    mturk_agent_2_id = 'Guide'
    mturk_agent_ids = [mturk_agent_1_id, mturk_agent_2_id]
    # Task name comes from this file's directory; data is stored under
    # ./data/<task> relative to the current working directory.
    task_directory_path = os.path.dirname(os.path.abspath(__file__))
    opt['task'] = os.path.basename(task_directory_path)
    opt['data_path'] = os.getcwd() + '/data/' + opt['task']
    mturk_manager = MTurkManager(opt=opt, mturk_agent_ids=mturk_agent_ids)
    mturk_manager.setup_server(task_directory_path=task_directory_path)
    try:
        mturk_manager.start_new_run()
        mturk_manager.create_hits()
        # Each worker reads the instructions before entering the task.
        def run_onboard(worker):
            world = InstructionWorld(opt=opt, mturk_agent=worker)
            while not world.episode_done():
                world.parley()
            world.shutdown()
        mturk_manager.set_onboard_function(onboard_function=run_onboard)
        mturk_manager.ready_to_accept_workers()
        # All workers are eligible; onboarding is the only gate.
        def check_worker_eligibility(worker):
            return True
        # NOTE(review): worker_count is assigned but never read in this
        # file; looks like leftover state -- confirm nothing imports it.
        global worker_count
        worker_count = 0
        # First worker becomes the Tourist, second the Guide.
        def assign_worker_roles(workers):
            workers[0].id = mturk_agent_ids[0]
            workers[1].id = mturk_agent_ids[1]
            return [workers[0], workers[1]]
        def run_conversation(mturk_manager, opt, workers):
            # Create mturk agents
            mturk_agent_1 = workers[0]
            mturk_agent_2 = workers[1]
            conv_idx = mturk_manager.conversation_index
            world = TalkTheWalkWorld(
                opt=opt, agents=[mturk_agent_1, mturk_agent_2], world_tag=conv_idx
            )
            while not world.episode_done():
                world.parley()
            world.shutdown()
            world.review_work()
            # Replays are read-only; only save logs for live runs.
            if not opt.get('replay'):
                world.save()
        mturk_manager.start_task(
            eligibility_function=check_worker_eligibility,
            assign_role_function=assign_worker_roles,
            task_function=run_conversation,
        )
    except Exception:
        raise
    finally:
        # Always clean up outstanding HITs, even on error.
        mturk_manager.expire_all_unassigned_hits()
        mturk_manager.shutdown()
# Run the task only when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
from pathlib import Path
from .timestamp import timestamp
RESULTS_DIR_PREFIX = 'results_'
def make_results_dir(results_root, prefix=RESULTS_DIR_PREFIX):
    """make a directory to contain results from an experiment
    within a specified "root" results directory

    Parameters
    ----------
    results_root : str, pathlib.Path
        root directory where results directories should be made
    prefix : str
        prefix for the new directory's name; a timestamp is appended
        (default: RESULTS_DIR_PREFIX)

    Returns
    -------
    results_dir_path : pathlib.Path
    """
    root = Path(results_root)
    if not root.is_dir():
        raise NotADirectoryError(
            f'path specified for results_root not found: {root}'
        )
    # Timestamp suffix keeps successive experiment runs from colliding.
    new_dir = root / f'{prefix}{timestamp()}'
    new_dir.mkdir()
    return new_dir
|
import asyncio
import logging
from xml.etree import ElementTree
from typing import List, Union
from yarl import URL
from feedsearch_crawler.feed_spider import FeedsearchSpider, FeedInfo
logging.getLogger(__name__).addHandler(logging.NullHandler())
name = "Feedsearch Crawler"
def search(
    url: Union[URL, str, List[Union[URL, str]]],
    try_urls: Union[List[str], bool] = False,
    *args,
    **kwargs
) -> List[FeedInfo]:
    """
    Search for feeds at a URL.

    Blocking wrapper around ``search_async``; it starts its own event
    loop via ``asyncio.run``, so it must not be called from inside an
    already-running loop.

    :param url: URL or list of URLs to search
    :param try_urls: Tries different paths that may contain feeds.
    :param args: extra positional arguments forwarded to the crawler
    :param kwargs: extra keyword arguments forwarded to the crawler
    :return: List of FeedInfo objects
    """
    results = asyncio.run(search_async(url, try_urls=try_urls, *args, **kwargs))
    return results
async def search_async(
    url: Union[URL, str, List[Union[URL, str]]],
    try_urls: Union[List[str], bool] = False,
    *args,
    **kwargs
) -> List[FeedInfo]:
    """
    Search asynchronously for feeds at a URL.

    Extra ``args``/``kwargs`` are forwarded to the FeedsearchSpider
    constructor. Results are de-duplicated and sorted by score before
    being returned.

    :param url: URL or list of URLs to search
    :param try_urls: Tries different paths that may contain feeds.
    :return: List of FeedInfo objects
    """
    crawler = FeedsearchSpider(try_urls=try_urls, *args, **kwargs)
    await crawler.crawl(url)
    return sort_urls(list(crawler.items))
def sort_urls(feeds: List[FeedInfo]) -> List[FeedInfo]:
    """
    Sort list of feeds based on Url score

    :param feeds: List of FeedInfo objects
    :return: De-duplicated list of FeedInfo objects sorted by score,
        highest score first
    """
    # Drop anything that is not a FeedInfo, de-duplicating via a set.
    unique_feeds = {f for f in feeds if isinstance(f, FeedInfo)}
    # sorted() accepts any iterable, so the intermediate list() the
    # original built was redundant.
    return sorted(unique_feeds, key=lambda x: x.score, reverse=True)
def output_opml(feeds: List[FeedInfo]) -> bytes:
    """
    Return feeds as a subscriptionlist OPML file.

    http://dev.opml.org/spec2.html#subscriptionLists

    :param feeds: List of FeedInfo objects
    :return: OPML file as XML bytestring
    """
    root = ElementTree.Element("opml", version="2.0")
    head = ElementTree.SubElement(root, "head")
    title = ElementTree.SubElement(head, "title")
    title.text = "Feeds"
    body = ElementTree.SubElement(root, "body")
    for feed in feeds:
        # A feed without a URL cannot be subscribed to; skip it.
        if not feed.url:
            continue
        outline = ElementTree.SubElement(
            body, "outline", type="rss", xmlUrl=str(feed.url)
        )
        # Optional attributes are only emitted when present on the feed.
        if feed.title:
            outline.set("text", feed.title)
            outline.set("title", feed.title)
        if feed.site_url:
            outline.set("htmlUrl", str(feed.site_url))
        if feed.description:
            outline.set("description", feed.description)
        if feed.version:
            outline.set("version", feed.version)
    return ElementTree.tostring(root, encoding="utf8", method="xml")
|
import unittest
import tableauserverclient.server.request_factory as factory
class BugFix257(unittest.TestCase):
    """Regression test for issue #257: serializing an empty request."""
    def test_empty_request_works(self):
        # An empty request must serialize to a self-closing <tsRequest /> tag.
        result = factory.EmptyRequest().empty_req()
        self.assertEqual(b'<tsRequest />', result)
|
# from abc import ABCMeta
# import requests
# import json
# from requests.sessions import session
# def login(email,password):
# passw = requests.Session()
# payload = {
# 'email':email,
# 'password':password
# }
# response = passw.post('http://127.0.0.1:8000/auth/login/',json=payload)
# passw.headers.update({'authorization':json.loads(response.content)['tokens']})
# print(response.content)
# return passw
# session=login('chege.developer@gmail.com','string')
|
# Read the base number and how many extended terms to add.
n = int(input())
k = int(input())
# Term i (for i = 1..k) is n with i zeros appended; start from n itself.
total = n + sum(int(str(n) + '0' * (i + 1)) for i in range(k))
print(total)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the shell command."""
import os
import tempfile
import cr
class ShellCommand(cr.Command):
    """The implementation of the shell command.

    The shell command is the escape hatch that lets user run any program in the
    same environment that cr would use if it were running it.
    """
    def __init__(self):
        super(ShellCommand, self).__init__()
        self.help = 'Launch a shell'
        self.description = ("""
        If no arguments are present, this launches an interactive system
        shell (ie bash) with the environment modified to that used for the
        build systems.
        If any arguments are present, they are used as a command line to run
        in that shell.
        This allows you to run commands that are not yet available natively
        in cr.
        """)
    def AddArguments(self, subparsers):
        # Consume all remaining args so they can be forwarded to the shell.
        parser = super(ShellCommand, self).AddArguments(subparsers)
        self.ConsumeArgs(parser, 'the shell')
        return parser
    def Run(self, context):
        # With arguments: run them as a one-off command in cr's environment.
        if context.remains:
            cr.Host.Shell(context, *context.remains)
            return
        # If we get here, we are trying to launch an interactive shell
        shell = os.environ.get('SHELL', None)
        if shell is None:
            print 'Don\'t know how to run a shell on this system'
        elif shell.endswith('bash'):
            # For bash, prefix the prompt with [CR] via a temporary rcfile
            # that first sources the user's normal ~/.bashrc.
            ps1 = '[CR] ' + os.environ.get('PS1', '')
            with tempfile.NamedTemporaryFile() as rcfile:
                rcfile.write('source ~/.bashrc\nPS1="'+ps1+'"')
                rcfile.flush()
                cr.Host.Execute(context, shell, '--rcfile', rcfile.name)
        else:
            cr.Host.Execute(context, shell)
|
from setuptools import setup, find_packages
# Minimal packaging configuration for the crysterm library.
setup(
    name="crysterm",
    version="0.1.0",
    author="mofuru",
    author_email="me@mofuru.is-a.dev",
    url="https://github.com/mofuru/crysterm",
    license="MIT",
    # Ship every package except the test and docs trees.
    packages=find_packages(exclude=("tests", "docs"))
)
|
#
# Copyright 2011 Twitter, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Calculates PageRank for a given graph.
We assume that there are no dangling pages with no outgoing links.
"""
import os
from pycascading.helpers import *
def test(graph_file, d, iterations):
"""This is the Python implementation of PageRank."""
in_links = {}
out_degree = {}
pagerank = {}
file = open(graph_file)
for line in file:
(source, dest) = line.rstrip().split()
try:
in_links[dest].add(source)
except KeyError:
in_links[dest] = set(source)
try:
out_degree[source] += 1
except KeyError:
out_degree[source] = 1
pagerank[source] = 1.0
pagerank[dest] = 1.0
file.close()
old_pr = pagerank
new_pr = {}
for iteration in xrange(0, iterations):
for node in old_pr:
new_pr[node] = (1 - d)
try:
new_pr[node] += \
d * sum([old_pr[n] / out_degree[n] for n in in_links[node]])
except KeyError:
pass
tmp = old_pr
old_pr = new_pr
new_pr = tmp
return old_pr
def main():
    """The PyCascading job.

    Builds the out-degree table and initial pageranks in a setup flow,
    then runs ``iterations`` map/reduce flows, each distributing rank
    along out-links, and finally prints both the distributed result and
    the local reference implementation for comparison.
    """
    # The damping factor
    d = 0.85
    # The number of iterations
    iterations = 5
    # The directed, unweighted graph in a space-separated file, in
    # <source_node> <destination_node> format
    graph_file = 'pycascading_data/graph.txt'
    graph_source = Hfs(TextDelimited(Fields(['from', 'to']), ' ',
                                     [String, String]), graph_file)
    out_links_file = 'pycascading_data/out/pagerank/out_links'
    pr_values_1 = 'pycascading_data/out/pagerank/iter1'
    pr_values_2 = 'pycascading_data/out/pagerank/iter2'
    # Some setup here: we'll need the outgoing degree of nodes, and we will
    # initialize the pageranks of nodes to 1.0
    flow = Flow()
    graph = flow.source(graph_source)
    # Count the number of outgoing links for every node that is a source,
    # and store it in a field called 'out_degree'
    graph | group_by('from') | native.count('out_degree') | \
        flow.binary_sink(out_links_file)
    # Initialize the pageranks of all nodes to 1.0
    # This file has fields 'node' and 'pagerank', and is stored to pr_values_1
    @udf
    def constant(tuple, c):
        """Just a field with a constant value c."""
        yield [c]
    @udf
    def both_nodes(tuple):
        """For each link returns both endpoints."""
        yield [tuple.get(0)]
        yield [tuple.get(1)]
    graph | map_replace(both_nodes, 'node') | \
        native.unique(Fields.ALL) | map_add(constant(1.0), 'pagerank') | \
        flow.binary_sink(pr_values_1)
    flow.run(num_reducers=1)
    # Ping-pong between the two value folders across iterations.
    pr_input = pr_values_1
    pr_output = pr_values_2
    for iteration in xrange(0, iterations):
        flow = Flow()
        graph = flow.source(graph_source)
        pageranks = flow.meta_source(pr_input)
        out_links = flow.meta_source(out_links_file)
        # Decorate the graph's source nodes with their pageranks and the
        # number of their outgoing links
        # We could have joined graph & out_links outside of the loop, but
        # in order to demonstrate joins with multiple streams, we do it here
        p = (graph & pageranks & (out_links | rename('from', 'from_out'))) | \
            inner_join(['from', 'node', 'from_out']) | \
            rename(['pagerank', 'out_degree'], ['from_pagerank', 'from_out_degree']) | \
            retain('from', 'from_pagerank', 'from_out_degree', 'to')
        # Distribute the sources' pageranks to their out-neighbors equally
        @udf
        def incremental_pagerank(tuple, d):
            yield [d * tuple.get('from_pagerank') / tuple.get('from_out_degree')]
        p = p | map_replace(['from', 'from_pagerank', 'from_out_degree'],
                            incremental_pagerank(d), 'incr_pagerank') | \
            rename('to', 'node') | retain('node', 'incr_pagerank')
        # Add the constant jump probability to all the pageranks that come
        # from the in-links
        p = (p & (pageranks | map_replace('pagerank', constant(1.0 - d), 'incr_pagerank'))) | group_by()
        p = p | group_by('node', 'incr_pagerank', native.sum('pagerank'))
        if iteration == iterations - 1:
            # Only store the final result in a TSV file
            p | flow.tsv_sink(pr_output)
        else:
            # Store intermediate results in a binary format for faster IO
            p | flow.binary_sink(pr_output)
        # Swap the input and output folders for the next iteration
        tmp = pr_input
        pr_input = pr_output
        pr_output = tmp
        flow.run(num_reducers=1)
    print 'Results from PyCascading:', pr_input
    os.system('cat %s/.pycascading_header %s/part*' % (pr_input, pr_input))
    print 'The test values:'
    test_pr = test(graph_file, d, iterations)
    print 'node\tpagerank'
    for n in sorted(test_pr.iterkeys()):
        print '%s\t%g' % (n, test_pr[n])
|
# coding:utf-8
'''
Created on 2017/12/19
@author: sunyihuan
'''
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
import scipy.io as scio
def data_check(data):
    """Print the per-class percentage of a one-hot encoded label matrix."""
    predicted = list(np.argmax(data, 1))
    total = len(predicted)
    n_classes = data.shape[1]
    for cls in range(n_classes):
        print(str(cls) + '的比例', round(100.0 * predicted.count(cls) / total, 2), '%')
    print('<------------------分割线---------------------->')
# show data
# X_data = np.reshape(X_data, (-1, 28, 28))
def show_data(X, Y):
    """Display images X[1]..X[9] in a 3x3 grid, titled with labels from Y.

    NOTE(review): assumes X has already been reshaped to (n, 28, 28)
    (see the commented-out reshape above) -- confirm at call sites.
    """
    for i in range(1, 10):
        # subplot(331)..subplot(339): position i in a 3x3 grid.
        plt.subplot(330 + i)
        plt.imshow(X[i], cmap=plt.get_cmap('gray'))
        plt.title(Y[i])
    plt.show()
def one_hot(y, classes):
    """Return the one-hot encoding of the integer label array ``y``."""
    identity = np.eye(classes)
    return identity[y]
def random_mini_batches(X, Y, mini_batch_size=64):
    """Shuffle (X, Y) in unison and split them into mini-batches.

    The last batch is smaller when the sample count is not divisible by
    ``mini_batch_size``.
    """
    m = X.shape[0]
    # A single random permutation keeps features and labels aligned.
    order = list(np.random.permutation(m))
    X_shuffled = X[order, :]
    Y_shuffled = Y[order]
    batches = []
    for start in range(0, m, mini_batch_size):
        stop = start + mini_batch_size
        batches.append((X_shuffled[start:stop, :], Y_shuffled[start:stop]))
    return batches
def get_center_loss(features, labels, alpha, num_classes):
    """Build the center loss and the op that updates the class centers.

    features: Tensor of sample features, usually an fc layer's output;
        shape should be [batch_size, feature_length].
    labels: Tensor of sample labels, NOT one-hot encoded; shape should
        be [batch_size].
    alpha: number between 0 and 1 controlling how fast the class centers
        move; see the center-loss paper for details.
    num_classes: total number of classes, i.e. the number of neurons in
        the network's classification output.
    Return:
        loss: Tensor; can be added to the softmax loss as the total loss.
        centers_update_op: op that updates the class centers; it must be
            run together with the train op during training, otherwise
            the centers are never updated.
    """
    # Feature dimensionality, e.g. 256.
    len_features = features.get_shape()[1]
    # One center per class in a [num_classes, len_features] variable.
    # trainable=False because the centers are moved by the explicit
    # update op below, not by gradient descent.
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    # Flatten labels to 1-D (a no-op if they already are).
    labels = tf.reshape(labels, [-1])
    # Look up each sample's class center for this mini-batch.
    centers_batch = tf.gather(centers, labels)
    # Center loss: squared distance between features and their centers.
    loss = tf.div(tf.nn.l2_loss(features - centers_batch), int(len_features))
    # Difference between the centers and the features assigned to them.
    diff = centers_batch - features
    # Count how often each class appears in the mini-batch; see Eq. (4)
    # of the center-loss paper.
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    # Average the update over each class's samples, then scale by alpha.
    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff
    centers_update_op = tf.scatter_sub(centers, labels, diff)
    return loss, centers_update_op
def model(trX, trY, teX, teY, lr=0.01, epoches=200, minibatch_size=64, drop_prob=.3):
    """Build, train and checkpoint a small CNN for 28x28 digit images.

    trX/teX: flattened images, shape [n, 784]; trY/teY: integer labels.
    lr: initial learning rate (exponentially decayed, floored at 1e-4).
    epoches: number of training epochs.
    minibatch_size: mini-batch size for SGD.
    drop_prob: dropout rate used during training.
    The trained model is saved to save/model.ckpt.
    """
    X = tf.placeholder(tf.float32, shape=[None, 28 * 28])
    XX = tf.reshape(X, shape=[-1, 28, 28, 1])
    Y = tf.placeholder(tf.int32, shape=[None, ])
    YY = tf.one_hot(Y, 10, on_value=1, off_value=None, axis=1)
    print(YY)
    dp = tf.placeholder(tf.float32)
    global_step = tf.Variable(0, trainable=False)
    # L2 regularization is applied to the first conv layer only.
    reg1 = tf.contrib.layers.l2_regularizer(scale=0.1)
    conv1 = tf.layers.conv2d(XX, 32, 5, padding='same', activation=tf.nn.relu, kernel_regularizer=reg1)
    conv1 = tf.layers.conv2d(conv1, 32, 3, padding='same', activation=tf.nn.relu)
    conv1 = tf.layers.max_pooling2d(conv1, 2, 2, padding='same')
    conv2 = tf.layers.conv2d(conv1, 64, 3, padding='same', activation=tf.nn.relu)
    conv2 = tf.layers.conv2d(conv2, 64, 3, padding='same', activation=tf.nn.relu)
    conv2 = tf.layers.max_pooling2d(conv2, 2, 2, padding='same')
    # conv3 = tf.layers.conv2d(conv2, 128, 3, padding='same', activation=tf.nn.relu)
    # conv3 = tf.layers.average_pooling2d(conv3, 2, 2, padding='same')
    # convZ = tf.layers.flatten(pool3)
    convZ = tf.contrib.layers.flatten(conv2)
    fc1 = tf.layers.dense(convZ, 256, activation=tf.nn.relu)
    fc1 = tf.layers.batch_normalization(fc1)
    fc1 = tf.layers.dropout(fc1, rate=dp, training=True)
    #
    fc2 = tf.layers.dense(fc1, 128, activation=tf.nn.relu)
    fc2 = tf.layers.batch_normalization(fc2)
    fc2 = tf.layers.dropout(fc2, rate=dp, training=True)
    # fc3 = tf.layers.dense(fc2, 2, activation=None, name='fc3')
    # print(fc3)
    # fc3_out = tf.nn.relu(fc3)
    # fc3 = tf.layers.batch_normalization(fc3)
    # fc3 = tf.layers.dropout(fc3, rate=dp, training=True)
    ZL = tf.layers.dense(fc2, 10, activation=None)
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=ZL, labels=Y))
    learning_rate = tf.train.exponential_decay(lr,
                                               global_step=global_step,
                                               decay_steps=10, decay_rate=0.9)
    learning_rate = tf.maximum(learning_rate, .0001)
    with tf.variable_scope('loss_scope'):
        # Center loss on the fc2 features, mixed into the total loss.
        centerloss, centers_update_op = get_center_loss(fc2, Y, 0.5, 10)
        # self.loss = tf.losses.softmax_cross_entropy(onehot_labels=util.makeonehot(self.y, self.CLASSNUM), logits=self.score)
        # The center-loss weight (lambda) typically ranges from 0.0001 to 0.1.
        loss = tf.losses.sparse_softmax_cross_entropy(labels=Y, logits=ZL) + 0.05 * centerloss
    # The class centers must be refreshed before each optimizer step.
    with tf.control_dependencies([centers_update_op]):
        train_op = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss)
    # train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
    predict_op = tf.argmax(ZL, 1, name='predict')
    print(predict_op)
    correct_prediction = tf.equal(predict_op, tf.argmax(YY, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    add_global = global_step.assign_add(1)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(epoches):
            minibatches = random_mini_batches(trX, trY, minibatch_size)
            for minibatch in minibatches:
                minibatch_X, minibatch_Y = minibatch
                __, _loss, _ = sess.run([add_global, loss, train_op],
                                        feed_dict={X: minibatch_X, Y: minibatch_Y, dp: drop_prob})
            if epoch % 5 == 0:
                # Accuracies are estimated on the first 2000 samples only.
                train_accuracy = accuracy.eval({X: trX[:2000], Y: trY[:2000], dp: 0.0})
                test_accuracy = accuracy.eval({X: teX[:2000], Y: teY[:2000], dp: 0.0})
                print("Cost after epoch %i: %f tr-acc: %f te-acc: %f" % (epoch, _loss, train_accuracy, test_accuracy))
        train_accuracy = accuracy.eval({X: trX[:2000], Y: trY[:2000], dp: 0.0})
        test_accuracy = accuracy.eval({X: teX[:2000], Y: teY[:2000], dp: 0.0})
        # To visualize features: change the last hidden layer to 2 units, then plot them.
        # _fc3 = fc3.eval({X: teX[:2000], Y: teY[:2000], dp: 0.0})
        # plt.scatter(_fc3[:, 0], _fc3[:, 1], c=teY[:2000])
        # plt.show()
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)
        saver.save(sess, "save/model.ckpt")
def predict():
    """Restore the checkpointed model and write Kaggle predictions.

    Reads sample_submission.csv from the module-level ``root_dir``,
    fills its 'Label' column with predictions on the module-level
    ``preX`` array, and writes result.csv next to it.
    NOTE(review): processes exactly 14 x 2000 = 28000 rows -- confirm
    this matches the size of the test set.
    """
    tf.reset_default_graph()
    # graph
    saver = tf.train.import_meta_graph("save/model.ckpt.meta")
    # value
    # a = tf.train.NewCheckpointReader('save/model.ckpt.index')
    # saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "save/model.ckpt")
        graph = tf.get_default_graph()
        # Tensors are located by the auto-generated placeholder names
        # from model(); they break if the graph definition changes.
        predict_op = graph.get_tensor_by_name("predict:0")
        X = graph.get_tensor_by_name("Placeholder:0")
        dp = graph.get_tensor_by_name("Placeholder_2:0")
        result = pd.read_csv(root_dir + 'sample_submission.csv')
        for i in range(14):
            prediction = predict_op.eval({X: preX[2000 * i:2000 * i + 2000], dp: 0.0})
            result['Label'][2000 * i:2000 * i + 2000] = prediction
        result.to_csv(root_dir + 'result.csv')
def myfind(x, y):
    """Return every index in sequence ``y`` whose element equals ``x``."""
    return [idx for idx, val in enumerate(y) if val == x]
def draw_feature():
    """Restore the saved model, extract 2-D features for all training
    images, save them to a .mat file, and scatter-plot them per class.

    NOTE(review): reads the tensor 'fc3/BiasAdd:0', which only exists if
    the commented-out 2-unit fc3 layer in model() was enabled when the
    checkpoint was written -- confirm before running.
    """
    def _find(x, XList):
        # Indices of every occurrence of x in XList.
        return [_i for _i in range(len(XList)) if XList[_i] == x]
    tf.reset_default_graph()
    # graph
    saver = tf.train.import_meta_graph("save/model.ckpt.meta")
    # value
    # a = tf.train.NewCheckpointReader('save/model.ckpt.index')
    # saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "save/model.ckpt")
        graph = tf.get_default_graph()
        fc3_op = graph.get_tensor_by_name("fc3/BiasAdd:0")
        X = graph.get_tensor_by_name("Placeholder:0")
        dp = graph.get_tensor_by_name("Placeholder_2:0")
        feature = np.zeros([42000, 2])
        # Evaluate in 2000-sample chunks (assumes 42000 training rows).
        for i in range(21):
            fc3 = fc3_op.eval({X: X_data[2000 * i:2000 * i + 2000], dp: 0.0})
            feature[2000 * i:2000 * i + 2000] = fc3
        scio.savemat(root_dir + 'fc3', {"X": feature, "Y": Y_data})
        # One fixed RGB color per digit class.
        for i in range(10):
            idx = _find(i, Y_data)
            color = [(1, 0.5, 0.8), (1, 0, 0), (0.5, 0, 0.25),
                     (0, 0, 1), (0, 0, 0), (1, 0, 1), (1, 1, 0),
                     (0, .5, 0), (0.5, .5, .5), (0, .5, 0.75)]
            plt.scatter(feature[idx, 0], feature[idx, 1], c=color[i], label=str(i), s=10)
        plt.legend(loc='upper right')
        plt.show()
# Dataset location (Windows path); swap the commented line for another machine.
root_dir = 'F:/dataSets/kaggle/MNIST/'
# root_dir = 'C:/Users/syh03/Desktop/Kaggle/MNIST/data/'
train_dir = root_dir + 'train.csv'
test_dir = root_dir + 'test.csv'
# read_data
data = pd.read_csv(train_dir)
# Pixels are scaled to [0, 1]; the first CSV column is the label.
X_data = np.array(data.iloc[:, 1:].values, dtype=np.float32) / 255.
Y_data = np.array(data.iloc[:, 0].values, dtype=np.int32)
print(X_data.shape)
pre_data = pd.read_csv(test_dir)
preX = np.array(pre_data.values, dtype=np.float32) / 255.
# Y_data = one_hot(Y_data, 10)
# 80/20 shuffled train/test split of the labeled data.
trX, teX, trY, teY = train_test_split(X_data, Y_data, test_size=.2, shuffle=True)
# data_check(trY)
# data_check(teY)
# model(trX, trY, teX, teY, epoches=1000)
# Only inference runs by default; uncomment model(...) above to retrain.
predict()
# draw_feature()
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import tri
from compostela.tree_op import Tree_op
class Auxiliar_func:
    """Static plotting/imaging helpers for latent-space tree visualization."""
    @staticmethod
    def plot_latent_tree(z_test, y_test, cand, x_s, disc,
                         y_s, d_d_p, tree, path):
        """
        Plot in matplotlib the tree in the latent space
        :param z_test: original points in the latent space
        :param y_test: labels of original points
        :param cand: candidates
        :param x_s: points x new distribution
        :param disc: discriminator values original
        :param y_s: points y new distribution
        :param d_d_p: distributions of the candidates points
        :param tree: tree structure
        :param path: path where to save
        :return: None; the 2x2 figure is written to ``path`` and closed
        """
        plt.figure("per", figsize=(14, 10))
        # Panel 1: candidate field as filled contours, tree overlaid.
        plt.subplot(2, 2, 1)
        triang = tri.Triangulation(x_s, y_s)
        plt.tricontour(x_s, y_s, cand, colors='k', levels=15)
        plt.tricontourf(triang, cand, levels=15)
        Tree_op.plot_tree_plt(tree)
        plt.colorbar()
        plt.legend()
        # Panel 2: same field plus the candidate point distributions.
        plt.subplot(2, 2, 2)
        triang = tri.Triangulation(x_s, y_s)
        plt.tricontour(x_s, y_s, cand, colors='k', levels=15)
        plt.tricontourf(triang, cand, levels=15)
        for i in range(len(d_d_p)):
            plt.scatter(d_d_p[i][:, 0], d_d_p[i][:, 1])
        Tree_op.plot_tree_plt(tree)
        plt.colorbar()
        # Panel 3: the tree alone.
        plt.subplot(2, 2, 3)
        Tree_op.plot_tree_plt(tree)
        plt.legend()
        # Panel 4: discriminator values over the original latent points.
        plt.subplot(2, 2, 4)
        triang = tri.Triangulation(z_test[:, 0], z_test[:, 1])
        plt.tricontourf(triang, disc.reshape(-1), levels=15)
        plt.scatter(z_test[:, 0],
                    z_test[:, 1], c=y_test)
        Tree_op.plot_tree_plt(tree)
        plt.savefig(path)
        plt.close()
    @staticmethod
    def create_images(model, tree, size):
        """
        Create the candidates images to .png
        :param model: Adversal Autoencoder instance
        :param tree: tree structure
        :param size: size image [height, width]
        :return: array of all images
        """
        imgs = []
        for i in tree.tree:
            imgs.append(tree[i]['point'])
        imgs = model.decode(np.array(imgs))
        # NOTE(review): both output dims use size[0]; if non-square images
        # are possible, size[1] is likely intended for the width -- confirm.
        return np.array(imgs).reshape((len(imgs), size[0], size[0]))
|
# Semantic version components.
MAJOR = 0
MINOR = 4
PATCH = 1
# Pre-release tag (e.g. 'rc1'); empty string for a final release.
PRE_RELEASE = ''
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Numeric part only, e.g. '0.4.1'.
__short_version__ = '.'.join(map(str, VERSION[:3]))
# Full version string with any pre-release suffix appended.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
# Package metadata consumed by setup.py / packaging tools.
__package_name__ = 'warprnnt_numba'
__contact_names__ = 'Somshubra Majumdar'
__contact_emails__ = 'titu1994@gmail.com'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/titu1994/warprnnt_numba'
__download_url__ = 'https://github.com/titu1994/warprnnt_numba/releases'
__description__ = 'Warp RNNT loss ported to Numba for faster experimentation'
__license__ = 'MIT'
|
from defines import *
from model import *
from data import *
if __name__ == '__main__':
    # step0: enable GPU version
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # PARAM_ACTION == 1 trains; == 2 evaluates (training falls through
    # to evaluation by reassigning PARAM_ACTION below).
    if PARAM_ACTION == 1:
        # step1: create training set
        myGene = trainGenerator(PARAM_BATCHES,
                                PARAM_PATH_TRAIN,
                                PARAM_IMG_FOLDER,
                                PARAM_MSK_FOLDER,
                                PARAM_DATA_ARGS,
                                save_to_dir = PARAM_AUG_FOLDER)
        # step2: set up unet model
        model = unet()
        # step3: set up model checkpoint save path
        model_checkpoint = ModelCheckpoint( PARAM_SAVED_MODEL,
                                            monitor = PARAM_METRICS,
                                            verbose = 1,
                                            save_best_only = PARAM_SAVE_BEST_ONLY)
        # step4: start training the model
        model.fit_generator(myGene,
                            steps_per_epoch = PARAM_EPOCH_STEPS,
                            epochs = PARAM_N_EPOCHS,
                            callbacks = [model_checkpoint])
        # Fall through to evaluation once training completes.
        PARAM_ACTION = 2
    if PARAM_ACTION == 2:
        # step1: load trained model and weights
        model = unet()
        model.load_weights(PARAM_SAVED_MODEL)
        # step2: create testing set
        testGeneX, testGeneY = testGenerator(PARAM_PATH_TEST,
                                             PARAM_IMG_FOLDER,
                                             PARAM_MSK_FOLDER)
        # step3: evaluate model performance
        results = model.predict(testGeneX, PARAM_N_TESTS, verbose=1)
        # step4: save results
        np.save(PARAM_PATH_TEST_NPY, results)
        saveResult(PARAM_PATH_TEST_RESULTS,results)
        # step5: visualization and dice/IoU?
        mergeIm(PARAM_PATH_TEST, PARAM_IMG_FOLDER, PARAM_MSK_FOLDER,
                PARAM_PATH_TEST_RESULTS, PARAM_PATH_TEST_ALL_IMG)
|
import time
import logging
import argparse
from pathlib import Path
import cProfile, pstats
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import joblib
import mlflow
import graphviz
from skopt import BayesSearchCV
from skopt.plots import plot_objective
from sklearn.preprocessing import StandardScaler
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, GroupKFold, LeavePGroupsOut, GroupShuffleSplit
from sklearn.metrics import confusion_matrix, make_scorer, plot_confusion_matrix, plot_roc_curve
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier, HistGradientBoostingClassifier
from xgboost import XGBClassifier
from metrics import tss, hss2, roc_auc_score, get_scores_from_cm, optimal_tss, draw_ssp
from utils import get_output
from arnet.fusion import get_datasets
def standardize_data(X_train, X_test):
    """Z-score both splits using statistics of the training split only."""
    mu, sigma = X_train.mean(0), X_train.std(0)
    return (X_train - mu) / sigma, (X_test - mu) / sigma
def get_dataset_from_df(df):
    """Pull (features, labels, groups) numpy arrays out of a sample dataframe.

    Groups concatenate the prefix with the active-region number so CV splits
    can keep every region entirely on one side.
    """
    feature_matrix = df[cfg['features']].to_numpy()
    label_vector = df['label'].to_numpy()
    region_groups = (df['prefix'] + df['arpnum'].apply(str)).to_numpy()
    return feature_matrix, label_vector, region_groups
def get_dataset_numpy(database, dataset, auxdata, balanced=False, seed=None):
    """Load train/test splits as standardized numpy arrays plus group ids."""
    if cfg['smoke']:
        # Smoke runs shrink the data to a tiny fixed number of samples per class.
        balanced = {0: 50, 1: 50}
    train_df, test_df = get_datasets(database, dataset, auxdata,
                                     balanced=balanced, validation=False,
                                     shuffle=True, seed=seed)
    X_tr, y_tr, g_tr = get_dataset_from_df(train_df)
    X_te, y_te, g_te = get_dataset_from_df(test_df)
    X_tr, X_te = standardize_data(X_tr, X_te)
    return X_tr, X_te, y_tr, y_te, g_tr, g_te
def evaluate(X_test, y_test, model, save_dir=Path('outputs')):
    """Evaluate a fitted search object on the test split.

    Saves and logs (to mlflow) a confusion matrix, ROC curve and SSP plot,
    writes the scalar scores to a markdown table, and runs model-specific
    inspection (decision-tree graph, random-forest feature importances).

    Args:
        X_test, y_test: test features and labels.
        model: refit search object (exposes predict/best_estimator_/best_params_).
        save_dir: Path artifact directory. Default was previously the string
            'outputs', which broke the `save_dir / ...` joins below.

    Returns:
        dict of scalar test scores.
    """
    # Hard predictions and confusion matrix.
    y_pred = model.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    plot_confusion_matrix(model, X_test, y_test)
    save_path = save_dir / 'confusion_matrix.png'
    plt.savefig(save_path)
    mlflow.log_artifact(save_path)

    # Threshold-free metrics.
    scorer = make_scorer(roc_auc_score, needs_threshold=True)
    auc = scorer(model, X_test, y_test)
    plot_roc_curve(model, X_test, y_test)  # TODO: mark decision threshold
    plt.savefig(save_dir / 'roc.png')
    mlflow.log_figure(plt.gcf(), 'roc.png')

    y_score = get_output(model, X_test)
    tss_opt = optimal_tss(y_test, y_score)
    # BUGFIX: draw the SSP figure *before* saving it. Previously plt.savefig
    # ran before draw_ssp, so 'ssp.png' on disk was a copy of the ROC figure.
    ssp_fig = draw_ssp(y_test, y_score)
    ssp_fig.savefig(save_dir / 'ssp.png')
    mlflow.log_figure(ssp_fig, 'ssp.png')

    scores = get_scores_from_cm(cm)
    scores.update({
        'auc': auc,
        'tss_opt': tss_opt,
    })
    save_path = save_dir / 'best_model_test_scores.md'
    pd.DataFrame(scores, index=[0]).to_markdown(save_path, tablefmt='grid')

    # Model-specific inspection of the refit best estimator.
    estimator = model.best_estimator_['model']
    if isinstance(estimator, DecisionTreeClassifier):
        dot_data = export_graphviz(estimator, out_file=None,
                                   max_depth=3,
                                   feature_names=cfg['features'],
                                   class_names=True,
                                   filled=True)
        graph = graphviz.Source(dot_data, format='png')
        save_path = save_dir / 'tree_graphviz.png'
        graph.render(save_path)
        mlflow.log_artifact(save_path)
    if isinstance(estimator, RandomForestClassifier):
        # Feature importance based on mean decrease in impurity (MDI).
        fig, ax = plt.subplots()
        forest_importances = pd.Series(estimator.feature_importances_,
                                       index=cfg['features'])
        std = np.std([tree.feature_importances_ for tree in estimator.estimators_], axis=0)
        forest_importances.plot.bar(yerr=std, ax=ax)
        ax.set_title("Feature importances using MDI")
        ax.set_ylabel("Mean decrease in impurity")
        fig.tight_layout()
        mlflow.log_figure(fig, 'forest_importances.png')
    plt.close('all')
    return scores
def tune(X_train, y_train, groups_train,
         Model, param_space, method='grid', save_dir=Path('outputs')):
    """Hyperparameter search over param_space for the given estimator class.

    The estimator is wrapped in a Pipeline and searched with group-aware CV
    (GroupKFold) so samples from the same active region never straddle a
    train/validation split. CV results are written to csv/markdown/html
    artifacts; the refit search object is dumped and logged to mlflow.

    Args:
        X_train, y_train, groups_train: training data, labels and CV groups.
        Model: estimator class (not an instance).
        param_space: estimator kwargs -> grid lists ('grid') or skopt
            dimensions ('bayes').
        method: 'grid' for GridSearchCV or 'bayes' for BayesSearchCV.
        save_dir: Path artifact directory (string default previously broke
            the `save_dir / ...` joins below).

    Returns:
        (search, df): refit search object and a tidy DataFrame of CV results.

    Raises:
        ValueError: if `method` is neither 'grid' nor 'bayes'.
    """
    #scorer = make_scorer(hss2)
    scorer = make_scorer(roc_auc_score, needs_threshold=True)
    pipe = Pipeline([
        #('rus', RandomUnderSampler()),
        #('scaler', StandardScaler()), # already did it in loading
        ('model', Model())
    ])
    pipe_space = {'model__' + k: v for k, v in param_space.items()}
    pipe_space.update({
        #'rus__sampling_strategy': [1, 0.5, 0.1] # desired ratio of minority:majority
        #'rus__sampling_strategy': (0.1, 1.0, 'uniform')
    })
    if method == 'grid':
        search = GridSearchCV(pipe,
                              pipe_space,
                              scoring=scorer,
                              n_jobs=1,
                              cv=GroupKFold(cfg['bayes']['n_splits']),
                              refit=True, # default True
                              verbose=1)
        search.fit(X_train, y_train, groups_train)
    elif method == 'bayes':
        search = BayesSearchCV(pipe,
                               pipe_space,
                               n_iter=cfg['bayes']['n_iter'], # default 50 # 8 cause out of range
                               scoring=scorer,
                               n_jobs=cfg['bayes']['n_jobs'], # at most n_points * cv jobs
                               n_points=cfg['bayes']['n_points'], # number of points to run in parallel
                               #pre_dispatch default to'2*n_jobs'. Can't be None. See joblib
                               cv=GroupKFold(cfg['bayes']['n_splits']), # if integer, StratifiedKFold is used by default
                               refit=True, # default True
                               verbose=0)
        search.fit(X_train, y_train, groups_train)
        # Partial Dependence plots of the (surrogate) objective function
        # Not working for smoke test
        #_ = plot_objective(search.optimizer_results_[0], # index out of range for QDA? If search space is empty, then the optimizer_results_ has length 1, but in plot_objective, optimizer_results_.models[-1] is called but models is an empty list. This should happen for all n_jobs though. Why didn't I come across it?
        #                   dimensions=list(pipe_space.keys()),
        #                   n_minimum_search=int(1e8))
        #plt.tight_layout()
        #plt.savefig(os.path.join(save_dir, 'parallel_dependence.png'))
        #plt.show()
    else:
        # BUGFIX: was a bare `raise`, which has no active exception here and
        # itself fails with "RuntimeError: No active exception to re-raise".
        raise ValueError(f'Unknown search method: {method!r}')
    # Tidy the CV results into one row per candidate.
    df = pd.DataFrame(search.cv_results_['params'])
    df = df.rename(columns=lambda p: p.split('__')[1])
    df = df.assign(**{new_k: search.cv_results_[k] for k, new_k in
                      [['mean_fit_time', 'fit_time'],
                       ['std_test_score', 'score_std'],
                       ['mean_test_score', 'score_mean'],
                       ['rank_test_score', 'rank']]})
    save_path = save_dir / 'cv_results.csv'
    df.to_csv(save_path)
    mlflow.log_artifact(save_path)
    save_path = save_dir / 'cv_results.md'
    df.to_markdown(save_path, tablefmt='grid')
    mlflow.log_artifact(save_path)
    fig = px.parallel_coordinates(df, color="score_mean",
                                  dimensions=df.columns,
                                  #color_continuous_scale=px.colors.diverging.Tealrose,
                                  #color_continuous_midpoint=2
                                  )
    save_path = save_dir / 'parallel_coordinates.html'
    fig.write_html(save_path.open(mode='w')) # alternatively, str(path.resolve())
    mlflow.log_artifact(save_path)
    #fig.show()
    joblib.dump(search, save_dir / 'model.joblib')
    mlflow.sklearn.log_model(search, 'model')
    return search, df
def sklearn_main(database_dir):
    """
    We sweep both dataset and model in this function because that's the key comparisons
    made by the paper. Databases, on the other hand, is iterated outside this function.

    For each dataset x seed x model combination a nested mlflow run is
    created; scores, timing and best params are collected and written to
    `<output_dir>/<database>_results.md` / `.csv`.
    """
    # Estimator classes to sweep; commented entries were tried and dropped.
    Models = [
        #KNeighborsClassifier,
        #QuadraticDiscriminantAnalysis,
        SGDClassifier,
        #SVC,
        #DecisionTreeClassifier,
        RandomForestClassifier,
        #ExtraTreesClassifier,
        #AdaBoostClassifier,
        #GradientBoostingClassifier,
        HistGradientBoostingClassifier,
    ]
    # Parameter grids for tune(method='grid'); currently unused (see the
    # 'bayes' call below) but kept as the grid-search alternative.
    grids = {
        'SGDClassifier': {
            'loss': [
                'hinge', # linear SVM
                'log', # logistic regression
            ],
            'alpha': [1e-6, 1e-4, 1e-2],
            'class_weight': 'balanced', # default to None (all classes are assumed to have weight one)
        },
        'QuadraticDiscriminantAnalysis': {
            # priors=None, # By default, the class proportions are inferred from training data
        },
        'SVC': {
            'C': [0.1, 1, 10],
            'class_weight': [
                {0: 1, 1: 1},
                {0: 1, 1: 2},
                {0: 1, 1: 10},
            ],
        },
        'DecisionTreeClassifier': {
            'max_depth': [1, 2, 4, 8], # default None
            'min_samples_leaf': [1, 0.00001, 0.0001, 0.001, 0.01], # 1 and 1.0 are different. Default 1
            'class_weight': 'balanced', # default None (all classes are assumed to have weight one)
        },
        'RandomForestClassifier': {
            'n_estimators': [10, 100, 1000],
            'max_depth': [None, 2, 4, 8], # weak learners
            #'min_samples_split': 2,
            'class_weight': ['balanced', 'balanced_subsample'],
        },
        'ExtraTreesClassifier': {
        },
        'AdaBoostClassifier': {
        },
        'GradientBoostingClassifier': {
        },
        'HistGradientBoostingClassifier': {
        },
        #'XGBClassifier': {},
    }
    # skopt search spaces for tune(method='bayes'); this is what is used below.
    distributions = {
        'SGDClassifier': {
            'loss': [
                #'hinge', # linear SVM
                'log', # logistic regression
            ],
            'alpha': (1e-6, 1e-1, 'log-uniform'),
            'class_weight': ['balanced'], # default to None (all classes are assumed to have weight one)
        },
        'QuadraticDiscriminantAnalysis': {
            'reg_param': [0], # BayesSearchCV require
            # priors=None, # By default, the class proportions are inferred from training data
        },
        'DecisionTreeClassifier': {
            'max_depth': [8, 16, 32, 64, None], # default None
            #'min_samples_leaf': (0.000001, 0.01, 'log-uniform'),
            # 1 and 1.0 are different. Default 1
            'class_weight': ['balanced'], # default to None (all classes are assumed to have weight one)
        },
        'RandomForestClassifier': {
            'n_estimators': [300], #[50, 100, 300], 300 better than 50 and 100
            #'max_depth': [None, 1, 2, 4, 8], # RF doesn't use weak learner
            'class_weight': ['balanced', 'balanced_subsample'], # default to None (all classes are assumed to have weight one)
            'oob_score': [True],
        },
        'ExtraTreesClassifier': {
            'n_estimators': [100, 300, 1000],
        },
        'AdaBoostClassifier': {
            'n_estimators': [50],
            'learning_rate': [1],
        },
        'GradientBoostingClassifier': {
            'learning_rate': [0.1],
        },
        'HistGradientBoostingClassifier': {
            'learning_rate': (0.0001, 0.1, 'log-uniform'),
            'max_iter': [50, 100, 200, 400, 1000],
            'max_depth': [None, 2, 4, 6],
        },
    }
    results = []
    for dataset in ['smarp', 'sharp', 'fused_smarp', 'fused_sharp']:
        for balanced in [True]:
            # NOTE: the loop target deliberately writes each seed into the
            # module-level cfg dict, which tune()/evaluate() also read.
            for cfg['seed'] in range(5):
                dataset_blc = dataset + '_' + ('balanced' if balanced else 'raw')
                X_train, X_test, y_train, y_test, groups_train, _ = get_dataset_numpy(
                    database_dir, dataset, cfg['auxdata'], balanced=balanced, seed=cfg['seed'])
                # # Visualize processed train and test splits
                # from eda import plot_selected_samples
                # title = database_dir.name + ' ' + dataset_blc
                # fig = plot_selected_samples(X_train, X_test, y_train, y_test, cfg['features'],
                #                             title=title)
                # fig.show()
                # continue
                for Model in Models:
                    t_start = time.time()
                    param_space = distributions[Model.__name__]
                    run_name = '_'.join([database_dir.name, dataset_blc, Model.__name__])
                    run_dir = Path(cfg['output_dir']) / run_name
                    run_dir.mkdir(parents=True, exist_ok=True)
                    with mlflow.start_run(run_name=run_name, nested=True) as run:
                        best_model, df = tune(X_train, y_train, groups_train,
                                              Model, param_space, method='bayes',
                                              save_dir=run_dir)
                        # Alternatively, param_space = grids[Model.__name__] and use 'grid' method
                        print(f'\nCV results of {Model.__name__} on {database_dir} {dataset_blc}:')
                        print(df.to_markdown(tablefmt='grid'))
                        scores = evaluate(X_test, y_test, best_model, save_dir=run_dir)
                        #mlflow.log_param('sampling_strategy', best_model.best_params_['rus__sampling_strategy'])
                        # Log the tuned estimator hyperparameters, stripping
                        # the pipeline's 'model__' prefix.
                        mlflow.log_params({k.replace('model__', ''): v for k, v in
                                           best_model.best_params_.items() if k.startswith('model__')})
                        mlflow.set_tag('database_name', database_dir.name)
                        mlflow.set_tag('dataset_name', dataset)
                        mlflow.set_tag('balanced', balanced)
                        mlflow.set_tag('estimator_name', Model.__name__)
                        mlflow.set_tag('seed', cfg['seed'])
                        mlflow.log_metrics(scores)
                        #mlflow.sklearn.log_model(best_model, 'mlflow_model')
                        r = {
                            'database': database_dir.name,
                            'dataset': dataset_blc,
                            'model': Model.__name__,
                            'time': time.time() - t_start,
                            'seed': cfg['seed'],
                        }
                        r.update(scores)
                        r.update({
                            'params': dict(best_model.best_params_),
                        })
                        results.append(r)
    # One summary table per database, in markdown and csv form.
    results_df = pd.DataFrame(results)
    save_path = Path(cfg['output_dir']) / f'{database_dir.name}_results.md'
    results_df.to_markdown(save_path, tablefmt='grid')
    results_df.to_csv(save_path.with_suffix('.csv'))
    print(results_df.to_markdown(tablefmt='grid'))
def test_seed():
    """Sanity-check that numpy's RNG is reproducible on this platform."""
    np.random.seed(0)
    expected = [2732, 43567, 42613, 52416, 45891, 21243, 30403, 32103, 41993, 57043]
    drawn = np.random.randint(0, 65536, 10)
    assert np.all(drawn == expected)
    # Re-randomize so subsequent code does not run with a fixed seed.
    np.random.seed(None)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--data_root', default='datasets')
    parser.add_argument('-a', '--auxdata', default='datasets/sharp2smarp.npy')
    parser.add_argument('-s', '--smoke', action='store_true')
    parser.add_argument('-e', '--experiment_name', default='leaderboard2')
    parser.add_argument('-r', '--run_name', default='sklearn')
    parser.add_argument('-o', '--output_dir', default='outputs')
    parser.add_argument('--seed', default=0)
    args = parser.parse_args()
    # Module-level configuration dict, read by the functions above.
    cfg = {
        'features': ['AREA', 'USFLUXL', 'MEANGBL', 'R_VALUE'],
        'bayes': {
            'n_iter': 10, # light computation until the final stage
            'n_jobs': 20,
            'n_points': 4,
            'n_splits': 5,
        },
    }
    cfg.update(vars(args))
    if args.smoke:
        # Smoke runs use a tiny search budget and a separate output location.
        cfg.update({
            'experiment_name': 'smoke',
            'output_dir': 'outputs_smoke',
            'bayes': {
                'n_iter': 6,
                'n_jobs': 2,
                'n_points': 1,
                'n_splits': 2,
            },
        })
    test_seed()
    t_start = time.time()
    mlflow.set_experiment(cfg['experiment_name'])
    with mlflow.start_run(run_name=cfg['run_name']) as run:
        databases = [p for p in Path(cfg['data_root']).iterdir() if p.is_dir()]
        # NOTE: the reassignment below overrides the discovered database list
        # with a fixed pair of directories.
        databases = [Path(cfg['data_root']) / d for d in [
            'M_Q_24hr',
            'M_QS_24hr',
        ]]
        logging.info(databases)
        for database in databases:
            sklearn_main(database)
    print('Run time: {} s'.format(time.time() - t_start))
|
from __future__ import absolute_import, division, print_function
from abc import ABCMeta, abstractproperty, abstractmethod
import numpy as np
from glue.external import six
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
__all__ = ['HistogramLayerArtist']
@six.add_metaclass(ABCMeta)
class HistogramLayerBase(object):
    """Abstract interface for objects exposing histogram display parameters."""
    lo = abstractproperty()  # lo-cutoff for bin counting
    hi = abstractproperty()  # hi-cutoff for bin counting
    nbins = abstractproperty()  # number of bins
    xlog = abstractproperty()  # whether to space bins logarithmically

    @abstractmethod
    def get_data(self):
        """
        Return array of bin counts
        """
        pass
class HistogramLayerArtist(MatplotlibLayerArtist, HistogramLayerBase):
    """Render one data/subset layer as a matplotlib histogram.

    Bin counts are recomputed only when inputs change (tracked by the
    ChangedTrigger descriptors); display-only changes (ylog, cumulative,
    normed) just rescale the existing bar patches.
    """

    _property_set = MatplotlibLayerArtist._property_set + 'lo hi nbins xlog'.split()

    lo = ChangedTrigger(0)        # lo-cutoff for bin counting
    hi = ChangedTrigger(1)        # hi-cutoff for bin counting
    nbins = ChangedTrigger(10)    # number of bins
    xlog = ChangedTrigger(False)  # whether to space bins logarithmically
    att = ChangedTrigger()        # attribute to histogram

    def __init__(self, layer, axes):
        super(HistogramLayerArtist, self).__init__(layer, axes)
        self.ylog = False
        self.cumulative = False
        self.normed = False
        self.y = np.array([])     # displayed (scaled) bin heights
        self.x = np.array([])     # bin edges
        self._y = np.array([])    # raw bin counts, before scaling
        self._scale_state = None  # last applied (normed, ylog, cumulative)

    def get_data(self):
        """Return (bin_edges, scaled_bin_heights)."""
        return self.x, self.y

    def clear(self):
        super(HistogramLayerArtist, self).clear()
        self.x = np.array([])
        self.y = np.array([])
        self._y = np.array([])

    def _calculate_histogram(self):
        """Recalculate the histogram, creating new patches.

        Returns True on success; any falsy return tells update() to abort
        without marking the artist as synced.
        """
        self.clear()
        try:
            data = self.layer[self.att].ravel()
            if not np.isfinite(data).any():
                return False
        except IncompatibleAttribute as exc:
            self.disable_invalid_attributes(*exc.args)
            return False
        if data.size == 0:
            # Was a bare `return`; made explicit (both are falsy to update()).
            return False
        if self.lo > np.nanmax(data) or self.hi < np.nanmin(data):
            # View window does not intersect the data at all.
            return False
        if self.xlog:
            data = np.log10(data)
            rng = [np.log10(self.lo), np.log10(self.hi)]
        else:
            rng = self.lo, self.hi
        nbinpatch = self._axes.hist(data,
                                    bins=int(self.nbins),
                                    range=rng)
        self._y, self.x, self.artists = nbinpatch
        return True

    def _scale_histogram(self):
        """Modify height of bins to match ylog, cumulative, and norm"""
        if self.x.size == 0:
            return
        # FIX: np.float was a deprecated alias of the builtin float (removed
        # in NumPy 1.24); astype(float) is identical in behavior.
        y = self._y.astype(float)
        dx = self.x[1] - self.x[0]
        if self.normed:
            div = y.sum() * dx
            if div == 0:
                div = 1
            y /= div
        if self.cumulative:
            y = y.cumsum()
            y /= y.max()
        self.y = y
        # On a log y-axis, bars cannot start at 0; use a tiny positive bottom.
        bottom = 0 if not self.ylog else 1e-100
        for artist, height in zip(self.artists, y):
            artist.set_height(height)
            x, _ = artist.get_xy()
            artist.set_xy((x, bottom))

    def _check_scale_histogram(self):
        """
        If needed, rescale histogram to match cumulative/log/normed state.
        """
        state = (self.normed, self.ylog, self.cumulative)
        if state == self._scale_state:
            return
        self._scale_state = state
        self._scale_histogram()

    def update(self, view=None):
        """Sync plot.

        The _changed flag tracks whether the histogram needs to be
        recalculated. If not, the properties of the existing
        artists are updated
        """
        self._check_subset_state_changed()
        if self._changed:
            if not self._calculate_histogram():
                return
            self._changed = False
            self._scale_state = None
        self._check_scale_histogram()
        self._sync_style()

    def _sync_style(self):
        """Update visual properties"""
        style = self.layer.style
        for artist in self.artists:
            artist.set_facecolor(style.color)
            artist.set_alpha(style.alpha)
            artist.set_zorder(self.zorder)
            artist.set_visible(self.visible and self.enabled)
|
import os
def transferRepos(matchList, WorkingDir, fromRepoProtocol = 'https', toRepoProtocol = 'https'):
    """Generate (and optionally run) a Windows batch script that mirrors each
    repository in matchList from its 'from' remote to its 'to' remote.

    Each repo dict must provide repo['from'] / repo['to'] mappings with
    'full_name', 'html_url' and 'ssh_url' keys. The script is written to
    <WorkingDir>/commands.bat; the user reviews and confirms before anything
    is executed.
    """
    # Switch to the working directory and verify the change took effect.
    targetPath = os.path.abspath(WorkingDir)
    os.chdir(targetPath)
    if(os.path.abspath(os.getcwd()) != targetPath):  # compare absolute paths
        print("[error] 切换路径失败")
        print("当前路径", os.path.abspath(os.getcwd()))
        print("想要切换到的路径", targetPath)
        input("按回车键退出...")
        exit()
    commands = []
    commands.append("@echo off")  # suppress command echo in the batch file
    for repo in matchList:
        # Show the current directory for traceability.
        commands.append('chdir')
        # Clone the source repository (as a mirror) if not present locally.
        localRepoFolder = repo['from']['full_name'].split('/')[-1] + ".git"
        if not os.path.exists(WorkingDir + "/" + localRepoFolder):
            print(WorkingDir + "/" + localRepoFolder)
            repo_url = repo['from']['html_url']
            # BUGFIX: choosing the clone URL must honor the *source* protocol;
            # this previously checked toRepoProtocol, leaving fromRepoProtocol unused.
            if fromRepoProtocol != 'https':
                repo_url = repo['from']['ssh_url']
            commands.append("git clone --mirror {repo_url}".format(repo_url = repo_url))
        # Enter the repository directory.
        commands.append("cd {folder_name}".format(folder_name = localRepoFolder))
        commands.append('chdir')
        # Refresh the local mirror. Do NOT use `git fetch --all`: hidden refs
        # in the repository would make the later push fail.
        commands.append("git remote update")
        # Push the mirror to the destination remote.
        repo_url = repo['to']['html_url']
        if toRepoProtocol != 'https':
            repo_url = repo['to']['ssh_url']
        commands.append("git push --mirror {repo_url}".format(repo_url = repo_url))
        # Return to the working directory for the next repository.
        commands.append("cd ../")
        commands.append('')  # blank line between repositories
    commands.append('echo 命令执行完成')
    commands.append("pause")
    # Safety warning (intentionally kept in Chinese for the target audience).
    print("本项目还处于测试阶段,出于安全考虑,我们采用生成命令文件的方式对仓库进行操作,以免",
          "由于脚本错误造成数据丢失。我们强烈建议您在继续前先手动备份您的仓库,以免丢失代码。",
          "由于代码错误或您自己失误造成的代码仓库丢失,项目开发者不承担责任。在执行脚本前,请",
          "务必确认您知晓该行命令的执行结果,切勿盲目执行您不知道的命令!", sep = "\n")
    print("\033[1;37;41m继续前请务全量必备份仓库!\033[0m")
    print("\033[1;37;41m继续前请务全量必备份仓库!\033[0m")
    print("\033[1;37;41m继续前请务全量必备份仓库!\033[0m")
    input("继续操作代表您已阅读上述内容,按回车键继续...")
    batFilePath = os.path.abspath(WorkingDir + "/commands.bat")
    # Context manager guarantees the file is closed even if the write fails.
    with open(batFilePath, "w") as f:
        f.write('\n'.join(commands))
    print("命令文件生成完毕,请查看:", batFilePath)
    if input("是否直接执行(不推荐)?输入y执行,其他输入不执行并继续: ") == "y":
        os.system('"{}"'.format(batFilePath))
    # Executing the commands one by one via os.system would not preserve the
    # current directory between commands, hence the single batch file.
# :: Create the temporary working folder
# mkdir D:\gitTransTempDir
# :: Remove leftovers from a previous run, if any
# rd /s /q ./chrome-extension.git
# rd /s /q D:\gitTransTempDir
|
# -*- coding: utf-8 -*-
# Rez package definition for the pytz library.
name = u'pytz'
version = '2018.5'
description = \
"""
pytz library
"""
requires = []
variants = []
def commands():
    """Rez build hook: expose this pytz install's lib dir on PYTHONPATH.

    `getenv`, `env` and `version` are injected into this function's scope by
    rez when the package is resolved.
    """
    import os
    install_root = os.path.join(getenv("PYTHON_LIBS_PATH"), "pytz", "%s" % version)
    env.PYTHONPATH.append(os.path.join(install_root, 'lib'))
|
from setuptools import setup
import sys, pathlib
# read the contents of your README file
from pathlib import Path
# Read the README so PyPI renders it as the project description.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

version = '0.0.2'

setup(
    name='UltraSystray-noicons',
    version=version,
    description='Ultra simple cross-platform Python systray icon',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Ronny Rentner',
    author_email='ultrasystray.code@ronny-rentner.de',
    url='https://github.com/ronny-rentner/UltraSystray',
    packages=['UltraSystray'],
    zip_safe=False,
    python_requires=">=3.9",
    # Per-platform extras: only Linux needs an extra dependency (pygobject).
    extras_require={
        ':sys_platform == "win32"': [],
        ':sys_platform == "linux"': ['pygobject'],
        ':sys_platform == "darwin"': [],
    },
)
|
from libs.effects.effect import Effect # pylint: disable=E0611, E0401
import numpy as np
class EffectBars(Effect):
    """Audio-reactive 'bars' effect.

    Maps the mel spectrum of the current audio frame onto a configurable
    number of equal-width colored bars and queues the RGB frame for output.
    """

    def run(self):
        """Compute and queue one output frame (shape (3, LED_Count))."""
        effect_config = self._device.device_config["effects"]["effect_bars"]
        led_count = self._device.device_config["LED_Count"]
        led_mid = self._device.device_config["LED_Mid"]
        audio_data = self.get_audio_data()
        y = self.get_mel(audio_data)
        if y is None:
            # No spectrum available this tick; skip the frame.
            return
        # Bit of fiddling with the y values.
        y = np.copy(self._math_service.interpolate(y, led_count // 2))
        self._dsp.common_mode.update(y)
        self.prev_spectrum = np.copy(y)
        # Color channel mappings.
        r = self._dsp.r_filt.update(y - self._dsp.common_mode.value)
        # Duplicate each sample (interleaved) to stretch back to led_count.
        r = np.array([j for i in zip(r, r) for j in i])
        # Split y into [resolution] chunks and calculate the average of each.
        max_values = np.array([max(i) for i in np.array_split(r, effect_config["resolution"])])
        max_values = np.clip(max_values, 0, 1)
        color_sets = []
        for i in range(effect_config["resolution"]):
            # [r,g,b] values from a multicolor gradient array at [resolution] equally spaced intervals.
            color_sets.append([self._color_service.full_gradients[effect_config["color_mode"]]
                               [j][i * (led_count // effect_config["resolution"])] for j in range(3)])
        output = np.zeros((3, led_count))
        chunks = np.array_split(output[0], effect_config["resolution"])
        n = 0
        # Assign blocks with heights corresponding to max_values and colors from color_sets.
        for i in range(len(chunks)):
            m = len(chunks[i])
            for j in range(3):
                output[j][n:n + m] = color_sets[i][j] * max_values[i]
            n += m
        # Calculate how many steps the array will roll.
        steps = self.get_roll_steps(effect_config["roll_speed"])
        self._color_service.full_gradients[effect_config["color_mode"]] = np.roll(
            self._color_service.full_gradients[effect_config["color_mode"]],
            steps * (-1 if effect_config["reverse_roll"] else 1),
            axis=1
        )
        if effect_config["flip_lr"]:
            output = np.fliplr(output)
        if effect_config["mirror"]:
            # Calculate the real mid.
            real_mid = led_count / 2
            # Add some tolerance for the real mid.
            if (real_mid >= led_mid - 2) and (real_mid <= led_mid + 2):
                # Use the option with shrinking the array.
                output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)
            else:
                # Mirror the whole array. After this the array has a two times bigger size than led_count.
                big_mirrored_array = np.concatenate((output[:, ::-1], output[:, ::1]), axis=1)
                start_of_array = led_count - led_mid
                end_of_array = start_of_array + led_count
                output = big_mirrored_array[:, start_of_array:end_of_array]
        self.queue_output_array_noneblocking(output)
|
import os
from django.conf import settings
from django.test import TestCase, Client
class Exercise3Test(TestCase):
    """Checks for exercise 3: the React view/template and the static JS file."""

    def test_view_and_template(self):
        """Test that the view, URLs and template are set up properly by checking the contents of the response."""
        response = Client().get('/react-example/')
        expected_fragments = (
            b'<div id="react_container"></div>',
            b'<script crossorigin src="https://unpkg.com/react@16/umd/react.development.js"></script>',
            b'<script crossorigin src="https://unpkg.com/react-dom@16/umd/react-dom.development.js"></script>',
            b'<script src="https://unpkg.com/babel-standalone@6/babel.min.js"></script>',
            b'<script src="/static/react-example.js" type="text/babel"></script>',
            b'<script type="text/babel">',
            b'let name = "Ben";',
            b'let target = 5;',
            b'ReactDOM.render(<ClickCounter name={ name } target={ target }/>,',
        )
        for fragment in expected_fragments:
            self.assertIn(fragment, response.content)

    def test_js_content(self):
        """Test that some expected things are in the JS file."""
        js_path = os.path.join(settings.BASE_DIR, 'static', 'react-example.js')
        with open(js_path) as f:
            js_source = f.read()
        expected_fragments = (
            'class ClickCounter extends React.Component {',
            'this.state = { clickCount: 0, name: props.name, target: props.target };',
            'if (this.state.clickCount === this.state.target) {',
            'return <span>Well done, {this.state.name}!</span>;',
            'return <button onClick={() => this.setState({ clickCount: this.state.clickCount + 1 })}>',
            '{this.state.clickCount}',
            '</button>;',
        )
        for fragment in expected_fragments:
            self.assertIn(fragment, js_source)
        # Rendering must happen in the template, not in the static file.
        self.assertNotIn('ReactDOM.render', js_source)
|
# Generated by Django 3.2.8 on 2021-10-12 14:35
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: switch Profile.profile_pic to a CloudinaryField whose
    default is a placeholder avatar already uploaded to Cloudinary."""

    dependencies = [
        ('authentication', '0003_rename_profilepic_profile_profile_pic'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='profile_pic',
            field=cloudinary.models.CloudinaryField(default='image/upload/v1632754417/24-248253_user-profile-default-image-png-clipart-png-download_obstgc.png', max_length=255, verbose_name='image'),
        ),
    ]
|
from scrapy import Spider, Request
from olx.spiders.items import Property
from olx.spiders.loaders import PropertyLoader
class SellPropertiesSpider(Spider):
    """Crawl OLX sale listings for one Brazilian state, region by region."""

    name = "sell_properties"

    def __init__(self, state=None, **kwargs):
        super().__init__(**kwargs)
        if state is None:
            raise ValueError("%s must have a state" % self.name)
        self.start_urls = [f"https://{state}.olx.com.br/imoveis/venda"]

    def parse(self, response):
        """Follow every macro-region link on the state landing page."""
        for macro_region in response.css("div.linkshelf-tabs-content ul.list li.item"):
            href = macro_region.css("p.text a::attr(href)").get()
            yield response.follow(url=href, callback=self.parse_micro_regions)

    def parse_micro_regions(self, response):
        """Follow every micro-region link within a macro region."""
        region_links = response.css(
            "div.linkshelf-tabs-content div.linkshelf-zone ul.list li.item a.link"
        )
        for micro_region in region_links:
            href = micro_region.css("a::attr(href)").get()
            yield response.follow(url=href, callback=self.parse_properties_list)

    def parse_properties_list(self, response):
        """Visit each announcement on the page, then follow pagination."""
        listing = response.css("div.section_listing")
        for announcement in listing.css("div.section_OLXad-list li.item a"):
            href = announcement.css("a::attr(href)").get()
            yield response.follow(url=href, callback=self.parse_property)
        next_page = listing.css("li.next a::attr(href)").get()
        if next_page is not None:
            yield Request(next_page, callback=self.parse_properties_list)

    def parse_property(self, response):
        """Extract one property's fields into a Property item."""
        loader = PropertyLoader(item=Property(), response=response)
        loader.add_xpath(
            "area",
            '//div[@data-testid="ad-properties"]//dt[contains(text(), "Área")]/following-sibling::dd/text()', # noqa: E501
        )
        loader.add_xpath("price", '//h2[contains(text(), "R$")]/text()')
        loader.add_xpath(
            "postal_code",
            '//div[@data-testid="ad-properties"]//dt[contains(text(), "CEP")]/following-sibling::dd/text()', # noqa: E501
        )
        loader.add_value("url", response.url)
        yield loader.load_item()
|
from saturnv.api.databases.postgresql.presets import Shortcut, Override, Preset, Version, Setting
from saturnv.api.databases.postgresql.shelves import Shelf, ShelfLink
from .core import Session
|
#! /usr/bin/env python
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# The README doubles as the PyPI long description.
with open('README.md') as readme_file:
    readme = readme_file.read()

setup(
    name='MeaningCloud-python',
    version='2.0.0',
    description='Official Python SDK for MeaningCloud APIs',
    long_description=readme,
    long_description_content_type='text/markdown',
    url='https://github.com/MeaningCloud/meaningcloud-python',
    author='MeaningCloud',
    author_email='support@meaningcloud.com',
    keywords='nlp, MeaningCloud, text analytics',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: General',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Information Analysis'
    ],
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[
        'requests[security]'
    ],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
|
import boto3
import os
import time
try: # Python3
from urllib.parse import urlparse, urlencode
except ImportError: # Python2
from urlparse import urlparse
from urllib import urlencode
class TransferMonitor(object):
    """Progress callback for S3 transfers, logging at most once per second."""

    def __init__(self, total_bytes, logger):
        self._total_bytes = total_bytes
        self._logger = logger
        self._accumulated_bytes = 0
        self._last_update_time = time.time()

    def callback(self, chunk_bytes):
        """Accumulate transferred bytes; emit a throttled progress log line."""
        self._accumulated_bytes += chunk_bytes
        now = time.time()
        if now - self._last_update_time < 1:
            return  # throttle: at most one log line per second
        self._last_update_time = now
        percent = 100.0 * self._accumulated_bytes / self._total_bytes
        self._logger.info("Transfer in progress ... {:.2f}%".format(percent))

    def done(self):
        """Log transfer completion."""
        self._logger.info("Transfer completed ... 100%")
class AwsHelper(object):
    """Thin boto3 wrapper for S3 uploads/downloads with progress logging."""

    def __init__(self, logger):
        self._logger = logger

    def upload_file(self, local_filepath, bucket_name, aws_s3_filepath, skip_upload=False):
        """Upload a local file to S3 and return its s3:// URL.

        If aws_s3_filepath ends with '/' (or is empty), the local file's
        basename is appended/used as the key. With skip_upload=True only the
        URL is computed and logged (test mode).
        """
        if aws_s3_filepath:
            if aws_s3_filepath.endswith('/'):
                aws_s3_filepath += os.path.basename(local_filepath)
        else:
            aws_s3_filepath = os.path.basename(local_filepath)
        s3_url = 's3://{}/{}'.format(bucket_name, aws_s3_filepath)
        self._logger.info("Uploading file to S3: {} ==> {}".format(local_filepath, s3_url))
        if not skip_upload:
            monitor = TransferMonitor(os.path.getsize(local_filepath), self._logger)
            boto3.client('s3').upload_file(local_filepath, bucket_name, aws_s3_filepath,
                                           Callback=monitor.callback)
            monitor.done()
            self._logger.info("File uploaded successfully!")
        else:
            self._logger.info("Skip uploading (test mode)!")
        return s3_url

    def upload_file_obj(self, file_obj, bucket_name, aws_s3_filepath, skip_upload=False):
        """Upload an in-memory file object (must expose getbuffer()) to S3.

        Returns the destination s3:// URL.
        """
        s3_url = 's3://{}/{}'.format(bucket_name, aws_s3_filepath)
        self._logger.info("Uploading file obj to S3 ... {}".format(s3_url))
        if not skip_upload:
            monitor = TransferMonitor(file_obj.getbuffer().nbytes, self._logger)
            boto3.resource('s3')\
                .Bucket(bucket_name)\
                .Object(aws_s3_filepath)\
                .upload_fileobj(file_obj, Callback=monitor.callback)
            monitor.done()
            self._logger.info("File obj uploaded successfully!")
        else:
            self._logger.info("Skip uploading (test mode)!")
        return s3_url

    def download_file(self, aws_s3_url, local_filepath):
        """Download an S3 object (given its s3:// URL) to a local path."""
        self._logger.info("Downloading file from S3: {}, to: {}".format(aws_s3_url, local_filepath))
        bucket_name, model_path = AwsHelper.s3_url_parse(aws_s3_url)
        s3_bucket = boto3.resource('s3').Bucket(bucket_name)
        # Fetch the object size up front so progress percentages are accurate.
        total_size = s3_bucket.Object(model_path).content_length
        monitor = TransferMonitor(total_size, self._logger)
        s3_bucket.download_file(model_path, local_filepath, Callback=monitor.callback)
        monitor.done()
        self._logger.info("File downloaded successfully!")

    @staticmethod
    def s3_url_parse(aws_s3_url):
        """Split an S3 URL into (bucket_name, relative_path).

        Supports both ``s3://bucket/key`` URLs and path-style URLs such as
        ``https://host/bucket/key``.
        """
        def strip_leading_slash(p):
            # startswith is safe on an empty path; the previous lambda indexed
            # p[0] and raised IndexError for URLs like 's3://bucket'.
            return p[1:] if p.startswith('/') else p
        parsed_url = urlparse(aws_s3_url)
        if parsed_url.scheme == 's3':
            bucket_name = parsed_url.netloc
            rltv_path = strip_leading_slash(parsed_url.path)
        else:
            # Path-style URL: first path segment is the bucket.
            path = strip_leading_slash(parsed_url.path)
            path_parts = path.split('/', 1)
            bucket_name = path_parts[0]
            rltv_path = path_parts[1]
        return bucket_name, rltv_path
|
"""Espresso-Caller: automated and reproducible tool for identifying genomic variations at scale"""
# TODO: is it really necessary for packaging?
name = 'espresso-caller'
|
#!/usr/bin/env python
"""
Kevin Angstadt
University of Virginia
Convert VASim Statevectors into Traces of RAPID line numbers
"""
import argparse, csv, json, copy
def parse_tsv(filename):
    """Parse a two-column TSV file into a dict mapping column 0 -> column 1.

    Args:
        filename: Path to a tab-separated file (e.g. the .debug.ast-line
            mapping emitted by the RAPID compiler).

    Returns:
        dict mapping the first field of each row to the second.  Later
        duplicate keys overwrite earlier ones.
    """
    mapping = dict()
    # Open in text mode: the original "rb" was a Python-2 idiom and makes
    # csv.reader fail under Python 3 (it yields bytes, not str).
    with open(filename, "r") as f:
        reader = csv.reader(f, delimiter="\t")
        for row in reader:
            # Skip blank/short rows instead of raising IndexError.
            if len(row) < 2:
                continue
            mapping[row[0]] = row[1]
    return mapping
def process(filename, ste_to_ast, ast_to_line):
    """Convert a VASim state-vector dump into per-offset RAPID line records.

    The state-vector file is a flat sequence of records:
        <offset>
        <num_stes>
        <ste_id>,<value>[,<port1>:<port2>:...]    (num_stes such lines)

    Args:
        filename: Path to the VASim state-vector file.
        ste_to_ast: Mapping of STE id -> list of AST descriptor dicts
            (from the compiler's .debug.json file).
        ast_to_line: Mapping of AST id (str) -> source line number (str).

    Returns:
        dict mapping each offset (int) to a list of entries of the form
        ``{"ste": ..., "ast": <deep copy>, "lineno": <int>}``.
    """
    with open(filename, "r") as f:
        lines = f.readlines()
    vec = dict()
    # Consume the file record by record; pop(0) keeps the original
    # streaming structure (quadratic, but the dumps are small).
    while(len(lines) > 0):
        offset = int(lines.pop(0).strip())
        num_stes = int(lines.pop(0).strip())
        data = list()
        cnt_vals = dict()
        for i in range(num_stes):
            s_data = lines.pop(0).strip().split(",")
            ste = s_data[0]
            val = s_data[1].strip()
            if len(s_data) == 3:
                port = s_data[2].split(":")
            else:
                port = []
            for ast in ste_to_ast[ste]:
                if ast["el_type"].strip() == "counter":
                    # store the counter value
                    cnt_vals[ste] = val
                if len(port) == 0:
                    continue
                if ast["port"] not in port:
                    continue
                try:
                    lineno = int(ast_to_line[str(int(ast["ast_id"]))])
                    # Report/break ports only count when they actually fired.
                    if(ast["port"] == "report" and val == "0"):
                        continue
                    elif(ast["port"] == "break" and val == "0"):
                        continue
                    elif ast["el_type"].strip() == "boolean":
                        if ast["port"].strip() != "report" and ast["port"].strip() != "break":
                            continue
                    new_ast = copy.deepcopy(ast)
                    new_entry = {
                        "ste" : ste,
                        "ast" : new_ast,
                        "lineno" : lineno
                    }
                    data.append(new_entry)
                except KeyError as e:
                    # Unknown AST id: dump context for debugging, then bail.
                    # (Parenthesized print is valid on both Python 2 and 3;
                    # the original bare "print ast" was Python-2-only and a
                    # SyntaxError under Python 3.)
                    print(ast)
                    print(ast_to_line)
                    exit(0)
        # Update counter values: substitute the live counter reading for any
        # state variable whose value names a counter STE.
        for entry in data:
            for var_entry in entry['ast']['state']:
                if 'value' in var_entry.keys() and var_entry['value'] in cnt_vals.keys():
                    var_entry['value'] = cnt_vals[var_entry['value']]
        vec[offset] = data
    return vec
if __name__ == '__main__':
    # Command-line driver: map VASim state-vector output back to RAPID lines.
    arg_parser = argparse.ArgumentParser(
        prog="parse_statevec.py",
        description="Parse the Output of VASim to generate line info from RAPID program")
    arg_parser.add_argument("statevec", help="The state vector file")
    arg_parser.add_argument("astmap", help="The .debug.ast-line file generated by the RAPID compiler")
    arg_parser.add_argument("stemap", help="The .debug.json file generated by the RAPID compiler")
    arg_parser.add_argument("outfile", help="where to write the resulting JSON file")
    opts = arg_parser.parse_args()
    # Load the two compiler-emitted debug mappings.
    ast_line_map = parse_tsv(opts.astmap)
    with open(opts.stemap, "r") as json_in:
        ste_ast_map = json.load(json_in)
    # Cross-reference the state vector and write the trace as pretty JSON.
    trace = process(opts.statevec, ste_ast_map, ast_line_map)
    with open(opts.outfile, "w") as json_out:
        json.dump(trace, json_out, sort_keys=True, indent=2, separators=(',', ': '))
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from fleetspeak.src.common.proto.fleetspeak import common_pb2 as fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2 as fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2
class AdminStub(object):
  """Client stub for the fleetspeak.server.Admin gRPC service.

  Generated by the gRPC Python protocol compiler plugin from admin.proto;
  do not edit by hand — regenerate instead.
  """
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each attribute below is a unary-unary callable bound to one Admin RPC,
    # with the protobuf request serializer / response deserializer generated
    # from the service definition wired in.
    self.CreateBroadcast = channel.unary_unary(
        '/fleetspeak.server.Admin/CreateBroadcast',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.CreateBroadcastRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.FromString,
        )
    self.ListActiveBroadcasts = channel.unary_unary(
        '/fleetspeak.server.Admin/ListActiveBroadcasts',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListActiveBroadcastsRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListActiveBroadcastsResponse.FromString,
        )
    self.ListClients = channel.unary_unary(
        '/fleetspeak.server.Admin/ListClients',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientsRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientsResponse.FromString,
        )
    self.ListClientContacts = channel.unary_unary(
        '/fleetspeak.server.Admin/ListClientContacts',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientContactsRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientContactsResponse.FromString,
        )
    self.GetMessageStatus = channel.unary_unary(
        '/fleetspeak.server.Admin/GetMessageStatus',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.GetMessageStatusRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.GetMessageStatusResponse.FromString,
        )
    self.InsertMessage = channel.unary_unary(
        '/fleetspeak.server.Admin/InsertMessage',
        request_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.Message.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.FromString,
        )
    self.StoreFile = channel.unary_unary(
        '/fleetspeak.server.Admin/StoreFile',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.StoreFileRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.FromString,
        )
    self.KeepAlive = channel.unary_unary(
        '/fleetspeak.server.Admin/KeepAlive',
        request_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.FromString,
        )
    self.BlacklistClient = channel.unary_unary(
        '/fleetspeak.server.Admin/BlacklistClient',
        request_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.BlacklistClientRequest.SerializeToString,
        response_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.FromString,
        )
class AdminServicer(object):
  """Server-side base class for the fleetspeak.server.Admin gRPC service.

  Generated code: every method rejects calls with UNIMPLEMENTED until a
  subclass overrides it with a real handler.
  """
  # missing associated documentation comment in .proto file
  pass

  def CreateBroadcast(self, request, context):
    """CreateBroadcast creates a FS broadcast, potentially sending a message to
    many machines in the fleet.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ListActiveBroadcasts(self, request, context):
    """ListActiveBroadcasts lists the currently active FS broadcasts.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ListClients(self, request, context):
    """ListClients lists the currently active FS clients.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ListClientContacts(self, request, context):
    """ListClientContacts lists the contact history for a client.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetMessageStatus(self, request, context):
    """GetMessageStatus retrieves the current status of a message.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def InsertMessage(self, request, context):
    """InsertMessage inserts a message into the Fleetspeak system to be processed
    by the server or delivered to a client.

    TODO: Have this method return the message that is written to the
    datastore (or at least the id).
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def StoreFile(self, request, context):
    """StoreFile inserts a file into the Fleetspeak system.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def KeepAlive(self, request, context):
    """KeepAlive does as little as possible.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def BlacklistClient(self, request, context):
    """BlacklistClient marks a client_id as invalid, forcing all Fleetspeak
    clients using it to rekey.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_AdminServicer_to_server(servicer, server):
  """Register `servicer`'s handlers for the Admin service on `server`.

  Generated code: builds one unary-unary handler per RPC, pairing the
  servicer method with the protobuf (de)serializers from admin.proto.
  """
  rpc_method_handlers = {
      'CreateBroadcast': grpc.unary_unary_rpc_method_handler(
          servicer.CreateBroadcast,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.CreateBroadcastRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.SerializeToString,
      ),
      'ListActiveBroadcasts': grpc.unary_unary_rpc_method_handler(
          servicer.ListActiveBroadcasts,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListActiveBroadcastsRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListActiveBroadcastsResponse.SerializeToString,
      ),
      'ListClients': grpc.unary_unary_rpc_method_handler(
          servicer.ListClients,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientsRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientsResponse.SerializeToString,
      ),
      'ListClientContacts': grpc.unary_unary_rpc_method_handler(
          servicer.ListClientContacts,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientContactsRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.ListClientContactsResponse.SerializeToString,
      ),
      'GetMessageStatus': grpc.unary_unary_rpc_method_handler(
          servicer.GetMessageStatus,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.GetMessageStatusRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.GetMessageStatusResponse.SerializeToString,
      ),
      'InsertMessage': grpc.unary_unary_rpc_method_handler(
          servicer.InsertMessage,
          request_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.Message.FromString,
          response_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.SerializeToString,
      ),
      'StoreFile': grpc.unary_unary_rpc_method_handler(
          servicer.StoreFile,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.StoreFileRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.SerializeToString,
      ),
      'KeepAlive': grpc.unary_unary_rpc_method_handler(
          servicer.KeepAlive,
          request_deserializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.FromString,
          response_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.SerializeToString,
      ),
      'BlacklistClient': grpc.unary_unary_rpc_method_handler(
          servicer.BlacklistClient,
          request_deserializer=fleetspeak_dot_src_dot_server_dot_proto_dot_fleetspeak__server_dot_admin__pb2.BlacklistClientRequest.FromString,
          response_serializer=fleetspeak_dot_src_dot_common_dot_proto_dot_fleetspeak_dot_common__pb2.EmptyMessage.SerializeToString,
      ),
  }
  # All Admin RPCs are unary-unary; route them through one generic handler.
  generic_handler = grpc.method_handlers_generic_handler(
      'fleetspeak.server.Admin', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
|
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py:percent
#     text_representation:
#       extension: .py
#       format_name: percent
#       format_version: '1.3'
#       jupytext_version: 1.11.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---
# %% [markdown]
# # Pre-process data:
#
# This notebook takes the csv files with ERF data (historical and scenario) and converts them into an xarray.
#
# Notes:
# - Historical emissions are used up until 2019.
# - After this the SSPs are used which results in a jump in ERF because these are not harmonized for 2019.
#
# %% [markdown]
# ## UPDATE:
#
# - Update HFCs, figures etc
# - new figures
# %%
from ar6_ch6_rcmipfigs import constants
# %%
# %load_ext autoreload
# %autoreload 2
# %%
import matplotlib.pyplot as plt
# %% [markdown]
# ### Define output paths
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
from ar6_ch6_rcmipfigs.constants import OUTPUT_DATA_DIR
SAVEPATH_DATASET = OUTPUT_DATA_DIR / 'ERF_data.nc'
# just minorGHGs_data here
SAVEPATH_DATASET_minor = OUTPUT_DATA_DIR / 'ERF_minorGHGs_data.nc'
SAVEPATH_DATASET
# %% [markdown]
# ## Load data:
# %% [markdown]
# Data for ERF historical period:
# %%
from ar6_ch6_rcmipfigs.utils.badc_csv import read_csv_badc
# %%
path_AR_hist = constants.INPUT_DATA_DIR_BADC /'AR6_ERF_1750-2019.csv'
path_AR_hist_minorGHG = constants.INPUT_DATA_DIR_BADC /'AR6_ERF_minorGHGs_1750-2019.csv'
# use historical up to 2019:
use_hist_to_year = 2019
df_hist = read_csv_badc(path_AR_hist, index_col=0).copy()
df_hist_minor_GHG = read_csv_badc(path_AR_hist_minorGHG, index_col=0).copy()
df_hist.columns
# %%
df_hist
# %% [markdown]
# Find SSP files:
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
path_ssps = constants.INPUT_DATA_DIR_BADC / 'SSPs'
paths = path_ssps.glob('*')  # '^(minor).)*$')
files = [x for x in paths if x.is_file()]
files
# %% [markdown]
# Read all SSP files:
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
ERFs = {}
ERFs_minor = {}
nms = []
for file in files:
    fn = file.name  # filename
    # Scenario name is the second '_'-separated token of the filename.
    _ls = fn.split('_')  # [1]
    nm = _ls[1]
    print(nm)
    print(file)
    if 'minorGHGs' in fn:
        ERFs_minor[nm] = read_csv_badc(file, index_col=0).copy()
    else:
        ERFs[nm] = read_csv_badc(file, index_col=0).copy()
        nms.append(nm)
# %% [markdown]
# ## Replace years up to 2019 by historical ERF
# %% [markdown]
# #### Control plot before:
#
# %%
ERFs['ssp119']  # ['co2'][1750]#.loc[2010]
# %%
for scn in ERFs.keys():
    ERFs[scn].loc[2010:2025]['ch4'].plot(label=scn)
plt.ylabel('ERF [W/m2]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# %%
for scn in ERFs.keys():
    ERFs[scn].loc[2010:2040]['o3'].plot(label=scn)
plt.ylabel('ERF [W/m2]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# %%
for var in ERFs['ssp119'].columns:
    for scn in ERFs.keys():
        ERFs[scn].loc[2010:2040][var].plot(label=scn)
    plt.ylabel('ERF [W/m2]')
    plt.title(var)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.show()
# %%
for scn in ERFs.keys():
    ERFs[scn].loc[2010:2025]['total_anthropogenic'].plot(label=scn)
plt.ylabel('ERF [W/m2]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# %%
for scn in ERFs_minor.keys():
    ERFs_minor[scn].loc[2010:2025]['HFC-125'].plot(label=scn)
plt.ylabel('ERF [W/m2]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# %%
cols = ERFs['ssp119'].columns
print(cols)
cols_minorGHG = ERFs_minor['ssp119'].columns
print(cols_minorGHG)
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Overwrite 1750..2019 in every scenario with the harmonized historical ERF.
for scn in ERFs.keys():
    ERFs[scn].loc[1750:use_hist_to_year] = df_hist[cols].loc[1750:use_hist_to_year]
    if scn in ERFs_minor:
        ERFs_minor[scn].loc[1750:use_hist_to_year] = df_hist_minor_GHG[cols_minorGHG].loc[1750:use_hist_to_year]
# %% [markdown]
# #### Control plot after:
# %%
for scn in ERFs.keys():
    ERFs[scn].loc[2010:2025]['total_anthropogenic'].plot(label=scn)
plt.ylabel('ERF [W/m2]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# %%
for scn in ERFs_minor.keys():
    ERFs_minor[scn].loc[2010:2025]['HFC-125'].plot(label=scn)
plt.ylabel('ERF [W/m2]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# %% [markdown]
# ## Pre-processing:
# %% [markdown]
# ### Add together aerosol forcing:
# %%
aero_tot = 'aerosol-total'
aero_cld = 'aerosol-cloud_interactions'
aero_rad = 'aerosol-radiation_interactions'
bc_on_snow = 'bc_on_snow'
aero_tot_wbc = 'aerosol-total-with_bc-snow'
for scn in ERFs.keys():
    # add together:
    ERFs[scn][aero_tot] = ERFs[scn][aero_cld] + ERFs[scn][aero_rad]
    ERFs[scn][aero_tot_wbc] = ERFs[scn][aero_tot]+ ERFs[scn][bc_on_snow]
# %% [markdown]
# ### Compute sum of HFCs
# %%
HFCs_name = 'HFCs'
# list of variables
ls = list(ERFs_minor['ssp370-lowNTCF-aerchemmip'].columns)
# choose only those with HFC in them
vars_HFCs = [v for v in ls if 'HFC' in v]
vars_HFCs
# %% [markdown]
# We define SLCFs as those with a lifetime of less than 20 years, and this excludes the following:
# HFC-23,HFC-125,HFC-143a,HFC-227ea,HFC-236fa
#
#
# %%
excluded_HFCs = ['HFC-23','HFC-236fa'] #'HFC-125','HFC-227ea','HFC-143a',
# %%
final_HFC_vars = [hfc for hfc in vars_HFCs if hfc not in excluded_HFCs]
# %%
final_HFC_vars
# %%
ERFs_minor['ssp585'][vars_HFCs].sum(axis=1).plot(label='All HFCs')
ERFs_minor['ssp585'][excluded_HFCs].sum(axis=1).plot(label='excluded HFCs')
ERFs_minor['ssp585'][final_HFC_vars].sum(axis=1).plot(label='Used HFCs')
#(ERFs_minor['ssp585'][excluded_HFCs].sum(axis=1)+ERFs_minor['ssp585'][final_HFC_vars].sum(axis=1)).plot(label='sum')
plt.legend()
# %%
for scn in ERFs_minor.keys():
    # sum over HFC variables
    ERFs_minor[scn][HFCs_name] = ERFs_minor[scn][final_HFC_vars].sum(axis=1)
    # add row to ERFs as well
    ERFs[scn][HFCs_name] = ERFs_minor[scn][HFCs_name]
ERFs[scn]
# %% [markdown]
# ## For SSP4-3.4 HFCs, use SSP1-1.9 for HFCs
# %%
# NOTE(review): 'ssp334' looks like a typo for the usual SSP4-3.4 id
# 'ssp434', but it is used consistently below (including the check cell),
# so confirm against the actual input file names before renaming.
ssp334 ='ssp334'
ssp119 = 'ssp119'
# %%
ERFs[ssp334][HFCs_name] = ERFs[ssp119][HFCs_name]
# NOTE(review): this aliases the ssp119 frame (no .copy()), so any later
# mutation of ERFs_minor[ssp119] also changes ERFs_minor[ssp334].
ERFs_minor[ssp334] = ERFs_minor[ssp119]#[HFCs_name]
# %% [markdown]
# ERFs_minor[ssp334] = ERFs_minor[ssp119].copy()#.keys()
# %% [markdown]
# ## Convert to xarray:
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
import xarray as xr
das = []
# loop over scenarios
for scn in ERFs.keys():
    # convert to xarray
    ds = ERFs[scn].to_xarray()  # .squeeze()
    # concatenate variables as new dimension
    da = ds.to_array('variable')
    # give scenario name
    da = da.rename(scn)
    das.append(da)
# %%
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# let the new dimension be called scenario:
da_tot = xr.merge(das).to_array('scenario')
# rename the dataset to ERF
da_tot = da_tot.rename('ERF')
# save
da_tot.to_netcdf(SAVEPATH_DATASET)
da_tot.to_dataset()
# %% [markdown]
# ### Save minor GHGs as well:
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
import xarray as xr
das = []
for nm in nms:
    if nm not in ERFs_minor.keys():
        continue
    ds = ERFs_minor[nm].to_xarray()  # .squeeze()
    da = ds.to_array('variable')
    da = da.rename(nm)
    das.append(da)
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
da_tot_minor = xr.merge(das).to_array('scenario')
da_tot_minor = da_tot_minor.rename('ERF')
da_tot_minor.to_netcdf(SAVEPATH_DATASET_minor)
da_tot_minor.to_dataset()
# %% [markdown]
# ## Check:
# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
da_check = xr.open_dataset(SAVEPATH_DATASET)
da_check
# %%
da_check.sel(scenario='ssp334', variable='HFCs')
# %%
import matplotlib.pyplot as plt
# %%
for scn in da_check.scenario:
    da_check.sel(variable='total_anthropogenic')['ERF'].sel(scenario=scn).plot(label=scn.values)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', )
# %%
SAVEPATH_DATASET
# %%
|
from vergeml.img import ImageType
from vergeml.operation import OperationPlugin, operation
from vergeml.option import option
from PIL import Image
from vergeml.utils import VergeMLError
@operation('random-crop', topic="image", descr="Crop random regions of an image.")
@option('width', type=int, descr="Width of the rectangle.", validate='>0')
@option('height', type=int, descr="Height of the rectangle.", validate='>0')
class RandomCropOperation(OperationPlugin):
    """Augmentation operation that crops a random width x height region.

    When both x and y are images, the same region is cropped from each so
    that sample and label stay aligned.
    """

    type = ImageType

    def __init__(self, width:int, height:int, apply=None):
        super().__init__(apply)
        # Target crop rectangle dimensions (validated > 0 by @option).
        self.width = width
        self.height = height

    def transform_xy(self, x, y, rng):
        """Crop one random region from whichever of x/y are images."""
        images = [sample for sample in (x, y) if isinstance(sample, ImageType)]
        if not images:
            raise VergeMLError("random_crop needs samples of type image")
        # The crop must fit inside every image, so bound by the smallest.
        min_w = min(img.size[0] for img in images)
        min_h = min(img.size[1] for img in images)
        if min_w < self.width:
            raise VergeMLError("Can't crop sample with width {} to {}.".format(min_w, self.width))
        if min_h < self.height:
            raise VergeMLError("Can't crop sample with height {} to {}.".format(min_h, self.height))
        # Pick the top-left corner uniformly from the valid (inclusive) range.
        left = rng.randint(0, min_w - self.width)
        top = rng.randint(0, min_h - self.height)
        box = (left, top, left + self.width, top + self.height)
        x = x.crop(box) if isinstance(x, ImageType) else x
        y = y.crop(box) if isinstance(y, ImageType) else y
        return x, y
|
"""
Given an array arr of size N, swap the Kth element from beginning
with Kth element from end.

Algorithm: the Kth element from the beginning lives at index k-1 and the
Kth element from the end at index n-k, so a single tuple assignment swaps
them in place in O(1) time and auxiliary space.  (The previous
pop()/insert() implementation shifted elements in O(n) and produced wrong
results whenever k-1 >= n-k, i.e. for the middle element or any k past
the midpoint, because the second pop()/insert() used indices computed
before the first pop shifted the list.)
"""


def swap_kth(arr, k):
    """Swap the kth element from the front with the kth from the back.

    Args:
        arr: List to modify in place.
        k: 1-based position, 1 <= k <= len(arr).

    Returns:
        The same list object, with the two elements exchanged.
    """
    pos1 = k - 1            # kth from the beginning (0-based)
    pos2 = len(arr) - k     # kth from the end (0-based)
    # A plain tuple swap is correct even when pos1 == pos2 (middle element)
    # or pos1 > pos2 (k in the upper half), which broke the pop-based code.
    arr[pos1], arr[pos2] = arr[pos2], arr[pos1]
    return arr


if __name__ == "__main__":
    # Enter the size and value of element from user
    n, k = map(int, input().split(" "))
    A = list(map(int, input().split()))[:n]  # input array of size n
    print(swap_kth(A, k))

"""
Expected Time Complexity: O(1)
Expected Auxiliary Space: O(1)

Testcase 1:
Input: N = 8, K = 3
arr[] = {1, 2, 3, 4, 5, 6, 7, 8}
Output: 1 2 6 4 5 3 7 8
Explanation: Kth element from beginning is 3 and from end is 6.

Testcase 2:
Input: N = 5, K = 2
arr[] = {5, 3, 6, 1, 2}
Output: 5 1 6 3 2
Explanation: Kth element from beginning is 3 and from end is 1.

Constraints: 1 ≤ K ≤ N ≤ 10^5
1 ≤ arr ≤ 10^3
"""
|
"""
PyPI tool
"""
import sys
from subprocess import Popen
import distutils.core # pylint: disable=no-name-in-module, import-error
import os
import importlib.machinery
import requests
# Allow the target index to be overridden via the environment; normalise
# away trailing slashes so later '/'.join() calls don't produce '//'.
PYPI_URL = os.environ.get('PYPI_URL', 'https://pypi.python.org/pypi/')
PYPI_URL = PYPI_URL.rstrip('/')
# pylint: disable=no-member
class PatchSetup():
    """Capture the keyword arguments a project's setup.py passes to setup().

    patcher() swaps distutils.core.setup for this (callable) instance;
    importing setup.py then records its kwargs here, and restore() puts
    the real function back.
    """

    def __init__(self):
        # Record (object, attribute name, original value) triples so
        # restore() can undo exactly what patcher() replaced.
        self._restore = [{'obj': distutils.core, 'name': 'setup',
                          'value': distutils.core.setup}]
        self.kwargs = None

    def __call__(self, **kwargs):
        # Stand-in for setup(): just remember what it was called with.
        self.kwargs = kwargs

    def patcher(self):
        "Mock the setup attributes"
        for entry in self._restore:
            setattr(entry['obj'], entry['name'], self)

    def restore(self):
        "Restore the mocked attributes"
        for entry in self._restore:
            setattr(entry['obj'], entry['name'], entry['value'])
def _get_setup_data():
    "Import setup and extract relevant data."
    # Temporarily replace distutils.core.setup with a recorder, then import
    # ./setup.py so its setup(...) call lands in patch.kwargs.
    patch = PatchSetup()
    patch.patcher()
    # NOTE(review): SourceFileLoader.load_module() is deprecated in favour of
    # exec_module(); kept as-is because the module-registration side effects
    # of the two APIs differ.
    loader = importlib.machinery.SourceFileLoader('setup', 'setup.py')
    setup = loader.load_module()
    # Record where setup.py lives so callers can run commands from its dir.
    patch.kwargs['__file__'] = os.path.abspath(setup.__file__)
    patch.restore()
    return patch.kwargs
def _get_pypi_info(package):
    "Return the package info."
    # Query the index's JSON API; fall back to an empty release map on any
    # non-200 response (e.g. package not yet published).
    endpoint = '/'.join([PYPI_URL, package, 'json'])
    response = requests.get(endpoint)
    if response.status_code == 200:
        return response.json()
    return {'releases': dict()}
def _call_setup(*args, cwd='', script='setup.py', sys_mod=sys):
"Subprocess call"
env = os.environ.copy()
env['PYTHONPATH'] = cwd+':'+ env.get('PYTHONPATH', '')
args = list(args)
script = os.path.abspath(script)
args.insert(0, script)
args.insert(0, sys_mod.executable)
popen = Popen(args, cwd=cwd, env=env,
stderr=sys_mod.stderr, stdout=sys_mod.stdout)
popen.wait()
def _create_pypirc(path='~/.pypirc'):
    """Create a .pypirc file with credentials from the environment.

    Reads PP_USERNAME / PP_PASSWORD from os.environ and writes a minimal
    pypirc pointing at PYPI_URL.  An existing file is never overwritten.

    Args:
        path: Target location; ``~`` is expanded.

    Returns:
        (created, path): ``created`` is False when the file already existed,
        so callers know whether cleanup is their responsibility.

    Raises:
        KeyError: if PP_USERNAME or PP_PASSWORD is not set.
    """
    # setuptools is a bit of a closed garden, so reverting back to using as it
    # would be over the command line.
    path = os.path.expanduser(path)
    if os.path.exists(path):
        return (False, path)
    template = (
        "[distutils]\n"
        "index-servers = pypi\n"
        "[pypi]\n"
        "repository=%s\n"
        "username:%s\n"
        "password:%s\n")
    text = template % (PYPI_URL,
                       os.environ['PP_USERNAME'],
                       os.environ['PP_PASSWORD'])
    # 'w' mode already truncates; the previous explicit truncate() call
    # was redundant and has been removed.
    with open(path, 'w') as file_write:
        file_write.write(text)
    return (True, path)
def _valid_version(data, info):
"Return True if the version can be uploaded."
if data['version'] in info['releases'].keys():
text = "# Package '%s' with version '%s' already exists."
text = text % (data['name'], data['version'])
print(text)
return False
return True
def _clean_up_rc(rc_status):
"Check if we need to remove the pypi rc file."
if rc_status[0]:
os.remove(rc_status[1])
def upload():
    "Build the package and upload to pypi."
    # Import setup.py to learn the package name/version, then consult the
    # index so an existing version is never re-uploaded.
    data = _get_setup_data()
    info = _get_pypi_info(data['name'])
    if _valid_version(data, info):
        # The credentials file must exist before register/upload runs; it
        # is removed afterwards only if this run created it.
        rc_status = _create_pypirc()
        cwd = os.path.dirname(data['__file__'])
        _call_setup('register', cwd=cwd)
        _call_setup('sdist', 'upload', cwd=cwd)
        _clean_up_rc(rc_status)
|
import plotly.graph_objects as go
import sys
import common
import table_single_provers
import figure_createall
def main_helper(prover_dict):
    """Render two grouped bar charts comparing theorem counts per system.

    Figure 0 covers the D/T logics, figure 1 the S4/S5 logics. Each chart
    shows, per logic/domain-setting column: MLeanCoP's total and unique
    theorems, the combined higher-order portfolio ("HOany" = Leo + Satallax)
    total and unique theorems, and Leo's / Satallax's individual counts.

    :param prover_dict: nested mapping prover -> logic -> setting -> dict of
        problem-name lists, as produced by table_single_provers.
    Writes thm_comparison_0.png and thm_comparison_1.png.
    """
    systems = ["D/const", "D/cumul", "D/vary", "T/const", "T/cumul", "T/vary",
               "S4/const", "S4/cumul", "S4/vary", "S5/const", "S5/cumul", "S5/vary"]

    # (logic, setting) keys addressing the 12 chart columns for MLeanCoP.
    mlean_cells = [(logic, setting)
                   for logic in ("Dall", "Tall", "S4all", "S5all")
                   for setting in ("constall", "cumulall", "varyall")]
    # For the HO provers, const/cumul use the "*sem" settings while vary uses
    # "varyall". The S5 const/cumul results live under "S5Usem" whereas
    # S5/vary lives under "S5sem" — this asymmetry reproduces the original
    # hand-written indexing exactly.
    ho_cells = [(logic, setting)
                for logic in ("Dsem", "Tsem", "S4sem")
                for setting in ("constsem", "cumulsem", "varyall")]
    ho_cells += [("S5Usem", "constsem"), ("S5Usem", "cumulsem"),
                 ("S5sem", "varyall")]

    def counts(prover, cells, key):
        "Distinct problem count of `key` for `prover` in each cell."
        return [len(set(prover_dict[prover][logic][setting][key]))
                for logic, setting in cells]

    def union_counts(cells, key):
        "Distinct problems solved by Leo or Satallax (the HO portfolio)."
        return [len(set(prover_dict['leo'][logic][setting][key]
                        + prover_dict['satallax'][logic][setting][key]))
                for logic, setting in cells]

    # These comprehensions replace 96 hand-written indexed assignments; the
    # cell order matches the `systems` labels above.
    data_mleanTotal = counts('mleancop', mlean_cells, "thm_single")
    data_mleanUniqueVsOptho = counts('mleancop', mlean_cells,
                                     "thm_unique_compared_to_optho")
    data_leoTotal = counts('leo', ho_cells, "thm_single")
    data_leoUniqueVsMlean = counts('leo', ho_cells,
                                   "thm_unique_compared_to_mleancop")
    data_satallaxTotal = counts('satallax', ho_cells, "thm_single")
    data_satallaxUniqueVsMlean = counts('satallax', ho_cells,
                                        "thm_unique_compared_to_mleancop")
    data_opthoTotal = union_counts(ho_cells, "thm_single")
    data_opthoUniqueVsMLean = union_counts(ho_cells,
                                           "thm_unique_compared_to_mleancop")

    WIDTH = 0.25
    # i == 0: first six systems (D/T), legend hidden.
    # i == 1: last six systems (S4/S5), legend shown.
    for i in range(2):
        start, end, showLegend = (0, 6, False) if i == 0 else (6, 12, True)

        def bar(name, data, width, offset, color):
            "One bar trace over the current slice of systems."
            return go.Bar(name=name, x=systems[start:end], y=data[start:end],
                          width=width, offset=offset, marker_color=color)

        # Trace order matters: totals are listed before the narrower
        # "unique" overlays, and the legend follows this order.
        fig = go.Figure([
            bar("MLeanCoP", data_mleanTotal,
                WIDTH, -1.5 * WIDTH, figure_createall.COL_MLEANCOP_PRIMARY),
            bar("MLeanCoP U vs. HOany", data_mleanUniqueVsOptho,
                WIDTH, -1.5 * WIDTH, figure_createall.COL_MLEANCOP_SECONDARY),
            bar("HOany", data_opthoTotal,
                2 * WIDTH, -0.5 * WIDTH, figure_createall.COL_OPTHO_PRIMARY),
            bar("Leo", data_leoTotal,
                WIDTH, -0.5 * WIDTH, figure_createall.COL_LEO_PRIMARY),
            bar("Satallax", data_satallaxTotal,
                WIDTH, 0.5 * WIDTH, figure_createall.COL_SATALLAX_PRIMARY),
            bar("HOany U vs. MLeanCoP", data_opthoUniqueVsMLean,
                2 * WIDTH, -0.5 * WIDTH, figure_createall.COL_OPTHO_SECONDARY),
            bar("Leo U vs. MLeanCoP", data_leoUniqueVsMlean,
                WIDTH, -0.5 * WIDTH, figure_createall.COL_LEO_SECONDARY),
            bar("Satallax U vs. MLeanCoP", data_satallaxUniqueVsMlean,
                WIDTH, 0.5 * WIDTH, figure_createall.COL_SATALLAX_SECONDARY),
        ])
        fig.update_layout(
            showlegend=showLegend,
            legend_orientation="h",
            xaxis=dict(
                tickfont_size=figure_createall.SIZE_FONT,  # tick label size
                tickfont_color="black"
            ),
            yaxis=dict(
                title='Number of theorems',
                titlefont_size=figure_createall.SIZE_FONT,
                tickfont_size=figure_createall.SIZE_FONT,
                titlefont_color="black",
                tickfont_color="black"
            ),
            legend=dict(
                font_color="black",
                font_size=figure_createall.SIZE_FONT
            ),
            barmode='group',
            bargroupgap=0  # gap between bars of the same location coordinate
        )
        # NOTE(review): output path is hardcoded to a personal home directory.
        path = "/home/tg/master_thesis/thesis/plots/thm_comparison_" + str(i) + ".png"
        fig.write_image(path, width=1600, height=900)
def main(csv_file_list):
    """Aggregate the CSV result files and render the comparison figures."""
    problems = common.accumulate_csv(csv_file_list)
    provers = table_single_provers.getTableData(problems)
    table_single_provers.createOptHo(provers)
    main_helper(provers)
# Script entry point: the CSV result files are passed as CLI arguments.
if __name__ == "__main__":
    main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from tandlr.payments.models import TeacherPaymentInformation
class TeacherPaymentInformationV2Serializer(serializers.ModelSerializer):
    """Serializer (API v2) for a teacher's payment information.

    Exposes the tutor's display name alongside the raw bank fields.
    """

    # Computed field, resolved by get_tutor() below.
    tutor = serializers.SerializerMethodField()

    class Meta:
        model = TeacherPaymentInformation
        fields = (
            'tutor',
            'bank',
            'account_number',
            'social_security_number',
        )

    def get_tutor(self, instance):
        """Return the related teacher's full name for the 'tutor' field."""
        return instance.teacher.get_full_name()
|
'''
Module utils
'''
import re
def nyaa_categories(b):
    """Translate a nyaa.si category href (e.g. '/?c=1_2') into a readable
    'Category - Subcategory' label.

    :param b: href string of the form '/?c=<cat>_<subcat>'.
    :returns: formatted label, or None for unknown/malformed codes.
        (The original code never assigned ``category_name`` on failure and
        raised UnboundLocalError at the return; a code without '_' raised
        IndexError outside the try block.)
    """
    categories = {
        "1": {
            "name": "Anime",
            "subcats": {
                "1": "Anime Music Video",
                "2": "English-translated",
                "3": "Non-English-translated",
                "4": "Raw"
            }
        },
        "2": {
            "name": "Audio",
            "subcats": {
                "1": "Lossless",
                "2": "Lossy"
            }
        },
        "3": {
            "name": "Literature",
            "subcats": {
                "1": "English-translated",
                "2": "Non-English-translated",
                "3": "Raw"
            }
        },
        "4": {
            "name": "Live Action",
            "subcats": {
                "1": "English-translated",
                "2": "Idol/Promotional Video",
                "3": "Non-English-translated",
                "4": "Raw"
            }
        },
        "5": {
            "name": "Pictures",
            "subcats": {
                "1": "Graphics",
                "2": "Photos"
            }
        },
        "6": {
            "name": "Software",
            "subcats": {
                "1": "Applications",
                "2": "Games"
            }
        }
    }
    cats = b.replace('/?c=', '').split('_')
    try:
        cat, subcat = cats[0], cats[1]
        return "{} - {}".format(
            categories[cat]['name'], categories[cat]['subcats'][subcat])
    except (KeyError, IndexError):
        # Unknown category/subcategory code or missing '_' separator.
        return None
def parse_nyaa(table_rows, limit):
    """Parse nyaa.si search-result table rows into a list of torrent dicts.

    :param table_rows: iterable of BeautifulSoup <tr> elements.
    :param limit: maximum number of rows to parse; 0 means all rows.
    :returns: list of dicts; rows that produce fewer cells than expected
        are silently skipped (best-effort parsing).
    """
    if limit == 0:
        limit = len(table_rows)
    torrents = []
    for row in table_rows[:limit]:
        # 'block' collects hrefs and texts positionally; the dict built
        # below depends on this exact append order.
        block = []
        for td in row.find_all('td'):
            if td.find_all('a'):
                for link in td.find_all('a'):
                    # Skip the '#comments' anchor so it doesn't shift the
                    # positional layout of 'block'.
                    if link.get('href')[-9:] != '#comments':
                        block.append(link.get('href'))
                        if link.text.rstrip():
                            block.append(link.text)
            if td.text.rstrip():
                block.append(td.text.rstrip())
        # The row's CSS class encodes the torrent flavor on nyaa.si.
        if row.has_attr('class'):
            if row['class'][0] == 'danger':
                block.append("remake")
            elif row['class'][0] == 'success':
                block.append("trusted")
            else:
                block.append("default")
        try:
            torrent = {
                'id': block[1].replace("/view/", ""),
                'category': nyaa_categories(block[0]),
                'url': "http://nyaa.si{}".format(block[1]),
                'name': block[2],
                'download_url': "http://nyaa.si{}".format(block[4]),
                'magnet': block[5],
                'size': block[6],
                'date': block[7],
                'seeders': block[8],
                'leechers': block[9],
                'completed_downloads': block[10],
                'type': block[11],
            }
            torrents.append(torrent)
        except IndexError as ie:
            # Malformed/short row: skip it rather than abort the whole page.
            pass
    return torrents
def parse_single(content):
    """Parse a single nyaa.si torrent view page into a dict.

    :param content: sequence of BeautifulSoup panel elements — content[0]
        is the info panel, content[1] the description panel, content[2]
        the file-list panel (inferred from the lookups below).
    :returns: dict with title, category, uploader, size, dates, peers,
        hash, description and file list.
    """
    torrent = {}
    data = []
    torrent_files = []
    # The info panel lays its values out in 'col-md-5' divs; 'data' collects
    # them positionally. The index meanings used below (0=category, 1=date,
    # 2=uploader, ...) are presumed from the assignments — verify against
    # the live page layout.
    for row in content[0].find_all('div', {'class': 'row'}):
        for div in row.find_all('div', {'class': 'col-md-5'}):
            data.append(div.text.replace("\n", ""))
    # NOTE(review): {'class', 'torrent-file-list'} is a set, not a dict —
    # likely intended as {'class': 'torrent-file-list'}; confirm that bs4
    # matches the expected element here.
    files = content[2].find('div',
                            {'class', 'torrent-file-list'}).find_all('li')
    for file in files:
        torrent_files.append(file.text)
    torrent['title'] = re.sub('\n|\r|\t', '', content[0].find('h3', {
        "class": "panel-title"}).text.replace("\n", ""))
    torrent['category'] = data[0]
    torrent['uploader'] = data[2]
    torrent['uploader_profile'] = "https://nyaa.si/user/{}".format(data[2])
    torrent['website'] = re.sub('\t', '', data[4])
    torrent['size'] = data[6]
    torrent['date'] = data[1]
    torrent['seeders'] = data[3]
    torrent['leechers'] = data[5]
    torrent['completed'] = data[7]
    torrent['hash'] = data[8]
    torrent['description'] = re.sub('\t', '', content[1].find('div', {
        'id': 'torrent-description'}).text)
    torrent['files'] = torrent_files
    return torrent
def parse_sukebei(table_rows, limit):
    """Parse sukebei.nyaa.si search-result table rows into torrent dicts.

    :param table_rows: iterable of BeautifulSoup <tr> elements.
    :param limit: maximum number of rows to parse; 0 means all rows.
    :returns: list of dicts; malformed/short rows are skipped.
    """
    if limit == 0:
        limit = len(table_rows)
    torrents = []
    for row in table_rows[:limit]:
        # 'block' collects hrefs/texts positionally; the dict below relies
        # on this exact append order.
        block = []
        for td in row.find_all('td'):
            for link in td.find_all('a'):
                if link.get('href')[-9:] != '#comments':
                    block.append(link.get('href'))
                    block.append(link.text.rstrip())
            if td.text.rstrip():
                block.append(td.text.rstrip())
        try:
            torrent = {
                'id': block[1].replace("/view/", ""),
                'category': sukebei_categories(block[0]),
                'url': "http://sukebei.nyaa.si{}".format(block[1]),
                'name': block[2],
                'download_url': "http://sukebei.nyaa.si{}".format(
                    block[4]),
                'magnet': block[5],
                'size': block[6],
                'date': block[7],
                'seeders': block[8],
                'leechers': block[9],
                'completed_downloads': block[10],
            }
            # Append inside the try block: previously the append sat after
            # the except, so a short row re-appended the previous torrent
            # (or raised NameError on the first row). This now matches
            # parse_nyaa's behavior of skipping malformed rows.
            torrents.append(torrent)
        except IndexError:
            pass
    return torrents
def sukebei_categories(b):
    """Translate a sukebei.nyaa.si category href (e.g. '/?c=1_2') into a
    readable 'Category - Subcategory' label.

    :param b: href string of the form '/?c=<cat>_<subcat>'.
    :returns: formatted label, or None for unknown/malformed codes.
        (The original code never assigned ``category_name`` on failure and
        raised UnboundLocalError at the return.)
    """
    categories = {
        "1": {
            "name": "Art",
            "subcats": {
                "1": "Anime",
                "2": "Doujinshi",
                "3": "Games",
                "4": "Manga",
                "5": "Pictures",
            }
        },
        "2": {
            "name": "Real Life",
            "subcats": {
                "1": "Photobooks & Pictures",
                "2": "Videos"
            }
        }
    }
    cats = b.replace('/?c=', '').split('_')
    try:
        cat, subcat = cats[0], cats[1]
        return "{} - {}".format(
            categories[cat]['name'], categories[cat]['subcats'][subcat])
    except (KeyError, IndexError):
        # Unknown category/subcategory code or missing '_' separator.
        return None
# Pantsu Utils
def query_builder(q, params):
    """Build a nyaa.pantsu.cat search query string.

    :param q: free-text search term; spaces become '+'.
    :param params: mapping of extra filters; unknown keys are ignored.
        'category' expects a 2-sequence and becomes 'c=<cat>_<sub>',
        'status' becomes 's=<value>', 'lang' expects an iterable and is
        repeated once per language; all other known keys pass through as
        '<key>=<value>'.
    :returns: the assembled query string, starting with '?q='.
    """
    allowed = ["category", "page", "limit", "userID", "fromID",
               "status", "maxage", "toDate", "fromDate",
               "dateType", "minSize", "maxSize", "sizeType",
               "sort", "order", "lang"]
    pieces = ["?q={}".format(q.replace(" ", "+"))]
    for name, value in params.items():
        if name not in allowed:
            continue
        if name == "category":
            pieces.append("&c={}_{}".format(value[0], value[1]))
        elif name == "status":
            pieces.append("&s={}".format(value))
        elif name == "lang":
            pieces.extend("&lang={}".format(lang) for lang in value)
        else:
            pieces.append("&{}={}".format(name, value))
    return "".join(pieces)
|
#!/usr/bin/env python
import glob
import subprocess
import textwrap
import re
import sys
import os
import yaml
import textwrap
from pprint import pprint
# Maps each paper's content.tex path to the exit status of its build.
s = {}


def compile_papers():
    """Run `make clean; make` in every hid-sp* project-paper directory,
    recording each build's exit status in the module-level dict `s`."""
    banner = 79 * "%"
    for paper in sorted(glob.glob('../hid-sp*/project-paper/content.tex')):
        print(banner)
        print("% BEGIN", paper)
        print(banner)
        directory = os.path.dirname(paper)
        # os.system returns the shell's exit status for the whole pipeline.
        s[paper] = os.system("cd {d}; make clean; make".format(d=directory))
        print(banner)
        print("% END", paper)
        print(banner)


compile_papers()
pprint(s)
|
from . import actors
from .actors import *
|
import FWCore.ParameterSet.Config as cms
# HLT producer of L1 track-matched electrons for the HGC (endcap) region:
# matches L1 EGamma objects to L1 tracks using an elliptical delta-eta /
# delta-phi cut (TrackEGammaMatchType = 'EllipticalCut') and computes a
# relative track-based isolation. Parameter values are configuration data —
# presumably tuned elsewhere; confirm against the producer's documentation.
hltL1TkElectronsEllipticMatchHGC = cms.EDProducer("L1TkElectronTrackProducer",
    DRmax = cms.double(0.2),
    DRmin = cms.double(0.03),
    DeltaZ = cms.double(0.6),
    ETmin = cms.double(-1.0),  # negative value: no ET threshold applied
    IsoCut = cms.double(-0.1),  # negative value: isolation cut disabled
    L1EGammaInputTag = cms.InputTag("l1EGammaEEProducer","L1EGammaCollectionBXVWithCuts"),
    L1TrackInputTag = cms.InputTag("TTTracksFromTrackletEmulation","Level1TTTracks"),
    PTMINTRA = cms.double(2.0),
    RelativeIsolation = cms.bool(True),
    TrackChi2 = cms.double(10000000000.0),  # effectively no chi2 cut
    TrackEGammaDeltaEta = cms.vdouble(0.01, 0.01, 10000000000.0),
    TrackEGammaDeltaPhi = cms.vdouble(0.07, 0.0, 0.0),
    TrackEGammaDeltaR = cms.vdouble(0.08, 0.0, 0.0),
    TrackEGammaMatchType = cms.string('EllipticalCut'),
    TrackMinPt = cms.double(10.0),
    label = cms.string('EG'),
    maxChi2IsoTracks = cms.double(100),
    minNStubsIsoTracks = cms.int32(4),
    useClusterET = cms.bool(False),
    useTwoStubsPT = cms.bool(False)
)
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from typing import Union, Iterable, Tuple
from scipy.spatial.distance import cdist
def get_points_and_targets(data: Union[str, Iterable[np.ndarray]]) -> Tuple[np.ndarray, np.ndarray]:
    """
    Extract points and targets from either a data file or a pair of arrays.
    :param data:
        Either str: path to the file containing the data in the format Nx2, col 0 is the data, col 1 the targets.
        Or Iterable containing 2 numpy ndarrays: points and targets
    :returns: points and targets
    """
    if not isinstance(data, str):
        if len(data) != 2:
            raise ValueError(f"Parameter data must be either a string or an Iterable of 2 numpy ndarrays, got {len(data)} elements")
        return data[0], data[1]
    # Load the whitespace-separated two-column file.
    frame = pd.read_csv(data, sep=" ", header=None, dtype=np.float64)
    targets = frame.iloc[:, 1]
    # Column vector shape is required by np.linalg.lstsq.
    points = np.expand_dims(frame.iloc[:, 0], 1)
    return points, targets
def rbf(x, x_l, eps):
    """
    Evaluate Gaussian radial basis functions.
    :param x: points, shape (N, d)
    :param x_l: centers, shape (L, d)
    :param eps: bandwidth (radius) of the Gaussians
    :return: (N, L) matrix of basis-function values exp(-||x - x_l||^2 / eps^2)
    """
    sq_dist = cdist(x, x_l) ** 2
    return np.exp(-sq_dist / eps ** 2)
def compute_bases(points: np.ndarray, eps: float, n_bases: int, centers: np.ndarray = None):
    """
    Evaluate radial basis functions on the given points.
    :param points: the points on which to evaluate the basis functions
    :param eps: epsilon (bandwidth) parameter of the basis functions
    :param n_bases: number of basis functions when centers are sampled
    :param centers: optional pre-chosen center points; when None, n_bases
        distinct rows of 'points' are drawn at random
    :returns: (phi, centers) — the basis matrix and the centers used
    """
    if centers is None:
        # Sample n_bases distinct row indices and use those points as centers.
        chosen = np.random.choice(range(points.shape[0]), replace=False, size=n_bases)
        centers = points[chosen]
    return rbf(points, centers, eps), centers
def approx_lin_func(data: Union[str, Iterable[np.ndarray]] = "../data/linear_function_data.txt") -> Tuple[np.ndarray, np.ndarray, int, np.ndarray]:
    """
    Approximate a linear function through least squares
    :param data:
        Either str: path to the file containing the data in the format Nx2, col 0 is the data, col 1 the targets.
        Or Iterable containing 2 numpy ndarrays: points and targets
    :returns: tuple (least squares solution, residuals, rank of coefficients matrix, singular values of coefficient matrix)
    """
    points, targets = get_points_and_targets(data)
    # np.linalg.lstsq already returns the (sol, residuals, rank, singvals) tuple.
    return np.linalg.lstsq(a=points, b=targets, rcond=1e-5)
def approx_nonlin_func(data: Union[str, Iterable[np.ndarray]] = "../data/nonlinear_function_data.txt", n_bases: int = 5, eps: float = 0.1,
                       centers: np.ndarray = None):
    """
    Approximate a non-linear function with least squares over radial bases.
    :param data:
        Either str: path to the file containing the data in the format Nx2, col 0 is the data, col 1 the targets.
        Or Iterable containing 2 numpy ndarrays: points and targets
    :param n_bases: the number of basis functions to approximate the nonlinear function
    :param eps: bandwidth of the basis functions
    :param centers: list of center points to compute the basis functions
    :returns: tuple (least squares solution, residuals, rank, singular values,
        centers, eps, basis matrix)
    """
    points, targets = get_points_and_targets(data)
    # Evaluating the radial bases turns the nonlinear fit into a linear
    # least-squares problem in the basis coefficients.
    bases, centers = compute_bases(points=points, centers=centers, eps=eps, n_bases=n_bases)
    sol, residuals, rank, singvals = np.linalg.lstsq(a=bases, b=targets, rcond=1e-5)
    return sol, residuals, rank, singvals, centers, eps, bases
def plot_func_over_data(lstsqr_sol: np.ndarray, data: Union[str, Iterable[np.ndarray]], linear: bool, centers=None, eps=None, **kwargs):
    """
    Plot the approximated function over the actual data.
    :param lstsqr_sol: solution of the least squares problem
    :param data:
        Either str: path to the file containing the data in the format Nx2, col 0 is the data, col 1 the targets.
        Or Iterable containing 2 numpy ndarrays: points and targets
    :param linear: if True, plots the linear approximated function, otherwise the non-linear one
    :param centers: (optional) center points for the basis functions when linear=False
    :param eps: (optional) epsilon parameter for the basis functions when linear=False
    :param kwargs: (optional) extra key/value pairs appended to the plot title
    """
    title = "Approximated function plotted over the actual data"
    points, targets = get_points_and_targets(data)
    # Evaluate the fitted function on a dense grid for a smooth curve.
    xs = np.linspace(start=-5, stop=5, num=100)
    if linear:
        ys = lstsqr_sol * xs
    else:
        bases, centers = compute_bases(points=np.expand_dims(xs, 1), centers=centers, eps=eps, n_bases=len(centers))
        # Elementwise product + sum = weighted combination of the bases.
        ys = np.sum(lstsqr_sol * bases, axis=1)
        title += f"\nn_bases: {len(centers)}, eps: {eps}"
    for key, val in kwargs.items():
        title += f", {key}: {val}"
    plt.figure(figsize=(5, 5))
    plt.scatter(points, targets, label="Data")
    plt.plot(xs, ys, color='r', label="Approximated function")
    plt.legend()
    plt.title(title)
    plt.tight_layout()
    plt.show()
# Functions for solve_ivp
def rbf_approx(t, y, centers, eps, C):
    """
    Vector field of a single point under the fitted RBF model (for solve_ivp).
    :param t: time (required by the solve_ivp callback signature; unused)
    :param y: single point
    :param centers: all RBF centers
    :param eps: radius of the Gaussians
    :param C: coefficient matrix found with least squares
    :return: derivative for point y
    """
    point = y.reshape(1, y.shape[-1])
    basis = np.exp(-cdist(point, centers) ** 2 / eps ** 2)
    return basis @ C
def linear_approx(t, y, A):
    """
    Vector field of a single point under the fitted linear model (for solve_ivp).
    :param t: time (required by the solve_ivp callback signature; unused)
    :param y: single point
    :param A: coefficient matrix found with least squares
    :return: derivative for point y
    """
    derivative = A @ y
    return derivative
|
import pygame
import json
import random
from tool.init import *
from . colliderbox import *
class Mario(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = mario_small_right_img[0][6]
self.rect = self.image.get_rect()
self.rect.x = 0
self.rect.y = 300
self.shape = SMALL
self.status = STAND
self.direction = NODIRECTION
self.animation_num = 0
self.speed_x = 0
self.speed_y = 0
self.jump_time = 0
self.jump_num = 0
self.is_collider = True
self.is_new_life = False
self.flash_time = 0
self.score = 0
self.coin_num = 0
self.life = 3
self.level_num = 1
self.init_image()
self.set_shape(SMALL)
self.globalData = GlobalData()
def update(self):
if abs(self.speed_y + GRAVITY_Y) < MAX_SPEED_Y:
self.speed_y += GRAVITY_Y
if self.status == DEATH:
self.death()
elif self.status == STAND:
self.stand()
elif self.status == WALK:
self.walk()
elif self.status == JUMP:
self.jump()
if self.is_new_life:
self.flash()
def flash(self):
if self.flash_time > 60:
self.is_new_life = False
self.flash_time = 0
self.stand_left.set_alpha(255)
self.stand_right.set_alpha(255)
for img in self.walk_left: img.set_alpha(255)
for img in self.walk_right: img.set_alpha(255)
self.jump_left.set_alpha(255)
self.jump_right.set_alpha(255)
return
if self.flash_time % 3 == 0:
self.image.set_alpha(0)
else:
self.image.set_alpha(255)
self.flash_time += 1
def death(self):
if self.rect.y >= 560:
self.kill()
self.rect.x = 0
self.rect.y = 300
self.status = STAND
self.is_collider = True
self.image = self.small_dead_img
def jump(self):
key = pygame.key.get_pressed()
if key[pygame.K_LEFT]:
self.speed_x = -3
self.image = self.jump_left
elif key[pygame.K_RIGHT]:
self.speed_x = 3
self.image = self.jump_right
# elif key[pygame.K_a]:
# if self.jump_time > 3 and self.jump_num == 1:
# self.speed_y = BIG_JUMP_SPEED_Y
# self.jump_num = 0
self.jump_time += 1
def walk(self):
# 循环播放动画
self.animation_num = (self.animation_num + 1) % (len(self.walk_left))
key = pygame.key.get_pressed()
if key[pygame.K_LEFT]:
if abs(self.speed_x) < MAX_SPEED_X:
self.speed_x -= GRAVITY_X
self.image = self.walk_left[self.animation_num]
elif key[pygame.K_RIGHT]:
if abs(self.speed_x) < MAX_SPEED_X:
self.speed_x += GRAVITY_X
self.image = self.walk_right[self.animation_num]
else:
self.speed_x = 0
self.status = STAND
def stand(self):
if self.direction == LEFT:
self.image = self.stand_left
else:
self.image = self.stand_right
def set_status(self, status):
if self.status == DEATH or self.status == JUMP:
return
# 要变成死亡状态
if status == DEATH:
if self.is_new_life:
return
if random.random() < self.globalData.game_probability:
self.is_new_life = True
return
if self.shape == BIG:
self.set_shape(SMALL)
self.is_new_life = True
elif self.shape == SMALL:
self.life -= 1
self.status = DEATH
self.is_collider = False
self.speed_y = BIG_JUMP_SPEED_Y
self.speed_x = 0
pygame.mixer.Sound.play(sound['death'])
self.globalData.scene = DEATH_SCENE
elif status == JUMP:
key = pygame.key.get_pressed()
if key[pygame.K_a]:
self.speed_y = BIG_JUMP_SPEED_Y
else:
self.speed_y = SMALL_JUMP_SPEED_Y
pygame.mixer.Sound.play(sound['small_jump'])
self.status = JUMP
self.jump_time = 0
self.jump_num = 1
elif status == STAND:
self.speed_x = 0
self.speed_y = 0
self.status = STAND
else:
self.status = status
def set_direction(self, direction):
if self.status != JUMP:
self.direction = direction
def save_data(self):
data = {}
data['level'] = self.level_num
data['score'] = self.score
data['coin_num'] = self.coin_num
data['x'] = self.rect.x
data['y'] = self.rect.y
with open('./savedata/default.json', 'w') as fp:
fp.write(json.dumps(data, indent=4))
print('save data')
    def set_shape(self, shape):
        """Switch the player between the SMALL and BIG sprite sets.

        Swaps all cached sprites, resizes the collision rect to match the new
        standing sprite, and shifts rect.y by 40 px so the feet stay planted
        (assumes the big sprite is 40 px taller — TODO confirm against assets).
        """
        self.shape = shape
        if self.shape == SMALL:
            self.stand_left = self.small_stand_left_img
            self.stand_right = self.small_stand_right_img
            self.walk_left = self.small_walk_left_img
            self.walk_right = self.small_walk_right_img
            self.jump_left = self.small_jump_left_img
            self.jump_right = self.small_jump_right_img
            self.rect.width = self.stand_left.get_rect().width
            self.rect.height = self.stand_left.get_rect().height
            # Move down so the smaller sprite stays on the ground.
            self.rect.y += 40
        elif self.shape == BIG:
            self.stand_left = self.big_stand_left_img
            self.stand_right = self.big_stand_right_img
            self.walk_left = self.big_walk_left_img
            self.walk_right = self.big_walk_right_img
            self.jump_left = self.big_jump_left_img
            self.jump_right = self.big_jump_right_img
            self.rect.width = self.stand_left.get_rect().width
            self.rect.height = self.stand_left.get_rect().height
            # Move up to make room for the taller sprite.
            self.rect.y -= 40
            pygame.mixer.Sound.play(sound['powerup'])
def init_image(self):
self.big_stand_right_img = mario_big_right_img_img[0][6]
self.big_stand_left_img = mario_big_left[0][6]
self.big_walk_right_img = [mario_big_right_img_img[0][0], mario_big_right_img_img[0][1],
mario_big_right_img_img[0][2]]
self.big_walk_left_img = [mario_big_left[0][0], mario_big_left[0][1],
mario_big_left[0][2]]
self.big_jump_right_img = mario_big_right_img_img[0][4]
self.big_jump_left_img = mario_big_left[0][4]
self.small_dead_img = mario_small_right_img[0][5]
self.small_stand_right_img = mario_small_right_img[0][6]
self.small_stand_left_img = mario_small_left_img[0][6]
self.small_walk_right_img = [mario_small_right_img[0][0], mario_small_right_img[0][1],
mario_small_right_img[0][2]]
self.small_walk_left_img = [mario_small_left_img[0][0], mario_small_left_img[0][1],
mario_small_left_img[0][2]]
self.small_jump_right_img = mario_small_right_img[0][4]
self.small_jump_left_img = mario_small_left_img[0][4]
self.small_dead_img = mario_small_right_img[0][5]
|
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the raw LPAR long term metrics."""
import json
from pypowervm.tests.test_utils import pvmhttp
from pypowervm.wrappers.pcm import lpar as pcm_lpar
import testtools
LPAR_DATA = 'lpar_pcm_data.txt'
class TestLparLTM(testtools.TestCase):
    """Tests parsing of the raw LPAR long-term metrics JSON payload."""

    def setUp(self):
        super(TestLparLTM, self).setUp()
        # Raw JSON payload loaded from the canned test-data file.
        self.raw_json = pvmhttp.PVMFile(LPAR_DATA).body

    def test_parse(self):
        info = pcm_lpar.LparInfo(self.raw_json)
        self.assertIsNotNone(info)
        # Validate the Lpar metrics.
        # There are metrics for six LPARs in the sample payload.
        self.assertEqual(6, len(info.lpars_util))
        # Get the first Lpar and assert its metrics
        lpar = info.lpars_util[0]
        self.assertEqual("Ubuntu1410", lpar.name)
        self.assertIsNotNone(lpar.memory)
        self.assertEqual(80, lpar.memory.pct_real_mem_avbl)
        self.assertEqual(1024, lpar.memory.total_pg_count)
        self.assertEqual(512, lpar.memory.free_pg_count)
        self.assertEqual(64, lpar.memory.active_pg_count)
        self.assertEqual(1048576, lpar.memory.real_mem_size_bytes)
        self.assertEqual(61, lpar.memory.pct_real_mem_free)
        self.assertEqual(25, lpar.memory.vm_pg_out_rate)
        # Get 3rd(random) VM and assert its metrics
        lpar = info.lpars_util[2]
        self.assertEqual("test_vm3", lpar.name)
        self.assertIsNotNone(lpar.memory)
        self.assertEqual(82, lpar.memory.pct_real_mem_avbl)
        self.assertEqual(4096, lpar.memory.total_pg_count)
        self.assertEqual(2048, lpar.memory.free_pg_count)
        self.assertEqual(256, lpar.memory.active_pg_count)
        self.assertEqual(1048576, lpar.memory.real_mem_size_bytes)
        self.assertEqual(60, lpar.memory.pct_real_mem_free)
        self.assertEqual(0, lpar.memory.vm_pg_out_rate)
        # Assert that we have entries in JSON for VMs which were in error
        metric_json = json.loads(self.raw_json)
        self.assertEqual("3B0237F9-26F1-41C7-BE57-A08C9452AD9D",
                         metric_json['lparUtil'][4]['name'])
        self.assertEqual("vm_inactive_rmc",
                         metric_json['lparUtil'][5]['name'])
        # Assert that powered off VM has 100 percent free memory.
        lpar = info.lpars_util[4]
        self.assertEqual("3B0237F9-26F1-41C7-BE57-A08C9452AD9D", lpar.name)
        self.assertIsNotNone(lpar.memory)
        self.assertIsNone(lpar.memory.pct_real_mem_avbl)
        self.assertIsNone(lpar.memory.total_pg_count)
        self.assertIsNone(lpar.memory.free_pg_count)
        self.assertIsNone(lpar.memory.active_pg_count)
        self.assertIsNone(lpar.memory.real_mem_size_bytes)
        self.assertEqual(100, lpar.memory.pct_real_mem_free)
        # Assert that LPAR with inactive RMC has no free memory.
        lpar = info.lpars_util[5]
        self.assertEqual("vm_inactive_rmc", lpar.name)
        self.assertIsNotNone(lpar.memory)
        self.assertIsNone(lpar.memory.pct_real_mem_avbl)
        self.assertIsNone(lpar.memory.total_pg_count)
        self.assertIsNone(lpar.memory.free_pg_count)
        self.assertIsNone(lpar.memory.active_pg_count)
        self.assertIsNone(lpar.memory.real_mem_size_bytes)
        self.assertEqual(0, lpar.memory.pct_real_mem_free)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 14:27:10 2021
@author: Sergio G. Lopez from the Bioimaging Facility of the John Innes Centre.
"""
# Imports the necessary libraries.
from ncempy.io import dm
import numpy as np
import matplotlib.pyplot as plt
from skimage import filters, morphology, segmentation, measure, color
from scipy import ndimage as ndi
import pandas as pd
import glob
import tkinter as tk
from tkinter import filedialog
import os
import mrcfile
from datetime import datetime
def open_mrc(filepath):
    """Open an MRC file.

    Returns ``(filename, img, pixel_size)`` where *filename* is the base name
    without extension, *img* is the image array, and *pixel_size* is in nm.
    """
    mrc = mrcfile.open(filepath)  # Opens the mrc file.
    # os.path handles both '/' and '\\' separators and dots inside directory
    # names, unlike the previous str.split('.')-based parsing which broke on
    # paths such as '/data/v1.2/img.mrc'.
    filename = os.path.splitext(os.path.basename(filepath))[0]
    img = mrc.data  # Gets the image.
    pixel_size = mrc.voxel_size['x'] * 0.1  # Angstrom -> nm conversion.
    return filename, img, pixel_size
def open_DM4(filepath):
    """Open a DM4 image; return (filename, img, pixel_size in nm)."""
    record = dm.dmReader(filepath)  # Parses the dm4 file into a dictionary.
    name = record['filename'].split('.')[0]  # Drop the '.dm4' extension.
    image = record['data']  # float32 Numpy array.
    nm_per_pixel = record['pixelSize'][0]
    return name, image, nm_per_pixel
def img_prep(img,
             block_size=301,
             erosions=1,
             dilations=5,
             small_object_removal=2000,
             small_holes_removal=500):
    """Adaptive-threshold *img* and clean up the resulting binary mask.

    Pipeline: local (adaptive) threshold -> erosions -> small-object removal
    -> dilations -> small-hole removal.  Returns the binary mask.
    """
    local_thresh = filters.threshold_local(img, block_size, offset=0)
    mask = img > local_thresh  # Binarize against the local threshold map.
    for _ in range(erosions):
        mask = morphology.binary_erosion(mask)
    mask = morphology.remove_small_objects(mask, small_object_removal)
    for _ in range(dilations):
        mask = morphology.binary_dilation(mask)
    return morphology.remove_small_holes(mask, small_holes_removal)
def watershedding(binary_img, seed_threshold=0.2):
    """Split touching objects with a distance-transform watershed.

    Seeds are the pixels whose distance-transform value exceeds
    ``seed_threshold`` times the maximum.  Labels touching the image border
    are removed before returning.
    """
    distance = ndi.distance_transform_edt(binary_img)
    seeds = distance > (np.max(distance) * seed_threshold)
    markers, _ = ndi.label(seeds)
    labels = segmentation.watershed(-distance, markers, mask=binary_img)
    return segmentation.clear_border(labels)
def plotfig(img, labels, region_properties, filename, output_dir):
    """Plot labelled objects next to the original image and save as a PNG."""
    palette = ['red', 'violet', 'orange', 'green', 'blue',
               'magenta', 'purple', 'crimson', 'lime', 'maroon',
               'mediumvioletred', 'goldenrod', 'darkgreen',
               'fuchsia', 'cornflowerblue', 'navy', 'hotpink',
               'grey', 'chocolate', 'peru']
    fig, (ax_labels, ax_orig) = plt.subplots(1, 2, figsize=(15, 8))
    ax_labels.imshow(color.label2rgb(labels, bg_label=0, colors=palette))
    ax_labels.set_title('Selected objects', fontsize=16)
    # Annotate each object with its label number at its centroid.
    for prop in region_properties:
        ax_labels.text(prop.centroid[1], prop.centroid[0], prop.label, color='white')
    ax_orig.imshow(img, cmap='Greys_r')
    ax_orig.contour(labels, colors='r', linewidths=0.8)
    ax_orig.set_title('Original', fontsize=16)
    plt.tight_layout()
    plot_path = os.path.join(output_dir, os.path.basename(filename) + '.png')
    plt.savefig(plot_path, dpi=600)
    plt.close()
def filter_labels_by_eccentricity(labels, eccentricity):
    """Keep only labels whose eccentricity exceeds *eccentricity*.

    Returns a labelled image with the remaining objects.
    """
    kept = np.zeros_like(labels)
    for region in measure.regionprops(labels):
        if region.eccentricity > eccentricity:
            kept[labels == region.label] = region.label
    return kept
def filter_labels_by_minor_axis_length(labels, length_in_nm, pixel_size):
    """Keep only labels whose minor axis length (in nm) is below *length_in_nm*.

    Returns a labelled image with the remaining objects.
    """
    kept = np.zeros_like(labels)
    for region in measure.regionprops(labels):
        if region.minor_axis_length * pixel_size < length_in_nm:
            kept[labels == region.label] = region.label
    return kept
def create_length_prop(properties, pixel_size):
    """Attach two derived attributes to every region in *properties*.

    ``length``         = sqrt((feret_diameter_max * pixel_size)**2 - 18**2)
                         (Pythagoras, assuming an 18 nm rod width)
    ``area_to_length`` = area_in_nm2 / length  (expected to be around 18)

    Returns the same list with the attributes set in place.
    """
    for region in properties:
        max_feret_nm = region.feret_diameter_max * pixel_size
        region.length = np.sqrt(max_feret_nm ** 2 - 18 ** 2)
        region.area_to_length = (region.area * pixel_size ** 2) / region.length
    return properties
def filter_labels_by_area(labels, area_in_nm2, pixel_size):
    """Keep only labels whose area (in nm^2) exceeds *area_in_nm2*.

    Returns a labelled image with the remaining objects.
    """
    kept = np.zeros_like(labels)
    for region in measure.regionprops(labels):
        if region.area * pixel_size * pixel_size > area_in_nm2:
            kept[labels == region.label] = region.label
    return kept
def filter_labels_by_area_to_width_ratio(labels, pixel_size, min_ratio,
                                         max_ratio):
    """Keep labels whose area-to-length ratio lies in [min_ratio, max_ratio]."""
    regions = create_length_prop(measure.regionprops(labels), pixel_size)
    kept = np.zeros_like(labels)
    for region in regions:
        if min_ratio <= region.area_to_length <= max_ratio:
            kept[labels == region.label] = region.label
    return kept
def reorder_labels(labels):
    """Relabel the objects with consecutive integers starting at 1."""
    relabelled = np.zeros_like(labels)
    for new_label, region in enumerate(measure.regionprops(labels), start=1):
        relabelled[labels == region.label] = new_label
    return relabelled
def run_pipeline(filepath, out_df, output_dir):
    """Process one MRC image and append its nanorod measurements to *out_df*.

    Opens the image, segments and filters candidate nanorods, saves a
    diagnostic figure to *output_dir*, and returns *out_df* with one row per
    detected nanorod appended.
    """
    # Opens and labels the images.
    filename, img, pixel_size = open_mrc(filepath)
    binary = img_prep(img)  # Prepares the image to be labelled.
    labels = watershedding(binary)  # Watersheds and labels the image.
    labels = filter_labels_by_area(labels, 500, pixel_size)
    labels = filter_labels_by_minor_axis_length(labels, 40, pixel_size)
    labels = reorder_labels(labels)
    # Obtains the properties of the labels.
    labels_properties = measure.regionprops(labels)
    labels_properties = create_length_prop(labels_properties, pixel_size)
    if len(labels_properties) > 0:
        # Plots and saves the images.
        plotfig(img, labels, labels_properties, filename, output_dir)
        # Table of per-nanorod properties, as a Pandas dataframe.
        table = measure.regionprops_table(labels, properties=('label', 'centroid', 'area'))
        data = pd.DataFrame(table)
        # Converts the area in pixels into area in nm square.
        data['area'] = pixel_size * pixel_size * data['area']
        # Column with the name of the source image.
        list_image_name = [os.path.basename(filename) + '.mrc' for i in range(data.shape[0])]
        data.insert(0, 'Image name', list_image_name)
        # Column with the lengths obtained from the Pythagoras theorem.
        lengths = [prop.length for prop in labels_properties]
        data.insert(5, 'Length in nm', lengths)
        # Renames the columns of the dataframe.
        data.rename(
            columns={
                'label': 'Nanorod ID',
                'centroid-0': 'Coordinate in Y',
                'centroid-1': 'Coordinate in X',
                'area': 'Area in nm square'
            },
            inplace=True
        )
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported equivalent.
        out_df = pd.concat([out_df, data], ignore_index=True)
        # Deletes the variables to release memory before the next image.
        del img, labels, binary, data, labels_properties
    return (out_df)
# Creates a dialog window to obtain the folder in which the images are.
root = tk.Tk()
root.withdraw()  # Hide the empty Tk root window; only the dialog is shown.
folder_selected = filedialog.askdirectory(title='Select the folder that contains the images.')
base_path = os.path.dirname(folder_selected)
# Create analysis results folder, named after the data folder plus a timestamp.
analysis_folder = 'Analysis_' + os.path.basename(
    folder_selected) + '_' + datetime.strftime(datetime.now(), "%Y-%m-%d_%H%M")
analysis_folder = analysis_folder.replace(' ', '_')
os.mkdir(os.path.join(base_path, analysis_folder))
print("Results will be saved in", analysis_folder, "!")
# List of GridSquare folders.  Assumes the layout
# <folder>/Images-Disc1/<GridSquare>/Data/*.mrc — TODO confirm against the
# acquisition software's output structure.
folder_selected = os.path.join(folder_selected, 'Images-Disc1')
grid_folders = os.listdir(folder_selected)
grid_count = 1
for folder in grid_folders:
    print("\n----------------------------------------")
    print("Processing", folder, "(", grid_count, "/", len(grid_folders), ")...\n")
    folder_path = os.path.join(folder, 'Data')
    folder_path = os.path.join(folder_selected, folder_path)
    # Create GridSquare output folders
    output_folder = os.path.join(analysis_folder, folder)
    output_folder = os.path.join(base_path, output_folder)
    os.mkdir(output_folder)
    # Creates the dataframe to which all the local dataframes will be appended.
    great_dataframe = pd.DataFrame(columns=['Image name', 'Nanorod ID', 'Coordinate in Y', 'Coordinate in X', 'Area in nm square', 'Length in nm'])
    # It opens each one of the images.
    path_list = glob.glob(os.path.join(folder_path, '*.mrc'))
    # Run main pipeline, accumulating results across all images of this grid.
    count = 1
    for filepath in path_list:
        print("[", datetime.now(), "] ", "Processing file (", count, "/", len(path_list), ")...")
        great_dataframe = run_pipeline(filepath, great_dataframe, output_folder)
        count += 1
    # Saves the great dataframe with all the data as an Excel spreadsheet.
    xlsx_filename = os.path.join(output_folder, 'Nanorod.xlsx')
    great_dataframe.to_excel(xlsx_filename)
    grid_count += 1
|
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
from ruamel_yaml.compat import text_type
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ["ScalarString", "PreservedScalarString", "SingleQuotedScalarString",
"DoubleQuotedScalarString"]
class ScalarString(text_type):
    """Base class for strings that remember their YAML scalar style."""
    __slots__ = ()

    def __new__(cls, *args, **kw):
        # type: (Any, Any) -> Any
        return text_type.__new__(cls, *args, **kw)  # type: ignore

    def replace(self, old, new, maxreplace=-1):
        # type: (Any, Any, int) -> Any
        # Re-wrap the result so the scalar style survives the replacement.
        replaced = text_type.replace(self, old, new, maxreplace)
        return type(self)(replaced)
class PreservedScalarString(ScalarString):
    """Scalar emitted with literal block style (``|``)."""
    __slots__ = ()
    style = "|"

    def __new__(cls, value):
        # type: (Text) -> Any
        return super(PreservedScalarString, cls).__new__(cls, value)
class SingleQuotedScalarString(ScalarString):
    """Scalar emitted with single-quoted style (``'``)."""
    __slots__ = ()
    style = "'"

    def __new__(cls, value):
        # type: (Text) -> Any
        return super(SingleQuotedScalarString, cls).__new__(cls, value)
class DoubleQuotedScalarString(ScalarString):
    """Scalar emitted with double-quoted style (``"``)."""
    __slots__ = ()
    style = '"'

    def __new__(cls, value):
        # type: (Text) -> Any
        return super(DoubleQuotedScalarString, cls).__new__(cls, value)
def preserve_literal(s):
    # type: (Text) -> Text
    """Normalize CR/LF line endings to LF and wrap as a literal block scalar."""
    normalized = s.replace('\r\n', '\n').replace('\r', '\n')
    return PreservedScalarString(normalized)
def walk_tree(base):
    # type: (Any) -> None
    """Recursively convert multi-line strings in a YAML tree to literal scalars.

    Walks dict values and list items in place; any string containing a
    newline is replaced with a PreservedScalarString.
    """
    from ruamel_yaml.compat import string_types

    if isinstance(base, dict):
        for key in base:
            value = base[key]  # type: Text
            if isinstance(value, string_types) and '\n' in value:
                base[key] = preserve_literal(value)
            else:
                walk_tree(value)
    elif isinstance(base, list):
        for index, item in enumerate(base):
            if isinstance(item, string_types) and '\n' in item:  # type: ignore
                base[index] = preserve_literal(item)  # type: ignore
            else:
                walk_tree(item)
|
import argparse
import os
from os import system
from os.path import isdir, isfile
from glob import glob
from multiprocessing import Pool
def get_args():
    """Parse command-line options for the TartanAir unzip helper."""
    parser = argparse.ArgumentParser(description='TartanAir')
    parser.add_argument('--dataset-dir', default='./',
                        help='root directory for downloaded files')
    return parser.parse_args()
def unzip_wrapper(path):
    """Unzip one archive into a sibling directory named after the zip file.

    stdout of ``unzip`` is redirected into a ``.log`` file next to the
    archive.
    """
    datadir = '/'.join(path.split('/')[:-3])
    envname, difflevel, filename = path.split('/')[-3:]
    tmpdir = '/'.join([datadir, envname, difflevel, filename.replace('.zip', '')])
    # Replace only the extension; the previous path.replace('zip', 'log')
    # also mangled any 'zip' substring occurring in directory names.
    logfile = os.path.splitext(path)[0] + '.log'
    cmd = f'unzip -o {path} -d {tmpdir} > {logfile}'
    print(cmd)
    system(cmd)
if __name__ == '__main__':
    args = get_args()
    # dataset directory
    datadir = args.dataset_dir
    if not isdir(datadir):
        # The original referenced an undefined name `dataset` here, which
        # raised NameError instead of printing this message.
        print(f'dataset dir {datadir} does not exists!')
        exit()
    # unzip every archive in parallel (one worker per archive)
    zipfiles = glob(datadir + '/**/*.zip', recursive=True)
    zipfiles.sort()
    if not zipfiles:
        # Pool(0) raises ValueError; bail out with a clear message instead.
        print(f'no zip files found under {datadir}!')
        exit()
    with Pool(len(zipfiles)) as p:
        p.map(unzip_wrapper, zipfiles)
    # move extracted trajectories into their final location
    tmpdirs = [path.replace('.zip', '') for path in zipfiles]
    for path in tmpdirs:
        envname, difflevel, dataname = path.split('/')[-3:]
        for t in glob('/'.join([path, envname, envname, difflevel, '*'])):
            trajectory = t.split('/')[-1]
            destination = '/'.join([datadir, envname, difflevel, trajectory])
            if not isdir(destination):
                system(f'mkdir -p {destination}')
            # Move from the extracted path `t`; the bare trajectory name was
            # not a valid path relative to the working directory.
            cmd = f'mv -f {t}/* {destination}'
            print(cmd)
            system(cmd)
        system(f'rm -rf {path}')
|
class Solution:
    def largestRectangleArea(self, heights):
        """
        :type heights: List[int]
        :rtype: int

        Monotonic-stack solution: the stack holds indices of bars with
        non-decreasing heights.  When a lower bar arrives, every taller bar
        on the stack is popped and the largest rectangle it bounds is scored.
        Using the end of the list as the stack top makes each push/pop O(1);
        the original used list.insert(0)/pop(0), which made every stack
        operation O(n) and the whole algorithm O(n^2).
        """
        stack = []  # indices of bars, heights non-decreasing bottom-to-top
        max_area = 0
        i = 0
        n = len(heights)
        while i < n:
            if not stack or heights[stack[-1]] <= heights[i]:
                stack.append(i)
                i += 1
            else:
                top = stack.pop()
                # Width spans from the bar after the new stack top up to i-1.
                width = i if not stack else i - stack[-1] - 1
                max_area = max(max_area, heights[top] * width)
        # Flush the remaining bars; every one extends to the right edge.
        while stack:
            top = stack.pop()
            width = i if not stack else i - stack[-1] - 1
            max_area = max(max_area, heights[top] * width)
        return max_area
|
# pylint: disable=invalid-name
import time
import pytest
import sys
from unittest.mock import MagicMock
from slack_sdk.errors import SlackApiError
sys.path.append('plugins/sdm')
sys.path.append('e2e/')
from test_common import create_config, DummyAccount, DummyRole, get_rate_limited_slack_response_error
from lib import ShowRolesHelper
pytest_plugins = ["errbot.backends.test"]
extra_plugin_dir = "plugins/sdm"
account_name = "myaccount@test.com"
account_roles_tag = 'sdm-roles'
class Test_show_roles:
    """`show available roles` with the default configuration."""
    @pytest.fixture
    def mocked_testbot(self, testbot):
        config = create_config()
        return inject_mocks(testbot, config)
    def test_show_roles_command(self, mocked_testbot):
        mocked_testbot.push_message("show available roles")
        message = mocked_testbot.pop_message()
        # Both dummy roles should be listed.
        assert "Aaa" in message
        assert "Bbb" in message
class Test_show_roles_except_hidden_roles:
    """Roles tagged with HIDE_ROLE_TAG must be omitted from the listing."""
    @pytest.fixture
    def mocked_testbot(self, testbot):
        config = create_config()
        config['HIDE_ROLE_TAG'] = 'hide-role'
        return inject_mocks(testbot, config, roles=[DummyRole("Bbb", {}), DummyRole("Aaa", {'hide-role': 'true'})])
    def test_show_roles_command(self, mocked_testbot):
        mocked_testbot.push_message("show available roles")
        message = mocked_testbot.pop_message()
        # "Aaa" carries the hide tag, so only "Bbb" should appear.
        assert "Aaa" not in message
        assert "Bbb" in message
class Test_auto_approve_by_tag:
    """Roles tagged with AUTO_APPROVE_ROLE_TAG are flagged in the listing."""
    @pytest.fixture
    def mocked_testbot(self, testbot):
        config = create_config()
        config['AUTO_APPROVE_ROLE_TAG'] = 'auto-approve-role'
        return inject_mocks(testbot, config, roles = [DummyRole("Bbb", {}), DummyRole("Aaa", {'auto-approve-role': 'true'})])
    def test_show_roles_command(self, mocked_testbot):
        mocked_testbot.push_message("show available roles")
        message = mocked_testbot.pop_message()
        # For some reason we cannot assert the text enclosed between stars
        assert "Aaa (auto-approve)" in message
        assert "Bbb" in message
class Test_not_allowed_by_tag:
    """Roles outside the account's USER_ROLES_TAG list are struck through."""
    @pytest.fixture
    def mocked_testbot(self, testbot):
        config = create_config()
        config['USER_ROLES_TAG'] = account_roles_tag
        return inject_mocks(testbot, config, roles = [DummyRole("Bbb", {}), DummyRole("Aaa", {})], account_permitted_roles=['Aaa'])
    def test_show_roles_command(self, mocked_testbot):
        mocked_testbot.push_message("show available roles")
        message = mocked_testbot.pop_message()
        # For some reason we cannot assert the text enclosed between stars
        assert "Aaa" in message
        assert "~Bbb~ (not allowed)" in message
class Test_alternative_email:
    """Resolving the sender's email from a custom Slack profile field."""
    alternative_email_tag = 'alternative-email'
    @pytest.fixture
    def mocked_user_profile(self):
        # Minimal Slack profile payload carrying the alternative email field.
        return {
            'fields': {
                'XXX': {
                    'label': self.alternative_email_tag,
                    'value': account_name,
                }
            }
        }
    @pytest.fixture
    def mocked_testbot_with_profile(self, testbot, mocked_user_profile):
        config = create_config()
        config['SENDER_EMAIL_OVERRIDE'] = None
        config['EMAIL_SLACK_FIELD'] = self.alternative_email_tag
        testbot.bot.sender.userid = 'XXX'
        testbot.bot.find_user_profile = MagicMock(return_value=mocked_user_profile)
        return inject_mocks(testbot, config)
    @pytest.fixture
    def mocked_testbot_with_ratelimited_error(self, testbot):
        # Same setup, but the profile lookup raises a Slack rate-limit error.
        config = create_config()
        config['SENDER_EMAIL_OVERRIDE'] = None
        config['EMAIL_SLACK_FIELD'] = self.alternative_email_tag
        testbot.bot.sender.userid = 'XXX'
        testbot.bot.find_user_profile = MagicMock(side_effect=get_rate_limited_slack_response_error())
        return inject_mocks(testbot, config)
    def test_when_has_profile(self, mocked_testbot_with_profile):
        mocked_testbot_with_profile.push_message("show available roles")
        message = mocked_testbot_with_profile.pop_message()
        assert "Aaa" in message
        assert "Bbb" in message
    def test_when_throws_ratelimited_error(self, mocked_testbot_with_ratelimited_error):
        mocked_testbot_with_ratelimited_error.push_message("show available roles")
        message = mocked_testbot_with_ratelimited_error.pop_message()
        assert "An error occurred" in message
        assert "Too many requests were made" in message
def default_dummy_roles():
    """Return two fresh tag-less roles used by most tests."""
    return [ DummyRole("Bbb", {}), DummyRole("Aaa", {}) ]
def inject_mocks(testbot, config, roles = None, account_permitted_roles = None):
    """Wire an AccessBot plugin instance with mocked SDM services.

    The former default ``roles = default_dummy_roles()`` was evaluated once
    at definition time, so every call shared the same DummyRole instances
    (the pylint dangerous-default-value suppression acknowledged it); a None
    sentinel builds a fresh list per call.
    """
    if roles is None:
        roles = default_dummy_roles()
    accessbot = testbot.bot.plugin_manager.plugins['AccessBot']
    accessbot.config = config
    accessbot.get_admins = MagicMock(return_value = ["gbin@localhost"])
    accessbot.get_api_access_key = MagicMock(return_value = "api-access_key")
    accessbot.get_api_secret_key = MagicMock(return_value = "c2VjcmV0LWtleQ==") # valid base64 string
    accessbot.get_sdm_service = MagicMock(return_value = create_sdm_service_mock(roles, account_permitted_roles))
    accessbot.get_show_roles_helper = MagicMock(return_value = ShowRolesHelper(accessbot))
    return testbot
def create_sdm_service_mock(roles, account_permitted_roles):
    """Build a MagicMock standing in for the SDM service."""
    service = MagicMock()
    service.get_all_roles = MagicMock(return_value=roles)
    account = DummyAccount('user', {account_roles_tag: account_permitted_roles})
    service.get_account_by_email = MagicMock(return_value=account)
    return service
|
import torch
from torch import nn
from torch.autograd import Variable
from torch import transpose as t
from torch import inverse as inv
from torch import mm
from torch import gesv
from fewshots import labels_lrd2_multi, labels_lrd2_bin, labels_r2d2
from fewshots.models.adjust import AdjustLayer, LambdaLayer
from fewshots.models.utils import roll
from fewshots.data.queries import shuffle_queries_multi, shuffle_queries_bin
class LRD2(nn.Module):
    """Few-shot learner whose base learner is logistic regression fitted by
    iteratively reweighted least squares (IRLS).

    NOTE(review): uses legacy PyTorch APIs (`Variable`, `.data[0]`, `gesv`)
    — assumes an old torch version; confirm before upgrading.
    """

    def __init__(self, encoder, debug, out_dim, learn_lambda, init_lambda, init_adj_scale, lambda_base, adj_base,
                 n_augment, irls_iterations, linsys):
        super(LRD2, self).__init__()
        self.encoder = encoder
        self.debug = debug
        # Regularization strength, optionally learned.
        self.lambda_ = LambdaLayer(learn_lambda, init_lambda, lambda_base)
        self.L = nn.CrossEntropyLoss()
        self.L_bin = nn.BCEWithLogitsLoss()
        self.adjust = AdjustLayer(init_scale=init_adj_scale, base=adj_base)
        self.output_dim = out_dim
        self.n_augment = n_augment
        assert (irls_iterations > 0)
        self.iterations = irls_iterations
        # When True, solve the linear system directly instead of inverting.
        self.linsys = linsys

    def loss(self, sample):
        """Compute episode loss/accuracy from a dict with 'xs' (support) and
        'xq' (query) tensors of shape (n_way, n_shot/n_query, ...)."""
        xs, xq = Variable(sample['xs']), Variable(sample['xq'])
        assert (xs.size(0) == xq.size(0))
        n_way, n_shot, n_query = xs.size(0), xs.size(1), xq.size(1)
        # Encode support and query images in a single forward pass.
        x = torch.cat([xs.view(n_way * n_shot * self.n_augment, *xs.size()[2:]),
                       xq.view(n_way * n_query, *xq.size()[2:])], 0)
        if n_way > 2:
            # 1-vs-all for multi-class
            y_inner_binary = labels_lrd2_multi.make_float_label(n_shot, n_way * n_shot * self.n_augment)
            y_outer_binary = labels_r2d2.make_float_label(n_way, n_query)
            y_outer = labels_r2d2.make_long_label(n_way, n_query)
            x, y_outer_binary, y_outer = shuffle_queries_multi(x, n_way, n_shot, n_query, self.n_augment,
                                                              y_outer_binary, y_outer)
            zs, zq = self.encode(x, n_way, n_shot)
            # save n_way scores per query, pick best for each query to know which class it is
            scores = Variable(torch.FloatTensor(n_query * n_way, n_way).zero_().cuda())
            for i in range(n_way):
                # re-init weight
                w0 = Variable(torch.FloatTensor(n_way * n_shot * self.n_augment).zero_().cuda())
                wb = self.ir_logistic(zs, w0, y_inner_binary)
                y_hat = mm(zq, wb)
                # y_hat = self.adjust(out)
                scores[:, i] = y_hat
                # re-generate base-learner label by circ-shift of n_shot steps
                y_inner_binary = roll(y_inner_binary, n_shot)
            _, ind_prediction = torch.max(scores, 1)
            _, ind_gt = torch.max(y_outer_binary, 1)
            loss_val = self.L(scores, y_outer)
            acc_val = torch.eq(ind_prediction, ind_gt).float().mean()
            # print('Loss: %.3f Acc: %.3f' % (loss_val.data[0], acc_val.data[0]))
            return loss_val, {
                'loss': loss_val.data[0],
                'acc': acc_val.data[0]
            }
        else:
            # Binary episode: a single logistic regression suffices.
            y_inner_binary = labels_lrd2_bin.make_float_label(n_way, n_shot * self.n_augment)
            y_outer = labels_lrd2_bin.make_byte_label(n_way, n_query)
            y_outer_2d = labels_lrd2_bin.make_float_label(n_way, n_query).unsqueeze(1)
            x, y_outer, y_outer_2d = shuffle_queries_bin(x, n_way, n_shot, n_query, self.n_augment, y_outer, y_outer_2d)
            zs, zq = self.encode(x, n_way, n_shot)
            w0 = Variable(torch.FloatTensor(n_way * n_shot * self.n_augment).zero_().cuda())
            wb = self.ir_logistic(zs, w0, y_inner_binary)
            y_hat = mm(zq, wb)
            # y_hat = self.adjust(out)
            ind_prediction = (torch.sigmoid(y_hat) >= 0.5).squeeze(1)
            loss_val = self.L_bin(y_hat, y_outer_2d)
            acc_val = torch.eq(ind_prediction, y_outer).float().mean()
            # print('Loss: %.3f Acc: %.3f' % (loss_val.data[0], acc_val.data[0]))
            return loss_val, {
                'loss': loss_val.data[0],
                'acc': acc_val.data[0]
            }

    def encode(self, X, n_way, n_shot):
        """Embed all images, split into support/query, and append a constant
        1-column so the base learner fits a bias term."""
        z = self.encoder.forward(X)
        zs = z[:n_way * n_shot * self.n_augment]
        zq = z[n_way * n_shot * self.n_augment:]
        ones = Variable(torch.unsqueeze(torch.ones(zs.size(0)).cuda(), 1))
        zs = torch.cat((zs, ones), 1)
        ones = Variable(torch.unsqueeze(torch.ones(zq.size(0)).cuda(), 1))
        zq = torch.cat((zq, ones), 1)
        return zs, zq

    def ir_logistic(self, X, w0, y_inner):
        """Fit binary logistic regression weights by IRLS.

        Each iteration solves the reweighted ridge system; the Woodbury
        identity keeps the inverse in the (small) sample dimension.
        """
        # iteration 0
        eta = w0  # + zeros
        mu = torch.sigmoid(eta)
        s = mu * (1 - mu)  # Per-sample IRLS weights.
        z = eta + (y_inner - mu) / s  # Working response.
        S = torch.diag(s)
        # Woodbury with regularization
        w_ = mm(t(X, 0, 1), inv(mm(X, t(X, 0, 1)) + self.lambda_(inv(S))))
        z_ = t(z.unsqueeze(0), 0, 1)
        w = mm(w_, z_)
        # it 1...N
        for i in range(self.iterations - 1):
            eta = w0 + mm(X, w).squeeze(1)
            mu = torch.sigmoid(eta)
            s = mu * (1 - mu)
            z = eta + (y_inner - mu) / s
            S = torch.diag(s)
            z_ = t(z.unsqueeze(0), 0, 1)
            if not self.linsys:
                w_ = mm(t(X, 0, 1), inv(mm(X, t(X, 0, 1)) + self.lambda_(inv(S))))
                w = mm(w_, z_)
            else:
                # Solve A w_ = z_ directly instead of forming the inverse.
                A = mm(X, t(X, 0, 1)) + self.lambda_(inv(S))
                w_, _ = gesv(z_, A)
                w = mm(t(X, 0, 1), w_)
        return w
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageServiceProperties(Model):
    """Storage Service Properties.

    :param logging: Azure Analytics Logging settings
    :type logging: ~xmlservice.models.Logging
    :param hour_metrics: A summary of request statistics grouped by API in
     hourly aggregates for blobs
    :type hour_metrics: ~xmlservice.models.Metrics
    :param minute_metrics: a summary of request statistics grouped by API in
     minute aggregates for blobs
    :type minute_metrics: ~xmlservice.models.Metrics
    :param cors: The set of CORS rules.
    :type cors: list[~xmlservice.models.CorsRule]
    :param default_service_version: The default version to use for requests to
     the Blob service if an incoming request's version is not specified.
     Possible values include version 2008-10-27 and all more recent versions
    :type default_service_version: str
    :param delete_retention_policy: The Delete Retention Policy for the
     service
    :type delete_retention_policy: ~xmlservice.models.RetentionPolicy
    """

    # Maps attribute names to their serialization keys/types for msrest.
    _attribute_map = {
        'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
        'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}},
        'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}},
    }

    _xml_map = {
    }

    def __init__(self, **kwargs):
        """Accept each property as an optional keyword argument."""
        super(StorageServiceProperties, self).__init__(**kwargs)
        # Unspecified properties default to None via dict.get.
        self.logging = kwargs.get('logging')
        self.hour_metrics = kwargs.get('hour_metrics')
        self.minute_metrics = kwargs.get('minute_metrics')
        self.cors = kwargs.get('cors')
        self.default_service_version = kwargs.get('default_service_version')
        self.delete_retention_policy = kwargs.get('delete_retention_policy')
|
import global_settings as gs
import pylab
import numpy as np
from find_affine import *
from scipy import linalg
from PyQt4.QtCore import pyqtRemoveInputHook
import pdb
def generate_generic_grid(nx,ny, offset=(0,0), spacing=(1.0,1.0)):
    """Return an (nx*ny, 2) array of grid positions.

    Positions are spaced by `spacing` along x/y and shifted by `offset`;
    rows are ordered x-fastest (row-major over the meshgrid).
    """
    col_x = offset[0] + np.arange(nx) * spacing[0]
    col_y = offset[1] + np.arange(ny) * spacing[1]
    grid_x, grid_y = np.meshgrid(col_x, col_y)
    n_points = nx * ny
    return np.hstack((grid_x.reshape(n_points, 1), grid_y.reshape(n_points, 1)))
def generate_all_positions():
spotpositions = generate_generic_grid(gs.spotnumber[0],gs.spotnumber[1], spacing=gs.spotdistance)
tmp = np.zeros((0,2))
for spot in spotpositions:
print "Spot ", spot
print "offset ", -np.array(gs.subpos_nr)/2.0+0.5
print "distance ",gs.subpos_distance
#pdb.set_trace()
subpos = generate_generic_grid(gs.subpos_nr[0], gs.subpos_nr[1], spot-(np.array(gs.subpos_nr)/2.0-0.5)*gs.subpos_distance, gs.subpos_distance)
print "Subpos "< subpos
print "xxxxxxxx"
tmp = np.vstack((tmp,subpos))
return tmp
def transferPoint(H, x, y, inverse=False):
    """Apply the homography H to the point (x, y).

    When inverse=True the point is mapped through H's inverse instead.
    Returns the dehomogenized 2-vector.
    """
    homogeneous = np.float32([x, y, 1.0])
    matrix = linalg.inv(H) if inverse else H
    projected = np.dot(matrix, homogeneous.transpose())
    # Dehomogenize: divide through by the third coordinate.
    projected /= projected[2]
    return(projected[0:2])
def calculate_transform(stage_corners=None):
grid_corners = np.zeros((4, 3), dtype=np.float32)
#TL
grid_corners[0,0] = 0
grid_corners[0,1] = 0
grid_corners[0,2] = 1
# TR
grid_corners[1,0] = gs.spotdistance[0]*(gs.spotnumber[0]-1)
grid_corners[1,1] = 0
grid_corners[1,2] = 1
# BL
grid_corners[2,0] = 0
grid_corners[2,1] = gs.spotdistance[1]*(gs.spotnumber[1]-1)
grid_corners[2,2] = 1
# BR
grid_corners[3,0] = gs.spotdistance[0]*(gs.spotnumber[0]-1)
grid_corners[3,1] = gs.spotdistance[1]*(gs.spotnumber[1]-1)
grid_corners[3,2] = 1
if stage_corners is None:
stage_corners = np.zeros((4, 3), dtype=np.float32)
#TL
stage_corners[0,0] = 0
stage_corners[0,1] = 0
stage_corners[0,2] = 1
# TR
stage_corners[1,0] = 1
stage_corners[1,1] = 0
stage_corners[1,2] = 1
# BL
stage_corners[2,0] = 0
stage_corners[2,1] = 1
stage_corners[2,2] = 1
# BR
stage_corners[3,0] = 1
stage_corners[3,1] = 1
stage_corners[3,2] = 1
print grid_corners
print stage_corners
#for i in range(len(self.coords.pickedx)):
# grid_corners[i,0]=self.coords.pickedx[i]
# grid_corners[i,1]=self.coords.pickedy[i]
# stage[i,0]= self.coords.stageCoords[i][0]
# stage[i,1]= self.coords.stageCoords[i][1]
# print(grid_corners)
# print(stage)
H= Haffine_from_points(grid_corners.transpose(),stage_corners.transpose())
return(H)
def test_transform():
    """Visual smoke test: transform all grid positions and plot them."""
    # NOTE(review): generate_all_positions() currently returns a single array,
    # so this 2-tuple unpack will fail until it is modified to also return
    # the centre points (the original comment below acknowledges this).
    pts, centrepts = generate_all_positions() # requires modification to return centrepoints
    #pylab.plot(pts[:,0],pts_transfor[:,1],"*")
    #pylab.plot(centrepts[:,0], centrepts_transformed[:,1],"+")
    H = calculate_transform()
    # Map every grid point and every centre point through the homography.
    pts_transformed = pts.copy()
    for i, pt in enumerate(pts):
        pts_transformed[i]=transferPoint(H, pt[0], pt[1])
    centrepts_transformed = centrepts.copy()
    for i, pt in enumerate(centrepts):
        centrepts_transformed[i]=transferPoint(H, pt[0], pt[1])
    print "Transformed Points:"
    print pts_transformed
    pylab.plot(pts_transformed[:,0],pts_transformed[:,1],"*")
    pylab.plot(centrepts_transformed[:,0], centrepts_transformed[:,1],"+")
    pylab.show()
|
from flask_restful import Resource
class Endpoint(Resource):
    """Minimal REST resource returning a fixed body and status per verb."""

    def get(self):
        # Same payload and status code as before, via named locals.
        body, status = 'Got get', 200
        return body, status

    def post(self):
        body, status = 'Got post', 201
        return body, status
|
"""Snakemake wrapper for BUSCO assessment"""
__author__ = "Tessa Pierce"
__copyright__ = "Copyright 2018, Tessa Pierce"
__email__ = "ntpierce@gmail.com"
__license__ = "MIT"
from snakemake.shell import shell
from os import path
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
extra = snakemake.params.get("extra", "")
mode = snakemake.params.get("mode")
assert mode is not None, "please input a run mode: genome, transcriptome or proteins"
lineage = snakemake.params.get("lineage_path")
download = snakemake.params.get("download_path")
assert lineage is not None, "please input the path to a lineage for busco assessment"
# busco does not allow you to direct output location: handle this by moving output
outdir = path.dirname(snakemake.output[0])
out_name = "btemp_" + snakemake.params.get("asm", "t")
##Add --offline and --download_path to temporarily get it to bypass trying to go online
#change download_path to just busco_downloads to see if error is fixed
# note: --force allows snakemake to handle rewriting files as necessary
# without needing to specify *all* busco outputs as snakemake outputs
shell(
"busco --in {snakemake.input[1]} --out {out_name} --force --offline"
" --cpu {snakemake.threads} --mode {mode} --lineage {lineage} --download_path {download} "
" {extra} {log}"
)
# move to intended location
shell("cp {out_name}/short_summary*.txt {outdir}/busco_summary.txt")
#shell("rm -rf {out_name}")
|
from setuptools import setup, find_packages
# Read the long description up front so the file handle is closed promptly
# (the original passed open(...).read() inline, leaking the handle).
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name='django-mininews',
    version='0.1',
    packages=find_packages(exclude=['example_project']),
    license='MIT',
    description='Boilerplate for creating publishable lists of objects',
    long_description=long_description,
    author='Richard Barran',
    author_email='richard@arbee-design.co.uk',
)
|
from tensorflow.keras.applications.imagenet_utils import preprocess_input
import tensorflow as tf
from utils.augmentations import *
AUTO = tf.data.experimental.AUTOTUNE
@tf.function
def data_augment(image, boxes, labels):
    """Apply photometric jitter plus geometric augmentation to one sample.

    Each colour op below fires independently with probability 0.5; the
    geometric ops (lighting noise / expand / crop / flip) come from
    utils.augmentations and also update boxes (and labels for the crop).
    Returns the augmented (image, boxes, labels) tuple.
    """
    if tf.random.uniform([]) > 0.5:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5) # Random Saturation
    if tf.random.uniform([]) > 0.5:
        image = tf.image.random_brightness(image, max_delta=0.15) # Random brightness
    if tf.random.uniform([]) > 0.5:
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5) # Random Contrast
    if tf.random.uniform([]) > 0.5:
        image = tf.image.random_hue(image, max_delta=0.2) # Random Hue
    image = random_lighting_noise(image)
    image, boxes = expand(image, boxes)
    image, boxes, labels = random_crop(image, boxes, labels) # Random Crop
    image, boxes = random_flip(image, boxes) # Random Flip
    return (image, boxes, labels)
def prepare_input(sample, convert_to_normal=True):
    """Decode one detection sample into an (image, boxes, labels) tuple.

    Labels are shifted by +1 (so 0 stays free for background); when
    convert_to_normal is True the box columns are reordered by swapping
    columns (0,1) and (2,3).  The image is float32 and preprocessed with
    the 'torch' convention.
    """
    image = tf.cast(sample['image'], tf.float32)
    labels = sample['objects']['label'] + 1
    boxes = sample['objects']['bbox']
    if convert_to_normal:
        # Swap column order: (a, b, c, d) -> (b, a, d, c).
        boxes = tf.stack([boxes[:, 1], boxes[:, 0], boxes[:, 3], boxes[:, 2]], axis=1)
    image = preprocess_input(image, mode='torch')
    return (image, boxes, labels)
def join_target(image, bbox, labels, image_size, target_transform, num_classes=21):
    """Encode (bbox, labels) into training targets and resize the image.

    Args:
        image: input image tensor.
        bbox: bounding boxes for the image.
        labels: integer class ids.
        image_size: target (height, width) for the resized image.
        target_transform: callable mapping (boxes, labels) to
            (locations, label ids).
        num_classes: one-hot depth for the class targets. Defaults to 21,
            the previously hard-coded value, so existing callers are
            unaffected; other datasets can now pass their own class count.

    Returns:
        (resized_image, targets) where targets concatenates the one-hot
        class scores and the box locations along axis 1.
    """
    locations, labels = target_transform(tf.cast(bbox, tf.float32), labels)
    labels = tf.one_hot(labels, num_classes, axis=1, dtype=tf.float32)
    targets = tf.concat([labels, locations], axis=1)
    return (tf.image.resize(image, image_size), targets)
def prepare_dataset(dataset, image_size, batch_size, target_transform, train=False):
    """Build the input pipeline: decode, optionally shuffle/augment, encode targets, batch.

    Keras best practice: the training dataset is shuffled and repeated
    before batching; the evaluation dataset is not repeated.
    """
    pipeline = dataset.map(prepare_input, num_parallel_calls=AUTO)
    if train:
        pipeline = pipeline.shuffle(1000).repeat()
        pipeline = pipeline.map(data_augment, num_parallel_calls=AUTO)
    pipeline = pipeline.map(
        lambda image, boxes, labels: join_target(image, boxes, labels, image_size, target_transform),
        num_parallel_calls=AUTO)
    pipeline = pipeline.padded_batch(batch_size)
    return pipeline.prefetch(AUTO)
def prepare_for_prediction(file_path, image_size=[300, 300]):
    """Load an image file from disk and preprocess it for inference."""
    raw = tf.io.read_file(file_path)
    decoded = decode_img(raw, image_size)
    return preprocess_input(decoded, mode='torch')
def decode_img(img, image_size=[300, 300]):
    """Decode a compressed JPEG byte string and resize it.

    The decoded tensor has 3 channels; tf.image.resize returns float32.
    (The list default is shared between calls but never mutated here.)
    """
    decoded = tf.image.decode_jpeg(img, channels=3)
    resized = tf.image.resize(decoded, image_size)
    return resized
|
#coding: UTF-8
# Information redundancy: for some quantitative features the useful information
# is only an interval split. E.g. for exam scores, if we only care about
# pass/fail, the numeric score must be converted to "1" (pass) / "0" (fail).
# Binarization solves exactly this.
import numpy as np
from sklearn.preprocessing import Binarizer
# Demo matrix with positive, zero and negative entries.
X = np.array([[ 1., -1., 2.],[ 2., 0., 0.],[ 0., 1., -1.]])
print(X)
# Values > threshold map to 1, the rest to 0.
binarizer = Binarizer(threshold=0.0).fit(X) # fit does nothing
print(binarizer.transform(X))
|
import json
from unittest.mock import patch
import pytest
from airflow import AirflowException
from dbt.contracts.results import RunStatus
from airflow_dbt_python.operators.dbt import DbtSeedOperator
def test_dbt_seed_mocked_all_args():
    """Every supported constructor option must be rendered into the dbt CLI
    argument list, in the exact order dbt expects; run_dbt_command is mocked
    so no real dbt invocation happens."""
    op = DbtSeedOperator(
        task_id="dbt_task",
        project_dir="/path/to/project/",
        profiles_dir="/path/to/profiles/",
        profile="dbt-profile",
        target="dbt-target",
        vars={"target": "override"},
        log_cache_events=True,
        bypass_cache=True,
        full_refresh=True,
        select=["/path/to/data.csv"],
        show=True,
        threads=2,
        exclude=["/path/to/data/to/exclude.csv"],
        selector="a-selector",
        state="/path/to/state/",
    )
    # Expected CLI translation of the kwargs above — ordering matters.
    args = [
        "seed",
        "--project-dir",
        "/path/to/project/",
        "--profiles-dir",
        "/path/to/profiles/",
        "--profile",
        "dbt-profile",
        "--target",
        "dbt-target",
        "--vars",
        "{target: override}",
        "--log-cache-events",
        "--bypass-cache",
        "--full-refresh",
        "--select",
        "/path/to/data.csv",
        "--show",
        "--threads",
        "2",
        "--exclude",
        "/path/to/data/to/exclude.csv",
        "--selector",
        "a-selector",
        "--state",
        "/path/to/state/",
    ]
    with patch.object(DbtSeedOperator, "run_dbt_command") as mock:
        mock.return_value = ([], True)
        op.execute({})
        mock.assert_called_once_with(args)
def test_dbt_seed_mocked_default():
    """With no extra options, the operator builds the bare `seed` command."""
    operator = DbtSeedOperator(task_id="dbt_task")
    assert operator.command == "seed"
    expected_args = ["seed"]
    with patch.object(DbtSeedOperator, "run_dbt_command") as fake_run:
        fake_run.return_value = ([], True)
        operator.execute({})
        fake_run.assert_called_once_with(expected_args)
def test_dbt_seed_non_existent_file(profiles_file, dbt_project_file, seed_files):
    """Selecting a seed that does not exist yields zero results, not an error."""
    operator = DbtSeedOperator(
        task_id="dbt_task",
        project_dir=dbt_project_file.parent,
        profiles_dir=profiles_file.parent,
        select=["fake"],
        do_xcom_push=True,
    )
    outcome = operator.execute({})
    # Nothing ran, and the payload stays JSON-serialisable for XCom.
    assert len(outcome["results"]) == 0
    assert isinstance(json.dumps(outcome), str)
def test_dbt_seed_models(profiles_file, dbt_project_file, seed_files):
    """Seeding the fixture CSVs succeeds and exposes the agate table schema."""
    selected = [str(seed.stem) for seed in seed_files]
    operator = DbtSeedOperator(
        task_id="dbt_task",
        project_dir=dbt_project_file.parent,
        profiles_dir=profiles_file.parent,
        select=selected,
        do_xcom_push=True,
    )
    outcome = operator.execute({})
    first_result = outcome["results"][0]
    assert first_result["status"] == RunStatus.Success
    assert first_result["agate_table"] == {"country_code": "Text", "country_name": "Text"}
    assert isinstance(json.dumps(outcome), str)
def test_dbt_seed_models_full_refresh(profiles_file, dbt_project_file, seed_files):
    """Seeding with full_refresh=True also succeeds and stays serialisable."""
    selected = [str(seed.stem) for seed in seed_files]
    operator = DbtSeedOperator(
        task_id="dbt_task",
        project_dir=dbt_project_file.parent,
        profiles_dir=profiles_file.parent,
        select=selected,
        full_refresh=True,
        do_xcom_push=True,
    )
    outcome = operator.execute({})
    first_result = outcome["results"][0]
    assert first_result["status"] == RunStatus.Success
    assert isinstance(json.dumps(outcome), str)
BROKEN_CSV = """\
id,name
1,A name,
2
"""
@pytest.fixture
def broken_file(dbt_project_dir):
d = dbt_project_dir / "data"
s = d / "broken_seed.csv"
s.write_text(BROKEN_CSV)
return s
def test_dbt_seed_fails_with_malformed_csv(
    profiles_file, dbt_project_file, broken_file
):
    """A seed whose CSV rows are inconsistent must abort with AirflowException."""
    operator = DbtSeedOperator(
        task_id="dbt_task",
        project_dir=dbt_project_file.parent,
        profiles_dir=profiles_file.parent,
        select=[str(broken_file.stem)],
        full_refresh=True,
    )
    with pytest.raises(AirflowException):
        operator.execute({})
|
#!/usr/bin/env python3
import depthai as dai
import json
# Connect device
# Opens the first available DepthAI device (OpenVINO 2021.4 runtime, USB2
# "HIGH" speed) and dumps both calibration blobs stored in its EEPROM.
with dai.Device(dai.OpenVINO.VERSION_2021_4, dai.UsbSpeed.HIGH) as device:
    print(f'Is EEPROM available: {device.isEepromAvailable()}')
    # User calibration
    try:
        print(f'User calibration: {json.dumps(device.readCalibration2().eepromToJson(), indent=2)}')
    except Exception as ex:
        # A device without stored calibration raises; report it and continue.
        print(f'No user calibration: {ex}')
    # Factory calibration
    try:
        print(f'Factory calibration: {json.dumps(device.readFactoryCalibration().eepromToJson(), indent=2)}')
    except Exception as ex:
        print(f'No factory calibration: {ex}')
|
import cp1_1_05_swarl_matrix_a
def solution(n):
    """Delegate to the reference implementation in cp1_1_05_swarl_matrix_a."""
    return cp1_1_05_swarl_matrix_a.solution(n)
if __name__ == "__main__":
input = 2
print("input=", input, "result=", solution(input));
input = 3
print("input=", input, "result=", solution(input));
input = 4
print("input=", input, "result=", solution(input));
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: luzhongyang@huoxian.cn
# datetime: 2021/11/03 下午2:26
# project: DongTai-engine
from dongtai.models.project import IastProject
import time
from dongtai.models.vulnerablity import IastVulnerabilityModel
from collections import namedtuple
from django.db.models import Q
from dongtai.models.hook_type import HookType
from dongtai.models.agent import IastAgent
from django.utils.translation import gettext_lazy as _
from django.utils.translation import override
import re
import json
from dongtai.models.vul_level import IastVulLevel
from dongtai.models.message import IastMessage, IastMessageType
from docx import Document
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from docx.shared import Pt
from lingzhi_engine.settings import MEDIA_ROOT
from django.utils.translation import gettext as _
from django.utils.translation import activate
import os
def get_model_field(model, exclude=None, include=None):
    """Return the names of *model*'s fields, filtered by exclude/include.

    Args:
        model: a Django model class (anything exposing `_meta.fields`).
        exclude: field names to drop. Defaults to no exclusions.
        include: when given, only these names are kept.

    Returns:
        A list of field names in the model's declared order.

    Fixes the original's mutable default arguments; the set-based version
    also returned names in arbitrary (hash) order, which is now
    deterministic.
    """
    excluded = set(exclude or [])
    names = [field.name for field in model._meta.fields if field.name not in excluded]
    if include:
        wanted = set(include)
        return [name for name in names if name in wanted]
    return names
def delete_old_files(path, save_seconds=3600):
    """Remove entries in *path* older than *save_seconds*, keeping .gitignore.

    Age is judged by st_mtime; entries younger than the cutoff are left alone.
    """
    cutoff = time.time() - save_seconds
    for entry in os.listdir(path):
        if entry == ".gitignore":
            continue
        full_path = os.path.join(path, entry)
        if os.stat(full_path).st_mtime < cutoff:
            os.remove(full_path)
def get_vul_count_by_agent(agent_ids, vid, user):
    """Aggregate vulnerabilities of the given agents into report-ready data.

    Returns a dict with:
      - type_summary: [{type_name, type_count, type_level}, ...]
      - levelCount: {level_id: number of vulnerabilities at that level}
      - vulDetail: {type_name: [per-vulnerability detail dict, ...]}

    When *vid* is truthy the query is narrowed to that single vulnerability.
    The *user* argument is accepted but not read in this function.
    """
    typeInfo = IastVulnerabilityModel.objects.filter(
        agent_id__in=agent_ids).values().order_by("level")
    if vid:
        typeInfo = typeInfo.filter(id=vid)
    type_summary = []
    levelCount = {}
    vulDetail = {}
    if typeInfo:
        typeArr = {}    # vulnerability type name -> occurrence count
        typeLevel = {}  # vulnerability type name -> level id (last row seen wins)
        for one in typeInfo:
            hook_type = HookType.objects.filter(pk=one['hook_type_id']).first()
            one['type'] = hook_type.name if hook_type else ''
            typeArr[one['type']] = typeArr.get(one['type'], 0) + 1
            typeLevel[one['type']] = one['level_id']
            levelCount[one['level_id']] = levelCount.get(one['level_id'], 0) + 1
            language = IastAgent.objects.filter(
                pk=one['agent_id']).values_list('language', flat=True).first()
            one['language'] = language if language is not None else ''
            if one['type'] not in vulDetail.keys():
                vulDetail[one['type']] = []
            detailStr1 = _(
                "We found that there is {1} in the {0} page, attacker can modify the value of {2} to attack:").format(
                one['uri'], one['type'], one['taint_position'])
            try:
                one['req_params'] = str(one['req_params'])
            except Exception as e:
                one['req_params'] = ""
            # Reconstructed request line: method, uri, query params, protocol
            # concatenated (note: no separator before the protocol).
            detailStr2 = str(one['http_method']) + " " + str(one['uri']) + "?" + str(one['req_params']) + str(one['http_protocol'])
            try:
                # NOTE(review): at this point one['full_stack'] is still the raw
                # JSON string (it is json.loads-ed only further below), so [-1]
                # is its last character and .get() raises — this branch appears
                # to always fall into the except, leaving fileName/rowStr empty.
                # Confirm whether the JSON was meant to be parsed first.
                fileData = one['full_stack'][-1].get("stack", "")
                pattern = r'.*?\((.*?)\).*?'
                resMatch = re.match(pattern, fileData)
                uriArr = resMatch.group(1).split(":")
                fileName = uriArr[0]
                if len(uriArr) > 1:
                    rowStr = _("{} Line").format(str(uriArr[1]))
                else:
                    rowStr = ""
            except Exception as e:
                fileName = ""
                rowStr = ""
            classname = ""
            methodname = ""
            if one['full_stack']:
                try:
                    # The last frame of the stored stack names the sink call.
                    full_stack_arr = json.loads(one['full_stack'])
                    full_stack = full_stack_arr[-1]
                    classname = str(full_stack.get("classname", ""))
                    methodname = str(full_stack.get("methodname", ""))
                except Exception as e:
                    print("======")
            detailStr3 = _("In {} {} call {}. {} (), Incoming parameters {}").format(
                str(fileName), rowStr, classname, methodname,
                str(one['taint_value']))
            cur_tile = _("{} Appears in {} {}").format(one['type'], str(one['uri']), str(one['taint_position']))
            if one['param_name']:
                cur_tile = cur_tile + "\"" + str(one['param_name']) + "\""
            vulDetail[one['type']].append({
                "title": cur_tile,
                "type_name": one['type'],
                "level_id": one['level_id'],
                "first_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(one['first_time'])),
                "latest_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(one['latest_time'])),
                "language": one['language'],
                "url": one['url'] if one['url'] else "",
                "detail_data": [detailStr1, detailStr2, detailStr3],
            })
        typeArrKeys = typeArr.keys()
        for item_type in typeArrKeys:
            type_summary.append(
                {
                    'type_name': item_type,
                    'type_count': typeArr[item_type],
                    'type_level': typeLevel[item_type]
                }
            )
    return {
        'type_summary': type_summary,
        'levelCount': levelCount,
        'vulDetail': vulDetail
    }
def get_translation_in(language, s):
    """Translate *s* while *language* is temporarily the active locale."""
    with override(language):
        translated = _(s)
        return translated
class ExportPort():
    """Generates vulnerability reports (docx / pdf / xlsx) for a project and
    stores the rendered file bytes on the report row."""

    def export(self, report):
        """Entry point: render the report when the report row exists."""
        # print(_("Department does not exist"))
        if report:
            self.generate_report(report)

    def get_agents_with_project_id(self, pid):
        """Return the ids of all agents bound to project *pid*.

        :param pid: project id
        :return: list of agent ids
        """
        relations = IastAgent.objects.filter(bind_project_id=pid).values("id")
        agent_ids = [relation['id'] for relation in relations]
        return agent_ids

    def generate_report(self, report):
        """Collect vulnerability data for the report's project/vulnerability,
        render it in the requested format, persist the file bytes on the
        report row, and queue a success message for the user."""
        # Default to Chinese when the report does not specify a language.
        if report.language:
            activate(report.language)
        else:
            activate("zh")
        report_type = report.type  # renamed from `type` (shadowed the builtin)
        pid = 0
        if report.project:
            pid = report.project.id
        vid = report.vul_id
        user = report.user
        timestamp = time.time()
        project = IastProject.objects.filter(Q(id=pid)).first()
        vul = IastVulnerabilityModel.objects.filter(pk=vid).first()
        if project or vul:
            if not project:
                # Single-vulnerability export without a project: build a
                # placeholder namedtuple exposing the fields the renderers read.
                Project = namedtuple(
                    'Project',
                    get_model_field(IastProject,
                                    include=[
                                        'id', 'name', 'mode', 'latest_time',
                                        'vul_count', 'agent_count'
                                    ]))
                project = Project(id=0,
                                  name='NAN',
                                  mode='NAN',
                                  latest_time=time.time(),
                                  vul_count=1,
                                  agent_count=0)
            agent_ids = self.get_agents_with_project_id(project.id)
            count_result = get_vul_count_by_agent(agent_ids, vid, user)
            levelInfo = IastVulLevel.objects.all()
            file_path = ""
            if report_type == 'docx':
                file_path = self.generate_word_report(user, project, vul, count_result, levelInfo, timestamp)
            elif report_type == 'pdf':
                file_path = self.generate_pdf_report(user, project, vul, count_result, levelInfo, timestamp)
            elif report_type == 'xlsx':
                file_path = self.generate_xlsx_report(user, project, vul, count_result, levelInfo, timestamp)
            if file_path != "":
                # Read the rendered file back and persist it on the report row
                # (context manager replaces the unclosed open/read/close trio).
                with open(file_path, "rb") as bin_file:
                    file_data = bin_file.read()
                report.file = file_data
                report.status = 1
                report.save()
            IastMessage.objects.create(
                message= str(project.name) + " " + _("Report export success"),
                relative_url="/api/v1/project/report/download?id=" + str(report.id),
                create_time=time.time(),
                message_type=IastMessageType.objects.filter(pk=1).first(),
                to_user_id=report.user.id,
            )

    def generate_word_report(self, user, project, vul, count_result, levelInfo, timestamp):
        """Render the report as a .docx via python-docx; returns the file path."""
        document = Document()
        # Custom paragraph styles for the four heading levels used below.
        document.styles.add_style('TitleOne', WD_STYLE_TYPE.PARAGRAPH).font.name = 'Arial'
        document.styles.add_style('TitleTwo', WD_STYLE_TYPE.PARAGRAPH).font.name = 'Arial'
        document.styles.add_style('TitleThree', WD_STYLE_TYPE.PARAGRAPH).font.name = 'Arial'
        document.styles.add_style('TitleFour', WD_STYLE_TYPE.PARAGRAPH).font.name = 'Arial'
        # Cover page: project name, mode, timestamp, report title, footer.
        document.add_heading(u'%s' % project.name, 0)
        document.add_heading(u'%s' % project.mode, 2)
        timeArray = time.localtime(project.latest_time)
        otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        pTime = document.add_paragraph(u'%s' % otherStyleTime)
        pTime.paragraph_format.space_before = Pt(400)
        pTime.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        pReport = document.add_paragraph(_(u'Security Testing Report'))
        pReport.paragraph_format.line_spacing = Pt(20)
        pReport.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        footer = document.sections[0].footer
        paragraph = footer.paragraphs[0]
        paragraph.add_run(u'北京安全共识科技有限公司')
        paragraph.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        document.add_page_break()
        # Section 1: project information table.
        oneTitle = document.add_paragraph()
        oneTitle.add_run(_(u'First, project information')).font.name = 'Arial'
        oneTitle.style = "TitleOne"
        table = document.add_table(rows=1, cols=2, style='Table Grid')
        new_cells = table.add_row().cells
        new_cells[0].text = _('Application name')
        new_cells[1].text = project.name
        new_cells = table.add_row().cells
        new_cells[0].text = _('Author')
        new_cells[1].text = user.username
        new_cells = table.add_row().cells
        new_cells[0].text = _('Application type')
        new_cells[1].text = project.mode
        new_cells = table.add_row().cells
        new_cells[0].text = _('Number of Vulnerability')
        new_cells[1].text = str(project.vul_count)
        new_cells = table.add_row().cells
        new_cells[0].text = _('Number of Agent')
        new_cells[1].text = str(project.agent_count)
        new_cells = table.add_row().cells
        new_cells[0].text = _('Latest time')
        new_cells[1].text = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        # Map level ids <-> display names.
        levelNameArr = {}
        levelIdArr = {}
        if levelInfo:
            for level_item in levelInfo:
                levelNameArr[level_item.name_value] = level_item.id
                levelIdArr[level_item.id] = level_item.name_value
        type_summary = count_result['type_summary']
        levelCount = count_result['levelCount']
        vulDetail = count_result['vulDetail']
        # Section 2: analysis.
        oneTitle = document.add_paragraph()
        oneTitle.add_run(_(u'Second, the result analysis'))
        oneTitle.style = "TitleOne"
        twoTitle = document.add_paragraph()
        twoTitle.add_run(_(u'2.1 Vulnerability Severity Levels Distribution'))
        twoTitle.style = "TitleTwo"
        levelCountArr = []
        if levelCount:
            for ind in levelCount.keys():
                levelCountArr.append(str(levelIdArr[ind]) + " " + str(levelCount[ind]))
        levelCountStr = ",".join(levelCountArr)
        document.add_paragraph(levelCountStr)
        twoTitle = document.add_paragraph()
        twoTitle.add_run(_(u'2.2 Distribution of Vulnerability'))
        twoTitle.style = "TitleTwo"
        table = document.add_table(rows=1, cols=3, style='Table Grid')
        hdr_cells = table.rows[0].cells
        hdr_cells[0].text = _('Severity levels')
        hdr_cells[1].text = _('Vulnerability type name')
        hdr_cells[2].text = _('Number')
        if type_summary:
            for type_item in type_summary:
                new_cells = table.add_row().cells
                new_cells[0].text = levelIdArr[type_item['type_level']]
                new_cells[1].text = type_item['type_name']
                new_cells[2].text = str(type_item['type_count'])
        document.add_page_break()
        # Section 2.3: one sub-section per vulnerability type, one entry per finding.
        twoTitle = document.add_paragraph()
        twoTitle.add_run(_(u'2.3 Vulnerability details'))
        twoTitle.style = "TitleTwo"
        if vulDetail:
            type_ind = 1
            for vul in vulDetail.keys():
                threeTitle = document.add_paragraph()
                threeTitle.add_run(u'%s(%s)' % ("2.3." + str(type_ind) + " " + vul, len(vulDetail[vul])))
                threeTitle.style = "TitleThree"
                if vulDetail[vul]:
                    ind = 1
                    for one in vulDetail[vul]:
                        p = document.add_paragraph()
                        p.add_run("2.3." + str(type_ind) + "." + str(ind) + " " + one['title']).bold = True
                        p.style = "TitleFour"
                        ind = ind + 1
                        document.add_heading(_(u'Summary'), level=4)
                        table = document.add_table(rows=1, cols=2, style='Table Grid')
                        new_cells = table.add_row().cells
                        new_cells[0].text = _("Severity level")
                        new_cells[1].text = levelIdArr[one['level_id']]
                        new_cells = table.add_row().cells
                        new_cells[0].text = _("First scan time")
                        new_cells[1].text = one['first_time']
                        new_cells = table.add_row().cells
                        new_cells[0].text = _("Last scan time")
                        new_cells[1].text = one['latest_time']
                        new_cells = table.add_row().cells
                        new_cells[0].text = _("Development language")
                        new_cells[1].text = one['language']
                        new_cells = table.add_row().cells
                        new_cells[0].text = _("Vulnerability URL")
                        new_cells[1].text = one['url']
                        document.add_heading(_(u'Vulnerability description'), level=4)
                        if one['detail_data']:
                            for item in one['detail_data']:
                                document.add_paragraph(u'%s' % item)
                type_ind = type_ind + 1
        # Final style sizes for the custom headings.
        document.styles['TitleOne'].font.size = Pt(20)
        document.styles['TitleOne'].font.name = "Arial"
        document.styles['TitleTwo'].font.size = Pt(18)
        document.styles['TitleTwo'].font.name = "Arial"
        document.styles['TitleThree'].font.size = Pt(16)
        document.styles['TitleFour'].font.size = Pt(14)
        filename = f"{MEDIA_ROOT}/reports/vul-report-{user.id}-{timestamp}.docx"
        document.save(filename)
        return filename

    def generate_pdf_report(self, user, project, vul, count_result, levelInfo, timestamp):
        """Render the report to HTML via a Django template, convert it to PDF
        with wkhtmltopdf, and return the pdf path."""
        from django.template.loader import render_to_string
        levelNameArr = {}
        levelIdArr = {}
        if levelInfo:
            for level_item in levelInfo:
                levelNameArr[level_item.name_value] = level_item.id
                levelIdArr[level_item.id] = level_item.name_value
        type_summary = count_result['type_summary']
        levelCount = count_result['levelCount']
        vulDetail = count_result['vulDetail']
        levelCountArr = []
        if levelCount:
            for ind in levelCount.keys():
                levelCountArr.append(str(levelIdArr[ind]) + " " + str(levelCount[ind]))
        levelCountStr = ",".join(levelCountArr)
        vulTypeTableBodyRows = []
        if type_summary:
            for type_item in type_summary:
                vulTypeTableBodyRow = {
                    "type_level": levelIdArr[type_item['type_level']],
                    "type_name": type_item['type_name'],
                    "type_count": str(type_item['type_count'])
                }
                vulTypeTableBodyRows.append(vulTypeTableBodyRow)
        vulTypeDetailArray = []
        if vulDetail:
            type_ind = 1
            for vul in vulDetail.keys():
                vulTypeDetail = {
                    "title": u'%s(%s)' % ("2.3." + str(type_ind) + " " + vul, len(vulDetail[vul])),
                    "vuls": []
                }
                if vulDetail[vul]:
                    ind = 1
                    for one in vulDetail[vul]:
                        oneVul = {
                            "title": "2.3." + str(type_ind) + "." + str(ind) + " " + one['title'],
                            "summary": _(u'Summary'),
                            "severity_level": _("Severity level"),
                            "level_id": levelIdArr[one['level_id']],
                            "first_scan_time": _("First scan time"),
                            "first_time": one['first_time'],
                            # Fixed copy-paste bug: these two previously reused
                            # the "First scan time" label and one['first_time']
                            # (compare the xlsx renderer below).
                            "last_scan_time": _("Last scan time"),
                            "latest_time": one['latest_time'],
                            "development_language": _("Development language"),
                            "language": one['language'],
                            "vulnerability_url": _("Vulnerability URL"),
                            "url": one['url'],
                            "description": _(u'Vulnerability description'),
                            "detail": "",
                        }
                        vulTypeDetail['vuls'].append(
                            oneVul
                        )
                        ind = ind + 1
                        if one['detail_data']:
                            for item in one['detail_data']:
                                oneVul['detail'] += u'%s' % item
                vulTypeDetailArray.append(vulTypeDetail)
                type_ind = type_ind + 1
        pdf_filename = f"{MEDIA_ROOT}/reports/vul-report-{user.id}-{timestamp}.pdf"
        html_filename = f"{MEDIA_ROOT}/reports/vul-report-{user.id}-{timestamp}.html"
        rendered = render_to_string(
            './pdf.html',
            {
                "user": user,
                "project": project,
                "vul": vul,
                "count_result": count_result,
                "level_info": levelInfo,
                "time_str": time.strftime('%Y-%m-%d %H:%M', time.localtime(timestamp)),
                "levelCountStr": levelCountStr,
                "vulTypeDetailArray": vulTypeDetailArray,
                "vulTypeTableBodyRows": vulTypeTableBodyRows,
                "i18n": {
                    "application_name": _("Application name"),
                    "author": _("Author"),
                    "number_of_vulnerability": _("Number of Vulnerability"),
                    "number_of_agent": _("Number of Agent"),
                    "first_project_information": _("First, project information"),
                    "second_the_result_analysis": _("Second, the result analysis"),
                    "vulnerability_severity_levels_distribution": _("Vulnerability Severity Levels Distribution"),
                    "distribution_of_vulnerability": _("Distribution of Vulnerability"),
                    "severity_levels": _("Severity levels"),
                    "vulnerability_type_name": _("Vulnerability type name"),
                    "number": _("Number"),
                    "vulnerability_details": _("Vulnerability details"),
                    "security_testing_report": _(u'Security Testing Report')
                }
            }
        )
        # Write the intermediate HTML (context manager closes the handle).
        with open(html_filename, 'w') as html_file:
            html_file.write(rendered)
        # Both paths are generated internally (user id + timestamp), so the
        # shell command receives no untrusted input.
        os.system("cat {} | /opt/dongtai/engine/bin/wkhtmltopdf --margin-top 10 --margin-bottom 10 - {} ".format(
            html_filename,
            pdf_filename,
        ))
        delete_old_files(f"{MEDIA_ROOT}/reports/")
        return pdf_filename

    def generate_xlsx_report(self, user, project, vul, count_result, levelInfo, timestamp):
        """Render the report as an .xlsx workbook, one row per vulnerability;
        returns the file path."""
        levelNameArr = {}
        levelIdArr = {}
        if levelInfo:
            for level_item in levelInfo:
                levelNameArr[level_item.name_value] = level_item.id
                levelIdArr[level_item.id] = level_item.name_value
        type_summary = count_result['type_summary']
        levelCount = count_result['levelCount']
        vulDetail = count_result['vulDetail']
        levelCountArr = []
        if levelCount:
            for ind in levelCount.keys():
                levelCountArr.append(str(levelIdArr[ind]) + " " + str(levelCount[ind]))
        levelCountStr = ",".join(levelCountArr)
        vulTypeTableBodyRows = []
        if type_summary:
            for type_item in type_summary:
                vulTypeTableBodyRow = {
                    "type_level": levelIdArr[type_item['type_level']],
                    "type_name": type_item['type_name'],
                    "type_count": str(type_item['type_count'])
                }
                vulTypeTableBodyRows.append(vulTypeTableBodyRow)
        vulTypeDetailArray = []
        if vulDetail:
            type_ind = 1
            for vul in vulDetail.keys():
                vulTypeDetail = {
                    "title": vul,
                    "vuls": []
                }
                vulTypeDetailArray.append(vulTypeDetail)
                if vulDetail[vul]:
                    ind = 1
                    for one in vulDetail[vul]:
                        oneVul = {
                            "title": "2.3." + str(type_ind) + "." + str(ind) + " " + one['title'],
                            "summary": _(u'Summary'),
                            "severity_level": _("Severity level"),
                            "level_id": levelIdArr[one['level_id']],
                            "first_scan_time": _("First scan time"),
                            "first_time": one['first_time'],
                            "last_scan_time": _("Last scan time"),
                            "latest_time": one['latest_time'],
                            "development_language": _("Development language"),
                            "language": one['language'],
                            "vulnerability_url": _("Vulnerability URL"),
                            "url": one['url'],
                            "description": _(u'Vulnerability description'),
                            "detail": "",
                        }
                        vulTypeDetail['vuls'].append(
                            oneVul
                        )
                        ind = ind + 1
                        if one['detail_data']:
                            for item in one['detail_data']:
                                oneVul['detail'] += u'%s' % item
                type_ind = type_ind + 1
        from openpyxl import Workbook
        wb = Workbook()
        sheet1 = wb.active
        xlsx_filename = f"{MEDIA_ROOT}/reports/vul-report-{user.id}-{timestamp}.xlsx"
        # Header row.
        sheet1['A1'] = str(_("Vulnerability type name"))
        sheet1['B1'] = str(_("Severity levels"))
        sheet1['C1'] = str(_("First scan time"))
        sheet1['D1'] = str(_("Last scan time"))
        sheet1['E1'] = str(_("Development language"))
        sheet1['F1'] = str(_("Vulnerability URL"))
        sheet1['G1'] = str(_('Vulnerability description'))
        line = 0
        for vulTypeDetail in vulTypeDetailArray:
            line += 1
            for oneVul in vulTypeDetail['vuls']:
                sheet1.append(
                    [vulTypeDetail['title'], oneVul['level_id'], oneVul['first_time'], oneVul['latest_time'],
                     oneVul['language'],
                     oneVul['url'], oneVul['detail']])
        # Auto-fit each column width to its longest cell value.
        for col in sheet1.columns:
            max_length = 0
            column = col[0].column_letter
            for cell in col:
                try:
                    if len(str(cell.value)) > max_length:
                        max_length = len(str(cell.value))
                except:
                    pass
            adjusted_width = (max_length + 2) * 1.2
            sheet1.column_dimensions[column].width = adjusted_width
        wb.save(xlsx_filename)
        delete_old_files(f"{MEDIA_ROOT}/reports/")
        return xlsx_filename
|
import re
from itertools import permutations
def calculate(expression, perm):
    """Evaluate *expression* applying the operators in *perm* priority order.

    Each operator in turn is fully reduced: every `a op b` match is
    evaluated and substituted back, parenthesised, until no match remains.
    Returns the absolute value of the final result.
    """
    for operator in perm:
        pattern = re.compile(r"(\d+|\(-?\d+\))+[%s](\d+|\(-?\d+\))" % operator)
        match = pattern.search(expression)
        while match is not None:
            # Wrap the partial result in parentheses so negative values
            # stay parseable by later passes.
            reduced = "(" + str(eval(match.group())) + ")"
            expression = expression.replace(match.group(), reduced)
            match = pattern.search(expression)
    return abs(eval(expression))


def solution(expression):
    """Return the maximum |value| of *expression* over all operator-priority orders."""
    operators = set(re.findall(r"[*+-]", expression))
    return max(calculate(expression, order) for order in permutations(operators))
if __name__ == '__main__':
    # Quick manual check against the sample expression.
    print(solution("50*6-3*2"))
|
import os
import sys
import shutil
from core import TextStyle
__all__ = [
'link_command',
'unlink_command',
'remove_command',
'list_command',
'root_command'
]
def container_exist(**kwargs):
    """Return True when the named container directory exists under the root.

    Expects app_args, logger and manbrew_root in kwargs; logs a warning
    (with the container name bolded) when the directory is missing.
    """
    app_args = kwargs['app_args']
    logger = kwargs['logger']
    manbrew_root = kwargs['manbrew_root']
    container_path = os.path.join(manbrew_root, "Containers", app_args.container)
    if os.path.exists(container_path):
        return True
    logger.warning("%s does not exist.", TextStyle.bold(app_args.container))
    return False
def link_command(**kwargs):
    """Link an existing container into the destination path from the CLI args."""
    if not container_exist(**kwargs):
        return
    app_args = kwargs['app_args']
    manager = kwargs['manager']
    logger = kwargs['logger']  # unused, but the lookup is kept for parity
    manbrew_root = kwargs['manbrew_root']
    src_path = os.path.join(manbrew_root, "Containers", app_args.container)
    manager.link(container=app_args.container, src_path=src_path, dst_path=app_args.dst)
def unlink_command(**kwargs):
    """Unlink the named container if it exists."""
    if not container_exist(**kwargs):
        return
    app_args = kwargs['app_args']
    manager = kwargs['manager']
    manager.unlink(container=app_args.container)
def remove_command(**kwargs):
    """Unlink (when linked) and delete a container directory."""
    if not container_exist(**kwargs):
        return
    app_args = kwargs['app_args']
    manager = kwargs['manager']
    logger = kwargs['logger']
    manbrew_root = kwargs['manbrew_root']
    if manager.container_linked(app_args.container):
        manager.unlink(container=app_args.container)
    target_dir = os.path.join(manbrew_root, "Containers", app_args.container)
    if os.path.exists(target_dir):
        logger.info("remove dir: %s", target_dir)
        shutil.rmtree(target_dir)
    logger.info("%s removed.", TextStyle.bold(app_args.container))
def list_command(**kwargs):
    """Log every container directory together with its link status."""
    manager = kwargs['manager']
    logger = kwargs['logger']
    container_dir = os.path.join(kwargs['manbrew_root'], "Containers")
    message = "List All Containers\n"
    bold_names = []
    for entry in os.listdir(container_dir):
        if not os.path.isdir(os.path.join(container_dir, entry)):
            continue
        # Each container contributes a "%s" placeholder; the bolded names
        # are supplied lazily to logger.info below.
        suffix = ": linked\n" if manager.container_linked(entry) else ": not linked\n"
        message += "%s" + suffix
        bold_names.append(TextStyle.bold(entry))
    logger.info(message, *bold_names)
def root_command(**kwargs):
    """Write the manbrew root path to stdout (no trailing newline)."""
    root = kwargs['manbrew_root']
    sys.stdout.write(root)
|
from RamachanDraw.fetch import fetch
from RamachanDraw.phi_psi import phi_psi
from RamachanDraw.plot import plot
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.