Dataset schema (column: type, value range; ⌀ = nullable):
hexsha: string (length 40) | size: int64 (4 to 1.02M) | ext: string (8 classes) | lang: string (1 class) | max_stars_repo_path: string (4 to 209) | max_stars_repo_name: string (5 to 121) | max_stars_repo_head_hexsha: string (length 40) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (length 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (length 24) ⌀ | max_issues_repo_path: string (4 to 209) | max_issues_repo_name: string (5 to 121) | max_issues_repo_head_hexsha: string (length 40) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (length 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (length 24) ⌀ | max_forks_repo_path: string (4 to 209) | max_forks_repo_name: string (5 to 121) | max_forks_repo_head_hexsha: string (length 40) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (length 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (length 24) ⌀ | content: string (4 to 1.02M) | avg_line_length: float64 (1.07 to 66.1k) | max_line_length: int64 (4 to 266k) | alphanum_fraction: float64 (0.01 to 1)
hexsha: 17108f80d9976671704207b0f343135d3f08816f | size: 34,227 | ext: py | lang: Python
repo (stars/issues/forks): karlicoss/cachew @ 49d349f5c32ae25d6f5a36279c8f0c5090242da2 | path: src/cachew/__init__.py | licenses: ["MIT"]
stars: 177 (2019-08-20T18:42:12.000Z to 2022-02-24T11:47:07.000Z) | issues: 11 (2020-01-05T13:33:26.000Z to 2021-07-06T14:52:19.000Z) | forks: 4 (2020-01-09T13:03:19.000Z to 2022-02-13T02:03:23.000Z)
content:
from pkg_resources import get_distribution, DistributionNotFound
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = 'unknown'
finally:
del get_distribution, DistributionNotFound
__author__ = "Dima Gerasimov"
__copyright__ = "Dima Gerasimov"
__license__ = "mit"
import functools
import logging
from itertools import chain, islice
import inspect
from datetime import datetime, date
import stat
import tempfile
from pathlib import Path
import time
import sqlite3
import sys
import typing
from typing import (Any, Callable, Iterator, List, NamedTuple, Optional, Tuple,
Type, Union, TypeVar, Generic, Sequence, Iterable, Set, cast)
import dataclasses
import warnings
import appdirs # type: ignore[import]
import sqlalchemy # type: ignore[import]
from sqlalchemy import Column, Table, event
if sys.version_info[1] < 7:
from .compat import fromisoformat
else:
fromisoformat = datetime.fromisoformat
from .compat import fix_sqlalchemy_StatementError_str
try:
fix_sqlalchemy_StatementError_str()
except Exception as e:
# todo warn or something??
pass
# in case of changes in the way cachew stores data, this should be changed to discard old caches
CACHEW_VERSION: str = __version__
PathIsh = Union[Path, str]
'''
Global settings, you can override them after importing cachew
'''
class settings:
'''
Toggle to disable caching
'''
ENABLE: bool = True
DEFAULT_CACHEW_DIR: PathIsh = Path(appdirs.user_cache_dir('cachew'))
'''
Set to true if you want to fail early. Otherwise falls back to non-cached version
'''
THROW_ON_ERROR: bool = False
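# Illustrative sketch (hypothetical, not part of the original module): the
# docstring above notes that these settings can be overridden after importing
# cachew; a minimal example, e.g. from a test suite or application setup code:
def _example_override_settings() -> None:
    import cachew
    cachew.settings.ENABLE = False                     # disable caching globally
    cachew.settings.THROW_ON_ERROR = True              # fail fast instead of falling back
    cachew.settings.DEFAULT_CACHEW_DIR = '/tmp/cache'  # PathIsh, so a str is accepted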
def get_logger() -> logging.Logger:
return logging.getLogger('cachew')
class IsoDateTime(sqlalchemy.TypeDecorator):
# in theory could use something more efficient? e.g. blob for encoded datetime and tz?
# but practically, the difference seems to be pretty small, so perhaps fine for now
impl = sqlalchemy.String
@property
def python_type(self): return datetime
def process_literal_param(self, value, dialect): raise NotImplementedError() # make pylint happy
def process_bind_param(self, value: Optional[datetime], dialect) -> Optional[str]:
if value is None:
return None
# ok, it's a bit hacky... attempt to preserve pytz information
iso = value.isoformat()
tz = getattr(value, 'tzinfo', None)
if tz is None:
return iso
try:
import pytz # type: ignore
except ImportError:
self.warn_pytz()
return iso
else:
if isinstance(tz, pytz.BaseTzInfo):
return iso + ' ' + tz.zone
else:
return iso
def process_result_value(self, value: Optional[str], dialect) -> Optional[datetime]:
if value is None:
return None
spl = value.split(' ')
dt = fromisoformat(spl[0])
if len(spl) <= 1:
return dt
zone = spl[1]
# else attempt to decipher pytz tzinfo
try:
import pytz # type: ignore
except ImportError:
self.warn_pytz()
return dt
else:
tz = pytz.timezone(zone)
return dt.astimezone(tz)
def warn_pytz(self) -> None:
warnings.warn('install pytz for better timezone support while serializing with cachew')
# a bit hacky, but works...
class IsoDate(IsoDateTime):
impl = sqlalchemy.String
@property
def python_type(self): return date
def process_literal_param(self, value, dialect): raise NotImplementedError() # make pylint happy
def process_result_value(self, value: Optional[str], dialect) -> Optional[date]: # type: ignore
res = super().process_result_value(value, dialect)
if res is None:
return None
return res.date()
import json
from typing import Dict
class Json(sqlalchemy.TypeDecorator):
impl = sqlalchemy.String
@property
def python_type(self): return Dict
def process_literal_param(self, value, dialect): raise NotImplementedError() # make pylint happy
def process_bind_param(self, value: Optional[Dict], dialect) -> Optional[str]:
if value is None:
return None
return json.dumps(value)
def process_result_value(self, value: Optional[str], dialect) -> Optional[datetime]:
if value is None:
return None
return json.loads(value)
jtypes = (int, float, bool, type(None))
class ExceptionAdapter(sqlalchemy.TypeDecorator):
'''
Enables support for caching Exceptions. Exception is treated as JSON and serialized.
It's useful for defensive error handling, in case of cachew in particular for preserving error state.
I elaborate on it here: [mypy-driven error handling](https://beepb00p.xyz/mypy-error-handling.html#kiss).
'''
impl = Json
@property
def python_type(self): return Exception
def process_literal_param(self, value, dialect): raise NotImplementedError() # make pylint happy
def process_bind_param(self, value: Optional[Exception], dialect) -> Optional[List[Any]]:
if value is None:
return None
sargs: List[Any] = []
for a in value.args:
if any(isinstance(a, t) for t in jtypes):
sargs.append(a)
elif isinstance(a, date):
sargs.append(a.isoformat())
else:
sargs.append(str(a))
return sargs
def process_result_value(self, value: Optional[str], dialect) -> Optional[Exception]:
if value is None:
return None
# sadly, can't do much to convert back from the strings? Unless I serialize the type info as well?
return Exception(*value)
PRIMITIVES = {
str : sqlalchemy.String,
int : sqlalchemy.Integer,
float : sqlalchemy.Float,
bool : sqlalchemy.Boolean,
datetime : IsoDateTime,
date : IsoDate,
dict : Json,
list : Json,
Exception: ExceptionAdapter,
}
Types = Union[
Type[str],
Type[int],
Type[float],
Type[bool],
Type[datetime],
Type[date],
Type[dict],
Type[list],
Type[Exception],
Type[NamedTuple],
]
Values = Union[
str,
int,
float,
bool,
datetime,
date,
dict,
list,
Exception,
NamedTuple,
]
# TODO assert all PRIMITIVES are also in Types/Values?
def is_primitive(cls: Type) -> bool:
"""
>>> from typing import Dict, Any
>>> is_primitive(int)
True
>>> is_primitive(set)
False
>>> is_primitive(dict)
True
"""
return cls in PRIMITIVES
# https://stackoverflow.com/a/2166841/706389
def is_dataclassish(t: Type) -> bool:
"""
>>> is_dataclassish(int)
False
>>> is_dataclassish(tuple)
False
>>> from typing import NamedTuple
>>> class N(NamedTuple):
... field: int
>>> is_dataclassish(N)
True
>>> from dataclasses import dataclass
>>> @dataclass
... class D:
... field: str
>>> is_dataclassish(D)
True
"""
if dataclasses.is_dataclass(t):
return True
b = t.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(t, '_fields', None)
if not isinstance(f, tuple):
return False
# pylint: disable=unidiomatic-typecheck
return all(type(n) == str for n in f)
class CachewException(RuntimeError):
pass
def get_union_args(cls) -> Optional[Tuple[Type]]:
if getattr(cls, '__origin__', None) != Union:
return None
args = cls.__args__
args = [e for e in args if e != type(None)]
assert len(args) > 0
return args
def is_union(cls):
return get_union_args(cls) is not None
def strip_optional(cls) -> Tuple[Type, bool]:
"""
>>> from typing import Optional, NamedTuple
>>> strip_optional(Optional[int])
(<class 'int'>, True)
>>> class X(NamedTuple):
... x: int
>>> strip_optional(X)
(<class 'cachew.X'>, False)
"""
is_opt: bool = False
args = get_union_args(cls)
if args is not None and len(args) == 1:
cls = args[0] # meh
is_opt = True
return (cls, is_opt)
def strip_generic(tp):
"""
>>> strip_generic(List[int])
<class 'list'>
>>> strip_generic(str)
<class 'str'>
"""
if sys.version_info[1] < 7:
# pylint: disable=no-member
if isinstance(tp, typing.GenericMeta):
return tp.__extra__ # type: ignore
else:
GA = getattr(typing, '_GenericAlias') # ugh, can't make both mypy and pylint happy here?
if isinstance(tp, GA):
return tp.__origin__
return tp
# release mode friendly assert
def kassert(x: bool) -> None:
if x is False:
raise AssertionError
NT = TypeVar('NT')
# sadly, bound=NamedTuple is not working yet in mypy
# https://github.com/python/mypy/issues/685
class NTBinder(NamedTuple):
"""
>>> class Job(NamedTuple):
... company: str
... title: Optional[str]
>>> class Person(NamedTuple):
... name: str
... age: int
... job: Optional[Job]
NTBinder is a helper class for interacting with the sqlite database.
Hierarchy is flattened:
>>> binder = NTBinder.make(Person)
>>> [(c.name, type(c.type)) for c in binder.columns]
... # doctest: +NORMALIZE_WHITESPACE
[('name', <class 'sqlalchemy.sql.sqltypes.String'>),
('age', <class 'sqlalchemy.sql.sqltypes.Integer'>),
('_job_is_null', <class 'sqlalchemy.sql.sqltypes.Boolean'>),
('job_company', <class 'sqlalchemy.sql.sqltypes.String'>),
('job_title', <class 'sqlalchemy.sql.sqltypes.String'>)]
>>> person = Person(name='alan', age=40, job=None)
to_row converts object to a sql-friendly tuple. job=None, so we end up with True in _job_is_null field
>>> tuple(binder.to_row(person))
('alan', 40, True, None, None)
from_row does reverse conversion
>>> binder.from_row(('alan', 40, True, None, None))
Person(name='alan', age=40, job=None)
>>> binder.from_row(('ann', 25, True, None, None, 'extra'))
Traceback (most recent call last):
...
cachew.CachewException: unconsumed items in iterator ['extra']
"""
name : Optional[str] # None means toplevel
type_ : Types
span : int # not sure if span should include optional col?
primitive: bool
optional : bool
union : Optional[Type] # helper, which isn't None if type is Union
fields : Sequence[Any] # mypy can't handle cyclic definition at this point :(
@staticmethod
def make(tp: Type, name: Optional[str]=None) -> 'NTBinder':
tp, optional = strip_optional(tp)
union: Optional[Type]
fields: Tuple[Any, ...]
primitive: bool
union_args = get_union_args(tp)
if union_args is not None:
CachewUnion = NamedTuple('_CachewUnionRepr', [ # type: ignore[misc]
(x.__name__, Optional[x]) for x in union_args
])
union = CachewUnion
primitive = False
fields = (NTBinder.make(tp=CachewUnion, name='_cachew_union_repr'),)
span = 1
else:
union = None
tp = strip_generic(tp)
primitive = is_primitive(tp)
if primitive:
if name is None:
name = '_cachew_primitive' # meh. presumably, top level
if primitive:
fields = ()
span = 1
else:
annotations = getattr(tp, '__annotations__', None)
if annotations is None:
raise CachewException(f"{tp}: doesn't look like a supported type to cache. See https://github.com/karlicoss/cachew#features for the list of supported types.")
fields = tuple(NTBinder.make(tp=ann, name=fname) for fname, ann in annotations.items())
span = sum(f.span for f in fields) + (1 if optional else 0)
return NTBinder(
name=name,
type_=tp,
span=span,
primitive=primitive,
optional=optional,
union=union,
fields=fields,
)
@property
def columns(self) -> List[Column]:
return list(self.iter_columns())
# TODO not necessarily namedtuple? could be primitive type
def to_row(self, obj: NT) -> Tuple[Optional[Values], ...]:
return tuple(self._to_row(obj))
def from_row(self, row: Iterable[Any]) -> NT:
riter = iter(row)
res = self._from_row(riter)
remaining = list(islice(riter, 0, 1))
if len(remaining) != 0:
raise CachewException(f'unconsumed items in iterator {remaining}')
assert res is not None # nosec # help mypy; top level will not be None
return res
def _to_row(self, obj) -> Iterator[Optional[Values]]:
if self.primitive:
yield obj
elif self.union is not None:
CachewUnion = self.union
(uf,) = self.fields
# TODO assert only one of them matches??
union = CachewUnion(**{
f.name: obj if isinstance(obj, f.type_) else None
for f in uf.fields
})
yield from uf._to_row(union)
else:
if self.optional:
is_none = obj is None
yield is_none
else:
is_none = False; kassert(obj is not None) # TODO hmm, that last assert is not very symmetric...
if is_none:
for _ in range(self.span - 1):
yield None
else:
yield from chain.from_iterable(
f._to_row(getattr(obj, f.name))
for f in self.fields
)
def _from_row(self, row_iter):
if self.primitive:
return next(row_iter)
elif self.union is not None:
CachewUnion = self.union
(uf,) = self.fields
# TODO assert only one of them is not None?
union_params = [
r
for r in uf._from_row(row_iter) if r is not None
]
kassert(len(union_params) == 1); return union_params[0]
else:
if self.optional:
is_none = next(row_iter)
else:
is_none = False
if is_none:
for _ in range(self.span - 1):
x = next(row_iter); kassert(x is None) # huh. assert is kinda opposite of producing value
return None
else:
return self.type_(*(
f._from_row(row_iter)
for f in self.fields
))
# TODO not sure if we want to allow optionals on top level?
def iter_columns(self) -> Iterator[Column]:
used_names: Set[str] = set()
def col(name: str, tp) -> Column:
while name in used_names:
name = '_' + name
used_names.add(name)
return Column(name, tp)
if self.primitive:
if self.name is None: raise AssertionError
yield col(self.name, PRIMITIVES[self.type_])
else:
prefix = '' if self.name is None else self.name + '_'
if self.optional:
yield col(f'_{prefix}is_null', sqlalchemy.Boolean)
for f in self.fields:
for c in f.iter_columns():
yield col(f'{prefix}{c.name}', c.type)
def __str__(self):
lines = [' ' * level + str(x.name) + ('?' if x.optional else '') + f' <span {x.span}>' for level, x in self.flatten()]
return '\n'.join(lines)
def __repr__(self):
return str(self)
def flatten(self, level=0):
yield (level, self)
for f in self.fields:
yield from f.flatten(level=level + 1)
# TODO better name to represent what it means?
SourceHash = str
class DbHelper:
def __init__(self, db_path: Path, cls: Type) -> None:
self.engine = sqlalchemy.create_engine(f'sqlite:///{db_path}', connect_args={'timeout': 0})
# NOTE: timeout is necessary so we don't lose time waiting during recursive calls
# by default, it's several seconds? you'd see 'test_recursive' test performance degrade
@event.listens_for(self.engine, 'connect')
def set_sqlite_pragma(dbapi_connection, connection_record):
# without wal, concurrent reading/writing is not gonna work
# ugh. that's odd, how are we supposed to set WAL if the very fact of setting wal might lock the db?
while True:
try:
dbapi_connection.execute('PRAGMA journal_mode=WAL')
break
except sqlite3.OperationalError as oe:
if 'database is locked' not in str(oe):
# ugh, pretty annoying that exception doesn't include database path for some reason
raise CachewException(f'Error while setting WAL on {db_path}') from oe
time.sleep(0.1)
self.connection = self.engine.connect()
"""
Erm... this is pretty confusing.
https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#transaction-isolation-level
Somehow without this thing sqlalchemy logs BEGIN (implicit) instead of BEGIN TRANSACTION which actually works in sqlite...
Judging by sqlalchemy/dialects/sqlite/base.py, looks like some sort of python sqlite driver problem??
test_transaction should check this behaviour
"""
@event.listens_for(self.connection, 'begin')
# pylint: disable=unused-variable
def do_begin(conn):
# NOTE there is also BEGIN CONCURRENT in newer versions of sqlite. could use it later?
conn.execute('BEGIN DEFERRED')
self.meta = sqlalchemy.MetaData(self.connection)
self.table_hash = Table('hash', self.meta, Column('value', sqlalchemy.String))
self.binder = NTBinder.make(tp=cls)
self.table_data = Table('table', self.meta, *self.binder.columns)
# TODO FIXME database/tables need to be created atomically?
def __enter__(self):
return self
def __exit__(self, *args):
self.connection.close()
HashFunction = Callable[..., SourceHash]
def default_hash(*args, **kwargs) -> SourceHash:
# TODO eh, demand hash? it's not safe either... ugh
# can lead to weird consequences otherwise..
return str(args + tuple(sorted(kwargs.items()))) # good enough??
# TODO give it as an example in docs
def mtime_hash(path: Path, *args, **kwargs) -> SourceHash:
mt = path.stat().st_mtime
return default_hash(f'{path}.{mt}', *args, **kwargs)
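# Illustrative sketch (hypothetical, not part of the original module): passing
# mtime_hash as `depends_on` invalidates the cache whenever the file's
# modification time changes; `Entry` and `parse_log` are made-up names.
def _example_mtime_hash_usage() -> None:
    class Entry(NamedTuple):
        line: str

    @cachew(depends_on=mtime_hash)
    def parse_log(path: Path) -> Iterator[Entry]:
        # pretend this is expensive parsing of a large file
        for line in path.read_text().splitlines():
            yield Entry(line=line)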
Failure = str
# pylint: disable=too-many-return-statements
def infer_type(func) -> Union[Failure, Type[Any]]:
"""
>>> from typing import Collection, NamedTuple
>>> class Person(NamedTuple):
... name: str
... age: int
>>> def person_provider() -> Collection[Person]:
... return []
>>> infer_type(person_provider)
<class 'cachew.Person'>
>>> from typing import Sequence
>>> def int_provider() -> Sequence[int]:
... return (1, 2, 3)
>>> infer_type(int_provider)
<class 'int'>
>>> from typing import Iterator, Union
>>> def union_provider() -> Iterator[Union[str, int]]:
... yield 1
... yield 'aaa'
>>> infer_type(union_provider)
typing.Union[str, int]
"""
rtype = getattr(func, '__annotations__', {}).get('return', None)
if rtype is None:
return f"no return type annotation on {func}"
def bail(reason):
return f"can't infer type from {rtype}: " + reason
# need to get erased type, otherwise subclass check would fail
if not hasattr(rtype, '__origin__'):
return bail("expected __origin__")
if not issubclass(rtype.__origin__, Iterable):
return bail("not subclassing Iterable")
args = getattr(rtype, '__args__', None)
if args is None:
return bail("has no __args__")
if len(args) != 1:
return bail(f"wrong number of __args__: {args}")
arg = args[0]
if is_primitive(arg):
return arg
if is_union(arg):
return arg # meh?
if not is_dataclassish(arg):
return bail(f"{arg} is not NamedTuple/dataclass")
return arg
# https://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet
def doublewrap(f):
@functools.wraps(f)
def new_dec(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# actual decorated function
return f(args[0])
else:
# decorator arguments
return lambda realf: f(realf, *args, **kwargs)
return new_dec
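# Illustrative sketch (hypothetical, not part of the original module): a
# decorator built with @doublewrap can be applied either bare or with keyword
# arguments, which is how the `cachew` decorator below is meant to be used.
def _example_doublewrap() -> None:
    @doublewrap
    def logged(func, prefix: str = 'calling'):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print(prefix, func.__name__)
            return func(*args, **kwargs)
        return wrapper

    @logged               # bare form
    def one() -> int:
        return 1

    @logged(prefix='>>')  # parametrised form
    def two() -> int:
        return 2

    assert one() + two() == 3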
PathProvider = Union[PathIsh, Callable[..., PathIsh]]
def cachew_error(e: Exception) -> None:
if settings.THROW_ON_ERROR:
raise e
else:
logger = get_logger()
# todo add func name?
logger.error("cachew: error while setting up cache, falling back to non-cached version")
logger.exception(e)
use_default_path = cast(Path, object())
@doublewrap
# pylint: disable=too-many-arguments
def cachew(
func=None,
cache_path: Optional[PathProvider]=use_default_path,
force_file: bool=False,
cls=None,
depends_on: HashFunction=default_hash,
logger=None,
chunk_by=100,
# NOTE: allowed values for chunk_by depend on the system.
# some systems (to be more specific, sqlite builds), it might be too large and cause issues
# ideally this would be more defensive/autodetected, maybe with a warning?
# you can use 'test_many' to experiment
# - too small values (e.g. 10) are slower than 100 (presumably, too many sql statements)
# - too large values (e.g. 10K) are slightly slower as well (not sure why?)
**kwargs,
):
r"""
Database-backed cache decorator. TODO more description?
# TODO use this doc in readme?
:param cache_path: if not set, `cachew.settings.DEFAULT_CACHEW_DIR` will be used.
:param force_file: if set to True, assume `cache_path` is a regular file (instead of a directory)
:param cls: if not set, cachew will attempt to infer it from return type annotation. See :func:`infer_type` and :func:`cachew.tests.test_cachew.test_return_type_inference`.
:param depends_on: hash function used to determine whether the underlying data has changed (and hence whether the cache needs refreshing). Can potentially benefit from the use of side effects (e.g. file modification time). TODO link to test?
:param logger: custom logger, if not specified will use logger named `cachew`. See :func:`get_logger`.
:return: iterator over original or cached items
Usage example:
>>> from typing import NamedTuple, Iterator
>>> class Link(NamedTuple):
... url : str
... text: str
...
>>> @cachew
... def extract_links(archive_path: str) -> Iterator[Link]:
... for i in range(5):
... # simulate slow IO
... # this function runs for five seconds for the purpose of demonstration, but realistically it might take hours
... import time; time.sleep(1)
... yield Link(url=f'http://link{i}.org', text=f'text {i}')
...
>>> list(extract_links(archive_path='wikipedia_20190830.zip')) # that would take about 5 seconds on first run
[Link(url='http://link0.org', text='text 0'), Link(url='http://link1.org', text='text 1'), Link(url='http://link2.org', text='text 2'), Link(url='http://link3.org', text='text 3'), Link(url='http://link4.org', text='text 4')]
>>> from timeit import Timer
>>> res = Timer(lambda: list(extract_links(archive_path='wikipedia_20190830.zip'))).timeit(number=1)
... # second run is cached, so should take less time
>>> print(f"call took {int(res)} seconds")
call took 0 seconds
>>> res = Timer(lambda: list(extract_links(archive_path='wikipedia_20200101.zip'))).timeit(number=1)
... # now file has changed, so the cache will be discarded
>>> print(f"call took {int(res)} seconds")
call took 5 seconds
"""
if logger is None:
logger = get_logger()
hashf = kwargs.get('hashf', None)
if hashf is not None:
warnings.warn("'hashf' is deprecated. Please use 'depends_on' instead")
depends_on = hashf
cn = cname(func)
# todo not very nice that ENABLE check is scattered across two places
if not settings.ENABLE or cache_path is None:
logger.info('[%s]: cache explicitly disabled (settings.ENABLE is False or cache_path is None)', cn)
return func
if cache_path is use_default_path:
cache_path = settings.DEFAULT_CACHEW_DIR
logger.info('[%s]: no cache_path specified, using the default %s', cn, cache_path)
# TODO fuzz infer_type, should never crash?
inferred = infer_type(func)
if isinstance(inferred, Failure):
msg = f"failed to infer cache type: {inferred}. See https://github.com/karlicoss/cachew#features for the list of supported types."
if cls is None:
ex = CachewException(msg)
cachew_error(ex)
return func
else:
# it's ok, assuming user knows better
logger.debug(msg)
else:
if cls is None:
logger.debug('[%s] using inferred type %s', cn, inferred)
cls = inferred
else:
if cls != inferred:
logger.warning("inferred type %s mismatches specified type %s", inferred, cls)
# TODO not sure if should be more serious error...
ctx = Context(
func =func,
cache_path=cache_path,
force_file=force_file,
cls =cls,
depends_on=depends_on,
logger =logger,
chunk_by =chunk_by,
)
# hack to avoid extra stack frame (see test_recursive, test_deep-recursive)
@functools.wraps(func)
def binder(*args, **kwargs):
kwargs['_cachew_context'] = ctx
return cachew_wrapper(*args, **kwargs)
return binder
def cname(func: Callable) -> str:
# some functions don't have __module__
mod = getattr(func, '__module__', None) or ''
return f'{mod}:{func.__qualname__}'
class Context(NamedTuple):
func : Callable
cache_path: PathProvider
force_file: bool
cls : Type
depends_on: HashFunction
logger : logging.Logger
chunk_by : int
def composite_hash(self, *args, **kwargs) -> SourceHash:
fsig = inspect.signature(self.func)
# defaults wouldn't be passed in kwargs, but they can be an implicit dependency (especially between program runs)
defaults = {
k: v.default
for k, v in fsig.parameters.items()
if v.default is not inspect.Parameter.empty
}
# but only pass default if the user wants it in the hash function?
hsig = inspect.signature(self.depends_on)
defaults = {
k: v
for k, v in defaults.items()
if k in hsig.parameters or 'kwargs' in hsig.parameters
}
kwargs = {**defaults, **kwargs}
binder = NTBinder.make(tp=self.cls)
schema = str(binder.columns) # TODO not super nice, but works fine for now
return f'cachew: {CACHEW_VERSION}, schema: {schema}, dependencies: {self.depends_on(*args, **kwargs)}'
def cachew_wrapper(
*args,
_cachew_context: Context,
**kwargs,
):
C = _cachew_context
func = C.func
cache_path = C.cache_path
force_file = C.force_file
cls = C.cls
depend_on = C.depends_on
logger = C.logger
chunk_by = C.chunk_by
cn = cname(func)
if not settings.ENABLE:
logger.info('[%s]: cache explicitly disabled (settings.ENABLE is False)', cn)
yield from func(*args, **kwargs)
return
early_exit = False
# WARNING: annoyingly huge try/catch ahead...
# but it lets us save a function call, hence a stack frame
# see test_recursive and test_deep_recursive
try:
dbp: Path
if callable(cache_path):
pp = cache_path(*args, **kwargs) # type: ignore
if pp is None:
logger.info('[%s]: cache explicitly disabled (cache_path is None)', cn)
yield from func(*args, **kwargs)
return
else:
dbp = Path(pp)
else:
dbp = Path(cache_path)
dbp.parent.mkdir(parents=True, exist_ok=True)
# need to be atomic here
try:
# note: stat follows symlinks (which is what we want)
st = dbp.stat()
except FileNotFoundError:
# doesn't exist. then it's controlled by force_file
if force_file:
dbp = dbp
else:
dbp.mkdir(parents=True, exist_ok=True)
dbp = dbp / cn
else:
# already exists, so just use cname if it's a dir
if stat.S_ISDIR(st.st_mode):
dbp = dbp / cn
logger.debug('using %s for db cache', dbp)
h = C.composite_hash(*args, **kwargs); kassert(h is not None) # just in case
logger.debug('new hash: %s', h)
with DbHelper(dbp, cls) as db, \
db.connection.begin():
# NOTE: deferred transaction
conn = db.connection
binder = db.binder
values_table = db.table_data
# first, try to do as much as possible read-only, benefiting from deferred transaction
try:
# not sure if there is a better way...
prev_hashes = conn.execute(db.table_hash.select()).fetchall()
except sqlalchemy.exc.OperationalError as e:
# meh. not sure if this is a good way to handle this..
if 'no such table: hash' in str(e):
prev_hashes = []
else:
raise e
kassert(len(prev_hashes) <= 1) # shouldn't happen
prev_hash: Optional[SourceHash]
if len(prev_hashes) == 0:
prev_hash = None
else:
prev_hash = prev_hashes[0][0] # returns a tuple...
logger.debug('old hash: %s', prev_hash)
if h == prev_hash:
logger.debug('hash matched: loading from cache')
rows = conn.execute(values_table.select())
for row in rows:
yield binder.from_row(row)
return
logger.debug('hash mismatch: computing data and writing to db')
# NOTE on recursive calls
# somewhat magically, they should work as expected with no extra database inserts?
# the top level call 'wins' the write transaction and once it's gathered all data, will write it
# the 'intermediate' level calls fail to get it and will pass data through
# the cached 'bottom' level is read only and will be yielded without a write transaction
try:
# first write statement will upgrade transaction to write transaction which might fail due to concurrency
# see https://www.sqlite.org/lang_transaction.html
# note I guess, because of 'checkfirst', only the last create is actually guaranteed to upgrade the transaction to write one
# drop and create to incorporate schema changes
db.table_hash.create(conn, checkfirst=True)
values_table.drop(conn, checkfirst=True)
values_table.create(conn)
except sqlalchemy.exc.OperationalError as e:
if e.code == 'e3q8':
# database is locked; someone else must have won the write lock
# not much we can do here
# NOTE: important to close early, otherwise we might hold onto too many file descriptors during yielding
# see test_deep_recursive
conn.close()
yield from func(*args, **kwargs)
return
else:
raise e
# at this point we're guaranteed to have an exclusive write transaction
datas = func(*args, **kwargs)
chunk: List[Any] = []
def flush():
nonlocal chunk
if len(chunk) > 0:
# pylint: disable=no-value-for-parameter
conn.execute(values_table.insert().values(chunk))
chunk = []
for d in datas:
try:
yield d
except GeneratorExit:
early_exit = True
return
chunk.append(binder.to_row(d))
if len(chunk) >= chunk_by:
flush()
flush()
# TODO insert and replace instead?
# pylint: disable=no-value-for-parameter
conn.execute(db.table_hash.delete())
# pylint: disable=no-value-for-parameter
conn.execute(db.table_hash.insert().values([{'value': h}]))
except Exception as e:
# sigh... see test_early_exit_shutdown...
if early_exit and 'Cannot operate on a closed database' in str(e):
return
# todo hmm, kinda annoying that it tries calling the function twice?
# but gonna require some sophisticated cooperation with the cached wrapper otherwise
cachew_error(e)
yield from func(*args, **kwargs)
__all__ = ['cachew', 'CachewException', 'SourceHash', 'HashFunction', 'get_logger', 'NTBinder']
avg_line_length: 33.359649 | max_line_length: 229 | alphanum_fraction: 0.599381

hexsha: bd530f2a6b0312a06d5c95a19dfe8e1bffcb31ba | size: 4,314 | ext: py | lang: Python
repo (stars/issues/forks): arquolo/glow @ c4c63e36551cd1eec2e34129dbc0f06c788099de | path: glow/core/wrap/concurrency.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
__all__ = ['stream_batched', 'call_once', 'threadlocal', 'shared_call']
import functools
import threading
import time
from collections.abc import Callable, Sequence
from concurrent.futures import Future
from contextlib import ExitStack
from queue import Empty, SimpleQueue
from threading import Thread
from typing import Any, TypeVar, cast
from weakref import WeakValueDictionary
_T = TypeVar('_T')
_F = TypeVar('_F', bound=Callable)
_ZeroArgsF = TypeVar('_ZeroArgsF', bound=Callable[[], Any])
def threadlocal(fn: Callable[..., _T], *args: object,
**kwargs: object) -> Callable[[], _T]:
"""Thread-local singleton factory, mimics `functools.partial`"""
local_ = threading.local()
def wrapper() -> _T:
try:
return local_.obj
except AttributeError:
local_.obj = fn(*args, **kwargs)
return local_.obj
return wrapper
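# Illustrative sketch (hypothetical usage, not part of the original module):
# each thread gets its own lazily created object, here a sqlite connection.
def _example_threadlocal() -> None:
    import sqlite3
    connect = threadlocal(sqlite3.connect, ':memory:')
    assert connect() is connect()  # cached within the calling thread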
class _DeferredStack(ExitStack):
"""
ExitStack that allows deferring.
Use this when the return value of a callback function should be accessible.
"""
def defer(self, fn: Callable[..., _T], *args, **kwargs) -> Future[_T]:
future: Future[_T] = Future()
def apply(future: Future[_T]) -> None:
try:
result = fn(*args, **kwargs)
except BaseException as exc: # noqa: PIE786
future.set_exception(exc)
else:
future.set_result(result)
self.callback(apply, future)
return future
def call_once(fn: _ZeroArgsF) -> _ZeroArgsF:
"""Makes `fn()` callable a singleton"""
lock = threading.RLock()
def wrapper():
with _DeferredStack() as stack, lock:
if fn._future is None:
# This way setting future is protected, but fn() is not
fn._future = stack.defer(fn)
return fn._future.result()
fn._future = None # type: ignore
return cast(_ZeroArgsF, functools.update_wrapper(wrapper, fn))
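# Illustrative sketch (hypothetical usage, not part of the original module):
# the wrapped zero-argument function runs once; later calls reuse its result.
def _example_call_once() -> None:
    runs = []

    @call_once
    def load_config() -> dict:
        runs.append(1)
        return {'debug': False}

    assert load_config() is load_config()  # same object, computed once
    assert len(runs) == 1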
def shared_call(fn: _F) -> _F:
"""Merges concurrent calls to `fn` with the same `args` to single one"""
lock = threading.RLock()
futures: WeakValueDictionary[str, Future] = WeakValueDictionary()
def wrapper(*args, **kwargs):
key = f'{fn}{args}{kwargs}'
with _DeferredStack() as stack, lock:
try:
future = futures[key]
except KeyError:
futures[key] = future = stack.defer(fn, *args, **kwargs)
return future.result()
return cast(_F, functools.update_wrapper(wrapper, fn))
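# Illustrative sketch (hypothetical usage, not part of the original module):
# concurrent calls with identical arguments wait on one shared execution.
def _example_shared_call() -> None:
    from concurrent.futures import ThreadPoolExecutor

    calls = []

    @shared_call
    def fetch(url: str) -> str:
        calls.append(url)
        time.sleep(0.1)  # simulate slow IO
        return f'body of {url}'

    with ThreadPoolExecutor(4) as pool:
        results = list(pool.map(lambda _: fetch('http://example.com'), range(4)))
    assert results == ['body of http://example.com'] * 4
    # `calls` typically holds a single entry: the overlapping calls were merged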
def _batch_apply(func: Callable, args: Sequence, futures: Sequence[Future]):
try:
results = func(args)
assert len(args) == len(results)
except BaseException as exc: # noqa: PIE786
for fut in futures:
fut.set_exception(exc)
else:
for fut, res in zip(futures, results):
fut.set_result(res)
def stream_batched(func=None, *, batch_size, latency=0.1, timeout=20.):
"""
Delays the start of computation by up to `latency` seconds
in order to fill the batch to `batch_size` items and
send it at once to the target function.
`timeout` specifies how long to wait for results from the worker.
Simplified version of https://github.com/ShannonAI/service-streamer
"""
if func is None:
return functools.partial(
stream_batched,
batch_size=batch_size,
latency=latency,
timeout=timeout)
assert callable(func)
buf = SimpleQueue()
def _fetch_batch():
end_time = time.monotonic() + latency
for _ in range(batch_size):
try:
yield buf.get(timeout=end_time - time.monotonic())
except (Empty, ValueError): # ValueError on negative timeout
return
def _serve_forever():
while True:
if batch := [*_fetch_batch()]:
_batch_apply(func, *zip(*batch))
else:
time.sleep(0.001)
def wrapper(batch):
futures = [Future() for _ in batch]
for item, fut in zip(batch, futures):
buf.put((item, fut))
return [f.result(timeout=timeout) for f in futures]
Thread(target=_serve_forever, daemon=True).start()
return functools.update_wrapper(wrapper, func)
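# Illustrative sketch (hypothetical usage, not part of the original module):
# the wrapped function receives whole batches; callers submit their own small
# batches and the background worker regroups them before invoking `double`.
def _example_stream_batched() -> None:
    @stream_batched(batch_size=4, latency=0.05)
    def double(xs):
        return [x * 2 for x in xs]

    assert double([1, 2, 3]) == [2, 4, 6]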
avg_line_length: 30.167832 | max_line_length: 76 | alphanum_fraction: 0.613584

hexsha: c48039ec1f9239f30efaa83dfd2e4e178c253158 | size: 3,666 | ext: py | lang: Python
repo (stars/issues/forks): wuchen-huawei/huaweicloud-sdk-python-v3 @ 3683d703f4320edb2b8516f36f16d485cff08fc2 | path: huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/recognize_flight_itinerary_request.py | licenses: ["Apache-2.0"]
stars: 1 (2021-11-03T07:54:50.000Z to 2021-11-03T07:54:50.000Z) | issues: null | forks: null
content:
# coding: utf-8
import pprint
import re
import six
class RecognizeFlightItineraryRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'body': 'FlightItineraryRequestBody'
}
attribute_map = {
'project_id': 'project_id',
'body': 'body'
}
def __init__(self, project_id=None, body=None):
"""RecognizeFlightItineraryRequest - a model defined in huaweicloud sdk"""
self._project_id = None
self._body = None
self.discriminator = None
self.project_id = project_id
if body is not None:
self.body = body
@property
def project_id(self):
"""Gets the project_id of this RecognizeFlightItineraryRequest.
Project ID, which can be obtained from [Obtaining a Project ID](https://support.huaweicloud.com/api-ocr/ocr_03_0130.html).
:return: The project_id of this RecognizeFlightItineraryRequest.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this RecognizeFlightItineraryRequest.
Project ID, which can be obtained from [Obtaining a Project ID](https://support.huaweicloud.com/api-ocr/ocr_03_0130.html).
:param project_id: The project_id of this RecognizeFlightItineraryRequest.
:type: str
"""
self._project_id = project_id
@property
def body(self):
"""Gets the body of this RecognizeFlightItineraryRequest.
:return: The body of this RecognizeFlightItineraryRequest.
:rtype: FlightItineraryRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RecognizeFlightItineraryRequest.
:param body: The body of this RecognizeFlightItineraryRequest.
:type: FlightItineraryRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeFlightItineraryRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
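# Illustrative sketch (hypothetical usage, not part of the generated SDK file):
# to_dict() serialises the model according to the openapi_types declared above.
def _example_request_to_dict():
    request = RecognizeFlightItineraryRequest(project_id='0123456789abcdef')
    return request.to_dict()  # {'project_id': '0123456789abcdef', 'body': None}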
avg_line_length: 27.155556 | max_line_length: 87 | alphanum_fraction: 0.574741

hexsha: cffdf6602d27715086ed526881c0421338842249 | size: 462 | ext: py | lang: Python
repo (stars/issues/forks): SidaZh/EET @ 6414faa734abfdb666556304ca3df5b7f5e54c38 | path: example/python/pipelines/convert/save_vocab_from_dict.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
save_file = open("../../resource/vocab.txt","w")
dict_file = open('../../resource/dict.txt', 'r')
pre_file = open('./Special_Symbols.txt', 'r')
contents=pre_file.readlines() # read all lines
for content in contents: # one line at a time
save_file.write(content)
save_file.write('\n')
pre_file.close()
contents=dict_file.readlines() # read all lines
for content in contents: # one line at a time
save_file.write(content.split(' ')[0] + '\n')
dict_file.close()
save_file.close()
avg_line_length: 28.875 | max_line_length: 49 | alphanum_fraction: 0.658009

hexsha: c7623a9e5a4d37fad4d933fe46938a8553cb616e | size: 521 | ext: py | lang: Python
repo (stars/issues/forks): Pavel-Petkov03/Emailer @ 8f2b73e82136374e3f8b86537740ae0a2244c03d | path: Emailer/main/migrations/0009_alter_receiver_email.py | licenses: ["MIT"]
stars: null | issues: 8 (2022-02-20T03:17:56.000Z to 2022-03-29T17:20:14.000Z) | forks: null
content:
# Generated by Django 4.0.2 on 2022-04-08 20:39
import Emailer.authentication.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_alter_receiver_preferences'),
]
operations = [
migrations.AlterField(
model_name='receiver',
name='email',
field=models.EmailField(max_length=254, validators=[Emailer.authentication.validators.is_gmail_validator]),
),
]
avg_line_length: 26.05 | max_line_length: 120 | alphanum_fraction: 0.642994

hexsha: cf61ceaefe5a3a9c04e58c67d56431c6c3554a68 | size: 15,020 | ext: py | lang: Python
repo (stars/issues/forks): udaay7446/uday_python @ 2c8adf1652004582482c9ffed2d5d73c70da5819 | path: applications/app2/languages/ar.py | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
{
'!langcode!': 'ar',
'!langname!': 'Arabic',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%(shop)': '%s %%(shop)',
'%s %%(shop[0])': '%s %%(shop[0])',
'%s %%{quark[0]}': '%s %%{quark[0]}',
'%s %%{row} deleted': '%s %%{row} deleted',
'%s %%{row} updated': '%s %%{row} updated',
'%s %%{shop[0]}': '%s %%{shop[0]}',
'%s %%{shop}': '%s %%{shop}',
'%s selected': '%s selected',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**Hello World**': '**مرحباً بالعالم**',
'@markmin\x01**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'A new password was emailed to you': 'A new password was emailed to you',
'About': 'نبذة',
'Access Control': 'متحكمات الوصول',
'admin': 'admin',
'Administrative Interface': 'واجهة التحكم',
'Ajax Recipes': 'وصفات أجاكس',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'API Example': 'API Example',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'Apply changes': 'Apply changes',
'Are you sure you want to delete this object?': 'هل أنت متأكد بحذف هذا الكائن ؟',
'Authentication code': 'Authentication code',
'Available Databases and Tables': 'Available Databases and Tables',
'Buy this book': 'أشتري هذا الكتاب',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'لا يمكن بأن يكون خالي',
'Change Password': 'Change Password',
'Change password': 'Change password',
'Check to delete': 'أختر للحذف',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password',
'Client IP': 'IP المستخدم',
'Community': 'المجتمع',
'Components and Plugins': 'العناصر والإضافات',
'Config.ini': 'Config.ini',
'Controller': 'متحكم',
'Copyright': 'الحقوق',
'Created By': 'أنشئ بواسطة',
'Created On': 'أنشئ في',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'customize me!': 'التخصيص',
'data uploaded': 'data uploaded',
'Database': 'قاعدة البيانات',
'Database %s select': 'Database %s select',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'نموذج قاعدة البيانات',
'Delete:': 'Delete:',
'Demo': 'تجربة',
'Deployment Recipes': 'الوصفات المنشورة',
'Description': 'الوصف',
'design': 'design',
'Design': 'Design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'المستندات',
"Don't know what to do?": 'لا تعلم ماذا ستفعل ؟',
'done!': 'done!',
'Download': 'تحميل',
'E-mail': 'البريد الإلكتروني',
'Edit current record': 'Edit current record',
'Email and SMS': 'البريد الإلكتروني والرسالة النصية',
'Email sent': 'Email sent',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'enter an integer between %(min)g and %(max)g': 'أدخل عدد صحيح بين %(min)g و %(man)g',
'enter date and time as %(format)s': 'أدخل التاريخ والوقت كالنمط %(format)',
'Errors': 'الأخطاء',
'export as csv file': 'export as csv file',
'FAQ': 'الأسئلة الشائعة',
'First name': 'الأسم الأول',
'Forms and Validators': 'الإستمارات والمدققات',
'Free Applications': 'تطبيقات مجانية',
'Function disabled': 'Function disabled',
'Graph Model': 'Graph Model',
'Grid Example': 'Grid Example',
'Group %(group_id)s created': 'المجموعة %(group_id)s قد أنشئت',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'هوية المجموعة',
'Group uniquely assigned to user %(id)s': 'المجموعة مخصصة للمستخدم %(id)s',
'Groups': 'مجموعات',
'Hello World': 'مرحباً بالعالم',
'Hello World ## comment': ' مرحباً بالعالم',
'Hello World## comment': 'مرحباً بالعالم',
'Helping web2py': 'Helping web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'الرئيسية',
'How did you get here?': 'كيف أستطعت الوصول إلى هنا ؟',
'import': 'import',
'Import/Export': 'Import/Export',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Insufficient privileges': 'Insufficient privileges',
'Internal State': 'Internal State',
'Introduction': 'مقدمة',
'Invalid email': 'بريد إلكتروني غير صالح',
'Invalid key': 'Invalid key',
'Invalid login': 'Invalid login',
'Invalid password': 'Invalid password',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Invalid reset password': 'Invalid reset password',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Is Active': 'نشط',
'Key': 'Key',
'Key verified': 'Key verified',
'Last name': 'أسم العائلة',
'Layout': 'النسق',
'Layout Plugins': 'نسّق الإضافات',
'Layouts': 'لأنساق',
'Live Chat': 'المحادثة الحيّة',
'Log In': 'Log In',
'Logged in': 'تم تسجيل الدخول',
'Logged out': 'تم تسجيل الخروج',
'Login': 'تسجيل الدخول',
'Login disabled by administrator': 'Login disabled by administrator',
'Logout': 'تسجيل الخروج',
'Lost Password': 'فقدت كلمة المرور',
'Lost password?': 'هل فقدت كلمة المرور ؟',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Manage Cache',
'Memberships': 'Memberships',
'Menu Model': 'قالب القوائم',
'Modified By': 'عًدلت بواسطة',
'Modified On': 'عُدلت في',
'My Sites': 'موقعي',
'Name': 'الأسم',
'New password': 'New password',
'New Record': 'New Record',
'new record inserted': 'new record inserted',
'next %s rows': 'next %s rows',
'No databases in this application': 'No databases in this application',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'أسم الكائن أو الجدول',
'Old password': 'Old password',
'Online book': 'Online book',
'Online examples': 'أمثلة على الأنترنت',
'or import from csv file': 'or import from csv file',
'Origin': 'أصل',
'Other Plugins': 'إضافات أخرى',
'Other Recipes': 'وصفات أخرى',
'Overview': 'نظرة عامة',
'Password': 'كلمة المرور',
'Password changed': 'Password changed',
"Password fields don't match": 'حقول كلمة المرور لا تتطابق',
'Password reset': 'Password reset',
'Password retrieve': 'Password retrieve',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': 'الرجاء إعادة إدخال كلمة المرور',
'Plugins': 'الإضافات',
'Powered by': 'مدعوم بواسطة',
'Preface': 'المدخل',
'previous %s rows': 'previous %s rows',
'Profile': 'الملف الشخصي',
'Profile updated': 'Profile updated',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'بايثون',
'Query:': 'Query:',
'Quick Examples': 'أمثلة سريعة',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Recipes': 'وصفات',
'Record': 'Record',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'Record Deleted': 'Record Deleted',
'record does not exist': 'record does not exist',
'Record ID': 'هوية السجل ',
'Record id': 'Record id',
'Record Updated': 'Record Updated',
'Register': 'التسجيل',
'Registration identifier': 'مُعرف التسجيل',
'Registration is pending approval': 'Registration is pending approval',
'Registration key': 'رمز التسجيل',
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'تم التسجيل بنجاح',
'Remember me (for 30 days)': 'تذكرني ( إلى 30 يوم)',
'Request reset password': 'Request reset password',
'Reset Password key': 'إعادة ظبط مفتاح كلمة المرور',
'Role': 'دور',
'Roles': 'Roles',
'Rows in Table': 'Rows in Table',
'Rows selected': 'Rows selected',
'Save model as...': 'Save model as...',
'Semantic': 'دليل لفظي',
'Services': 'خدمات',
'Sign Up': 'Sign Up',
'Sign up': 'Sign up',
'Size of cache:': 'Size of cache:',
'state': 'state',
'Statistics': 'Statistics',
'Stylesheet': 'أسلوب النمط',
'submit': 'submit',
'Submit': 'Submit',
'Support': 'الدعم',
'Table': 'Table',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'النواة',
'The output of the file is a dictionary that was rendered by the view %s': 'نتاج هذا الملف هو قاموس قًدم بواسطة العارض %s',
'The Views': 'المشاهدات',
'This App': 'هذا التطبيق',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'This email already has an account',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'البصمة الزمنية',
'Traceback': 'Traceback',
'Twitter': 'تويتر',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'unable to parse csv file': 'unable to parse csv file',
'Unable to send email': 'Unable to send email',
'Update:': 'Update:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'المستخدم %(id)s قد سجل دخوله',
'User %(id)s Logged-out': 'المستخدم %(id)s قد سجل خروجه',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'المستخدم %(id)s مسجل',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User ID': 'هوية المستخدم',
'Username': 'Username',
'Username already taken': 'Username already taken',
'Username retrieve': 'Username retrieve',
'Users': 'Users',
'value already in database or empty': 'القيمة موجودة مسبقاً أو فارغة',
'Verify Password': 'تأكيد كلمة المرور',
'Videos': 'الفيديوهات',
'View': 'العرض',
'Welcome': 'مرحباً',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome to web2py!': 'مرحباً بكم في ويب2 باي !',
'Which called the function %s located in the file %s': 'الدالة المسماة %s موجودة في ملف %s',
'Wiki Example': 'Wiki Example',
'Working...': 'Working...',
'You are successfully running web2py': 'أستطعت تثبيت web2py بنجاح !',
'You can modify this application and adapt it to your needs': 'تستطيع تعديل هذا التطبيق لما يناسب إحتياجك',
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': ' ملقد زرت الرابط %s',
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Your username is: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
avg_line_length: 51.61512 | max_line_length: 281 | alphanum_fraction: 0.654128

hexsha: ddd0889901c97ae271e4ac063acc90cc02f4611f | size: 7,023 | ext: py | lang: Python
repo (stars/issues/forks): GReguig/kymatio @ e0fc10057f5f8bb947068bc40afff8d3d3729052 | path: kymatio/backend/torch_backend.py | licenses: ["BSD-3-Clause"]
stars: 516 (2018-11-18T06:11:16.000Z to 2022-03-21T22:35:06.000Z) | issues: 558 (2018-11-19T22:21:12.000Z to 2022-03-28T14:59:15.000Z) | forks: 119 (2018-11-18T06:05:39.000Z to 2022-03-26T06:59:37.000Z)
content:
from torch.autograd import Function
import torch
BACKEND_NAME = 'torch'
def input_checks(x):
if x is None:
raise TypeError('The input should not be empty.')
if not x.is_contiguous():
raise RuntimeError('The input must be contiguous.')
def _is_complex(x):
return x.shape[-1] == 2
def _is_real(x):
return x.shape[-1] == 1
class ModulusStable(Function):
"""Stable complex modulus
This class implements a modulus transform for complex numbers which is
stable with respect to very small inputs (z close to 0), avoiding
returning nans in all cases.
Usage
-----
modulus = ModulusStable.apply # apply inherited from Function
x_mod = modulus(x)
Parameters
---------
x : tensor
The complex tensor (i.e., whose last dimension is two) whose modulus
we want to compute.
Returns
-------
output : tensor
A tensor of same size as the input tensor, except for the last
dimension, which is removed. This tensor is differentiable with respect
to the input in a stable fashion (so the gradient of the modulus at zero is
zero).
"""
@staticmethod
def forward(ctx, x):
"""Forward pass of the modulus.
This is a static method which does not require an instantiation of the
class.
Arguments
---------
ctx : context object
Collected during the forward pass. These are automatically added
by PyTorch and should not be touched. They are then used for the
backward pass.
x : tensor
The complex tensor whose modulus is to be computed.
Returns
-------
output : tensor
This contains the modulus computed along the last axis, with that
axis removed.
"""
ctx.p = 2
ctx.dim = -1
ctx.keepdim = False
output = (x[...,0] * x[...,0] + x[...,1] * x[...,1]).sqrt()
ctx.save_for_backward(x, output)
return output
@staticmethod
def backward(ctx, grad_output):
"""Backward pass of the modulus
This is a static method which does not require an instantiation of the
class.
Arguments
---------
ctx : context object
Collected during the forward pass. These are automatically added
by PyTorch and should not be touched. They are then used for the
backward pass.
grad_output : tensor
The gradient with respect to the output tensor computed at the
forward pass.
Returns
-------
grad_input : tensor
The gradient with respect to the input.
"""
x, output = ctx.saved_tensors
if ctx.dim is not None and ctx.keepdim is False and x.dim() != 1:
grad_output = grad_output.unsqueeze(ctx.dim)
output = output.unsqueeze(ctx.dim)
grad_input = x.mul(grad_output).div(output)
# Special case at 0 where we return a subgradient containing 0
grad_input.masked_fill_(output == 0, 0)
return grad_input
# shortcut for ModulusStable.apply
modulus = ModulusStable.apply
class Modulus():
"""This class implements a modulus transform for complex numbers.
Usage
-----
modulus = Modulus()
x_mod = modulus(x)
Parameters
---------
x : tensor
Complex torch tensor.
Returns
-------
output : tensor
A tensor with the same dimensions as x, such that output[..., 0]
contains the complex modulus of x, while output[..., 1] = 0.
"""
def __call__(self, x):
type_checks(x)
norm = torch.zeros_like(x)
norm[..., 0] = modulus(x)
return norm
def type_checks(x):
if not _is_complex(x):
raise TypeError('The input should be complex (i.e. last dimension is 2).')
if not x.is_contiguous():
raise RuntimeError('Tensors must be contiguous.')
def cdgmm(A, B, inplace=False):
"""Complex pointwise multiplication.
Complex pointwise multiplication between (batched) tensor A and tensor B.
Parameters
----------
A : tensor
A is a complex tensor of size (B, C, M, N, 2).
B : tensor
B is a complex tensor of size (M, N, 2) or real tensor of (M, N, 1).
inplace : boolean, optional
If set to True, all the operations are performed in place.
Raises
------
RuntimeError
In the event that the filter B is not a 3-tensor with a last
dimension of size 1 or 2, or A and B are not compatible for
multiplication.
TypeError
In the event that A is not complex, or B does not have a final
dimension of 1 or 2, or A and B are not of the same dtype, or if
A and B are not on the same device.
Returns
-------
C : tensor
Output tensor of size (B, C, M, N, 2) such that:
C[b, c, m, n, :] = A[b, c, m, n, :] * B[m, n, :].
"""
if not _is_real(B):
type_checks(B)
else:
if not B.is_contiguous():
raise RuntimeError('Tensors must be contiguous.')
type_checks(A)
if A.shape[-len(B.shape):-1] != B.shape[:-1]:
raise RuntimeError('The filters are not compatible for multiplication.')
if A.dtype is not B.dtype:
raise TypeError('Input and filter must be of the same dtype.')
if B.device.type == 'cuda':
if A.device.type == 'cuda':
if A.device.index != B.device.index:
raise TypeError('Input and filter must be on the same GPU.')
else:
raise TypeError('Input must be on GPU.')
if B.device.type == 'cpu':
if A.device.type == 'cuda':
raise TypeError('Input must be on CPU.')
if _is_real(B):
if inplace:
return A.mul_(B)
else:
return A * B
else:
C = A.new(A.shape)
A_r = A[..., 0].view(-1, B.nelement() // 2)
A_i = A[..., 1].view(-1, B.nelement() // 2)
B_r = B[..., 0].view(-1).unsqueeze(0).expand_as(A_r)
B_i = B[..., 1].view(-1).unsqueeze(0).expand_as(A_i)
C[..., 0].view(-1, B.nelement() // 2)[:] = A_r * B_r - A_i * B_i
C[..., 1].view(-1, B.nelement() // 2)[:] = A_r * B_i + A_i * B_r
return C if not inplace else A.copy_(C)
def concatenate(arrays, dim):
return torch.stack(arrays, dim=dim)
def real(x):
"""Real part of complex tensor
Takes the real part of a complex tensor, where the last axis corresponds
to the real and imaginary parts.
Parameters
----------
x : tensor
A complex tensor (that is, whose last dimension is equal to 2).
Returns
-------
x_real : tensor
The tensor x[..., 0] which is interpreted as the real part of x.
"""
return x[..., 0]
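# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original kymatio backend): exercises
# Modulus and cdgmm on small synthetic tensors so the expected shapes are
# visible. Only the public torch API is assumed.
if __name__ == '__main__':
    x = torch.randn(1, 1, 8, 8, 2)   # (B, C, M, N, 2): complex stored in last dim
    filt = torch.randn(8, 8, 2)      # (M, N, 2): complex filter
    x_mod = Modulus()(x)             # modulus in [..., 0], zeros in [..., 1]
    y = cdgmm(x, filt)               # complex pointwise product, same shape as x
    print(x_mod.shape, y.shape, real(y).shape)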
| 28.54878
| 82
| 0.569415
|
38ea223a0ad4adb15de02f0c71ec07de02f5868d
| 2,238
|
py
|
Python
|
src/main/resources/docs/tests/B304.py
|
paulopontesm/codacy-bandit
|
38a1637717652efbc865badd43f336398881680b
|
[
"Apache-2.0"
] | 6
|
2016-08-31T07:55:40.000Z
|
2020-01-20T08:32:50.000Z
|
src/main/resources/docs/tests/B304.py
|
Acidburn0zzz/codacy-bandit
|
92b65ec73b89a915127e1b29a4fcc8e2a7b64943
|
[
"Apache-2.0"
] | 20
|
2017-06-23T11:58:11.000Z
|
2022-03-07T02:17:43.000Z
|
src/main/resources/docs/tests/B304.py
|
Acidburn0zzz/codacy-bandit
|
92b65ec73b89a915127e1b29a4fcc8e2a7b64943
|
[
"Apache-2.0"
] | 15
|
2016-10-07T13:38:57.000Z
|
2021-02-14T21:54:25.000Z
|
##Patterns: B304
from Crypto.Cipher import ARC2
from Crypto.Cipher import ARC4
from Crypto.Cipher import Blowfish
from Crypto.Cipher import DES
from Crypto.Cipher import XOR
from Crypto.Hash import SHA
from Crypto import Random
from Crypto.Util import Counter
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.backends import default_backend
from struct import pack
import socket
key = b'Sixteen byte key'
iv = Random.new().read(ARC2.block_size)
##Warn: B304
cipher = ARC2.new(key, ARC2.MODE_CFB, iv)
msg = iv + cipher.encrypt(b'Attack at dawn')
key = b'Very long and confidential key'
nonce = Random.new().read(16)
tempkey = SHA.new(key+nonce).digest()
##Warn: B304
cipher = ARC4.new(tempkey)
msg = nonce + cipher.encrypt(b'Open the pod bay doors, HAL')
bs = Blowfish.block_size
key = b'An arbitrarily long key'
iv = Random.new().read(bs)
##Warn: B304
cipher = Blowfish.new(key, Blowfish.MODE_CBC, iv)
plaintext = b'docendo discimus '
plen = bs - divmod(len(plaintext),bs)[1]
padding = [plen]*plen
padding = pack('b'*plen, *padding)
msg = iv + cipher.encrypt(plaintext + padding)
key = b'-8B key-'
nonce = Random.new().read(DES.block_size // 2)
ctr = Counter.new(DES.block_size * 8 // 2, prefix=nonce)
##Warn: B304
cipher = DES.new(key, DES.MODE_CTR, counter=ctr)
plaintext = b'We are no longer the knights who say ni!'
msg = nonce + cipher.encrypt(plaintext)
key = b'Super secret key'
##Warn: B304
cipher = XOR.new(key)
plaintext = b'Encrypt me'
msg = cipher.encrypt(plaintext)
##Warn: B304
cipher = Cipher(algorithms.ARC4(key), mode=None, backend=default_backend())
encryptor = cipher.encryptor()
ct = encryptor.update(b"a secret message")
##Warn: B304
cipher = Cipher(algorithms.Blowfish(key), mode=None, backend=default_backend())
encryptor = cipher.encryptor()
ct = encryptor.update(b"a secret message")
##Warn: B304
cipher = Cipher(algorithms.IDEA(key), mode=None, backend=default_backend())
encryptor = cipher.encryptor()
ct = encryptor.update(b"a secret message")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0', 31137))
s.bind(('192.168.0.1', 8080))
| 30.243243
| 79
| 0.74933
|
32367af1812a8bb8f33893d90cbedf35aed58692
| 2,950
|
py
|
Python
|
tests/test_sim_procedure.py
|
fmagin/angr
|
c41563b0f82a3d031c3aa482ebe2973c87ec4adb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_sim_procedure.py
|
fmagin/angr
|
c41563b0f82a3d031c3aa482ebe2973c87ec4adb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_sim_procedure.py
|
fmagin/angr
|
c41563b0f82a3d031c3aa482ebe2973c87ec4adb
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import angr
import claripy
import nose
from angr.codenode import BlockNode, HookNode, SyscallNode
BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries')
def test_ret_float():
p = angr.load_shellcode(b'X', arch='i386')
class F1(angr.SimProcedure):
def run(self):
return 12.5
p.hook(0x1000, F1(cc=p.factory.cc(func_ty=angr.sim_type.parse_file('float (x)();')[0]['x'])))
p.hook(0x2000, F1(cc=p.factory.cc(func_ty=angr.sim_type.parse_file('double (x)();')[0]['x'])))
s = p.factory.call_state(addr=0x1000, ret_addr=0)
succ = s.step()
nose.tools.assert_equal(len(succ.successors), 1)
s2 = succ.flat_successors[0]
nose.tools.assert_false(s2.regs.st0.symbolic)
nose.tools.assert_equal(s2.solver.eval(s2.regs.st0.get_bytes(4, 4).raw_to_fp()), 12.5)
s = p.factory.call_state(addr=0x2000, ret_addr=0)
succ = s.step()
nose.tools.assert_equal(len(succ.successors), 1)
s2 = succ.flat_successors[0]
nose.tools.assert_false(s2.regs.st0.symbolic)
nose.tools.assert_equal(s2.solver.eval(s2.regs.st0.raw_to_fp()), 12.5)
def test_syscall_and_simprocedure():
bin_path = os.path.join(BIN_PATH, 'tests', 'cgc', 'CADET_00002')
proj = angr.Project(bin_path, auto_load_libs=False)
cfg = proj.analyses.CFGFast(normalize=True)
# check syscall
node = cfg.get_any_node(proj.loader.kernel_object.mapped_base + 1)
func = proj.kb.functions[node.addr]
nose.tools.assert_true(node.is_simprocedure)
nose.tools.assert_true(node.is_syscall)
nose.tools.assert_false(node.to_codenode().is_hook)
nose.tools.assert_false(proj.is_hooked(node.addr))
nose.tools.assert_true(func.is_syscall)
nose.tools.assert_true(func.is_simprocedure)
nose.tools.assert_equal(type(proj.factory.snippet(node.addr)), SyscallNode)
# check normal functions
node = cfg.get_any_node(0x80480a0)
func = proj.kb.functions[node.addr]
nose.tools.assert_false(node.is_simprocedure)
nose.tools.assert_false(node.is_syscall)
nose.tools.assert_false(proj.is_hooked(node.addr))
nose.tools.assert_false(func.is_syscall)
nose.tools.assert_false(func.is_simprocedure)
nose.tools.assert_equal(type(proj.factory.snippet(node.addr)), BlockNode)
# check hooked functions
proj.hook(0x80480a0, angr.SIM_PROCEDURES['libc']['puts']())
cfg = proj.analyses.CFGFast(normalize=True)# rebuild cfg to updated nodes
node = cfg.get_any_node(0x80480a0)
func = proj.kb.functions[node.addr]
nose.tools.assert_true(node.is_simprocedure)
nose.tools.assert_false(node.is_syscall)
nose.tools.assert_true(proj.is_hooked(node.addr))
nose.tools.assert_false(func.is_syscall)
nose.tools.assert_true(func.is_simprocedure)
nose.tools.assert_equal(type(proj.factory.snippet(node.addr)), HookNode)
if __name__ == '__main__':
test_ret_float()
test_syscall_and_simprocedure()
| 37.820513
| 98
| 0.721695
|
561e6ae194a39c24e67b338515d18648565ef77a
| 3,450
|
py
|
Python
|
tests/test_cairo.py
|
evanblank3/ethpm-types
|
129bb7d95e93ca7bd135908d62474e9af131e1df
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cairo.py
|
evanblank3/ethpm-types
|
129bb7d95e93ca7bd135908d62474e9af131e1df
|
[
"Apache-2.0"
] | 9
|
2022-01-28T22:38:49.000Z
|
2022-03-31T21:29:15.000Z
|
tests/test_cairo.py
|
evanblank3/ethpm-types
|
129bb7d95e93ca7bd135908d62474e9af131e1df
|
[
"Apache-2.0"
] | 1
|
2022-01-05T22:33:08.000Z
|
2022-01-05T22:33:08.000Z
|
from ethpm_types import ContractType
CAIRO_ABI = [
{
"type": "struct",
"name": "MyStruct",
"members": [
{"name": "foo", "type": "felt", "offset": 0},
{"name": "bar", "type": "felt", "offset": 1},
],
"size": 2,
},
{"type": "event", "name": "Upgraded", "inputs": [], "anonymous": False},
{
"type": "constructor",
"stateMutability": "nonpayable",
"inputs": [{"name": "implementation_address", "type": "felt"}],
},
{
"type": "function",
"name": "compare_arrays",
"stateMutability": "nonpayable",
"inputs": [
{"name": "a_len", "type": "felt"},
{"name": "a", "type": "felt*"},
{"name": "b_len", "type": "felt"},
{"name": "b", "type": "felt*"},
],
"outputs": [],
},
{
"type": "function",
"name": "increase_balance",
"stateMutability": "nonpayable",
"inputs": [{"name": "amount", "type": "felt"}],
"outputs": [],
},
{
"type": "function",
"name": "get_balance",
"stateMutability": "view",
"inputs": [],
"outputs": [{"name": "res", "type": "felt"}],
},
{
"type": "function",
"name": "sum_points",
"stateMutability": "view",
"inputs": [{"name": "points", "type": "(Point, Point)"}],
"outputs": [{"name": "res", "type": "Point"}],
},
{
"type": "function",
"name": "__default__",
"stateMutability": "nonpayable",
"inputs": [
{"name": "selector", "type": "felt"},
{"name": "calldata_size", "type": "felt"},
{"name": "calldata", "type": "felt*"},
],
"outputs": [{"name": "retdata_size", "type": "felt"}, {"name": "retdata", "type": "felt*"}],
},
{
"inputs": [
{"name": "selector", "type": "felt"},
{"name": "calldata_size", "type": "felt"},
{"name": "calldata", "type": "felt*"},
],
"name": "__l1_default__",
"outputs": [],
"type": "l1_handler",
},
]
def test_cairo_abi():
contract_type = ContractType.parse_obj({"abi": CAIRO_ABI})
abi = contract_type.abi
# Verify struct
struct = abi[0]
raw_struct = struct.dict()
assert struct.type == raw_struct["type"] == "struct"
assert struct.size == raw_struct["size"] == 2
struct_member_0 = struct.members[0]
raw_struct_member_0 = struct_member_0.dict()
struct_member_1 = struct.members[1]
raw_struct_member_1 = struct_member_1.dict()
assert struct_member_0.name == raw_struct_member_0["name"] == "foo"
assert struct_member_0.offset == raw_struct_member_0["offset"] == 0
assert struct_member_1.name == raw_struct_member_1["name"] == "bar"
assert struct_member_1.offset == raw_struct_member_1["offset"] == 1
# Verify event
event = abi[1]
event_raw = event.dict()
assert event.name == event_raw["name"] == "Upgraded"
# Verify constructor
constructor = abi[2]
constructor_raw = constructor.dict()
assert constructor.type == constructor_raw["type"] == "constructor"
# Verify L1 handler
l1_handler = abi[-1]
l1_handler_raw = l1_handler.dict()
assert l1_handler.type == l1_handler_raw["type"] == "l1_handler"
assert l1_handler.name == l1_handler_raw["name"] == "__l1_default__"
| 31.363636
| 100
| 0.518551
|
c390688e9872884fd42b9d94b174373b2101ee02
| 1,865
|
py
|
Python
|
hangman.py
|
Coskntkk/Hangman-Game
|
46876f19c989fac2b713d35fe33005db24951d95
|
[
"MIT"
] | null | null | null |
hangman.py
|
Coskntkk/Hangman-Game
|
46876f19c989fac2b713d35fe33005db24951d95
|
[
"MIT"
] | null | null | null |
hangman.py
|
Coskntkk/Hangman-Game
|
46876f19c989fac2b713d35fe33005db24951d95
|
[
"MIT"
] | 1
|
2021-03-08T00:46:02.000Z
|
2021-03-08T00:46:02.000Z
|
from random import choice
# read word list from words.txt
def readFile(file_name):
wordList = []
with open(file_name, 'r', encoding='utf8') as myFile:
for row in myFile:
            data = row.rstrip('\n')
wordList.append(data)
return wordList
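# words.txt is expected to hold one candidate word per line, for example:
#   apple
#   banana
#   cherry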
# to find given letter in the map and return map back to main
def findCharacter(word,letter,map):
for i in range(len(word)):
if word[i] == letter: map[i] = letter
return map
def main():
usedLetters = list()
fileName = "words.txt"
wordList = readFile(fileName)
word = choice(wordList).lower()
wordLen = len(word)
    # give the player at least 5 lives, otherwise one life per letter
if wordLen < 5: lives = 5
else: lives = wordLen
map = list([])
for _ in range(wordLen):
map.append('_')
mapStr = ' '.join(map)
print(mapStr)
while (mapStr[0::2] != word) and lives > 0:
letter = input("give me a letter: ").lower()
if len(letter) != 1:
print("LETTER PLEASE")
continue
while letter in usedLetters:
print("this letter has already used.")
letter = input("give me another letter:")
usedLetters.append(letter)
# compare the before and after versions of maps
map1 = list(map)
map = findCharacter(word,letter,map)
map2 = list(map)
if(map1 == map2):
lives -=1
print("WRONG GUESS!")
print("{} lives left.".format(lives))
mapStr = ' '.join(map)
print(mapStr)
print("........................................................")
# end of while
# end of the game..
if lives > 0: print("you won!!!unlem1!")
else: print("you have no more lives")
print("the word was.." , word)
main()
| 25.547945
| 73
| 0.52118
|
6e4e1904f57a3c92fb4945d01753bd19c0611cb1
| 3,598
|
py
|
Python
|
python/tvm/relax/op/base.py
|
psrivas2/relax
|
4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a
|
[
"Apache-2.0"
] | 90
|
2021-11-30T11:58:10.000Z
|
2022-03-31T02:24:04.000Z
|
python/tvm/relax/op/base.py
|
psrivas2/relax
|
4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a
|
[
"Apache-2.0"
] | 64
|
2021-11-22T23:58:23.000Z
|
2022-03-31T03:19:22.000Z
|
python/tvm/relax/op/base.py
|
psrivas2/relax
|
4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a
|
[
"Apache-2.0"
] | 27
|
2021-12-09T22:39:27.000Z
|
2022-03-24T23:21:48.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The base Relax operators."""
from typing import Union, List, Optional
from tvm.runtime.object import Object
from . import _ffi_api
from ..expr import Expr, ShapeExpr, Tuple, Call
from ..ty import DynTensorType, TupleType
from ...ir import Array
def call_tir(
func: Expr,
args: Union[Tuple, List[Expr]],
shape: Union[Tuple, ShapeExpr, List[int]],
dtype: Union[str, List[str]],
tir_vars: Optional[ShapeExpr] = None,
) -> Call:
"""
Call a destination-passing-style function and return the output.
Parameters
----------
func : Expr
The destination-passing-style function, can be ExternFunc or PrimFunc.
args : Union[Tuple, List[Expr]]
The input arguments.
shape: Union[Tuple, ShapeExpr, List[int]]
The output shape. Tuple(ShapeExpr) if multiple outputs, ShapeExpr if single output.
dtype: Union[str, List[str]]
The output dtype. List[str] if multiple outputs, str if single output.
tir_vars : ShapeExpr, optional
        ShapeExpr representing a tuple of integers to unpack when calling func. Null if not used.
Returns
-------
ret: Call
A call node for the call_tir operator.
"""
if isinstance(shape, (list, tuple, Array)):
shape = ShapeExpr(shape)
if isinstance(args, (list, tuple)):
args = Tuple(args)
if isinstance(dtype, str):
output_type = DynTensorType(len(shape), dtype)
elif isinstance(dtype, (list, tuple)):
if len(shape) != len(dtype):
raise ValueError("The number of output_shape and output_dtype of call_tir mismatch")
output_type = TupleType([DynTensorType(len(x), y) for x, y in zip(shape, dtype)])
else:
raise TypeError("Not supported dtype for call_tir: " + str(type(dtype)))
return _ffi_api.call_tir(func, args, shape, output_type, tir_vars)
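# Illustrative call pattern (added for clarity; `my_prim_func`, `x`, `y`, `n`
# and `m` are hypothetical and must come from user code):
#
#     out = call_tir(my_prim_func, [x, y], (n, m), "float32")
#
# describes a single output of shape (n, m) with dtype "float32"; for several
# outputs, pass a Tuple of ShapeExprs together with a list of dtype strings of
# the same length.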
def make_closure(
func: Expr,
args: Union[Tuple, List[Expr]],
) -> Object:
"""
Create a closure with free variables and return the closure.
Parameters
----------
func : Expr
The closure, can be ExternFunc or PrimFunc.
args : Union[Tuple, List[Expr]]
The input arguments.
Returns
-------
ret: Object
The VMClosure.
"""
if isinstance(args, (list, tuple)):
args = Tuple(args)
return _ffi_api.make_closure(func, args)
def invoke_closure(
closure: Expr,
args: Union[Tuple, List[Expr]],
) -> Object:
"""
Invoke a closure.
Parameters
----------
closure : Expr
The VMClosure object.
args : Union[Tuple, List[Expr]]
The input arguments.
Returns
-------
ret: Object
The result.
"""
if isinstance(args, (list, tuple)):
args = Tuple(args)
return _ffi_api.invoke_closure(closure, args)
| 27.465649
| 99
| 0.659255
|
9c4bb5e842b11791f01ed35469b0f53b7d68715f
| 2,679
|
py
|
Python
|
metrics.py
|
elitonfilho/segsr
|
74ee5a4794f0894cffbb08f696f601b7d914513c
|
[
"MIT"
] | null | null | null |
metrics.py
|
elitonfilho/segsr
|
74ee5a4794f0894cffbb08f696f601b7d914513c
|
[
"MIT"
] | null | null | null |
metrics.py
|
elitonfilho/segsr
|
74ee5a4794f0894cffbb08f696f601b7d914513c
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from PIL import Image
import cv2
def calculate_psnr(img1, img2):
# img1 and img2 have range [0, 255]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calculate_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
                ssims.append(ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
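# Illustrative sanity check (added for clarity; not part of the original
# metrics.py). Identical uint8 images should give PSNR == inf and SSIM == 1,
# and adding noise should lower both. Call _demo_metrics() manually if needed.
def _demo_metrics():
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
    noise = rng.integers(-10, 11, size=img.shape)
    noisy = np.clip(img.astype(np.int16) + noise, 0, 255).astype(np.uint8)
    print('PSNR identical:', calculate_psnr(img, img))   # inf
    print('SSIM identical:', calculate_ssim(img, img))   # ~1.0
    print('PSNR noisy    :', calculate_psnr(img, noisy))
    print('SSIM noisy    :', calculate_ssim(img, noisy))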
if __name__ == "__main__":
# img1 = Image.open('data/HR/2953-3-SO_0_HR.png').resize((256,256), resample=Image.BICUBIC)
# img1 = np.array(img1)
# img2 = Image.open('1e-2.png').resize((256,256), resample=Image.BICUBIC)
img2 = Image.open('lr.png')
img2 = np.array(img2)
imgHR = Image.open('data/HR/2953-3-SO_0_HR.png')
# w, h = imgHR.size
# imgHR = imgHR.resize((4*w, 4*h), resample=Image.BICUBIC)
imgHR = np.array(imgHR)
    # 1: With seg, 2: Without seg
# print('1 e 2', calculate_psnr(img1, img2))
print('1 e HR', calculate_psnr(img2, imgHR))
# print('2 e HR', calculate_psnr(img2, imgHR))
# print('1 e 2', calculate_ssim(img1, img2))
print('1 e HR', calculate_ssim(img2, imgHR))
# print('2 e HR', calculate_ssim(img2, imgHR))
| 35.72
| 96
| 0.564016
|
e18e97e79c4a2450e3bc06f3bd224b40036c94d0
| 14,747
|
py
|
Python
|
tensorflow_quantum/python/differentiators/gradient_test.py
|
dlyongemallo/tensorflow-quantum
|
22cf3058ae5bfb4f0ca7ed70cb691bd7be650e61
|
[
"Apache-2.0"
] | 1
|
2020-09-25T14:01:24.000Z
|
2020-09-25T14:01:24.000Z
|
tensorflow_quantum/python/differentiators/gradient_test.py
|
dlyongemallo/tensorflow-quantum
|
22cf3058ae5bfb4f0ca7ed70cb691bd7be650e61
|
[
"Apache-2.0"
] | 1
|
2022-02-10T02:16:17.000Z
|
2022-02-10T02:16:17.000Z
|
tensorflow_quantum/python/differentiators/gradient_test.py
|
dlyongemallo/tensorflow-quantum
|
22cf3058ae5bfb4f0ca7ed70cb691bd7be650e61
|
[
"Apache-2.0"
] | 1
|
2020-08-03T20:20:26.000Z
|
2020-08-03T20:20:26.000Z
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing for gradient calculation consistency in TFQ."""
import copy
import numpy as np
import sympy
import tensorflow as tf
from absl.testing import parameterized
import cirq
from tensorflow_quantum.python import util
from tensorflow_quantum.python.differentiators import linear_combination
from tensorflow_quantum.python.differentiators import parameter_shift
from tensorflow_quantum.python.differentiators import stochastic_differentiator
from tensorflow_quantum.core.ops import circuit_execution_ops, batch_util
DIFFS = [
linear_combination.ForwardDifference(grid_spacing=0.0001),
linear_combination.ForwardDifference(error_order=2, grid_spacing=0.0001),
linear_combination.CentralDifference(grid_spacing=0.0001),
linear_combination.CentralDifference(error_order=4, grid_spacing=0.0001),
parameter_shift.ParameterShift(),
]
STOCHASTIC_DIFFS = [
stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False,
stochastic_generator=False,
stochastic_cost=False),
stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True,
stochastic_generator=False,
stochastic_cost=False),
stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False,
stochastic_generator=True,
stochastic_cost=False),
stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True,
stochastic_generator=True,
stochastic_cost=False),
]
OPS = [
circuit_execution_ops.get_expectation_op(cirq.sim.Simulator()), # WF
circuit_execution_ops.get_expectation_op(
cirq.DensityMatrixSimulator()), # DM
circuit_execution_ops.get_expectation_op() # C++
]
def _cirq_simple_finite_difference(circuit_batch,
resolvers,
symbol_names,
op_batch,
grid_spacing=0.0001):
"""A simple finite difference code that calculates the gradient of a
batch of circuits using cirq."""
simulator = cirq.sim.Simulator()
init_vals = batch_util.batch_calculate_expectation(circuit_batch, resolvers,
op_batch, simulator)
grad_circuits = []
grad_resolvers = []
grad_pauli_sums = []
for this_program, this_pauli_sums, this_resolver in \
zip(circuit_batch, op_batch, resolvers):
for symbol in symbol_names:
perturbed_resolver = copy.deepcopy(this_resolver)
perturbed_resolver.param_dict[symbol] += grid_spacing
grad_circuits.append(this_program)
grad_pauli_sums.append(this_pauli_sums)
grad_resolvers.append(perturbed_resolver)
# shape: [n_programs * len(symbol_names), n_pauli_sums]
results = np.array(
batch_util.batch_calculate_expectation(circuits=grad_circuits,
param_resolvers=grad_resolvers,
ops=grad_pauli_sums,
simulator=simulator))
# shape: [n_pauli_sums, n_programs, len(symbol_names)]
gradient_generator = results.transpose().reshape(
(len(op_batch[0]), len(circuit_batch), len(symbol_names)))
# shape: [n_pauli_sums, n_programs, len(symbol_names)]
forward_pass_vals = np.transpose(
np.vstack([np.expand_dims(init_vals, axis=0)] * len(symbol_names)),
(2, 1, 0))
return np.sum(1 / grid_spacing * (gradient_generator - forward_pass_vals),
axis=0)
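# Minimal numpy-only sketch (added for illustration; not part of the original
# test suite): the same forward-difference idea as
# _cirq_simple_finite_difference, applied to the scalar function used in
# test_backprop below, f(theta) = exp(cos(2 * pi * theta)). For a grid_spacing
# of 1e-4 the approximation matches the exact derivative
# -2 * pi * sin(2 * pi * theta) * exp(cos(2 * pi * theta)) to a few decimals.
def _finite_difference_demo(theta=0.25, grid_spacing=0.0001):
    def f(t):
        return np.exp(np.cos(2 * np.pi * t))
    approx = (f(theta + grid_spacing) - f(theta)) / grid_spacing
    exact = -2 * np.pi * np.sin(2 * np.pi * theta) * np.exp(np.cos(2 * np.pi * theta))
    return approx, exact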
class GradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase):
"""Test correctness of the differentiators to reference cirq algorithm."""
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': DIFFS + STOCHASTIC_DIFFS,
'op': OPS,
'stochastic_cost': [False, True]
})))
def test_backprop(self, differentiator, op, stochastic_cost):
"""Test that gradients are correctly backpropagated through a quantum
circuit via comparison to analytical results.
"""
        # hack to add stochastic cost. TODO(jaeyoo): remove this hack.
differentiator.stochastic_cost = stochastic_cost
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
def exact_grad(theta):
new_theta = 2 * np.pi * theta
return -2 * np.pi * np.sin(new_theta) * np.exp(np.cos(new_theta))
bit = cirq.GridQubit(0, 0)
circuits = util.convert_to_tensor(
[cirq.Circuit(cirq.X(bit)**sympy.Symbol('rx')) for _ in range(2)])
pstring = util.convert_to_tensor([[
cirq.PauliSum.from_pauli_strings([cirq.PauliString({bit: cirq.Z})])
] for _ in circuits])
base_rot_angles = tf.constant([[0.25], [0.125]])
with tf.GradientTape() as g:
g.watch(base_rot_angles)
input_angles = 2 * base_rot_angles
exp_res = tf.exp(op(circuits, ['rx'], input_angles, pstring))
grad = g.gradient(exp_res, base_rot_angles)
exact = [[exact_grad(0.25)], [exact_grad(0.125)]]
# will this be too tight? time will tell.
self.assertAllClose(exact, grad.numpy(), rtol=0.01, atol=0.01)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': DIFFS,
'op': OPS,
'n_qubits': [5],
'n_programs': [3],
'n_ops': [3],
'symbol_names': [['a', 'b']]
})))
def test_gradients_vs_cirq_finite_difference(self, differentiator, op,
n_qubits, n_programs, n_ops,
symbol_names):
"""Compare TFQ differentiators to fine-grained noiseless cirq finite
differencing.
        DISCLAIMER: the consistency of STOCHASTIC_DIFFS is hard to check.
        Their expectation values should be checked, but that takes a long time
        because SGDifferentiator is not optimized. Until it is, the consistency
        check is performed in benchmarks/scripts/differentiators:convergence_test
        TODO(jaeyoo): move convergence_test here once SGDifferentiator is
optimized.
"""
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
psums = [
util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch],
dtype=np.float32)
# calculate tfq gradient
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
programs = util.convert_to_tensor(circuit_batch)
ops = util.convert_to_tensor(psums)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(programs, symbol_names, symbol_values_tensor, ops)
tfq_grads = g.gradient(expectations, symbol_values_tensor)
# calculate gradients in cirq using a very simple forward differencing
# scheme
cirq_grads = _cirq_simple_finite_difference(circuit_batch,
resolver_batch,
symbol_names, psums)
# will this be too tight? time will tell.
self.assertAllClose(cirq_grads, tfq_grads, rtol=1e-2, atol=1e-2)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': DIFFS + STOCHASTIC_DIFFS,
'op': OPS,
'stochastic_cost': [False, True]
})))
def test_analytic_value_with_simple_circuit(self, differentiator, op,
stochastic_cost):
"""Test the value of differentiator with simple circuit.
Since there are only one symbol, one gate and one op, there is only one
samling result, STOCHATIC_DIFFS shows the same result with that of
deterministic differentiators."""
# Get an expectation op, with this differentiator attached.
differentiator.refresh()
differentiator.stochastic_cost = stochastic_cost
op = differentiator.generate_differentiable_op(analytic_op=op)
qubit = cirq.GridQubit(0, 0)
circuit = util.convert_to_tensor(
[cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
psums = util.convert_to_tensor([[cirq.Z(qubit)]])
symbol_values_array = np.array([[0.123]], dtype=np.float32)
# Calculate tfq gradient.
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, ['alpha'], symbol_values_tensor, psums)
grads = g.gradient(expectations, symbol_values_tensor)
ground_truth_grads = np.array([[-1.1839752]])
self.assertAllClose(ground_truth_grads, grads, rtol=1e-2, atol=1e-2)
class StochasticDifferentiatorCorrectnessTest(tf.test.TestCase,
parameterized.TestCase):
"""Test correctness of the stochastic differentiators to reference cirq
algorithm.
DISCLAIMER: this test allows for a larger margin of error and as long
as convergence is happening then it passes"""
# TODO(zaqqwerty): only this test was failing after adding cirq.I
# support, so it is disabled pending diagnosis
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': STOCHASTIC_DIFFS,
'op': OPS,
'n_qubits': [5],
'n_programs': [3],
'n_ops': [3],
'symbol_names': [['a', 'b']],
'stochastic_cost_eps': [(False, 5e-1), (True, 7e-1)],
})))
def gradients_vs_cirq_finite_difference(self, differentiator, op, n_qubits,
n_programs, n_ops, symbol_names,
stochastic_cost_eps):
"""Compare TFQ differentiators to fine-grained noiseless cirq finite
differencing with a larger margin of error."""
        # TODO(jaeyoo): clean up this hacky workaround so variable
# assignment doesn't need to take place like this.
differentiator.stochastic_cost, eps = stochastic_cost_eps
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
psums = [
util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch],
dtype=np.float32)
# calculate tfq gradient
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
programs = util.convert_to_tensor(circuit_batch)
ops = util.convert_to_tensor(psums)
# calculate gradients in cirq using a very simple forward differencing
# scheme
cirq_grads = _cirq_simple_finite_difference(circuit_batch,
resolver_batch,
symbol_names, psums)
def _get_gradient():
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(programs, symbol_names, symbol_values_tensor,
ops)
return g.gradient(expectations, symbol_values_tensor)
def _abs_diff(grad, mask):
return np.sum(np.abs(grad - cirq_grads * mask))
def _get_nonzero_mask(grad):
return (grad.numpy() != 0.0).astype(np.float32)
        # Get the non-zero mask: a few coordinates may not have been sampled
        # yet and therefore still hold zero values.
tfq_grads_1 = _get_gradient()
mask_1 = _get_nonzero_mask(tfq_grads_1)
if not np.allclose(tfq_grads_1, cirq_grads * mask_1, atol=eps):
tfq_grads_2 = 0.5 * (tfq_grads_1 + _get_gradient())
mask_2 = _get_nonzero_mask(tfq_grads_2)
            # Check if the 2nd error becomes smaller than the 1st one.
if not _abs_diff(tfq_grads_1, mask_1) > _abs_diff(
tfq_grads_2, mask_2):
cnt = 2
tfq_grads = (cnt * tfq_grads_2 + _get_gradient()) / (cnt + 1)
while (cnt < 10 and
not np.allclose(cirq_grads, tfq_grads, atol=eps)):
cnt += 1
tfq_grads = (cnt * tfq_grads + _get_gradient()) / (cnt + 1)
self.assertAllClose(cirq_grads, tfq_grads, atol=eps)
if __name__ == '__main__':
tf.test.main()
| 44.418675
| 80
| 0.605004
|
d00db8408e17563988be0779872e006b18097475
| 460
|
py
|
Python
|
data/scripts/templates/object/static/structure/naboo/shared_wall_naboo_theed_style_1.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/static/structure/naboo/shared_wall_naboo_theed_style_1.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/static/structure/naboo/shared_wall_naboo_theed_style_1.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/naboo/shared_wall_naboo_theed_style_1.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.058824
| 86
| 0.732609
|
3a1dc568e9964dfab166f2834652a89752cd23d9
| 17,083
|
py
|
Python
|
datacube_ows/styles/colormap.py
|
FlexiGroBots-H2020/datacube-ows
|
8e3e1343582c00ae46b498247ac98d8e98bd000f
|
[
"Apache-2.0"
] | 4
|
2017-11-02T04:22:30.000Z
|
2018-05-01T14:16:23.000Z
|
datacube_ows/styles/colormap.py
|
FlexiGroBots-H2020/datacube-ows
|
8e3e1343582c00ae46b498247ac98d8e98bd000f
|
[
"Apache-2.0"
] | 33
|
2018-05-23T01:32:06.000Z
|
2018-11-05T01:07:09.000Z
|
datacube_ows/styles/colormap.py
|
FlexiGroBots-H2020/datacube-ows
|
8e3e1343582c00ae46b498247ac98d8e98bd000f
|
[
"Apache-2.0"
] | 7
|
2017-10-09T00:09:44.000Z
|
2018-07-27T00:41:19.000Z
|
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
import io
import logging
from datetime import datetime
from typing import Callable, List, MutableMapping, Optional, Union, cast
import numpy
import xarray
from colour import Color
from datacube.utils.masking import make_mask
from matplotlib import patches as mpatches
from matplotlib import pyplot as plt
from xarray import DataArray, Dataset
from datacube_ows.config_utils import (CFG_DICT, AbstractMaskRule,
ConfigException, OWSMetadataConfig)
from datacube_ows.styles.base import StyleDefBase
_LOG = logging.getLogger(__name__)
class AbstractValueMapRule(AbstractMaskRule):
"""
A Value Map Rule.
Construct a ValueMap rule-set with ValueMapRule.value_map_from_config
"""
def __init__(self, style_def: "ColorMapStyleDef", band: str, cfg: CFG_DICT) -> None:
"""
Construct a Value Map Rule
:param style_def: The owning ColorMapStyleDef object
:param band: The name of the flag-band the rules apply to
:param cfg: The rule specification
"""
self.style = style_def
super().__init__(band, cfg, mapper=style_def.local_band)
cfg = cast(CFG_DICT, self._raw_cfg)
self.title = cast(str, cfg["title"])
self.abstract = cast(str, cfg.get("abstract"))
if self.title and self.abstract:
self.label: Optional[str] = f"{self.title} - {self.abstract}"
elif self.title:
self.label = self.title
elif self.abstract:
self.label = self.abstract
else:
self.label = None
self.parse_color(cfg)
@property
def context(self) -> str:
return f"style {self.style.name} in layer {self.style.product.name} valuemap rule"
def parse_color(self, cfg: CFG_DICT):
self.color_str = cast(str, cfg["color"])
self.rgb = Color(self.color_str)
if cfg.get("mask"):
self.alpha = 0.0
else:
self.alpha = float(cast(Union[float, int, str], cfg.get("alpha", 1.0)))
@classmethod
def value_map_from_config(cls,
style_or_mdh: Union["ColorMapStyleDef", "ColorMapStyleDef.MultiDateHandler"],
cfg: CFG_DICT
) -> MutableMapping[str, List["AbstractValueMapRule"]]:
"""
Create a multi-date value map rule set from a config specification
        :param style_or_mdh: The parent style definition object or multi-date handler
:param cfg: The specification for the multi-date value map.
:return: A value map ruleset dictionary.
"""
if isinstance(style_or_mdh, ColorMapStyleDef):
typ = ValueMapRule
else:
mdh = cast(ColorMapStyleDef.MultiDateHandler, style_or_mdh)
if mdh.aggregator:
style_or_mdh = mdh.style
typ = ValueMapRule
else:
if mdh.min_count != mdh.max_count:
raise ConfigException(
"MultiDate value map only supported on multi-date handlers with min_count and max_count equal.")
typ = MultiDateValueMapRule
vmap: MutableMapping[str, List["AbstractValueMapRule"]] = {}
for band_name, rules in cfg.items():
band_rules = [typ(style_or_mdh, band_name, rule) for rule in cast(List[CFG_DICT], rules)]
vmap[band_name] = band_rules
return vmap
class ValueMapRule(AbstractValueMapRule):
"""
A Value Map Rule.
Construct a ValueMap rule-set with ValueMapRule.value_map_from_config
"""
def __init__(self, style_cfg: "ColorMapStyleDef", band: str,
cfg: CFG_DICT) -> None:
"""
        Construct a Value Map Rule
        :param style_cfg: The owning ColorMapStyleDef object
:param band: The name of the flag-band the rules apply to
:param cfg: The rule specification
"""
super().__init__(style_def=style_cfg, band=band, cfg=cfg)
class MultiDateValueMapRule(AbstractValueMapRule):
"""
A Multi-Date Value Map Rule.
Construct a Multi-Date ValueMap rule-set with MultiDateValueMapRule.value_map_from_config
"""
def __init__(self, mdh: "ColorMapStyleDef.MultiDateHandler", band: str,
cfg: CFG_DICT) -> None:
"""
Construct a Multi-date Value Map Rule
:param mdh: The owning ColorMapStyleDef object
:param band: The name of the flag-band the rules apply to
:param cfg: The rule specification
"""
self.mdh = mdh
self.invert: List[bool] = []
self.flags: Optional[List[CFG_DICT]] = []
self.or_flags: Optional[List[bool]] = []
self.values: Optional[List[List[int]]] = []
super().__init__(style_def=mdh.style, band=band, cfg=cfg)
def parse_rule_spec(self, cfg: CFG_DICT):
if "invert" in cfg:
self.invert = [bool(b) for b in cfg["invert"]]
else:
self.invert = [False] * self.mdh.max_count
if len(self.invert) != self.mdh.max_count:
raise ConfigException(f"Invert entry has wrong number of rule sets for date count")
if "flags" in cfg:
date_flags = cast(CFG_DICT, cfg["flags"])
if len(date_flags) != self.mdh.max_count:
raise ConfigException(f"Flags entry has wrong number of rule sets for date count")
for flags in date_flags:
or_flag: bool = False
if "or" in flags and "and" in flags:
raise ConfigException(f"MultiDateValueMap rule in {self.mdh.style.name} of layer {self.mdh.style.product.name} combines 'and' and 'or' rules")
elif "or" in flags:
or_flag = True
flags = cast(CFG_DICT, flags["or"])
elif "and" in flags:
flags = cast(CFG_DICT, flags["and"])
self.flags.append(flags)
self.or_flags.append(or_flag)
else:
self.flags = None
self.or_flags = None
if "values" in cfg:
self.values = cast(List[List[int]], list(cfg["values"]))
else:
self.values = None
if not self.flags and not self.values:
raise ConfigException(f"Multi-Date Value map rule in {self.context} must have a non-empty 'flags' or 'values' section.")
if self.flags and self.values:
raise ConfigException(f"Multi-Date Value map rule in {self.context} has both a 'flags' and a 'values' section - choose one.")
def create_mask(self, data: DataArray) -> DataArray:
"""
Create a mask from raw flag band data.
:param data: Multi-date Raw flag data, assumed to be for this rule's flag band.
:return: A boolean dateless DataArray, True where the data matches this rule
"""
date_slices = (data.sel(time=dt) for dt in data.coords["time"].values)
mask: Optional[DataArray] = None
if self.values:
for d_slice, vals, invert in zip(date_slices, self.values, self.invert):
d_mask: Optional[DataArray] = None
if len(vals) == 0:
d_mask = d_slice == d_slice
else:
for v in cast(List[int], vals):
vmask = d_slice == v
if d_mask is None:
d_mask = vmask
else:
d_mask |= vmask
if d_mask is not None and invert:
d_mask = ~d_mask # pylint: disable=invalid-unary-operand-type
if mask is None:
mask = d_mask
else:
mask &= d_mask
else:
for d_slice, flags, or_flags, invert in zip(date_slices, self.flags, self.or_flags, self.invert):
d_mask: Optional[DataArray] = None
if not flags:
d_mask = d_slice == d_slice
elif or_flags:
for f in cast(CFG_DICT, flags).items():
f = {f[0]: f[1]}
if d_mask is None:
d_mask = make_mask(d_slice, **f)
else:
d_mask |= make_mask(d_slice, **f)
else:
d_mask = make_mask(d_slice, **cast(CFG_DICT, flags))
if invert:
d_mask = ~d_mask # pylint: disable=invalid-unary-operand-type
if mask is None:
mask = d_mask
else:
mask &= d_mask
return mask
def convert_to_uint8(fval):
scaled = int(fval * 255.0 + 0.5)
clipped = min(max(scaled, 0), 255)
return clipped
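# Worked example (added for clarity, not in the original module):
# convert_to_uint8 rounds a [0, 1] float channel to uint8 and clips anything
# out of range, e.g. convert_to_uint8(0.0) == 0, convert_to_uint8(0.5) == 128,
# convert_to_uint8(1.0) == 255, convert_to_uint8(1.7) == 255 and
# convert_to_uint8(-0.3) == 0.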
def apply_value_map(value_map: MutableMapping[str, List[AbstractValueMapRule]],
data: Dataset,
band_mapper: Callable[[str], str]) -> Dataset:
imgdata = Dataset(coords={k: v for k, v in data.coords.items() if k != "time"})
shape = list(imgdata.dims.values())
for channel in ("red", "green", "blue", "alpha"):
c = numpy.full(shape, 0, dtype="uint8")
imgdata[channel] = DataArray(c, coords=imgdata.coords)
for cfg_band, rules in value_map.items():
# Run through each item
band = band_mapper(cfg_band)
bdata = cast(DataArray, data[band])
if bdata.dtype.kind == 'f':
# Convert back to int for bitmasking
bdata = ColorMapStyleDef.reint(bdata)
for rule in reversed(rules):
mask = rule.create_mask(bdata)
if mask.data.any():
for channel in ("red", "green", "blue", "alpha"):
if channel == "alpha":
val = convert_to_uint8(rule.alpha)
else:
val = convert_to_uint8(getattr(rule.rgb, channel))
imgdata[channel] = xarray.where(mask, val, imgdata[channel])
return imgdata
class PatchTemplate:
def __init__(self, idx: int, rule: AbstractValueMapRule) -> None:
self.idx = idx
self.colour = rule.rgb.hex_l
self.label = rule.label
class ColorMapLegendBase(StyleDefBase.Legend, OWSMetadataConfig):
METADATA_ABSTRACT: bool = False
METADATA_VALUE_RULES: bool = True
def __init__(self, style_or_mdh: Union["StyleDefBase", "StyleDefBase.Legend"], cfg: CFG_DICT) -> None:
super().__init__(style_or_mdh, cfg)
raw_cfg = cast(CFG_DICT, self._raw_cfg)
self.ncols = int(raw_cfg.get("ncols", 1))
if self.ncols < 1:
raise ConfigException("ncols must be a positive integer")
self.patches: List[PatchTemplate] = []
def register_value_map(self, value_map: MutableMapping[str, List["AbstractValueMapRule"]]) -> None:
for band in value_map.keys():
for idx, rule in reversed(list(enumerate(value_map[band]))):
# only include values that are not transparent (and that have a non-blank title or abstract)
if rule.alpha > 0.001 and rule.label:
self.patches.append(PatchTemplate(idx, rule))
self.parse_metadata(self._raw_cfg)
def render(self, bytesio: io.BytesIO) -> None:
patches = [
mpatches.Patch(color=pt.colour, label=self.patch_label(pt.idx))
for pt in self.patches
]
plt.rcdefaults()
if self.mpl_rcparams:
plt.rcParams.update(self.mpl_rcparams)
plt.figure(figsize=(self.width, self.height))
plt.axis('off')
if self.title:
plt.legend(handles=patches,
loc='center',
ncol=self.ncols,
frameon=False,
title=self.title)
else:
plt.legend(handles=patches,
loc='center',
ncol=self.ncols,
frameon=False)
plt.savefig(bytesio, format='png')
def patch_label(self, idx: int):
return self.read_local_metadata(f"rule_{idx}")
# For MetadataConfig
@property
def default_title(self) -> Optional[str]:
return ""
class ColorMapStyleDef(StyleDefBase):
"""
Style subclass for value-map styles
"""
auto_legend = True
def __init__(self,
product: "datacube_ows.ows_configuration.OWSNamedLayer",
style_cfg: CFG_DICT,
stand_alone: bool = False,
user_defined: bool = False) -> None:
""""
Constructor - refer to StyleDefBase
"""
super().__init__(product, style_cfg, stand_alone=stand_alone, user_defined=user_defined)
style_cfg = cast(CFG_DICT, self._raw_cfg)
self.value_map = AbstractValueMapRule.value_map_from_config(self, cast(CFG_DICT, style_cfg["value_map"]))
self.legend_cfg.register_value_map(self.value_map)
for mdh in self.multi_date_handlers:
mdh.legend_cfg.register_value_map(mdh.value_map)
for band in self.value_map.keys():
self.raw_needed_bands.add(band)
@staticmethod
def reint(data: DataArray) -> DataArray:
"""
Convert a data-array to int.
:param data: input data (potentially non-integer)
:return: same data cast to integer
"""
inted = data.astype("int")
if hasattr(data, "attrs"):
attrs = data.attrs
inted.attrs = attrs
return inted
@staticmethod
def create_colordata(data: DataArray, rgb: Color, alpha: float, mask: DataArray) -> Dataset:
"""Colour a mask with a given colour/alpha"""
target = Dataset(coords=data.coords)
colors = ["red", "green", "blue", "alpha"]
for color in colors:
val = alpha if color == "alpha" else getattr(rgb, color)
c = numpy.full(data.shape, val)
target[color] = DataArray(c, dims=data.dims, coords=data.coords)
# pyre-ignore[6]
masked = target.where(mask).where(numpy.isfinite(data)) # remask
return masked
def transform_single_date_data(self, data: Dataset) -> Dataset:
"""
Apply style to raw data to make an RGBA image xarray (single time slice only)
:param data: Raw data, all bands.
:return: RGBA uint8 xarray
"""
# pylint: disable=too-many-locals, too-many-branches
        # extent mask data per band to preserve nodata
_LOG.debug("transform begin %s", datetime.now())
# if extent_mask is not None:
# for band in data.data_vars:
# try:
# data[band] = data[band].where(extent_mask, other=data[band].attrs['nodata'])
# except AttributeError:
# data[band] = data[band].where(extent_mask)
return apply_value_map(self.value_map, data, self.product.band_idx.band)
class Legend(ColorMapLegendBase):
pass
class MultiDateHandler(StyleDefBase.MultiDateHandler):
auto_legend = True
non_animate_requires_aggregator = False
def __init__(self, style: "ColorMapStyleDef", cfg: CFG_DICT) -> None:
"""
First stage initialisation
:param style: The parent style object
:param cfg: The multidate handler configuration
"""
super().__init__(style, cfg)
self._value_map: Optional[MutableMapping[str, AbstractValueMapRule]] = None
if self.animate:
if "value_map" in self._raw_cfg:
raise ConfigException("Multidate value maps not supported for animation handlers")
else:
self._value_map = AbstractValueMapRule.value_map_from_config(self,
cast(CFG_DICT, self._raw_cfg["value_map"]))
@property
def value_map(self):
if self._value_map is None:
self._value_map = self.style.value_map
return self._value_map
def transform_data(self, data: "xarray.Dataset") -> "xarray.Dataset":
"""
Apply image transformation
:param data: Raw data
:return: RGBA image xarray. May have a time dimension
"""
if self.aggregator is None:
return apply_value_map(self.value_map, data, self.style.product.band_idx.band)
else:
agg = self.aggregator(data)
return apply_value_map(self.value_map, agg, self.style.product.band_idx.band)
class Legend(ColorMapLegendBase):
pass
# Register ColorMapStyleDef as a style subclass.
StyleDefBase.register_subclass(ColorMapStyleDef, "value_map")
| 39.543981
| 162
| 0.584733
|
1d128c5247002692c94e8ec909209b34e8347a2a
| 597
|
py
|
Python
|
models/__init__.py
|
PistonY/ModelZoo.pytorch
|
eb4cc16bfbb6bdf1c021f5f6faba7fbfc8c04612
|
[
"MIT"
] | 41
|
2019-11-13T02:09:47.000Z
|
2022-02-20T11:31:18.000Z
|
models/__init__.py
|
PistonY/ModelZoo.pytorch
|
eb4cc16bfbb6bdf1c021f5f6faba7fbfc8c04612
|
[
"MIT"
] | 5
|
2020-09-22T10:47:22.000Z
|
2021-05-11T10:10:37.000Z
|
models/__init__.py
|
PistonY/ModelZoo.pytorch
|
eb4cc16bfbb6bdf1c021f5f6faba7fbfc8c04612
|
[
"MIT"
] | 5
|
2020-07-07T13:27:04.000Z
|
2021-06-09T03:21:27.000Z
|
# -*- coding: utf-8 -*-
# @Author : DevinYang(pistonyang@gmail.com)
from .oct_resnet_re import *
from .resnet import *
from .fairnet import *
from .mobilenet import *
from .proxyless_nas import *
from .efficientnet import *
from .evo_norm import *
from .resnest import *
from .regnet import *
from .ghostnet import *
from .lamdba_net import *
from torchvision.models.alexnet import *
from torchvision.models.densenet import *
from torchvision.models.googlenet import *
from torchvision.models.inception import *
from torchvision.models.shufflenetv2 import *
from torchvision.models.vgg import *
| 28.428571
| 45
| 0.773869
|
b322d454002e8672ff18f237ed6d1dd5392a3d0b
| 5,005
|
py
|
Python
|
homeassistant/components/tahoma/__init__.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 1
|
2017-05-30T22:21:05.000Z
|
2017-05-30T22:21:05.000Z
|
homeassistant/components/tahoma/__init__.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 51
|
2020-10-14T01:19:07.000Z
|
2022-03-31T06:02:48.000Z
|
homeassistant/components/tahoma/__init__.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 1
|
2021-08-16T02:53:15.000Z
|
2021-08-16T02:53:15.000Z
|
"""Support for Tahoma devices."""
from collections import defaultdict
import logging
from requests.exceptions import RequestException
from tahoma_api import Action, TahomaApi
import voluptuous as vol
from homeassistant.const import CONF_EXCLUDE, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "tahoma"
TAHOMA_ID_FORMAT = "{}_{}"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
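# Example configuration.yaml entry matching CONFIG_SCHEMA above (illustrative;
# credentials are placeholders):
#
#   tahoma:
#     username: you@example.com
#     password: !secret tahoma_password
#     exclude:
#       - io:OnOffIOComponent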
PLATFORMS = ["binary_sensor", "cover", "lock", "scene", "sensor", "switch"]
TAHOMA_TYPES = {
"io:AwningValanceIOComponent": "cover",
"io:ExteriorVenetianBlindIOComponent": "cover",
"io:DiscreteGarageOpenerIOComponent": "cover",
"io:DiscreteGarageOpenerWithPartialPositionIOComponent": "cover",
"io:HorizontalAwningIOComponent": "cover",
"io:GarageOpenerIOComponent": "cover",
"io:LightIOSystemSensor": "sensor",
"io:OnOffIOComponent": "switch",
"io:OnOffLightIOComponent": "switch",
"io:RollerShutterGenericIOComponent": "cover",
"io:RollerShutterUnoIOComponent": "cover",
"io:RollerShutterVeluxIOComponent": "cover",
"io:RollerShutterWithLowSpeedManagementIOComponent": "cover",
"io:SomfyBasicContactIOSystemSensor": "sensor",
"io:SomfyContactIOSystemSensor": "sensor",
"io:TemperatureIOSystemSensor": "sensor",
"io:VerticalExteriorAwningIOComponent": "cover",
"io:VerticalInteriorBlindVeluxIOComponent": "cover",
"io:WindowOpenerVeluxIOComponent": "cover",
"opendoors:OpenDoorsSmartLockComponent": "lock",
"rtds:RTDSContactSensor": "sensor",
"rtds:RTDSMotionSensor": "sensor",
"rtds:RTDSSmokeSensor": "smoke",
"rts:BlindRTSComponent": "cover",
"rts:CurtainRTSComponent": "cover",
"rts:DualCurtainRTSComponent": "cover",
"rts:ExteriorVenetianBlindRTSComponent": "cover",
"rts:GarageDoor4TRTSComponent": "switch",
"rts:LightRTSComponent": "switch",
"rts:RollerShutterRTSComponent": "cover",
"rts:OnOffRTSComponent": "switch",
"rts:VenetianBlindRTSComponent": "cover",
"somfythermostat:SomfyThermostatTemperatureSensor": "sensor",
"somfythermostat:SomfyThermostatHumiditySensor": "sensor",
"zwave:OnOffLightZWaveComponent": "switch",
}
def setup(hass, config):
"""Set up Tahoma integration."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
exclude = conf.get(CONF_EXCLUDE)
try:
api = TahomaApi(username, password)
except RequestException:
_LOGGER.exception("Error when trying to log in to the Tahoma API")
return False
try:
api.get_setup()
devices = api.get_devices()
scenes = api.get_action_groups()
except RequestException:
_LOGGER.exception("Error when getting devices from the Tahoma API")
return False
hass.data[DOMAIN] = {"controller": api, "devices": defaultdict(list), "scenes": []}
for device in devices:
_device = api.get_device(device)
if all(ext not in _device.type for ext in exclude):
device_type = map_tahoma_device(_device)
if device_type is None:
_LOGGER.warning(
"Unsupported type %s for Tahoma device %s",
_device.type,
_device.label,
)
continue
hass.data[DOMAIN]["devices"][device_type].append(_device)
for scene in scenes:
hass.data[DOMAIN]["scenes"].append(scene)
for platform in PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
def map_tahoma_device(tahoma_device):
"""Map Tahoma device types to Home Assistant platforms."""
return TAHOMA_TYPES.get(tahoma_device.type)
class TahomaDevice(Entity):
"""Representation of a Tahoma device entity."""
def __init__(self, tahoma_device, controller):
"""Initialize the device."""
self.tahoma_device = tahoma_device
self.controller = controller
self._name = self.tahoma_device.label
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {"tahoma_device_id": self.tahoma_device.url}
def apply_action(self, cmd_name, *args):
"""Apply Action to Device."""
action = Action(self.tahoma_device.url)
action.add_command(cmd_name, *args)
self.controller.apply_actions("HomeAssistant", [action])
| 33.366667
| 87
| 0.668531
|
ed1133dfcc828c717240d84d1ca1c8e6c2baf1e3
| 15,704
|
py
|
Python
|
pylr/parser.py
|
NPRA/PyLR
|
063f308c81fe3d0b765c6459d7ea609b50d2d503
|
[
"Apache-2.0"
] | 7
|
2015-02-15T06:11:37.000Z
|
2019-10-07T12:29:20.000Z
|
pylr/parser.py
|
NPRA/PyLR
|
063f308c81fe3d0b765c6459d7ea609b50d2d503
|
[
"Apache-2.0"
] | 1
|
2018-03-08T16:35:41.000Z
|
2019-08-19T10:48:19.000Z
|
pylr/parser.py
|
NPRA/PyLR
|
063f308c81fe3d0b765c6459d7ea609b50d2d503
|
[
"Apache-2.0"
] | 12
|
2016-05-21T09:55:44.000Z
|
2020-12-15T03:42:28.000Z
|
# -*- coding: utf-8 -*-
''' Location parser
.. moduleauthor:: David Marteau <david.marteau@mappy.com>
'''
from collections import namedtuple
from bitstring import BitStream
from .utils import lazyproperty
from .constants import (LATEST_BINARY_VERSION,
BINARY_VERSION_2,
MIN_BYTES_LINE_LOCATION,
MIN_BYTES_POINT_LOCATION,
MIN_BYTES_CLOSED_LINE_LOCATION,
MIN_BYTES_POLYGON,
RELATIVE_COORD_SIZE,
IS_POINT,
HAS_ATTRIBUTES,
GEOCOORD_SIZE,
POINT_ALONG_LINE_SIZE,
POINT_WITH_ACCESS_SIZE,
POINT_OFFSET_SIZE,
AREA_CODE_CIRCLE,
AREA_CODE_RECTANGLE,
AREA_CODE_POLYGON,
RECTANGLE_SIZE,
LARGE_RECTANGLE_SIZE,
GRID_SIZE,
LARGE_GRID_SIZE,
LRP_SIZE,
CIRCLE_BASE_SIZE,
LocationType)
class BinaryParseError(Exception):
pass
class BinaryVersionError(BinaryParseError):
pass
class InvalidDataSizeError(BinaryParseError):
pass
# The Constant RFU (Reserved for Future Use)
RFU_BITS = 'uint:1'
# number of bits used for attributes flag
ATTR_FLAG_BITS = 'uint:1'
# number of bits used for point flag
POINT_FLAG_BITS = 'uint:1'
# number of bits used for version
VERSION_BITS = 'uint:3'
AREA_FLAG_BIT0 = 'uint:1'
AREA_FLAG_BIT1 = 'uint:1'
HEADER_BITS = (RFU_BITS,
               AREA_FLAG_BIT1,
               POINT_FLAG_BITS,
               AREA_FLAG_BIT0,
               ATTR_FLAG_BITS,
               VERSION_BITS)
_BinaryHeader = namedtuple('_BinaryHeader', ('arf', 'af', 'pf', 'ver'))
class _RawBinaryData(object):
""" Hold a location reference description as a bit stream."""
MIN_VERSION = BINARY_VERSION_2
MAX_VERSION = LATEST_BINARY_VERSION
def __init__(self, data, base64=False):
""" Constructor.
        :param string data: Binary data
:param bool base64: True if data is coded in base64
"""
if base64:
data = data.decode("base64")
#: raw data size
self._sz = len(data)
#: bit stream used to read data
self._bs = BitStream(bytes=data)
def getbits(self, *bits):
""" Read the given numbers of bits.
:param tuple bits: Tuple of number of bits to read
:returns: Tuple of bit fields
:rtype: tuple
"""
return tuple(self._bs.read(v) for v in bits)
def get_position(self):
""" Returns position in the bit stream.
:returns: Position in the bit stream
:rtype: int
"""
return self._bs.pos
@property
def num_bytes(self):
""" Size of the decoded data.
:returns: Size of the decoded data.
:rtype: int
"""
return self._sz
@property
def version(self):
""" Return binary version of the data
:returns: Binary version of the data.
:rtype: int
"""
return self.header.ver
@lazyproperty
def header(self):
""" Parse header (once) location type
:returns: Header data
:rtype: _BinaryHeader
"""
# Validate data size
if self._sz < min(MIN_BYTES_LINE_LOCATION,
MIN_BYTES_POINT_LOCATION,
MIN_BYTES_CLOSED_LINE_LOCATION):
raise InvalidDataSizeError("not enough bytes in data")
_, arf1, pf, arf0, af, ver = self.getbits(*HEADER_BITS)
arf = 2 * arf1 + arf0
return _BinaryHeader(arf, af, pf, ver)
@lazyproperty
def location_type(self):
""" Parse location type (once)
:returns: Location type
:rtype: LocationType
"""
header = self.header
# Check version
if not self.MIN_VERSION <= header.ver <= self.MAX_VERSION:
raise BinaryVersionError("Invalid binary version {}".format(header.ver))
is_point = (header.pf == IS_POINT)
has_attributes = (header.af == HAS_ATTRIBUTES)
area_code = header.arf
is_area = ((area_code == 0 and not is_point and not has_attributes) or area_code > 0)
total_bytes = self._sz
loc_type = LocationType.UNKNOWN
if not is_point and not is_area and has_attributes:
loc_type = LocationType.LINE_LOCATION
elif is_point and not is_area:
if not has_attributes:
if total_bytes == GEOCOORD_SIZE:
loc_type = LocationType.GEO_COORDINATES
else:
raise InvalidDataSizeError("Invalid byte size")
else:
if total_bytes == POINT_ALONG_LINE_SIZE or total_bytes == (POINT_ALONG_LINE_SIZE + POINT_OFFSET_SIZE):
loc_type = LocationType.POINT_ALONG_LINE
elif total_bytes == POINT_WITH_ACCESS_SIZE or total_bytes == (POINT_WITH_ACCESS_SIZE + POINT_OFFSET_SIZE):
loc_type = LocationType.POI_WITH_ACCESS_POINT
else:
raise InvalidDataSizeError("Invalid byte size")
elif is_area and not is_point and has_attributes:
if total_bytes >= MIN_BYTES_CLOSED_LINE_LOCATION:
loc_type = LocationType.CLOSED_LINE
else:
raise InvalidDataSizeError("Invalid byte size")
else:
if area_code == AREA_CODE_CIRCLE:
loc_type = LocationType.CIRCLE
elif area_code == AREA_CODE_RECTANGLE:
# includes case AREA_CODE_GRID
if total_bytes == RECTANGLE_SIZE or total_bytes == LARGE_RECTANGLE_SIZE:
loc_type = LocationType.RECTANGLE
elif total_bytes == GRID_SIZE or total_bytes == LARGE_GRID_SIZE:
loc_type = LocationType.GRID
else:
raise InvalidDataSizeError("Invalid byte size")
elif area_code == AREA_CODE_POLYGON:
if not has_attributes and total_bytes >= MIN_BYTES_POLYGON:
loc_type = LocationType.POLYGON
else:
raise InvalidDataSizeError("Invalid byte size")
else:
raise BinaryParseError('Invalid header')
return loc_type
def init_binary_parsing(data, base64=False):
""" Create an instance of _RawBinaryData
The returned object can be passed to 'parse_binary'
:param string data: string describing the location
:param bool base64: True if encoded in base 64
:returns: Parsable data structure
:rtype: _RawBinaryData
"""
return _RawBinaryData(data, base64)
def parse_binary(data, base64=False):
""" Parse binary data.
Input is original data or an object returned by init_binary_parsing(...)
:param data: string (encoded or not) describing the location
:param bool base64: True if encoded in base 64
:returns: Object describing the parsed location, or an error object
"""
if not isinstance(data, _RawBinaryData):
data = _RawBinaryData(data, base64)
# Get header
loc_type = data.location_type
if loc_type == LocationType.LINE_LOCATION:
return parse_line(data)
elif loc_type == LocationType.POINT_ALONG_LINE:
return parse_point_along_line(data)
elif loc_type == LocationType.GEO_COORDINATES:
return parse_geo_coordinates(data)
elif loc_type == LocationType.POI_WITH_ACCESS_POINT:
return parse_poi_with_access_point(data)
elif loc_type == LocationType.RECTANGLE:
return parse_rectangle(data)
elif loc_type == LocationType.CLOSED_LINE:
return parse_closed_line(data)
elif loc_type == LocationType.CIRCLE:
return parse_circle(data)
elif loc_type == LocationType.GRID:
return parse_grid(data)
elif loc_type == LocationType.POLYGON:
return parse_polygon(data)
else:
return BinaryParseError("Invalid location type")
# ----------------
# Location parsers
# ----------------
HEAD_FIELDS = ('version', 'type')
from .binary import (_parse_first_lrp,
_parse_intermediate_lrp,
_parse_last_line_lrp,
_parse_last_closed_line_attrs,
_parse_offset,
_parse_relative_coordinates,
_parse_absolute_coordinates,
_parse_radius,
_parse_grid_dimensions)
# LINE_LOCATION
LineLocation = namedtuple('LineLocation', HEAD_FIELDS+('flrp', 'llrp', 'points', 'poffs', 'noffs'))
""" Line Location type
"""
def parse_line(rb):
""" Parse line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Line location
:rtype: LineLocation
"""
assert rb.location_type == LocationType.LINE_LOCATION
    # number of intermediate points (integer division keeps range() valid on Python 3)
    num_intermediates = (rb.num_bytes - MIN_BYTES_LINE_LOCATION) // LRP_SIZE
flrp = _parse_first_lrp(rb)
points = []
rel = flrp
for _ in range(num_intermediates):
ilrp = _parse_intermediate_lrp(rb, rel)
points.append(ilrp)
rel = ilrp
llrp, pofff, nofff = _parse_last_line_lrp(rb, rel)
poffs = _parse_offset(rb) if pofff else 0
noffs = _parse_offset(rb) if nofff else 0
return LineLocation(rb.version, rb.location_type, flrp, llrp, points, poffs, noffs)
# POINT_ALONG_LINE
PointAlongLineLocation = namedtuple('PointAlongLineLocation', HEAD_FIELDS+('flrp', 'llrp', 'poffs'))
""" Point along location type
"""
def parse_point_along_line(rb):
""" Parse point along line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Point along line location
:rtype: PointAlongLineLocation
"""
assert rb.location_type == LocationType.POINT_ALONG_LINE
flrp = _parse_first_lrp(rb)
llrp, pofff, _ = _parse_last_line_lrp(rb, flrp)
poffs = _parse_offset(rb) if pofff else 0
return PointAlongLineLocation(rb.version, rb.location_type, flrp, llrp, poffs)
# GEO_COORDINATES
GeoCoordinateLocation = namedtuple('GeoCoordinateLocation', HEAD_FIELDS+('coords',))
""" Coordinate location type
"""
def parse_geo_coordinates(rb):
""" Parse geo coordinates location
:param _RawBinaryData rb: Binary data describing the location
:returns: Geographic coordinates location
:rtype: GeoCoordinateLocation
"""
assert rb.location_type == LocationType.GEO_COORDINATES
coords = _parse_absolute_coordinates(rb)
return GeoCoordinateLocation(rb.version, rb.location_type, coords)
# POI_WITH_ACCESS_POINT
PoiWithAccessPointLocation = namedtuple('PoiWithAccessPointLocation', HEAD_FIELDS+(
'flrp', 'llrp', 'poffs', 'coords'))
""" Poi with access location type
"""
def parse_poi_with_access_point(rb):
""" Parse POI with access point
:param _RawBinaryData rb: Binary data describing the location
:returns: POI with access point location
:rtype: PoiWithAccessPointLocation
"""
assert rb.location_type == LocationType.POI_WITH_ACCESS_POINT
flrp = _parse_first_lrp(rb)
llrp, pofff, _ = _parse_last_line_lrp(rb, flrp)
poffs = _parse_offset(rb) if pofff else 0
coords = _parse_relative_coordinates(rb, flrp.coords)
return PoiWithAccessPointLocation(rb.version, rb.location_type, flrp, llrp,
poffs, coords)
# CIRCLE
CircleLocation = namedtuple('CircleLocation', HEAD_FIELDS+('coords', 'radius'))
""" Circle Location type
"""
def parse_circle(rb):
""" Parse circle location
:param _RawBinaryData rb: Binary data describing the location
:returns: Circle location
:rtype: CircleLocation
"""
assert rb.location_type == LocationType.CIRCLE
radius_size = rb.num_bytes - CIRCLE_BASE_SIZE
coords = _parse_absolute_coordinates(rb)
radius = _parse_radius(rb, radius_size)
return CircleLocation(rb.version, rb.location_type, coords, radius)
# RECTANGLE
BBox = namedtuple('BBox', ('minx', 'miny', 'maxx', 'maxy'))
RectangleLocation = namedtuple('RectangleLocation', HEAD_FIELDS+('bbox',))
""" Rectangle Location type
"""
def parse_rectangle(rb):
""" Parse rectangle location
:param _RawBinaryData rb: Binary data describing the location
:returns: Rectangle location
:rtype: RectangleLocation
"""
assert rb.location_type == LocationType.RECTANGLE
bl = _parse_absolute_coordinates(rb)
if rb.num_bytes == LARGE_RECTANGLE_SIZE:
tr = _parse_absolute_coordinates(rb)
else:
tr = _parse_relative_coordinates(rb, bl)
bbox = BBox(bl.lon, bl.lat, tr.lon, tr.lat)
return RectangleLocation(rb.version, rb.location_type, bbox)
# GRID
GridLocation = namedtuple('GridLocation', HEAD_FIELDS+('bbox', 'cols', 'rows'))
""" Grid Location type
"""
def parse_grid(rb):
""" Parse grid location
:param _RawBinaryData rb: Binary data describing the location
:returns: Grid location
:rtype: GridLocation
"""
assert rb.location_type == LocationType.GRID
bl = _parse_absolute_coordinates(rb)
if rb.num_bytes == LARGE_GRID_SIZE:
tr = _parse_absolute_coordinates(rb)
else:
tr = _parse_relative_coordinates(rb, bl)
bbox = BBox(bl.lon, bl.lat, tr.lon, tr.lat)
cols, rows = _parse_grid_dimensions(rb)
return GridLocation(rb.version, rb.location_type, bbox, cols, rows)
# CLOSED LINE
ClosedLineLocation = namedtuple('ClosedLineLocation', HEAD_FIELDS+('flrp', 'points', 'frc', 'fow', 'bear'))
def parse_closed_line(rb):
""" Parse closed line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Closed line location
:rtype: ClosedLineLocation
"""
assert rb.location_type == LocationType.CLOSED_LINE
    # number of intermediate points (integer division keeps range() valid on Python 3)
    num_intermediates = (rb.num_bytes - MIN_BYTES_CLOSED_LINE_LOCATION) // LRP_SIZE
flrp = _parse_first_lrp(rb)
points = []
rel = flrp
for _ in range(num_intermediates):
ilrp = _parse_intermediate_lrp(rb, rel)
points.append(ilrp)
rel = ilrp
frc, fow, bear = _parse_last_closed_line_attrs(rb)
return ClosedLineLocation(rb.version, rb.location_type, flrp, points, frc, fow, bear)
# POLYGON
PolygonLocation = namedtuple('PolygonLocation', HEAD_FIELDS+('points',))
def parse_polygon(rb):
""" Parse polygon location
:param _RawBinaryData rb: Binary data describing the location
:returns: Polygon location
:rtype: PolygonLocation
"""
assert rb.location_type == LocationType.POLYGON
    # number of points
    # MIN_BYTES_POLYGON includes the first point and 2 relative points
    num_intermediates = 2 + (rb.num_bytes - MIN_BYTES_POLYGON) // RELATIVE_COORD_SIZE
points = []
rel = _parse_absolute_coordinates(rb)
points.append(rel)
for _ in range(num_intermediates):
ilrp = _parse_relative_coordinates(rb, rel)
points.append(ilrp)
rel = ilrp
return PolygonLocation(rb.version, rb.location_type, points)
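# Hedged usage sketch (editor's addition, not part of the original module):
# decode a base64 OpenLR location reference passed on the command line.
# `encoded` is a placeholder; supply a real reference produced by an OpenLR encoder.
if __name__ == '__main__':  # pragma: no cover
    import sys
    if len(sys.argv) > 1:
        encoded = sys.argv[1]
        raw = init_binary_parsing(encoded, base64=True)
        print('version: {}'.format(raw.version))
        print('location type: {}'.format(raw.location_type))
        print(parse_binary(raw))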
| 30.974359 | 122 | 0.628884 |
e3cc0185664cb8f16eb0487687628879a801c77c | 21,028 | py | Python | xfel/merging/application/phil/phil.py | TiankunZhou/cctbx_project | 373f302f00c12d7239f8e37e3165e62bc1d852cc | ["BSD-3-Clause-LBNL"] | null | null | null | xfel/merging/application/phil/phil.py | TiankunZhou/cctbx_project | 373f302f00c12d7239f8e37e3165e62bc1d852cc | ["BSD-3-Clause-LBNL"] | 1 | 2020-05-26T17:46:17.000Z | 2020-05-26T17:55:19.000Z | xfel/merging/application/phil/phil.py | TiankunZhou/cctbx_project | 373f302f00c12d7239f8e37e3165e62bc1d852cc | ["BSD-3-Clause-LBNL"] | 1 | 2022-02-08T10:11:07.000Z | 2022-02-08T10:11:07.000Z |
from __future__ import absolute_import, division, print_function
from iotbx.phil import parse
help_message = '''
Redesign script for merging xfel data
'''
dispatch_phil = """
dispatch {
step_list = None
.type = strings
.help = List of steps to use. None means use the full set of steps to merge.
}
"""
input_phil = """
input {
keep_imagesets = False
.type = bool
.help = If True, keep imagesets attached to experiments
path = None
.type = str
.multiple = True
.help = paths are validated as a glob, directory or file.
.help = however, validation is delayed until data are assigned to parallel ranks.
.help = integrated experiments (.expt) and reflection tables (.refl) must both be
.help = present as matching files. Only one need be explicitly specified.
reflections_suffix = _integrated.refl
.type = str
.help = Find file names with this suffix for reflections
experiments_suffix = _integrated.expt
.type = str
.help = Find file names with this suffix for experiments
parallel_file_load {
method = *uniform node_memory
.type = choice
.help = uniform: distribute input experiments/reflections files uniformly over all available ranks
.help = node_memory: distribute input experiments/reflections files over the nodes such that the node memory limit is not exceeded.
.help = Within each node distribute the input files uniformly over all ranks of that node.
node_memory {
architecture = "Cori KNL"
.type = str
.help = node architecture name. Currently not used.
limit = 90.0
.type = float
.help = node memory limit, GB. On Cori KNL each node has 96 GB of memory, but we use 6 GB as a cushion, so the default value is 90 GB.
pickle_to_memory = 3.5
.type = float
.help = an empirical coefficient to convert pickle file size to anticipated run-time process memory required to load a file of that size
}
ranks_per_node = 68
.type = int
.help = number of MPI ranks per node
balance = *global per_node
.type = choice
.multiple = False
.help = Balance the input file load by distributing experiments uniformly over all available ranks (global) or over the ranks on each node (per_node)
.help = The idea behind the "per_node" method is that it doesn't require MPI communications across nodes. But if the input file load varies strongly
.help = between the nodes, "global" is a much better option.
balance_mpi_alltoall_slices = 1
.type = int
.expert_level = 2
.help = memory reduction factor for MPI alltoall.
.help = Use mpi_alltoall_slices > 1, when available RAM is insufficient for doing MPI alltoall on all data at once.
.help = The data will then be split into mpi_alltoall_slices parts and, correspondingly, alltoall will be performed in mpi_alltoall_slices iterations.
reset_experiment_id_column = False
.type = bool
.expert_level = 3
}
}
mp {
method = *mpi
.type = choice
    .help = Multiprocessing method (only mpi at present)
}
"""
tdata_phil = """
tdata{
output_path = None
.type = path
.help = If output_path is not None, the tdata worker writes out a list of unit cells to a file.
.help = Generally speaking the program should then stop. The tdata worker is not active by default, so it is necessary to have
.help = the following phil configuration: dispatch.step_list=input,tdata.
.help = The output_path assumes the *.tdata filename extension will be appended.
.help = More information about using this option is given in the source code, xfel/merging/application/tdata/README.md
}
"""
filter_phil = """
filter
.help = The filter section defines criteria to accept or reject whole experiments
.help = or to modify the entire experiment by a reindexing operator
.help = refer to the select section for filtering of individual reflections
{
algorithm = n_obs a_list reindex resolution unit_cell report
.type = choice
.multiple = True
n_obs {
min = 15
.type = int
.help = Minimum number of observations for subsequent processing
}
a_list
.help = a_list is a text file containing a list of acceptable experiments
.help = for example, those not misindexed, wrong type, or otherwise rejected as determined separately
.help = suggested use, string matching, can include timestamp matching, directory name, etc
{
file = None
.type = path
.multiple = True
operation = *select deselect
.type = choice
.multiple = True
.help = supposedly have same number of files and operations. Different lists can be provided for select and deselect
}
reindex {
data_reindex_op = h,k,l
.type = str
.help = Reindex, e.g. to change C-axis of an orthorhombic cell to align Bravais lattice from indexing with actual space group
reverse_lookup = None
.type = str
.help = filename, pickle format, generated by the cxi.brehm_diederichs program. Contains a
.help = (key,value) dictionary where key is the filename of the integrated data pickle file (supplied
.help = with the data phil parameter and value is the h,k,l reindexing operator that resolves the
.help = indexing ambiguity.
sampling_number_of_lattices = 1000
.type = int
.help = Number of lattices to be gathered from all ranks to run the brehm-diederichs procedure
}
resolution {
d_min = None
.type = float
.help = Reject the experiment unless some reflections extend beyond this resolution limit
model_or_image = model image
.type = choice
.help = Calculate resolution either using the scaling model unit cell or from the image itself
}
unit_cell
.help = Various algorithms to restrict unit cell and space group
{
algorithm = range *value cluster
.type = choice
value
.help = Discard lattices that are not close to the given target.
.help = If the target is left as Auto, use the scaling model
.help = (derived from either PDB file cryst1 record or MTZ header)
{
target_unit_cell = Auto
.type = unit_cell
relative_length_tolerance = 0.1
.type = float
.help = Fractional change in unit cell dimensions allowed (versus target cell).
absolute_angle_tolerance = 2.
.type = float
target_space_group = Auto
.type = space_group
}
cluster
.help = CLUSTER implies an implementation (standalone program or fork?) where all the
.help = unit cells are brought together prior to any postrefinement or merging,
.help = and analyzed in a global sense to identify the isoforms.
.help = the output of this program could potentially form the a_list for a subsequent
.help = run where the pre-selected events are postrefined and merged.
{
algorithm = rodgriguez_laio dbscan *covariance
.type = choice
covariance
.help = Read a pickle file containing the previously determined clusters,
.help = represented by estimated covariance models for unit cell parameters.
{
file = None
.type = path
component = 0
.type = int(value_min=0)
mahalanobis = 4.0
.type = float(value_min=0)
.help = Is essentially the standard deviation cutoff. Given that the unit cells
.help = are estimated to be distributed by a multivariate Gaussian, this is the
.help = maximum deviation (in sigmas) from the central value that is allowable for the
.help = unit cell to be considered part of the cluster.
}
isoform = None
.type=str
.help = unknown at present. if there is more than one cluster, such as in PSII,
.help = perhaps the program should write separate a_lists.
.help = Alternatively identify a particular isoform to carry forward for merging.
}
}
outlier {
min_corr = 0.1
.type = float
.help = Correlation cutoff for rejecting individual experiments by comparing observed intensities to the model.
.help = This filter is not applied if scaling.model==None. No experiments are rejected with min_corr=-1.
.help = This either keeps or rejects the whole experiment.
assmann_diederichs {}
}
}
"""
modify_phil = """
modify
.help = The MODIFY section defines operations on the integrated intensities
{
algorithm = *polarization
.type = choice
.multiple = True
}
"""
select_phil = """
select
.help = The select section accepts or rejects specified reflections
.help = refer to the filter section for filtering of whole experiments
{
algorithm = panel cspad_sensor significance_filter
.type = choice
.multiple = True
cspad_sensor {
number = None
.type = int(value_min=0, value_max=31)
.multiple = True
.help = Index in the range(32) specifying sensor on the CSPAD to deselect from merging, for the purpose
.help = of testing whether an individual sensor is poorly calibrated.
operation = *deselect select
.type = choice
.multiple = True
}
significance_filter
.help = If listed as an algorithm, apply a sigma cutoff (on unmerged data) to limit
.help = the resolution from each diffraction pattern.
.help = Implement an alternative filter for fuller-kapton geometry
{
n_bins = 12
.type = int (value_min=2)
.help = Initial target number of resolution bins for sigma cutoff calculation
min_ct = 10
.type = int
.help = Decrease number of resolution bins to require mean bin population >= min_ct
max_ct = 50
.type = int
.help = Increase number of resolution bins to require mean bin population <= max_ct
sigma = 0.5
.type = float
.help = Remove highest resolution bins such that all accepted bins have <I/sigma> >= sigma
}
}
"""
scaling_phil = """
scaling {
model = None
.type = str
.help = PDB filename containing atomic coordinates & isomorphous cryst1 record
.help = or MTZ filename from a previous cycle. If MTZ, specify mtz.mtz_column_F.
unit_cell = None
.type = unit_cell
.help = Unit cell to be used during scaling and merging. Used if model is not provided
.help = (e.g. mark1).
space_group = None
.type = space_group
.help = Space group to be used during scaling and merging. Used if model is not provided
.help = (e.g. mark1).
model_reindex_op = h,k,l
.type = str
.help = Kludge for cases with an indexing ambiguity, need to be able to adjust scaling model
resolution_scalar = 0.969
.type = float
.help = Accommodates a few more miller indices at the high resolution limit to account for
.help = unit cell variation in the sample. merging.d_min is multiplied by resolution_scalar
.help = when computing which reflections are within the resolution limit.
mtz {
mtz_column_F = fobs
.type = str
.help = scaling reference column name containing reference structure factors. Can be
.help = intensities or amplitudes
minimum_common_hkls = -1
.type = int
.help = minimum required number of common hkls between mtz reference and data
.help = used to validate mtz-based model. No validation with -1.
}
pdb {
include_bulk_solvent = True
.type = bool
.help = Whether to simulate bulk solvent
k_sol = 0.35
.type = float
.help = If model is taken from coordinates, use k_sol for the bulk solvent scale factor
.help = default is approximate mean value in PDB (according to Pavel)
b_sol = 46.00
.type = float
.help = If model is taken from coordinates, use b_sol for bulk solvent B-factor
.help = default is approximate mean value in PDB (according to Pavel)
}
algorithm = *mark0 mark1
.type = choice
.help = "mark0: original per-image scaling by reference to isomorphous PDB model"
.help = "mark1: no scaling, just averaging (i.e. Monte Carlo
algorithm). Individual image scale factors are set to 1."
}
"""
postrefinement_phil = """
postrefinement {
enable = False
.type = bool
.help = enable the preliminary postrefinement algorithm (monochromatic)
.expert_level = 3
algorithm = *rs rs2 rs_hybrid eta_deff
.type = choice
.help = rs only, eta_deff protocol 7
.expert_level = 3
rs {
fix = thetax thetay *RS G BFACTOR
.type = choice(multi=True)
.help = Which parameters to fix during postrefinement
}
rs2
.help = Reimplement postrefinement with the following (Oct 2016):
.help = Refinement engine now work on analytical derivatives instead of finite differences
.help = Better convergence using "traditional convergence test"
{}
rs_hybrid
.help = More aggressive postrefinement with the following (Oct 2016):
.help = One round of 'rs2' using LBFGS minimizer as above to refine G,B,rotx,roty
.help = Gentle weighting rather than unit weighting for the postrefinement target
.help = Second round of LevMar adding an Rs refinement parameter
.help = Option of weighting the merged terms by partiality
{
partiality_threshold = 0.2
.type = float ( value_min = 0.01 )
.help = throw out observations below this value. Hard coded as 0.2 for rs2, allow value for hybrid
.help = must enforce minimum positive value because partiality appears in the denominator
}
target_weighting = *unit variance gentle extreme
.type = choice
.help = weights for the residuals in the postrefinement target (for rs2 or rs_hybrid)
.help = Unit: each residual weighted by 1.0
.help = Variance: weighted by 1/sigma**2. Doesn't seem right, constructive feedback invited
.help = Gentle: weighted by |I|/sigma**2. Seems like best option
.help = Extreme: weighted by (I/sigma)**2. Also seems right, but severely downweights weak refl
merge_weighting = *variance
.type = choice
.help = assumed that individual reflections are weighted by the counting variance
merge_partiality_exponent = 0
.type = float
.help = additionally weight each measurement by partiality**exp when merging
.help = 0 is no weighting, 1 is partiality weighting, 2 is weighting by partiality-squared
lineshape = *lorentzian gaussian
.type = choice
.help = Soft sphere RLP modeled with Lorentzian radial profile as in prime
.help = or Gaussian radial profile. (for rs2 or rs_hybrid)
show_trumpet_plot = False
.type = bool
.help = each-image trumpet plot showing before-after plot. Spot color warmth indicates I/sigma
.help = Spot radius for lower plot reflects partiality. Only implemented for rs_hybrid
}
"""
merging_phil = """
merging {
minimum_multiplicity = 2
.type = int(value_min=2)
.help = If defined, merged structure factors not produced for the Miller indices below this threshold.
error {
model = ha14 ev11 errors_from_sample_residuals
.type = choice
.multiple = False
.help = ha14, formerly sdfac_auto, apply sdfac to each-image data assuming negative
.help = intensities are normally distributed noise
.help = errors_from_sample_residuals, use the distribution of intensities in a given miller index
.help = to compute the error for each merged reflection
ev11
.help = formerly sdfac_refine, correct merged sigmas refining sdfac, sdb and sdadd as Evans 2011.
{
random_seed = None
.help = Random seed. May be int or None. Only used for the simplex minimizer
.type = int
.expert_level = 1
minimizer = *lbfgs LevMar
.type = choice
.help = Which minimizer to use while refining the Sdfac terms
refine_propagated_errors = False
.type = bool
.help = If True then during sdfac refinement, also \
refine the estimated error used for error propagation.
show_finite_differences = False
.type = bool
.help = If True and minimizer is lbfgs, show the finite vs. analytical differences
plot_refinement_steps = False
.type = bool
.help = If True, plot refinement steps during refinement.
}
}
plot_single_index_histograms = False
.type = bool
set_average_unit_cell = True
.type = bool
.help = Output file adopts the unit cell of the data rather than of the reference model.
.help = How is it determined? Not a simple average, use a cluster-driven method for
.help = deriving the best unit cell value.
d_min = None
.type = float
.help = limiting resolution for scaling and merging
d_max = None
.type = float
.help = limiting resolution for scaling and merging. Implementation currently affects only the CCiso cal
merge_anomalous = False
.type = bool
.help = Merge anomalous contributors
}
"""
output_phil = """
output {
prefix = iobs
.type = str
.help = Prefix for all output file names
title = None
.type = str
.help = Title for run - will appear in MTZ file header
output_dir = .
.type = str
.help = output file directory
tmp_dir = None
.type = str
.help = temporary file directory
do_timing = False
.type = bool
.help = When True, calculate and log elapsed time for execution steps
log_level = 1
.type = int
.help = determines how much information to log. Level 0 means: log all, while a non-zero level reduces the logging amount.
save_experiments_and_reflections = False
.type = bool
.help = If True, dump the final set of experiments and reflections from the last worker
}
"""
statistics_phil = """
statistics {
n_bins = 10
.type = int(value_min=1)
.help = Number of resolution bins in statistics table
cc1_2 {
hash_filenames = False
.type = bool
.help = For CC1/2, instead of using odd/even filenames to split images into two sets,
.help = hash the filename using md5 and split the images using odd/even hashes.
}
cciso {
mtz_file = None
.type = str
.help = for Riso/ CCiso, the reference structure factors, must have data type F
.help = a fake file is written out to this file name if model is None
mtz_column_F = fobs
.type = str
.help = for Riso/ CCiso, the column name containing reference structure factors
}
predictions_to_edge {
apply = False
.type = bool
.help = If True and key 'indices_to_edge' not found in integration pickles, predictions
.help = will be made to the edge of the detector based on current unit cell, orientation,
.help = and mosaicity.
image = None
.type = path
.help = Path to an example image from which to extract active areas and pixel size.
detector_phil = None
.type = path
.help = Path to the detector version phil file used to generate the selected data.
}
report_ML = True
.type = bool
.help = Report statistics on per-frame attributes modeled by max-likelihood fit (expert only).
}
"""
group_phil = """
parallel {
a2a = 1
.type = int
.expert_level = 2
.help = memory reduction factor for MPI alltoall.
.help = Use a2a > 1, when available RAM is insufficient for doing MPI alltoall on all data at once.
.help = The data will be split into a2a parts and, correspondingly, alltoall will be performed in a2a iterations.
}
"""
master_phil = dispatch_phil + input_phil + tdata_phil + filter_phil + modify_phil + \
select_phil + scaling_phil + postrefinement_phil + merging_phil + \
output_phil + statistics_phil + group_phil
phil_scope = parse(master_phil)
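# Hedged example (editor's addition): how a caller might override one of the
# parameters defined above. The d_min value of 2.0 is arbitrary; the
# fetch()/extract() pattern is standard iotbx.phil usage.
def _example_phil_override():
    user_phil = parse("merging.d_min = 2.0")
    working_phil = phil_scope.fetch(sources=[user_phil])
    params = working_phil.extract()
    return params.merging.d_min  # -> 2.0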
class Script(object):
'''A class for running the script.'''
def __init__(self):
# The script usage
import libtbx.load_env
self.usage = "usage: %s [options] [param.phil] " % libtbx.env.dispatcher_name
self.parser = None
def initialize(self):
'''Initialise the script.'''
from dials.util.options import OptionParser
# Create the parser
self.parser = OptionParser(
usage=self.usage,
phil=phil_scope,
epilog=help_message)
self.parser.add_option(
'--plots',
action='store_true',
default=False,
dest='show_plots',
help='Show some plots.')
# Parse the command line. quick_parse is required for MPI compatibility
params, options = self.parser.parse_args(show_diff_phil=True,quick_parse=True)
self.params = params
self.options = options
def validate(self):
from xfel.merging.application.validation.application import application
application(self.params)
def modify(self, experiments, reflections):
return experiments, reflections #nop
def run(self):
print('''Initializing and validating phil...''')
self.initialize()
self.validate()
# do other stuff
return
if __name__ == '__main__':
script = Script()
result = script.run()
print ("OK")
| 38.442413 | 156 | 0.687464 |
900e1e2e1d6e2765ccc894ebea17d630a61be6d0 | 8,120 | py | Python | reinforce_baselines.py | DaehanKim/attention-learn-to-route | 9ce4fa9a3a136768f92adf3d1e7d62620442f1b7 | ["MIT"] | null | null | null | reinforce_baselines.py | DaehanKim/attention-learn-to-route | 9ce4fa9a3a136768f92adf3d1e7d62620442f1b7 | ["MIT"] | null | null | null | reinforce_baselines.py | DaehanKim/attention-learn-to-route | 9ce4fa9a3a136768f92adf3d1e7d62620442f1b7 | ["MIT"] | null | null | null |
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from scipy.stats import ttest_rel
import copy
from train import rollout, get_inner_model
class Baseline(object):
def wrap_dataset(self, dataset):
return dataset
def unwrap_batch(self, batch):
return batch, None
def eval(self, x, c):
raise NotImplementedError("Override this method")
def get_learnable_parameters(self):
return []
def epoch_callback(self, model, epoch):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
class WarmupBaseline(Baseline):
def __init__(self, baseline, n_epochs=1, warmup_exp_beta=0.8, ):
super(Baseline, self).__init__()
self.baseline = baseline
assert n_epochs > 0, "n_epochs to warmup must be positive"
self.warmup_baseline = ExponentialBaseline(warmup_exp_beta)
self.alpha = 0
self.n_epochs = n_epochs
def wrap_dataset(self, dataset):
if self.alpha > 0:
return self.baseline.wrap_dataset(dataset)
return self.warmup_baseline.wrap_dataset(dataset)
def unwrap_batch(self, batch):
if self.alpha > 0:
return self.baseline.unwrap_batch(batch)
return self.warmup_baseline.unwrap_batch(batch)
def eval(self, x, c):
if self.alpha == 1:
return self.baseline.eval(x, c)
if self.alpha == 0:
return self.warmup_baseline.eval(x, c)
v, l = self.baseline.eval(x, c)
vw, lw = self.warmup_baseline.eval(x, c)
# Return convex combination of baseline and of loss
        return self.alpha * v + (1 - self.alpha) * vw, self.alpha * l + (1 - self.alpha) * lw
def epoch_callback(self, model, epoch):
# Need to call epoch callback of inner model (also after first epoch if we have not used it)
self.baseline.epoch_callback(model, epoch)
self.alpha = (epoch + 1) / float(self.n_epochs)
if epoch < self.n_epochs:
print("Set warmup alpha = {}".format(self.alpha))
def state_dict(self):
# Checkpointing within warmup stage makes no sense, only save inner baseline
return self.baseline.state_dict()
def load_state_dict(self, state_dict):
# Checkpointing within warmup stage makes no sense, only load inner baseline
self.baseline.load_state_dict(state_dict)
class NoBaseline(Baseline):
def eval(self, x, c):
return 0, 0 # No baseline, no loss
class ExponentialBaseline(Baseline):
def __init__(self, beta):
super(Baseline, self).__init__()
self.beta = beta
self.v = None
def eval(self, x, c):
if self.v is None:
v = c.mean()
else:
v = self.beta * self.v + (1. - self.beta) * c.mean()
self.v = v.detach() # Detach since we never want to backprop
return self.v, 0 # No loss
def state_dict(self):
return {
'v': self.v
}
def load_state_dict(self, state_dict):
self.v = state_dict['v']
class CriticBaseline(Baseline):
def __init__(self, critic):
super(Baseline, self).__init__()
self.critic = critic
def eval(self, x, c):
v = self.critic(x)
# Detach v since actor should not backprop through baseline, only for loss
return v.detach(), F.mse_loss(v, c.detach())
def get_learnable_parameters(self):
return list(self.critic.parameters())
def epoch_callback(self, model, epoch):
pass
def state_dict(self):
return {
'critic': self.critic.state_dict()
}
def load_state_dict(self, state_dict):
critic_state_dict = state_dict.get('critic', {})
if not isinstance(critic_state_dict, dict): # backwards compatibility
critic_state_dict = critic_state_dict.state_dict()
self.critic.load_state_dict({**self.critic.state_dict(), **critic_state_dict})
class RolloutBaseline(Baseline):
def __init__(self, model, problem, opts, epoch=0):
super(Baseline, self).__init__()
self.problem = problem
self.opts = opts
self._update_model(model, epoch)
def _update_model(self, model, epoch, dataset=None):
self.model = copy.deepcopy(model)
# Always generate baseline dataset when updating model to prevent overfitting to the baseline dataset
if dataset is not None:
if len(dataset) != self.opts.val_size:
print("Warning: not using saved baseline dataset since val_size does not match")
dataset = None
elif (dataset[0] if self.problem.NAME == 'tsp' else dataset[0]['loc']).size(0) != self.opts.graph_size:
print("Warning: not using saved baseline dataset since graph_size does not match")
dataset = None
if dataset is None:
self.dataset = self.problem.make_dataset(
size=self.opts.question_num, num_samples=self.opts.val_size)
else:
self.dataset = dataset
print("Evaluating baseline model on evaluation dataset")
self.bl_vals = rollout(self.model, self.dataset, self.opts).cpu().numpy()
self.mean = self.bl_vals.mean()
self.epoch = epoch
def wrap_dataset(self, dataset):
print("Evaluating baseline on dataset...")
# Need to convert baseline to 2D to prevent converting to double, see
# https://discuss.pytorch.org/t/dataloader-gives-double-instead-of-float/717/3
return BaselineDataset(dataset, rollout(self.model, dataset, self.opts).view(-1, 1))
def unwrap_batch(self, batch):
return batch['data'], batch['baseline'].view(-1) # Flatten result to undo wrapping as 2D
def eval(self, x, c):
# Use volatile mode for efficient inference (single batch so we do not use rollout function)
with torch.no_grad():
_, _, v, _ = self.model(x)
# There is no loss
return v, 0
def epoch_callback(self, model, epoch):
"""
Challenges the current baseline with the model and replaces the baseline model if it is improved.
:param model: The model to challenge the baseline by
:param epoch: The current epoch
"""
print("Evaluating candidate model on evaluation dataset")
candidate_vals = rollout(model, self.dataset, self.opts).cpu().numpy()
candidate_mean = candidate_vals.mean()
print("Epoch {} candidate mean {}, baseline epoch {} mean {}, difference {}".format(
epoch, candidate_mean, self.epoch, self.mean, candidate_mean - self.mean))
if candidate_mean - self.mean < 0:
# Calc p value
t, p = ttest_rel(candidate_vals, self.bl_vals)
p_val = p / 2 # one-sided
assert t < 0, "T-statistic should be negative"
print("p-value: {}".format(p_val))
if p_val < self.opts.bl_alpha:
print('Update baseline')
self._update_model(model, epoch)
def state_dict(self):
return {
'model': self.model,
'dataset': self.dataset,
'epoch': self.epoch
}
def load_state_dict(self, state_dict):
# We make it such that it works whether model was saved as data parallel or not
load_model = copy.deepcopy(self.model)
get_inner_model(load_model).load_state_dict(get_inner_model(state_dict['model']).state_dict())
self._update_model(load_model, state_dict['epoch'], state_dict['dataset'])
class BaselineDataset(Dataset):
def __init__(self, dataset=None, baseline=None):
super(BaselineDataset, self).__init__()
self.dataset = dataset
self.baseline = baseline
assert (len(self.dataset) == len(self.baseline))
def __getitem__(self, item):
return {
'data': self.dataset[item],
'baseline': self.baseline[item]
}
def __len__(self):
return len(self.dataset)
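# Hedged usage sketch (editor's addition): the exponential baseline keeps a
# running mean of the costs, and the warmup wrapper blends an inner baseline
# with it through alpha. The cost tensor below is arbitrary demo data.
def _baseline_demo():
    costs = torch.tensor([1.0, 2.0, 3.0])
    baseline = WarmupBaseline(ExponentialBaseline(beta=0.8), n_epochs=2)
    value, loss = baseline.eval(None, costs)      # alpha == 0: warmup baseline only
    baseline.epoch_callback(model=None, epoch=0)  # sets alpha to 0.5
    return value, loss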
| 32.874494 | 115 | 0.626355 |
3a53ce562b53befac9d849b4672272b230c1ce52 | 1,702 | py | Python | check_weight.py | jeongjuns/Control_yolact | dad800dcc1aa0b02445e302256b4508b7688880c | ["MIT"] | null | null | null | check_weight.py | jeongjuns/Control_yolact | dad800dcc1aa0b02445e302256b4508b7688880c | ["MIT"] | null | null | null | check_weight.py | jeongjuns/Control_yolact | dad800dcc1aa0b02445e302256b4508b7688880c | ["MIT"] | null | null | null |
import torch, torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import Bottleneck
import numpy as np
from itertools import product
from math import sqrt
from typing import List
from collections import defaultdict
from data.config import cfg, mask_type
from layers import Detect
from layers.interpolate import InterpolateModule
from backbone import construct_backbone
import torch.backends.cudnn as cudnn
from utils import timer
from utils.functions import MovingAverage, make_net
import argparse
import datetime
import os
from torch.autograd import Variable
from torchvision import datasets, transforms
parser = argparse.ArgumentParser(description='Prune Yolact')
parser.add_argument('--path', default='weights/',
help='Directory for load weight.')
args = parser.parse_args()
torch.cuda.current_device()
use_jit = torch.cuda.device_count() <= 1
if not use_jit:
print('Multiple GPUs detected! Turning off JIT.')
ScriptModuleWrapper = torch.jit.ScriptModule if use_jit else nn.Module
script_method_wrapper = torch.jit.script_method if use_jit else lambda fn, _rcn=None: fn
path = args.path
print('path :', path)
print(torch.cuda.is_available())
class Prune(nn.Module):
def __init__(self):
super().__init__()
def load_weights(self, path):
'''Loads weights from a compressed save file.'''
total=0
state_dict = torch.load(path)
name = []
for key in list(state_dict.keys()):
print(key,'\t\t',state_dict[key].shape)
pruning = Prune()
pruning.load_weights(path)
print('Pruning Process finished')
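# Hedged extension (editor's addition): the unused `total` variable above
# suggests the script was also meant to report an overall parameter count;
# a minimal helper for that could look like this.
def count_parameters(weight_path):
    """Return the total number of tensor elements stored in a checkpoint."""
    state_dict = torch.load(weight_path)
    return sum(int(v.numel()) for v in state_dict.values() if torch.is_tensor(v))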
| 27.015873 | 89 | 0.720917 |
837ec818d9a80cb92448390cda728b2d45999869 | 1,525 | py | Python | bbb-exporter/settings.py | icts-ca/bigbluebutton-exporter | f3d73639c002b27a09973681c1c9d560086b7bd8 | ["MIT"] | 1 | 2020-05-26T19:19:17.000Z | 2020-05-26T19:19:17.000Z | bbb-exporter/settings.py | man10to/bigbluebutton-exporter | 56a9fdf5390b63a9b82e5b3098f897fc3be29fd2 | ["MIT"] | null | null | null | bbb-exporter/settings.py | man10to/bigbluebutton-exporter | 56a9fdf5390b63a9b82e5b3098f897fc3be29fd2 | ["MIT"] | 1 | 2021-05-07T06:48:52.000Z | 2021-05-07T06:48:52.000Z |
import os
import api_lib
from helpers import validate_api_base_url, validate_buckets
MAJOR = 0
MINOR = 4
BUGFIX = 0
INFO = ""
VERSION = "{}.{}.{}".format(MAJOR, MINOR, BUGFIX)
if INFO:
VERSION += "-" + INFO
debug_env = os.getenv("DEBUG", "false")
DEBUG = debug_env.lower() == "true"
API_BASE_URL = validate_api_base_url(os.environ["API_BASE_URL"])
# SSH into server and run: `$ bbb-conf --secret` to get secret
API_SECRET = os.environ["API_SECRET"]
API_CLIENT = api_lib.Client(API_BASE_URL, API_SECRET)
ROOM_PARTICIPANTS_CUSTOM_BUCKETS = validate_buckets(os.getenv("ROOM_PARTICIPANTS_CUSTOM_BUCKETS", default=""))
ROOM_LISTENERS_CUSTOM_BUCKETS = validate_buckets(os.getenv("ROOM_LISTENERS_CUSTOM_BUCKETS", default=""))
ROOM_VOICE_PARTICIPANTS_CUSTOM_BUCKETS = validate_buckets(os.getenv("ROOM_VOICE_PARTICIPANTS_CUSTOM_BUCKETS", default=""))
ROOM_VIDEO_PARTICIPANTS_CUSTOM_BUCKETS = validate_buckets(os.getenv("ROOM_VIDEO_PARTICIPANTS_CUSTOM_BUCKETS", default=""))
PORT = int(os.getenv("PORT", 9688))
BIND_IP = os.getenv("BIND_IP", "0.0.0.0")
RECORDINGS_METRICS_ENABLE = os.getenv("RECORDINGS_METRICS", "true").lower() != "false"
RECORDINGS_METRICS_READ_FROM_DISK = os.getenv("RECORDINGS_METRICS_READ_FROM_DISK", "false").lower() != "false"
recordings_metrics_base_dir = "/var/bigbluebutton"
# Global (gasp.) variable flag that is used to mark if communicating with BigBlueButton's API is possible.
# Used to set the `bbb_api_up` metric.
_api_up = False
| 39.102564 | 129 | 0.772459 |
8dc86e14e6ff2c69f607a84e5037ce8a6727cf1b | 212 | py | Python | rafiki/model/__init__.py | Yirui-Wang/rafiki | 22a58d592e6aa49f41ad895c94f099a30dc2023b | ["Apache-2.0"] | 1 | 2018-12-19T13:14:34.000Z | 2018-12-19T13:14:34.000Z | rafiki/model/__init__.py | Yirui-Wang/rafiki | 22a58d592e6aa49f41ad895c94f099a30dc2023b | ["Apache-2.0"] | null | null | null | rafiki/model/__init__.py | Yirui-Wang/rafiki | 22a58d592e6aa49f41ad895c94f099a30dc2023b | ["Apache-2.0"] | null | null | null |
from .model import BaseModel, test_model_class, load_model_class, \
parse_model_install_command, InvalidModelClassException, InvalidModelParamsException, \
ModelUtils
from .log import ModelLogUtilsLogger
| 42.4 | 91 | 0.839623 |
5569100dea9e3e64d5696807912b32ce853dec37 | 12,029 | py | Python | src/gdata/sample_util.py | morrison12/gdata-python-client | d11a5f0b333f6ec5e519d96b131c5100c9518feb | ["Apache-2.0"] | null | null | null | src/gdata/sample_util.py | morrison12/gdata-python-client | d11a5f0b333f6ec5e519d96b131c5100c9518feb | ["Apache-2.0"] | null | null | null | src/gdata/sample_util.py | morrison12/gdata-python-client | d11a5f0b333f6ec5e519d96b131c5100c9518feb | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib.request, urllib.parse, urllib.error
import gdata.gauth
__author__ = 'j.s@google.com (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
OAUTH2 = 4
HMAC = 1
RSA = 2
class SettingsUtil(object):
"""Gather's user preferences from flags or command prompts.
An instance of this object stores the choices made by the user. At some
point it might be useful to save the user's preferences so that they do
not need to always set flags or answer preference prompts.
"""
def __init__(self, prefs=None):
self.prefs = prefs or {}
def get_param(self, name, prompt='', secret=False, ask=True, reuse=False):
# First, check in this objects stored preferences.
if name in self.prefs:
return self.prefs[name]
# Second, check for a command line parameter.
value = None
for i in range(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
value = sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
value = sys.argv[i + 1]
# Third, if it was not on the command line, ask the user to input the
# value.
if value is None and ask:
prompt = '%s: ' % prompt
if secret:
value = getpass.getpass(prompt)
else:
value = input(prompt)
# If we want to save the preference for reuse in future requests, add it
# to this object's prefs.
if value is not None and reuse:
self.prefs[name] = value
return value
def authorize_client(self, client, auth_type=None, service=None,
source=None, scopes=None, oauth_type=None,
consumer_key=None, consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if 'client_auth_token' in self.prefs:
return
if auth_type is None:
auth_type = int(self.get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. [deprecated] to use your email address and password (ClientLogin)\n'
'2. [deprecated] to use a web browser to visit an auth web page (AuthSub)\n'
          '3. [deprecated] if you have registered to use OAuth\n'
          '4. if you have registered to use OAuth 2.0\n', reuse=True))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH or auth_type == OAUTH2:
if scopes is None:
scopes = self.get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n', reuse=True).split(',')
elif isinstance(scopes, str):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = self.get_param('email', 'Please enter your username',
reuse=False)
password = self.get_param('password', 'Password', True, reuse=False)
if service is None:
service = self.get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)',
reuse=True)
if source is None:
source = self.get_param('source', ask=False, reuse=True)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = self.get_param('auth_sub_token', ask=False, reuse=True)
session_token = self.get_param('session_token', ask=False, reuse=True)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = self.get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter', reuse=True)
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print('Unable to read private key from file')
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print('with a private key, get ready for this URL', auth_url)
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token,
scopes)
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token,
scopes)
client.upgrade_token()
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print('Visit the following URL in your browser to authorize this app:')
print(str(auth_url))
print('After agreeing to authorize the app, copy the token value from')
print(' the URL. Example: "www.google.com/?token=ab12" token value is')
print(' ab12')
token_value = input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(self.get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n',
reuse=True))
consumer_key = self.get_param(
          'consumer_key', 'Please enter your OAuth consumer key '
'which identifies your app', reuse=True)
if oauth_type == HMAC:
consumer_secret = self.get_param(
            'consumer_secret', 'Please enter your OAuth consumer secret '
'which you share with the OAuth provider', True, reuse=False)
# Swap out this code once the client supports requesting an oauth
# token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = self.get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your'
' domain.',
reuse=True)
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print('Unable to read private key from file')
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print('Invalid OAuth signature type')
return None
# Authorize the request token in the browser.
print('Visit the following URL in your browser to authorize this app:')
print(str(request_token.generate_authorization_url()))
print('After agreeing to authorize the app, copy URL from the browser\'s')
print(' address bar.')
url = input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
elif auth_type == OAUTH2:
oauth_type = HMAC
client_id = self.get_param(
'client_id', 'Please enter your OAuth 2.0 Client ID '
'which identifies your app', reuse=True)
if oauth_type == HMAC:
client_secret = self.get_param(
'client_secret', 'Please enter your OAuth 2.0 Client secret '
'which you share with the OAuth provider',
True, reuse=False)
# Create a request token.
request_token = gdata.gauth.OAuth2Token(
client_id = client_id, client_secret = client_secret,
scope = " ".join(scopes), # http://stackoverflow.com/a/8451199/198219
user_agent = 'GdataPythonClientExample')
else:
print('Invalid OAuth signature type')
return None
# Authorize the request token in the browser.
print('\nVisit the following URL in your browser '\
'to authorize this app:\n\n{0}\n'.format(
str(request_token.generate_authorize_url())))
code = input('What is the verification code? ').strip()
request_token.get_access_token(code)
client.auth_token = request_token
else:
print('Invalid authorization type.')
return None
if client.auth_token:
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
def get_param(name, prompt='', secret=False, ask=True):
settings = SettingsUtil()
return settings.get_param(name=name, prompt=prompt, secret=secret, ask=ask)
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
settings = SettingsUtil()
return settings.authorize_client(client=client, auth_type=auth_type,
service=service, source=source,
scopes=scopes, oauth_type=oauth_type,
consumer_key=consumer_key,
consumer_secret=consumer_secret)
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print('')
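# Hedged usage sketch (editor's addition): parameters can also be supplied as
# command line flags instead of interactive prompts, e.g.
#   python your_sample.py --auth_type=4 --scopes=https://www.google.com/m8/feeds/
# (the script name is a placeholder) and then read without prompting:
def _example_read_flags():
  settings = SettingsUtil()
  return settings.get_param('auth_type', ask=False)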
| 39.831126 | 86 | 0.632388 |
01a9c875c36dbb33509a8197f73fbad745f7987f | 2,977 | py | Python | gaetk2/views/backup.py | mdornseif/appengine-toolkit2 | 47ee6bf99b8e461ee64eae75bf24fb462d99b0ab | ["MIT"] | 1 | 2018-08-16T16:15:30.000Z | 2018-08-16T16:15:30.000Z | gaetk2/views/backup.py | mdornseif/appengine-toolkit2 | 47ee6bf99b8e461ee64eae75bf24fb462d99b0ab | ["MIT"] | 3 | 2018-08-14T09:52:11.000Z | 2021-12-13T19:54:07.000Z | gaetk2/views/backup.py | mdornseif/appengine-toolkit2 | 47ee6bf99b8e461ee64eae75bf24fb462d99b0ab | ["MIT"] | 1 | 2018-09-28T05:55:27.000Z | 2018-09-28T05:55:27.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
backup.py - Trigger scheduled Backups via Cron.
See :ref:`backupreplication` for further Information.
See https://cloud.google.com/appengine/articles/scheduled_backups
also https://cloud.google.com/datastore/docs/schedule-export
Created by Christian Klein on 2017-02-17.
Copyright (c) 2017, 2018 HUDORA. MIT Licensed.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import logging
from google.appengine.api import taskqueue
from google.appengine.api.app_identity import get_application_id
from google.appengine.api.app_identity import get_default_gcs_bucket_name
from google.appengine.ext.db.metadata import Kind
from gaetk2.config import gaetkconfig
from gaetk2.handlers import DefaultHandler
logger = logging.getLogger(__name__)
# TODO: this will stop working: https://cloud.google.com/appengine/docs/deprecations/datastore-admin-backups
# needs to be replaced with https://cloud.google.com/datastore/docs/schedule-export
# Something like
# curl \
# -H "Authorization: Bearer $(gcloud auth print-access-token)" \
# -H "Content-Type: application/json" \
# https://datastore.googleapis.com/v1/projects/${PROJECT_ID}:export \
# -d '{
# "outputUrlPrefix": "gs://'${BUCKET}'",
# "entityFilter": {
# "kinds": ["KIND1", "KIND2", …],
# "namespaceIds": ["NAMESPACE1", "NAMESPACE2", …],
# },
# }'
#
# https://cloud.google.com/datastore/docs/export-import-entities
#
# Scopes:
# https://www.googleapis.com/auth/datastore or
# https://www.googleapis.com/auth/cloud-platform
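# Hedged sketch (editor's addition, not wired up anywhere): roughly how the
# curl call above translates to Python with the google-auth library. The
# caller supplies the bucket and kind names; availability of google-auth in
# the target runtime is an assumption.
def _export_via_rest_api_sketch(bucket, kinds):
    """Trigger a managed Datastore export through the REST API (sketch)."""
    import google.auth
    from google.auth.transport.requests import AuthorizedSession

    credentials, project_id = google.auth.default(
        scopes=['https://www.googleapis.com/auth/datastore'])
    session = AuthorizedSession(credentials)
    response = session.post(
        'https://datastore.googleapis.com/v1/projects/{}:export'.format(project_id),
        json={'outputUrlPrefix': 'gs://' + bucket,
              'entityFilter': {'kinds': list(kinds)}})
    response.raise_for_status()
    return response.json()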
class BackupHandler(DefaultHandler):
"""Handler to start scheduled backups."""
def get(self):
"""To be called by cron and only by cron."""
# if 'X-AppEngine-Cron' not in self.request.headers:
# raise HTTP403_Forbidden('Scheduled backups must be started via cron')
if not gaetkconfig.BACKUP_BUCKET:
bucket = get_default_gcs_bucket_name()
else:
bucket = gaetkconfig.BACKUP_BUCKET
today = datetime.date.today()
kinds = [kind for kind in _get_all_datastore_kinds()]
# if kind not in config.BACKUP_BLACKLIST]
bucketname = '/'.join(
[bucket, get_application_id(), today.strftime('%Y-%m-%d')]
)
bucketname = bucketname.lstrip('/')
params = {
'name': 'ds',
'gs_bucket_name': bucketname,
'filesystem': 'gs',
'queue': gaetkconfig.BACKUP_QUEUE,
'kind': kinds,
}
logger.info('backup to %r %r', bucketname, params)
taskqueue.add(
url='/_ah/datastore_admin/backup.create',
method='POST',
target='ah-builtin-python-bundle',
params=params,
)
self.return_text('OK')
def _get_all_datastore_kinds():
for kind in Kind.all():
if not kind.kind_name.startswith('_'):
yield kind.kind_name
| 31.010417 | 107 | 0.668122 |
145cf8059ae363602529f873bb1f041108962451 | 2,414 | py | Python | sync_clusters.py | mvxxx/cluster_utils | 84fc4dcd78768eb5f8058d05f5e82192c0ce77a5 | ["MIT"] | null | null | null | sync_clusters.py | mvxxx/cluster_utils | 84fc4dcd78768eb5f8058d05f5e82192c0ce77a5 | ["MIT"] | null | null | null | sync_clusters.py | mvxxx/cluster_utils | 84fc4dcd78768eb5f8058d05f5e82192c0ce77a5 | ["MIT"] | null | null | null |
import argparse
import json
from uuid import uuid4
import shutil
import os
from utils.communication import exec_on_rem_workspace
_USERNAME_REMOTE = 'username@remote'
_TREE_ROOT = 'tree_root'
def sync_clusters(target_cluster: str, local_path: str, exclude_cmd: str):
target_cluster_host = cfg['clusters'][target_cluster][_USERNAME_REMOTE]
target_tree_root = cfg['clusters'][target_cluster][_TREE_ROOT]
sync_paths = cfg['sync_paths']
for sync_path in sync_paths:
# rsync from local to remote
exec_on_rem_workspace(target_cluster_host, target_tree_root, [f'mkdir -p {sync_path}'])
os.system(f'rsync -vuar {os.path.join(local_path, sync_path)}/* {target_cluster_host}:{os.path.join(target_tree_root, sync_path)} {exclude_cmd}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config', help='Path to clusters config file', required=True, type=str)
parser.add_argument(
'-s', '--source-cluster', help='Name of cluster which is synced', required=True, type=str)
parser.add_argument(
'-t', '--target-cluster', action='append', help='Names of clusters to be synced', required=True)
args = parser.parse_args()
config_path = args.config
source_cluster = args.source_cluster
target_clusters = args.target_cluster
with open(config_path, 'r') as cfg_f:
content = cfg_f.read()
cfg = json.loads(content)
src_cluster_host = cfg['clusters'][source_cluster][_USERNAME_REMOTE]
src_tree_root = cfg['clusters'][source_cluster][_TREE_ROOT]
local_path = cfg['local'][_TREE_ROOT]
ignore_paths = cfg['ignore']
exclude_cmd = ' '.join([f'--exclude *{ignore_path}*' for ignore_path in ignore_paths])
if not os.path.exists(local_path):
        print("Local path doesn't exist, creating it")
os.makedirs(local_path, exist_ok=True)
# rsync from remote to local
for sync_path in cfg['sync_paths']:
os.makedirs(os.path.join(local_path, sync_path), exist_ok=True)
os.system(f'rsync -vuar {src_cluster_host}:{os.path.join(src_tree_root, sync_path)}/* {os.path.join(local_path, sync_path)} {exclude_cmd}')
for target in target_clusters:
try:
sync_clusters(target, local_path, exclude_cmd)
except Exception as e:
print(f'Failed syncing to {target} due to exception: {str(e)}')
| 39.57377 | 153 | 0.699254 |
eabb799e00d489d65c491c9c376f9b9ff167f9c0 | 313 | py | Python | designs/config/docs.py | ahmadRagheb/designs | 7aa567ddc78d3c73167b0b92ced2ceb5e8db0cdb | ["MIT"] | null | null | null | designs/config/docs.py | ahmadRagheb/designs | 7aa567ddc78d3c73167b0b92ced2ceb5e8db0cdb | ["MIT"] | null | null | null | designs/config/docs.py | ahmadRagheb/designs | 7aa567ddc78d3c73167b0b92ced2ceb5e8db0cdb | ["MIT"] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/designs"
# docs_base_url = "https://[org_name].github.io/designs"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Designs"
| 26.083333 | 68 | 0.722045 |
b8bcd878acb9cb9415e841b609833a97498d8829 | 7,346 | py | Python | lost/modes/office_gui.py | carstenfuchs/LoST | 7a88057808b4973d1b182d0efd7e145dbd898cd1 | ["MIT"] | null | null | null | lost/modes/office_gui.py | carstenfuchs/LoST | 7a88057808b4973d1b182d0efd7e145dbd898cd1 | ["MIT"] | null | null | null | lost/modes/office_gui.py | carstenfuchs/LoST | 7a88057808b4973d1b182d0efd7e145dbd898cd1 | ["MIT"] | null | null | null |
from babel.dates import format_datetime
from datetime import datetime
from tkinter import *
# from tkinter import ttk
from lost.modes.office_terminal import State
from lost.widgets import adjust_wraplength, cp, fp, DisplayServerReplyFrame, PauseButtonsRow, SystemPanelFrame, TitleBar, TouchButton, WaitForServerFrame
class RootWindow(Tk):
def __init__(self, terminal, main_con, *args, **kwargs):
super().__init__(*args, **kwargs)
self.terminal = terminal
self.main_con = main_con
self.title("LoST - Lori Stempeluhr Terminal")
# The native resolution of the official Raspberry Pi Touch Display
# (https://www.raspberrypi.com/products/raspberry-pi-touch-display/)
# is 800 x 480 pixels. Still, let's pick something smaller as the
# default size so that dragging the window with the mouse is easier.
self.geometry("640x384")
self.attributes("-fullscreen", True)
if True: # if localconfig.DEBUG:
success_reply = {
'ma': "Konrad Zuse (lokales erzeugtes Beispiel)",
'now': "not used at this time",
'feedback': {
'anfang': "8:00",
'ende': None,
'pause': None,
'pause_error': "Eine Netzwerkübertragung hat nicht stattgefunden.",
'result': None,
},
}
messages_reply = {
'messages': [
"Dies ist ein Beispiel für eine Rückmeldung vom Lori-Server vom " \
"Typ „messages“. Dazu wurde eine auf dem Terminal vorbereitete " \
"Antwort eingestellt, eine echte Netzwerkübertragung hat nicht stattgefunden.",
],
}
errors_reply = {
'errors': [
"Dies ist ein Beispiel für eine Meldung vom Typ „errors“. "
"Sie kann lokal vom Terminal oder vom Lori-Server ausgehen. "
"Dazu wurde eine auf dem Terminal vorbereitete Antwort eingestellt, "
"eine echte Netzwerkübertragung hat nicht stattgefunden.",
],
'detail_info': "optionale Detailangaben, z.B. Timeout, unbekanntes Terminal o.ä.",
}
self.bind('<Escape>', lambda x: self.destroy() if self.terminal.state == State.WELCOME else self.terminal.set_state(State.WELCOME))
self.bind('<F1>', lambda x: self.terminal.set_state(State.WELCOME))
self.bind('<F5>', lambda x: self.terminal.on_server_reply_received(success_reply))
self.bind('<F6>', lambda x: self.terminal.on_server_reply_received(messages_reply))
self.bind('<F7>', lambda x: self.terminal.on_server_reply_received(errors_reply))
self.bind('<F8>', lambda x: self.terminal.on_server_reply_received({}))
self.bind('<F9>', lambda x: self.main_con.simulate_smartcard_input('Sonderkarte: Verbindungstest'))
self.bind('<F11>', lambda x: self.attributes("-fullscreen", not self.attributes("-fullscreen")))
self.bind('<F12>', lambda x: self.terminal.set_state_system_panel())
self.frame_Welcome = WelcomeFrame(self)
self.frame_WaitForServer = WaitForServerFrame(self)
self.frame_DisplayServerReply = DisplayServerReplyFrame(self)
self.frame_SystemPanel = SystemPanelFrame(self)
self.active_frame = None
self.bind('<Configure>', self.on_resize)
self.drive_main_connector()
self.drive_terminal_clock()
def on_resize(self, event):
if event.widget == self:
# print(event)
fp.resize(event.height)
def drive_main_connector(self):
"""
Forward clock tick events to the main connector.
In a Tkinter program, GUI functions like this are the natural place to receive
timer events. As these events are also needed elsewhere (in non-GUI code), we pass
them to the main connector that will further distribute them.
"""
self.main_con.on_clock_tick()
self.after(100, self.drive_main_connector)
def drive_terminal_clock(self):
if self.terminal is not None:
self.terminal.on_clock_tick()
self.after(500, self.drive_terminal_clock)
def update_to_model(self, terminal):
next_frame = self.frame_Welcome
if terminal.state == State.WAIT_FOR_SERVER_REPLY:
next_frame = self.frame_WaitForServer
elif terminal.state == State.DISPLAY_SERVER_REPLY:
next_frame = self.frame_DisplayServerReply
elif terminal.state == State.SYSTEM_PANEL:
next_frame = self.frame_SystemPanel
if hasattr(next_frame, "update_to_model"):
next_frame.update_to_model(terminal)
if self.active_frame == next_frame:
return
if self.active_frame is not None:
self.active_frame.pack_forget()
self.active_frame = next_frame
self.active_frame.pack(side=TOP, fill=BOTH, expand=True)
class WelcomeFrame(Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs, background=cp.get_bg_col())
self.rowconfigure(0, weight=0) # title bar
self.rowconfigure(1, weight=1) # vertical space
self.rowconfigure(2, weight=1) # HH:MM
self.rowconfigure(3, weight=1) # day, month
self.rowconfigure(4, weight=1) # vertical space
self.rowconfigure(5, weight=1) # pause label
self.rowconfigure(6, weight=1) # pause buttons
self.rowconfigure(7, weight=1) # vertical space
# Have the grid span the full width of the frame
# (instead of only the minimal width to enclose its children).
self.columnconfigure(0, weight=1)
title_bar = TitleBar(self, show_clock=False)
title_bar.grid(row=0, column=0, sticky="NESW")
self.time_label = Label(self, text="", foreground='white', background=cp.get_bg_col(), font=fp.get_font(250))
self.time_label.grid(row=2, column=0, sticky="NESW")
self.date_label = Label(self, text="", anchor='n', foreground='#666666', background=cp.get_bg_col(), font=fp.get_font(120))
self.date_label.grid(row=3, column=0, sticky="NESW")
self.pause_label = Label(self, text="Pause", foreground='#3380E6', background=cp.get_bg_col(), font=fp.get_font(120))
self.pause_label.grid(row=5, column=0, sticky="NESW")
self.pause_buttons = PauseButtonsRow(self)
self.pause_buttons.grid(row=6, column=0, sticky="NESW")
self.update_clock()
def update_clock(self):
now = datetime.now()
# https://stackoverflow.com/questions/985505/locale-date-formatting-in-python
self.time_label.config(text=format_datetime(now, 'HH:mm', locale='de_DE'))
self.date_label.config(text=format_datetime(now, 'EEEE, d. MMMM', locale='de_DE')) # Mittwoch, 5. August
self.after(1000, self.update_clock)
def update_to_model(self, terminal):
p_str = "Pause"
if terminal.pause is not None:
p_str += f" {terminal.pause // 60}:{terminal.pause % 60:02}"
self.pause_label.config(text=p_str)
self.pause_buttons.update_to_model(terminal)
| 43.211765 | 153 | 0.633678 |
86aab671e0bff8603bff554a2086fb86d2d4c283 | 10,727 | py | Python | sdk/python/pulumi_azure_native/consumption/v20181001/budget_by_resource_group_name.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/consumption/v20181001/budget_by_resource_group_name.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/consumption/v20181001/budget_by_resource_group_name.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BudgetByResourceGroupName']
class BudgetByResourceGroupName(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
amount: Optional[pulumi.Input[float]] = None,
budget_name: Optional[pulumi.Input[str]] = None,
category: Optional[pulumi.Input[Union[str, 'CategoryType']]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
filters: Optional[pulumi.Input[pulumi.InputType['FiltersArgs']]] = None,
notifications: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['NotificationArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
time_grain: Optional[pulumi.Input[Union[str, 'TimeGrainType']]] = None,
time_period: Optional[pulumi.Input[pulumi.InputType['BudgetTimePeriodArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A budget resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] amount: The total amount of cost to track with the budget
:param pulumi.Input[str] budget_name: Budget Name.
:param pulumi.Input[Union[str, 'CategoryType']] category: The category of the budget, whether the budget tracks cost or usage.
:param pulumi.Input[str] e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not.
:param pulumi.Input[pulumi.InputType['FiltersArgs']] filters: May be used to filter budgets by resource group, resource, or meter.
:param pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['NotificationArgs']]]] notifications: Dictionary of notifications associated with the budget. Budget can have up to five notifications.
:param pulumi.Input[str] resource_group_name: Azure Resource Group Name.
:param pulumi.Input[Union[str, 'TimeGrainType']] time_grain: The time covered by a budget. Tracking of the amount will be reset based on the time grain.
:param pulumi.Input[pulumi.InputType['BudgetTimePeriodArgs']] time_period: Has start and end date of the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than three months. Past start date should be selected within the timegrain period. There are no restrictions on the end date.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if amount is None and not opts.urn:
raise TypeError("Missing required property 'amount'")
__props__['amount'] = amount
__props__['budget_name'] = budget_name
if category is None and not opts.urn:
raise TypeError("Missing required property 'category'")
__props__['category'] = category
__props__['e_tag'] = e_tag
__props__['filters'] = filters
__props__['notifications'] = notifications
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if time_grain is None and not opts.urn:
raise TypeError("Missing required property 'time_grain'")
__props__['time_grain'] = time_grain
if time_period is None and not opts.urn:
raise TypeError("Missing required property 'time_period'")
__props__['time_period'] = time_period
__props__['current_spend'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:consumption/v20181001:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-native:consumption:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-nextgen:consumption:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-native:consumption/latest:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-nextgen:consumption/latest:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-native:consumption/v20180131:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-nextgen:consumption/v20180131:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-native:consumption/v20180331:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-nextgen:consumption/v20180331:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-native:consumption/v20180630:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-nextgen:consumption/v20180630:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-native:consumption/v20180831:BudgetByResourceGroupName"), pulumi.Alias(type_="azure-nextgen:consumption/v20180831:BudgetByResourceGroupName")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(BudgetByResourceGroupName, __self__).__init__(
'azure-native:consumption/v20181001:BudgetByResourceGroupName',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'BudgetByResourceGroupName':
"""
Get an existing BudgetByResourceGroupName resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["amount"] = None
__props__["category"] = None
__props__["current_spend"] = None
__props__["e_tag"] = None
__props__["filters"] = None
__props__["name"] = None
__props__["notifications"] = None
__props__["time_grain"] = None
__props__["time_period"] = None
__props__["type"] = None
return BudgetByResourceGroupName(resource_name, opts=opts, __props__=__props__)
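    # Hedged usage sketch; resource and argument values below are invented and the
    # BudgetTimePeriodArgs fields are elided (they live in ._inputs):
    #
    #   budget = BudgetByResourceGroupName(
    #       "monthly-budget",
    #       budget_name="monthly-budget",
    #       resource_group_name="my-resource-group",
    #       amount=500.0,
    #       category="Cost",
    #       time_grain="Monthly",
    #       time_period=BudgetTimePeriodArgs(...),
    #   )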
@property
@pulumi.getter
def amount(self) -> pulumi.Output[float]:
"""
The total amount of cost to track with the budget
"""
return pulumi.get(self, "amount")
@property
@pulumi.getter
def category(self) -> pulumi.Output[str]:
"""
The category of the budget, whether the budget tracks cost or usage.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter(name="currentSpend")
def current_spend(self) -> pulumi.Output['outputs.CurrentSpendResponse']:
"""
The current amount of cost which is being tracked for a budget.
"""
return pulumi.get(self, "current_spend")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def filters(self) -> pulumi.Output[Optional['outputs.FiltersResponse']]:
"""
May be used to filter budgets by resource group, resource, or meter.
"""
return pulumi.get(self, "filters")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notifications(self) -> pulumi.Output[Optional[Mapping[str, 'outputs.NotificationResponse']]]:
"""
Dictionary of notifications associated with the budget. Budget can have up to five notifications.
"""
return pulumi.get(self, "notifications")
@property
@pulumi.getter(name="timeGrain")
def time_grain(self) -> pulumi.Output[str]:
"""
The time covered by a budget. Tracking of the amount will be reset based on the time grain.
"""
return pulumi.get(self, "time_grain")
@property
@pulumi.getter(name="timePeriod")
def time_period(self) -> pulumi.Output['outputs.BudgetTimePeriodResponse']:
"""
Has start and end date of the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than three months. Past start date should be selected within the timegrain period. There are no restrictions on the end date.
"""
return pulumi.get(self, "time_period")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.325359 | 1,126 | 0.671856 |
965029dc8c9fb652f674bf01136879ca51b92682 | 3,419 | py | Python | karbor-1.3.0/karbor/services/protection/flows/protect.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 1 | 2021-05-23T01:48:25.000Z | 2021-05-23T01:48:25.000Z | karbor-1.3.0/karbor/services/protection/flows/protect.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | karbor-1.3.0/karbor/services/protection/flows/protect.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from karbor.common import constants
from karbor.resource import Resource
from karbor.services.protection.flows import utils
from karbor.services.protection import resource_flow
from oslo_log import log as logging
from oslo_utils import timeutils
from taskflow import task
LOG = logging.getLogger(__name__)
class InitiateProtectTask(task.Task):
def execute(self, context, checkpoint, operation_log, *args, **kwargs):
LOG.debug("Initiate protect checkpoint_id: %s", checkpoint.id)
checkpoint.status = constants.CHECKPOINT_STATUS_PROTECTING
checkpoint.commit()
update_fields = {"status": checkpoint.status}
utils.update_operation_log(context, operation_log, update_fields)
def revert(self, context, checkpoint, operation_log, *args, **kwargs):
LOG.debug("Failed to protect checkpoint_id: %s", checkpoint.id)
checkpoint.status = constants.CHECKPOINT_STATUS_ERROR
checkpoint.commit()
update_fields = {
"status": checkpoint.status,
"ended_at": timeutils.utcnow()
}
utils.update_operation_log(context, operation_log, update_fields)
class CompleteProtectTask(task.Task):
def execute(self, context, checkpoint, operation_log):
LOG.debug("Complete protect checkpoint_id: %s", checkpoint.id)
checkpoint.status = constants.CHECKPOINT_STATUS_AVAILABLE
checkpoint.commit()
update_fields = {
"status": checkpoint.status,
"ended_at": timeutils.utcnow()
}
utils.update_operation_log(context, operation_log, update_fields)
def get_flow(context, protectable_registry, workflow_engine, plan, provider,
checkpoint):
resources = set(Resource(**item) for item in plan.get("resources"))
resource_graph = protectable_registry.build_graph(context,
resources)
checkpoint.resource_graph = resource_graph
checkpoint.commit()
operation_log = utils.create_operation_log(context, checkpoint)
flow_name = "Protect_" + plan.get('id')
protection_flow = workflow_engine.build_flow(flow_name, 'linear')
plugins = provider.load_plugins()
parameters = plan.get('parameters')
resources_task_flow = resource_flow.build_resource_flow(
operation_type=constants.OPERATION_PROTECT,
context=context,
workflow_engine=workflow_engine,
resource_graph=resource_graph,
plugins=plugins,
parameters=parameters,
)
workflow_engine.add_tasks(
protection_flow,
InitiateProtectTask(),
resources_task_flow,
CompleteProtectTask(),
)
flow_engine = workflow_engine.get_engine(protection_flow, store={
'context': context,
'checkpoint': checkpoint,
'operation_log': operation_log
})
return flow_engine
| 38.852273 | 76 | 0.708102 |
91c350495f90aeb093d65ec853f11a5a85bf1598 | 3,195 | py | Python | tests/flows/test_saml-saml.py | mrkday/SATOSA | 43fd13273d7633b1d496d9c9aaef97c472ebd448 | ["Apache-2.0"] | 92 | 2017-11-08T08:01:27.000Z | 2022-03-14T09:44:09.000Z | tests/flows/test_saml-saml.py | mrkday/SATOSA | 43fd13273d7633b1d496d9c9aaef97c472ebd448 | ["Apache-2.0"] | 155 | 2017-10-31T15:11:06.000Z | 2022-03-11T16:59:23.000Z | tests/flows/test_saml-saml.py | mrkday/SATOSA | 43fd13273d7633b1d496d9c9aaef97c472ebd448 | ["Apache-2.0"] | 73 | 2017-11-05T13:53:40.000Z | 2022-03-23T15:34:00.000Z |
from urllib.parse import parse_qsl, urlparse, urlencode
from saml2 import BINDING_HTTP_REDIRECT
from saml2.config import SPConfig, IdPConfig
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.metadata_creation.saml_metadata import create_entity_descriptors
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
from tests.users import USERS
from tests.util import FakeSP, FakeIdP
class TestSAMLToSAML:
def run_test(self, satosa_config_dict, sp_conf, idp_conf, saml_backend_config, frontend_config):
subject_id = "testuser1"
# proxy config
satosa_config_dict["FRONTEND_MODULES"] = [frontend_config]
satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
satosa_config_dict["INTERNAL_ATTRIBUTES"]["attributes"] = {attr_name: {"saml": [attr_name]} for attr_name in
USERS[subject_id]}
frontend_metadata, backend_metadata = create_entity_descriptors(SATOSAConfig(satosa_config_dict))
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# config test SP
frontend_metadata_str = str(frontend_metadata[frontend_config["name"]][0])
sp_conf["metadata"]["inline"].append(frontend_metadata_str)
fakesp = FakeSP(SPConfig().load(sp_conf))
# create auth req
destination, req_args = fakesp.make_auth_req(frontend_metadata[frontend_config["name"]][0].entity_id)
auth_req = urlparse(destination).path + "?" + urlencode(req_args)
# make auth req to proxy
proxied_auth_req = test_client.get(auth_req)
assert proxied_auth_req.status == "303 See Other"
# config test IdP
backend_metadata_str = str(backend_metadata[saml_backend_config["name"]][0])
idp_conf["metadata"]["inline"].append(backend_metadata_str)
fakeidp = FakeIdP(USERS, config=IdPConfig().load(idp_conf))
# create auth resp
req_params = dict(parse_qsl(urlparse(proxied_auth_req.data.decode("utf-8")).query))
url, authn_resp = fakeidp.handle_auth_req(
req_params["SAMLRequest"],
req_params["RelayState"],
BINDING_HTTP_REDIRECT,
subject_id,
response_binding=BINDING_HTTP_REDIRECT)
# make auth resp to proxy
authn_resp_req = urlparse(url).path + "?" + urlencode(authn_resp)
authn_resp = test_client.get(authn_resp_req)
assert authn_resp.status == "303 See Other"
# verify auth resp from proxy
resp_dict = dict(parse_qsl(urlparse(authn_resp.data.decode("utf-8")).query))
auth_resp = fakesp.parse_authn_request_response(resp_dict["SAMLResponse"], BINDING_HTTP_REDIRECT)
assert auth_resp.ava == USERS[subject_id]
def test_full_flow(self, satosa_config_dict, sp_conf, idp_conf, saml_backend_config,
saml_frontend_config, saml_mirror_frontend_config):
for conf in [saml_frontend_config, saml_mirror_frontend_config]:
self.run_test(satosa_config_dict, sp_conf, idp_conf, saml_backend_config, conf)
| 46.304348 | 116 | 0.703599 |
36afcae81724dc2fa68893bcb232e5af68f295d3 | 2,109 | py | Python | backend/apps/dapps/validators.py | grinay/smartz | 44ca50ebcac26d5776b90b4fc96c63edfe804dfe | ["Apache-2.0"] | null | null | null | backend/apps/dapps/validators.py | grinay/smartz | 44ca50ebcac26d5776b90b4fc96c63edfe804dfe | ["Apache-2.0"] | null | null | null | backend/apps/dapps/validators.py | grinay/smartz | 44ca50ebcac26d5776b90b4fc96c63edfe804dfe | ["Apache-2.0"] | 1 | 2018-10-23T15:04:52.000Z | 2018-10-23T15:04:52.000Z |
from django.core.exceptions import ValidationError
from smartz.json_schema import is_conforms2schema_part, load_schema, is_conforms2schema
def validate_function_args(value):
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Function argument",
"type": "array",
"items": {
"type": "object",
"additionalProperties": False,
"properties": {
"title": {"type": "string"},
"description": {"type": "string"},
"value": {"type": "string"}
}
}
}
if not is_conforms2schema(value, schema):
raise ValidationError("Incorrect function arguments")
def validate_tx_info(value):
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Transaction additional info",
"type": "object",
"additionalProperties": False,
"properties": {
"ethereum": {
"description": "Ethereum tx properties",
"type": "object",
"additionalProperties": False,
"properties": {
"gas_price": {"type": "number"},
"gas_limit": {"type": "number"},
"gas_used": {"type": "number"},
"block": {"type": "number"},
"block_hash": {"type": "string"},
}
},
"eos": {
"description": "Eos tx properties",
"type": "object",
"additionalProperties": False,
"properties": {
}
}
}
}
if not is_conforms2schema(value, schema):
raise ValidationError("Incorrect transaction additional info")
def validate_log_data(value):
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Transaction additional info",
"type": "object"
}
if not is_conforms2schema(value, schema):
raise ValidationError("Incorrect transaction additional info")
| 31.014706 | 87 | 0.512091 |
5f00205518f8b24ec1fe48be103d509ecc19d8e9 | 5,964 | py | Python | lib/galaxy/openid/providers.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 4 | 2015-05-12T20:36:41.000Z | 2017-06-26T15:34:02.000Z | lib/galaxy/openid/providers.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 52 | 2015-03-16T14:02:14.000Z | 2021-12-24T09:50:23.000Z | lib/galaxy/openid/providers.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 1 | 2016-03-21T12:54:06.000Z | 2016-03-21T12:54:06.000Z |
"""
Contains OpenID provider functionality
"""
import logging
import os
from collections import OrderedDict
from galaxy.util import parse_xml, string_as_bool
log = logging.getLogger(__name__)
NO_PROVIDER_ID = 'None'
RESERVED_PROVIDER_IDS = [NO_PROVIDER_ID]
class OpenIDProvider:
'''An OpenID Provider object.'''
@classmethod
def from_file(cls, filename):
return cls.from_elem(parse_xml(filename).getroot())
@classmethod
def from_elem(cls, xml_root):
provider_elem = xml_root
provider_id = provider_elem.get('id', None)
provider_name = provider_elem.get('name', provider_id)
op_endpoint_url = provider_elem.find('op_endpoint_url')
if op_endpoint_url is not None:
op_endpoint_url = op_endpoint_url.text
never_associate_with_user = string_as_bool(provider_elem.get('never_associate_with_user', 'False'))
assert (provider_id and provider_name and op_endpoint_url), Exception("OpenID Provider improperly configured")
assert provider_id not in RESERVED_PROVIDER_IDS, Exception('Specified OpenID Provider uses a reserved id: %s' % (provider_id))
sreg_required = []
sreg_optional = []
use_for = {}
store_user_preference = {}
use_default_sreg = True
for elem in provider_elem.findall('sreg'):
use_default_sreg = False
for field_elem in elem.findall('field'):
sreg_name = field_elem.get('name')
assert sreg_name, Exception('A name is required for a sreg element')
if string_as_bool(field_elem.get('required')):
sreg_required.append(sreg_name)
else:
sreg_optional.append(sreg_name)
for use_elem in field_elem.findall('use_for'):
use_for[use_elem.get('name')] = sreg_name
for store_user_preference_elem in field_elem.findall('store_user_preference'):
store_user_preference[store_user_preference_elem.get('name')] = sreg_name
if use_default_sreg:
sreg_required = None
sreg_optional = None
use_for = None
return cls(provider_id, provider_name, op_endpoint_url, sreg_required=sreg_required, sreg_optional=sreg_optional, use_for=use_for, store_user_preference=store_user_preference, never_associate_with_user=never_associate_with_user)
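    # Hedged example of a provider XML file that from_elem() accepts, put together
    # from the element and attribute names parsed above; all values are invented:
    #
    #   <provider id="example" name="Example OpenID">
    #       <op_endpoint_url>https://openid.example.com/server</op_endpoint_url>
    #       <sreg>
    #           <field name="nickname" required="True">
    #               <use_for name="username"/>
    #           </field>
    #           <field name="email" required="False">
    #               <use_for name="email"/>
    #               <store_user_preference name="openid_provider_email"/>
    #           </field>
    #       </sreg>
    #   </provider>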
def __init__(self, id, name, op_endpoint_url, sreg_required=None, sreg_optional=None, use_for=None, store_user_preference=None, never_associate_with_user=None):
'''When sreg options are not specified, defaults are used.'''
self.id = id
self.name = name
self.op_endpoint_url = op_endpoint_url
if sreg_optional is None:
self.sreg_optional = ['nickname', 'email']
else:
self.sreg_optional = sreg_optional
if sreg_required:
self.sreg_required = sreg_required
else:
self.sreg_required = []
if use_for is not None:
self.use_for = use_for
else:
self.use_for = {}
if 'nickname' in (self.sreg_optional + self.sreg_required):
self.use_for['username'] = 'nickname'
if 'email' in (self.sreg_optional + self.sreg_required):
self.use_for['email'] = 'email'
if store_user_preference:
self.store_user_preference = store_user_preference
else:
self.store_user_preference = {}
if never_associate_with_user:
self.never_associate_with_user = True
else:
self.never_associate_with_user = False
def post_authentication(self, trans, openid_manager, info):
sreg_attributes = openid_manager.get_sreg(info)
for store_pref_name, store_pref_value_name in self.store_user_preference.items():
if store_pref_value_name in (self.sreg_optional + self.sreg_required):
trans.user.preferences[store_pref_name] = sreg_attributes.get(store_pref_value_name)
else:
raise Exception('Only sreg is currently supported.')
trans.sa_session.add(trans.user)
trans.sa_session.flush()
def has_post_authentication_actions(self):
return bool(self.store_user_preference)
class OpenIDProviders:
'''Collection of OpenID Providers'''
NO_PROVIDER_ID = NO_PROVIDER_ID
@classmethod
def from_file(cls, filename):
try:
return cls.from_elem(parse_xml(filename).getroot())
except Exception as e:
log.error('Failed to load OpenID Providers: %s' % (e))
return cls()
@classmethod
def from_elem(cls, xml_root):
oid_elem = xml_root
providers = OrderedDict()
for elem in oid_elem.findall('provider'):
try:
provider = OpenIDProvider.from_file(os.path.join('lib/galaxy/openid', elem.get('file')))
providers[provider.id] = provider
log.debug(f'Loaded OpenID provider: {provider.name} ({provider.id})')
except Exception as e:
log.error('Failed to add OpenID provider: %s' % (e))
return cls(providers)
def __init__(self, providers=None):
if providers:
self.providers = providers
else:
self.providers = OrderedDict()
self._banned_identifiers = [provider.op_endpoint_url for provider in self.providers.values() if provider.never_associate_with_user]
def __iter__(self):
yield from self.providers.values()
def get(self, name, default=None):
if name in self.providers:
return self.providers[name]
else:
return default
def new_provider_from_identifier(self, identifier):
return OpenIDProvider(None, identifier, identifier, never_associate_with_user=identifier in self._banned_identifiers)
| 40.849315 | 236 | 0.654091 |
ade49b8116e4b3381701fe51cba248aafc3bb7ef | 6,979 | py | Python | homeassistant/components/media_player/volumio.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | ["Apache-2.0"] | 1 | 2021-08-06T09:54:39.000Z | 2021-08-06T09:54:39.000Z | homeassistant/components/media_player/volumio.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | ["Apache-2.0"] | null | null | null | homeassistant/components/media_player/volumio.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | ["Apache-2.0"] | 1 | 2020-11-04T07:34:41.000Z | 2020-11-04T07:34:41.000Z |
"""
Volumio Platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.volumio/
"""
import logging
import asyncio
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP,
SUPPORT_PLAY, MediaPlayerDevice, PLATFORM_SCHEMA, MEDIA_TYPE_MUSIC)
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_IDLE, CONF_HOST, CONF_PORT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Volumio'
DEFAULT_PORT = 3000
TIMEOUT = 10
SUPPORT_VOLUMIO = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
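# Hedged example of a configuration.yaml entry accepted by the schema above
# (the host value is invented):
#
#   media_player:
#     - platform: volumio
#       host: 192.168.1.50
#       port: 3000
#       name: Living Room Volumio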
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Volumio platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
async_add_devices([Volumio(name, host, port, hass)])
class Volumio(MediaPlayerDevice):
"""Volumio Player Object."""
def __init__(self, name, host, port, hass):
"""Initialize the media player."""
self.host = host
self.port = port
self.hass = hass
self._url = '{}:{}'.format(host, str(port))
self._name = name
self._state = {}
self.async_update()
self._lastvol = self._state.get('volume', 0)
@asyncio.coroutine
def send_volumio_msg(self, method, params=None):
"""Send message."""
url = "http://{}:{}/api/v1/{}/".format(self.host, self.port, method)
_LOGGER.debug("URL: %s params: %s", url, params)
try:
websession = async_get_clientsession(self.hass)
response = yield from websession.get(url, params=params)
if response.status == 200:
data = yield from response.json()
else:
_LOGGER.error(
"Query failed, response code: %s Full message: %s",
response.status, response)
return False
except (asyncio.TimeoutError, aiohttp.ClientError) as error:
_LOGGER.error("Failed communicating with Volumio: %s", type(error))
return False
try:
return data
except AttributeError:
_LOGGER.error("Received invalid response: %s", data)
return False
@asyncio.coroutine
def async_update(self):
"""Update state."""
resp = yield from self.send_volumio_msg('getState')
if resp is False:
return
self._state = resp.copy()
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the device."""
status = self._state.get('status', None)
if status == 'pause':
return STATE_PAUSED
elif status == 'play':
return STATE_PLAYING
else:
return STATE_IDLE
@property
def media_title(self):
"""Title of current playing media."""
return self._state.get('title', None)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self._state.get('artist', None)
@property
    def media_album_name(self):
        """Album name of current playing media (Music track only)."""
return self._state.get('album', None)
@property
def media_image_url(self):
"""Image url of current playing media."""
url = self._state.get('albumart', None)
if url is None:
return
if str(url[0:2]).lower() == 'ht':
mediaurl = url
else:
mediaurl = "http://{}:{}{}".format(self.host, self.port, url)
return mediaurl
@property
def media_seek_position(self):
"""Time in seconds of current seek position."""
return self._state.get('seek', None)
@property
def media_duration(self):
"""Time in seconds of current song duration."""
return self._state.get('duration', None)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
volume = self._state.get('volume', None)
if volume is not None:
volume = volume / 100
return volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._state.get('mute', None)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return SUPPORT_VOLUMIO
def async_media_next_track(self):
"""Send media_next command to media player."""
return self.send_volumio_msg('commands', params={'cmd': 'next'})
def async_media_previous_track(self):
"""Send media_previous command to media player."""
return self.send_volumio_msg('commands', params={'cmd': 'prev'})
def async_media_play(self):
"""Send media_play command to media player."""
return self.send_volumio_msg('commands', params={'cmd': 'play'})
def async_media_pause(self):
"""Send media_pause command to media player."""
if self._state['trackType'] == 'webradio':
return self.send_volumio_msg('commands', params={'cmd': 'stop'})
return self.send_volumio_msg('commands', params={'cmd': 'pause'})
def async_set_volume_level(self, volume):
"""Send volume_up command to media player."""
return self.send_volumio_msg(
'commands', params={'cmd': 'volume', 'volume': int(volume * 100)})
def async_mute_volume(self, mute):
"""Send mute command to media player."""
mutecmd = 'mute' if mute else 'unmute'
if mute:
            # mute is implemented as volume 0, so save the last volume level
self._lastvol = self._state['volume']
return self.send_volumio_msg(
'commands', params={'cmd': 'volume', 'volume': mutecmd})
else:
return self.send_volumio_msg(
'commands', params={'cmd': 'volume', 'volume': self._lastvol})
| 32.765258 | 79 | 0.631036 |
a9ab7b1982b1e938678d3ac601ff21b9bda16125 | 3,683 | py | Python | app.py | aikagari/tornado_test | 5d5e218df56e46711106cffd2886bdfcabfd8046 | ["MIT"] | null | null | null | app.py | aikagari/tornado_test | 5d5e218df56e46711106cffd2886bdfcabfd8046 | ["MIT"] | null | null | null | app.py | aikagari/tornado_test | 5d5e218df56e46711106cffd2886bdfcabfd8046 | ["MIT"] | null | null | null |
import asyncio
from tornado import websocket, web, ioloop
import aioredis
import os
import time
status_source_dict = {'\x01': 'IDLE', '\x02': 'ACTIVE', '\x03': 'RECHARGE'}
connections = []
last_msg = {}
class BaseSocket(websocket.WebSocketHandler):
async def open(self):
connections.append(self)
self.conn = await self.setup_redis()
def on_message(self, message):
pass
def on_close(self):
connections.remove(self)
    def check_origin(self, origin):
        return True
async def setup_redis(self):
redis_url = os.environ.get('REDIS_HOST', 'localhost')
conn = await aioredis.create_redis(f'redis://{redis_url}')
return conn
class ListenerApp(BaseSocket):
def on_message(self, message):
msg_to_resp = self.parse_message(message)
self.write_message(msg_to_resp)
def parse_message(self, message):
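        # Inferred wire format (an assumption, not documented in the original):
        # the message is a space-separated list of hex bytes - header 0x01,
        # 2-byte message number, 8-byte source id, 1-byte status, 1-byte field
        # count, then 12-byte fields (8-byte key + 4-byte value), followed by a
        # trailing xor checksum byte.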
msg_to_resp = ''
try:
message_list = [chr(int(x, 16)) for x in message.split(' ')]
header = message_list[0]
assert header == '\x01'
num_message = ' '.join(str(ord(i)) for i in message_list[1:3])
id_source = ''.join(str(ord(i)) for i in message_list[3:11])
status_source = message_list[11]
assert status_source in status_source_dict
numfields = ord(message_list[12])
message_data = {}
            for i in range(0, numfields):
                # each field is 12 bytes (8-byte key + 4-byte value) and the
                # fields start right after the 13 header bytes parsed above
                # (assumed offset; the original indexed an undefined name `l`)
                key_seq = message_list[13 + 12 * i:13 + 12 * i + 8]
                value_seq = message_list[13 + 12 * i + 8:13 + 12 * i + 12]
                key_data = ''.join(str(ord(ch)) for ch in key_seq)
                value_data = ''.join(str(ord(ch)) for ch in value_seq)
                message_data[key_data] = value_data
            # checksum reconstruction (the original compared an unassigned name):
            # xor all decoded bytes except the trailing checksum byte
            message_to_check = ''.join(message_list[:-1])
            xor_data = get_xor_hash(message_to_check)
            assert xor_data == ord(message_list[-1])
msg_result = '\r\n'.join("{} | {}".format(item[0], item[1]) for item in message_data.items())
last_msg[id_source] = {'time': int(round(time.time() * 1000)), 'num_message': num_message, 'status_source': status_source}
self.conn.publish('messages', msg_result)
num_to_resp = ' '.join(message_list[1:3])
msg_to_resp = f'\x11 {num_to_resp}'
except Exception:
msg_to_resp = '\x12 \x00 \x00'
xor_hash_resp = get_xor_hash(msg_to_resp)
return f'{msg_to_resp} {xor_hash_resp}'
class ResponseApp(BaseSocket):
async def open(self):
await super().open()
channel = await self.conn.subscribe('messages')
self.channel = channel[0]
current_time = int(round(time.time() * 1000))
for key, value in last_msg.items():
last_time = current_time - value['time']
str_to_response = f"[{key}] {value['num_message']} | {status_source_dict[value['status_source']]} | {last_time}\r\n"
await self.write_message(str_to_response)
asyncio.ensure_future(self.consumer())
async def consumer(self):
while await self.channel.wait_message():
message = await self.channel.get(encoding='utf-8')
for connection in connections:
await connection.write_message(message)
def get_xor_hash(message):
xor_data = 0
for item in message:
xor_data ^= ord(item)
return hash(xor_data)
if __name__ == '__main__':
app_response = web.Application([
(r'/', ResponseApp)
])
app_listener = web.Application([
(r'/', ListenerApp)
])
app_listener.listen(8888)
app_response.listen(8889)
loop = ioloop.IOLoop.current()
loop.start()
| 32.883929 | 134 | 0.594624 |
01c7f8220b79bd0c492602fc4c7f8f5cb494f650 | 9,772 | py | Python | timml/util.py | raoulcollenteur/timml | 1d393e73c03e62b836678d156dec19bd38d64b2e | ["MIT"] | null | null | null | timml/util.py | raoulcollenteur/timml | 1d393e73c03e62b836678d156dec19bd38d64b2e | ["MIT"] | null | null | null | timml/util.py | raoulcollenteur/timml | 1d393e73c03e62b836678d156dec19bd38d64b2e | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from .trace import timtracelines, timtraceline
plt.rcParams['contour.negative_linestyle'] = 'solid'
class PlotTim:
def plot(self, win=None, newfig=True, figsize=None, orientation='hor', topfigfrac=0.8):
"""Plot layout
Parameters
----------
win : list or tuple
[x1, x2, y1, y2]
"""
if newfig:
plt.figure(figsize=figsize)
ax1 = None
ax2 = None
if orientation == 'both':
ax1 = plt.axes([0.125, 0.18 + (1 - topfigfrac) * 0.7, (0.9 - 0.125), topfigfrac * 0.7])
ax2 = plt.axes([0.125, 0.11, (0.9 - 0.125), (1 - topfigfrac) * 0.7], sharex=ax1)
elif orientation[:3] == 'hor':
ax1 = plt.subplot()
elif orientation[:3] == 'ver':
ax2 = plt.subplot()
else:
if orientation == 'both':
fig = plt.gcf()
ax1 = fig.axes[0]
ax2 = fig.axes[1]
elif orientation[:3] == 'hor':
fig = plt.gcf()
ax1 = fig.axes[0]
ax2 = None
elif orientation[:3] == 'ver':
fig = plt.gcf()
ax1 = None
ax2 = fig.axes[0]
if ax1 is not None:
plt.sca(ax1)
for e in self.elementlist:
e.plot()
if orientation[:3] == 'hor':
plt.axis('scaled')
elif orientation == 'both':
plt.axis('equal') # cannot be 'scaled' when sharing axes
if win is not None:
plt.axis(win)
if ax2 is not None:
plt.sca(ax2)
for i in range(self.aq.nlayers):
if self.aq.ltype[i] == 'l':
plt.axhspan(ymin=self.aq.z[i + 1], ymax=self.aq.z[i], color=[0.8, 0.8, 0.8])
for i in range(1, self.aq.nlayers):
if self.aq.ltype[i] == 'a' and self.aq.ltype[i - 1] == 'a':
plt.axhspan(ymin=self.aq.z[i], ymax=self.aq.z[i], color=[0.8, 0.8, 0.8])
def contour(self, win, ngr=20, layers=0, levels=20, layout=True, labels=True,
decimals=0, color=None, newfig=True, figsize=None, legend=True):
"""Contour plot
Parameters
----------
win : list or tuple
[x1, x2, y1, y2]
ngr : scalar, tuple or list
if scalar: number of grid points in x and y direction
if tuple or list: nx, ny, number of grid points in x and y direction
layers : integer, list or array
layers for which grid is returned
levels : integer or array (default 20)
levels that are contoured
layout : boolean (default True)
plot layout of elements
labels : boolean (default True)
print labels along contours
decimals : integer (default 0)
number of decimals of labels along contours
color : str or list of strings
color of contour lines
newfig : boolean (default True)
create new figure
figsize : tuple of 2 values (default is mpl default)
size of figure
legend : list or boolean (default True)
add legend to figure
if list of strings: use strings as names in legend
"""
x1, x2, y1, y2 = win
if np.isscalar(ngr):
nx = ny = ngr
else:
nx, ny = ngr
layers = np.atleast_1d(layers)
xg = np.linspace(x1, x2, nx)
yg = np.linspace(y1, y2, ny)
h = self.headgrid(xg, yg, layers)
if newfig:
plt.figure(figsize=figsize)
# color
if color is None:
c = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif type(color) is str:
c = len(layers) * [color]
elif type(color) is list:
c = color
if len(c) < len(layers):
                n = int(np.ceil(self.aq.naq / len(c)))
c = n * c
# contour
cscollectionlist = []
for i in range(len(layers)):
cs = plt.contour(xg, yg, h[i], levels, colors=c[i])
cscollectionlist.append(cs.collections[0])
if labels:
fmt = '%1.' + str(decimals) + 'f'
plt.clabel(cs, fmt=fmt)
if type(legend) is list:
plt.legend(cscollectionlist, legend)
elif legend:
legendlist = ['layer ' + str(i) for i in layers]
plt.legend(cscollectionlist, legendlist)
plt.axis('scaled')
if layout:
self.plot(win=[x1, x2, y1, y2], newfig=False)
#plt.show()
def vcontour(self, win, n, levels, labels=False, decimals=0, color=None,
vinterp=True, nudge=1e-6, newfig=True, figsize=None, layout=True):
"""Vertical contour
"""
x1, x2, y1, y2 = win
h = self.headalongline(np.linspace(x1 + nudge, x2 - nudge, n),
np.linspace(y1 + nudge, y2 - nudge, n))
L = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
xg = np.linspace(0, L, n)
if vinterp:
zg = 0.5 * (self.aq.zaqbot + self.aq.zaqtop)
zg = np.hstack((self.aq.zaqtop[0], zg, self.aq.zaqbot[-1]))
h = np.vstack((h[0], h, h[-1]))
else:
zg = np.empty(2 * self.aq.naq)
for i in range(self.aq.naq):
zg[2 * i] = self.aq.zaqtop[i]
zg[2 * i + 1] = self.aq.zaqbot[i]
h = np.repeat(h, 2, 0)
if newfig:
plt.figure(figsize=figsize)
cs = plt.contour(xg, zg, h, levels, colors=color)
if labels:
fmt = '%1.' + str(decimals) + 'f'
plt.clabel(cs, fmt=fmt)
if layout:
self.plot(win=[x1, x2, y1, y2], orientation='ver', newfig=False)
def tracelines(self, xstart, ystart, zstart, hstepmax, vstepfrac=0.2,
tmax=1e12, nstepmax=100, silent='.', color=None, orientation='hor',
win=[-1e30, 1e30, -1e30, 1e30], newfig=False, figsize=None):
"""Draw trace lines
"""
if color is None:
c = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif type(color) is str:
c = self.aq.naq * [color]
elif type(color) is list:
c = color
if len(c) < self.aq.naq:
n = int(np.ceil(self.aq.naq / len(c)))
c = n * c
fig = plt.gcf()
assert len(fig.axes) > 0, 'Error: Need to specify axes in figure before invoking tracelines'
ax1 = None
ax2 = None
if orientation == 'both':
ax1 = fig.axes[0]
ax2 = fig.axes[1]
elif orientation[:3] == 'hor':
ax1 = fig.axes[0]
elif orientation[:3] == 'ver':
ax2 = fig.axes[1]
xyztlist = []
for i in range(len(xstart)):
xyzt, layerlist = timtraceline(self, xstart[i], ystart[i], zstart[i], hstepmax=hstepmax,
vstepfrac=vstepfrac, tmax=tmax, nstepmax=nstepmax,
silent=silent, win=win, returnlayers=True)
if silent == '.':
print('.', end='', flush=True)
if ax1 is not None:
#plt.axes(ax1)
color = [c[self.aq.layernumber[i]] if self.aq.ltype[i] == 'a' else 'k' for i in layerlist]
points = np.array([xyzt[:,0], xyzt[:,1]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, colors=color)
ax1.add_collection(lc)
#ax1.plot(xyzt[:, 0], xyzt[:, 1], color=color)
if ax2 is not None:
color = [c[self.aq.layernumber[i]] if self.aq.ltype[i] == 'a' else 'k' for i in layerlist]
points = np.array([xyzt[:,0], xyzt[:,2]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, colors=color)
ax2.add_collection(lc)
ax2.set_ylim(self.aq.z[-1], self.aq.z[0])
def vcontoursf1D(self, x1, x2, nx, levels, labels=False, decimals=0, color=None,
nudge=1e-6, newfig=True, figsize=None, layout=True, ax=None):
"""
Vertical contour for 1D model
"""
naq = self.aq.naq
xflow = np.linspace(x1 + nudge, x2 - nudge, nx)
Qx = np.empty((naq, nx))
for i in range(nx):
Qx[:, i], Qydump = self.disvec(xflow[i], 0)
zflow = np.empty(2 * naq)
for i in range(self.aq.naq):
zflow[2 * i] = self.aq.zaqtop[i]
zflow[2 * i + 1] = self.aq.zaqbot[i]
Qx = Qx[::-1] # set upside down
Qxgrid = np.empty((2 * naq, nx))
Qxgrid[0] = 0
for i in range(naq - 1):
Qxgrid[2 * i + 1] = Qxgrid[2 * i] - Qx[i]
Qxgrid[2 * i + 2] = Qxgrid[2 * i + 1]
Qxgrid[-1] = Qxgrid[-2] - Qx[-1]
Qxgrid = Qxgrid[::-1] # index 0 at top
if newfig:
fig, ax = plt.subplots(1, 1, figsize=figsize)
        # otherwise draw on the axes passed in via the ``ax`` argument
cs = ax.contour(xflow, zflow, Qxgrid, levels, colors=color)
if labels:
fmt = '%1.' + str(decimals) + 'f'
plt.clabel(cs, fmt=fmt)
#if layout:
# self.plot(win=[x1, x2, y1, y2], orientation='ver', newfig=False)
| 40.04918 | 106 | 0.486901 |
fbc3abfb2de8de62fde84c96b35e81332de95df5 | 137 | py | Python | Utils/Utils.py | dgod1028/SBass_Project | cdfd13a6876ef54621c3486ac89ad3bd50f23158 | ["MIT"] | null | null | null | Utils/Utils.py | dgod1028/SBass_Project | cdfd13a6876ef54621c3486ac89ad3bd50f23158 | ["MIT"] | null | null | null | Utils/Utils.py | dgod1028/SBass_Project | cdfd13a6876ef54621c3486ac89ad3bd50f23158 | ["MIT"] | null | null | null |
import pickle
def load_object(p):
    return pickle.load(open(p, "rb"))
def save_object(obj, p):
    pickle.dump(obj, open(p, "wb"))
| 17.125 | 37 | 0.642336 |
79abf4a0ac2acf21bc4fb786c4af4638105db35a | 3,171 | py | Python | models_modified.py | atul2099/squad | 7901894c0db3aad097a0f61279dff96b142c1b01 | ["MIT"] | null | null | null | models_modified.py | atul2099/squad | 7901894c0db3aad097a0f61279dff96b142c1b01 | ["MIT"] | null | null | null | models_modified.py | atul2099/squad | 7901894c0db3aad097a0f61279dff96b142c1b01 | ["MIT"] | null | null | null |
"""Top-level model classes.
Author:
Chris Chute (chute@stanford.edu)
"""
import layers_modified as layers
import torch
import torch.nn as nn
class BiDAF(nn.Module):
"""Baseline BiDAF model for SQuAD.
Based on the paper:
"Bidirectional Attention Flow for Machine Comprehension"
by Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, Hannaneh Hajishirzi
(https://arxiv.org/abs/1611.01603).
Follows a high-level structure commonly found in SQuAD models:
- Embedding layer: Embed word indices to get word vectors.
- Encoder layer: Encode the embedded sequence.
- Attention layer: Apply an attention mechanism to the encoded sequence.
- Model encoder layer: Encode the sequence again.
- Output layer: Simple layer (e.g., fc + softmax) to get final outputs.
Args:
word_vectors (torch.Tensor): Pre-trained word vectors.
hidden_size (int): Number of features in the hidden state at each layer.
drop_prob (float): Dropout probability.
"""
def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0., char_channel_size=100, char_channel_width=4):
super(BiDAF, self).__init__()
self.emb = layers.Embedding(word_vectors=word_vectors,
char_vectors=char_vectors,
hidden_size=hidden_size,
char_channel_size=char_channel_size,
char_channel_width=char_channel_width,
drop_prob=drop_prob)
self.enc = layers.RNNEncoder(input_size=hidden_size,
hidden_size=hidden_size,
num_layers=1,
drop_prob=drop_prob)
self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
drop_prob=drop_prob)
self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
hidden_size=hidden_size,
num_layers=2,
drop_prob=drop_prob)
self.out = layers.BiDAFOutput(hidden_size=hidden_size,
drop_prob=drop_prob)
def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
c_mask = torch.zeros_like(cw_idxs) != cw_idxs
q_mask = torch.zeros_like(qw_idxs) != qw_idxs
c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
c_emb = self.emb(cw_idxs, cc_idxs) # (batch_size, c_len, hidden_size)
q_emb = self.emb(qw_idxs, qc_idxs) # (batch_size, q_len, hidden_size)
c_enc = self.enc(c_emb, c_len) # (batch_size, c_len, 2 * hidden_size)
q_enc = self.enc(q_emb, q_len) # (batch_size, q_len, 2 * hidden_size)
att = self.att(c_enc, q_enc,
c_mask, q_mask) # (batch_size, c_len, 8 * hidden_size)
mod = self.mod(att, c_len) # (batch_size, c_len, 2 * hidden_size)
out = self.out(att, mod, c_mask) # 2 tensors, each (batch_size, c_len)
return out
| 41.723684 | 123 | 0.583097 |
5bdbfabb1867fb7ba328d7dd199b9ffa2be94092 | 13,897 | py | Python | subwabbit/base.py | Venousek/subwabbit | fc04616a51e7d13982f6d462de2c0b17150231fb | ["BSD-3-Clause"] | 12 | 2019-06-03T04:10:50.000Z | 2021-10-01T18:24:23.000Z | subwabbit/base.py | Venousek/subwabbit | fc04616a51e7d13982f6d462de2c0b17150231fb | ["BSD-3-Clause"] | 3 | 2019-10-17T17:57:03.000Z | 2021-10-02T07:48:55.000Z | subwabbit/base.py | Venousek/subwabbit | fc04616a51e7d13982f6d462de2c0b17150231fb | ["BSD-3-Clause"] | 7 | 2019-09-06T19:15:16.000Z | 2021-10-01T18:24:24.000Z |
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple, Iterable, Any, Optional, Union
class VowpalWabbitError(Exception):
pass
class VowpalWabbitBaseFormatter(ABC):
"""
Formatter translates structured information about context and items to
Vowpal Wabbit's input format: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format
It also can implement reverse translation, from Vowpal Wabbits feature names into human readable feature names.
"""
@abstractmethod
def format_common_features(self, common_features: Any,
debug_info: Any = None) -> str:
"""
Return part of VW line with features that are common for one call of predict/train.
This method will run just once per one call of
:class:`subwabbit.base.VowpalWabbitBaseModel`'s `predict()` or `train()` method.
:param common_features: Features common for all items
:param debug_info: Optional dict that can be filled by information useful for debugging
:return: Part of line that is common for each item in one call. Returned string has to start with '|' symbol.
"""
raise NotImplementedError()
@abstractmethod
def format_item_features(self, common_features: Any, item_features: Any,
debug_info: Any = None) -> str:
"""
Return part of VW line with features specific to each item.
This method will run for each item per one call of
:class:`subwabbit.base.VowpalWabbitBaseModel`'s `predict()` or `train()` method.
.. note::
It is a good idea to cache results of this method.
:param common_features: Features common for all items
:param item_features: Features for item
:param debug_info: Optional dict that can be filled by information useful for debugging
:return: Part of line that is specific for item. Depends on whether namespaces are used or not in
``format_common_features`` method:
- namespaces are used: returned string has to start with ``'|NAMESPACE_NAME'`` where `NAMESPACE_NAME`
is the name of some namespace
- namespaces are not used: returned string should not contain '|' symbol
"""
raise NotImplementedError()
# pylint: disable=too-many-arguments,no-self-use
def get_formatted_example(self, common_line_part: str, item_line_part: str,
label: Optional[float] = None, weight: Optional[float] = None,
debug_info: Optional[Dict[Any, Any]] = None): # pylint: disable=unused-argument
"""
Compose valid VW line from its common and item-dependent parts.
:param common_line_part: Part of line that is common for each item in one call.
:param item_line_part: Part of line specific for each item
:param label: Label of this row
:param weight: Optional weight of row
:param debug_info: Optional dict that can be filled by information useful for debugging
:return: One VW line in input format: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format
"""
if label is not None:
return ' '.join((
str(label),
str(weight) if weight is not None else '',
common_line_part,
item_line_part
))
return ' '.join((common_line_part, item_line_part))
def get_human_readable_explanation(self, explanation_string: str,
feature_translator: Any = None) -> List[Dict]:
"""
Transform explanation string into more readable form.
Every feature used for prediction is translated into this structure:
.. code-block:: python
{
# For each feature used in higher interaction there is a 2-tuple
'names': [('Human readable namespace name 1', 'Human readable feature name 1'), ...],
'original_feature_name': 'c^c8*f^f102' # feature name how vowpal sees it,
'hashindex': 123, # Vowpal's internal hash of feature name
'value': 0.123, # value for feature in input line
'weight': -0.534, # weight learned by VW for this feature
'potential': value * weight,
'relative_potential': abs(potential) / sum_of_abs_potentials_for_all_features
}
:param explanation_string: Explanation string from :func:`~VowpalWabbitBaseModel.explain_vw_line`
:param feature_translator: Any object that can help you with translation of feature names into human readable
form, for example some database connection.
See :func:`~VowpalWabbitBaseFormatter.parse_element`
:return: List of dicts, sorted by contribution to final score
"""
parsed_features = []
potential_sum = 0.0
for feature in [f.split(':') for f in explanation_string.split('\t')]:
feature_name = feature[0]
hash_index = feature[1]
value = float(feature[2])
weight = float(feature[3].split('@')[0])
# quadratic and higher level interactions have multiple features for one weight
feature_name_parts = feature_name.split('*')
parsed_feature_name_parts = [self.parse_element(el, feature_translator) for el in feature_name_parts]
parsed_features.append({
'names': parsed_feature_name_parts,
'original_feature_name': feature_name,
'hashindex': int(hash_index),
'value': value,
'weight': weight,
'potential': value * weight
})
potential_sum += abs(value * weight)
if potential_sum == 0:
# can happen in case all features are unknown
potential_sum = 1
for parsed_feature in parsed_features:
parsed_feature['relative_potential'] = abs(parsed_feature['potential'] / potential_sum) # type: ignore
return list(sorted(parsed_features, key=lambda f: f['relative_potential'], reverse=True))
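    # Example (format as implied by the parsing above; values are illustrative only):
    # an explanation_string such as
    #     'c^c8*f^f102:4633:1.0:-0.534@0\tu^u42:911:0.5:0.25@0'
    # yields two entries; the first has names=[('c', 'c8'), ('f', 'f102')],
    # hashindex=4633, value=1.0, weight=-0.534 and potential=-0.534.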
# pylint: disable=invalid-name
def get_human_readable_explanation_html(self, explanation_string: str, feature_translator: Any = None,
max_rows: Optional[int] = None):
"""
Visualize importance of features in Jupyter notebook.
:param explanation_string: Explanation string from :func:`~VowpalWabbitBaseModel.explain_vw_line`
:param feature_translator: Any object that can help you with translation, e.g. some database connection.
:param max_rows: Maximum number of most important features. None return all used features.
:return: `IPython.core.display.HTML`
"""
try:
from IPython.core.display import HTML
except ImportError:
raise ImportError('Please install IPython to use this method')
explanation = self.get_human_readable_explanation(explanation_string, feature_translator)
rows = []
for row_number, feature in enumerate(explanation):
if max_rows is not None and (row_number + 1) > max_rows:
break
feature_name = ''
for name in feature['names']:
if feature_name:
feature_name += '''
<span style="color: grey; margin-left: 10px; margin-right: 10px;">IN COMBINATION WITH</span>
'''
feature_name += name[0]
feature_name += ': <i>{}</i>'.format(name[1])
rows.append(
'''
<tr>
<td>
<div style="display: block; width: 100px; border: solid 1px;
-webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px;">
<div style="display: block; width: {width}%; height: 20px; background-color: {color};
overflow: hidden;"></div>
</div>
</td>
<td>{potential:.4f}</td>
<td>
{feature_value:.4f}
</td>
<td>
{feature_weight:.4f}
</td>
<td>
{feature_name}
</td>
</tr>
'''.format(
width=feature['relative_potential'] * 100,
color='green' if feature['potential'] > 0 else 'red',
potential=feature['potential'],
feature_value=feature['value'],
feature_weight=feature['weight'],
feature_name=feature_name
)
)
return HTML('''
<table>
<thead>
<tr>
<th>Relative potential</th>
<th>Potential</th>
<th>Value</th>
<th>Weight</th>
<th>Feature name</th>
</tr>
</thead>
<tbody>
''' + ''.join(rows) + '''
</tbody>
</table>''')
# pylint: disable=unused-argument,no-self-use
def parse_element(self, element: str, feature_translator: Any = None) -> Tuple[str, str]:
"""
This method is supposed to translate namespace name and feature name to human readable form.
For example, element can be "a_item_id^i123" and result can be ('Item ID', 'News of the day: ID of item is 123')
:param element: namespace name and feature name, e.g. a_item_id^i123
:param feature_translator: Any object that can help you with translation, e.g. some database connection
:return: tuple(human understandable namespace name, human understandable feature name)
"""
splitted = element.split('^')
if len(splitted) == 1:
return '', splitted[0]
return splitted[0], splitted[1]
class VowpalWabbitDummyFormatter(VowpalWabbitBaseFormatter):
"""
    Formatter that assumes that both common features and item features are already formatted VW input format strings.
"""
def format_common_features(self, common_features: str,
debug_info: Optional[Dict[Any, Any]] = None) -> str:
return common_features
def format_item_features(self, common_features: Any, item_features: str,
debug_info: Optional[Dict[Any, Any]] = None) -> str:
return item_features
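# Illustrative usage sketch (not part of the original module); the feature strings
# below are made-up examples, not values from any real model:
#
#     formatter = VowpalWabbitDummyFormatter()
#     common = formatter.format_common_features('|user age:25')
#     item = formatter.format_item_features(None, '|item price:9.99')
#     formatter.get_formatted_example(common, item, label=1.0, weight=0.5)
#     # -> '1.0 0.5 |user age:25 |item price:9.99'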
class VowpalWabbitBaseModel(ABC):
"""
Declaration of Vowpal Wabbit model interface.
"""
def __init__(self, formatter: VowpalWabbitBaseFormatter):
self.formatter = formatter
super().__init__()
# pylint: disable=too-many-arguments
@abstractmethod
def predict(
self,
common_features: Any,
items_features: Iterable[Any],
timeout: Optional[float] = None,
debug_info: Any = None,
metrics: Optional[Dict] = None,
detailed_metrics: Optional[Dict] = None
) -> Iterable[Union[float,str]]:
"""
Transforms iterable with item features to iterator of predictions.
:param common_features: Features common for all items
:param items_features: Iterable with features for each item
:param timeout: Optionally specify how much time in seconds is desired for computing predictions.
            If a timeout is passed, the returned iterator can have fewer items than the items features iterable.
:param debug_info: Some object that can be filled by information useful for debugging.
:param metrics: Optional dict that is populated with some metrics that are good to monitor.
:param detailed_metrics: Optional dict with more detailed (and more time consuming) metrics that are good
for debugging and profiling.
:return: Iterable with predictions for each item from ``items_features``
"""
raise NotImplementedError()
# pylint: disable=too-many-arguments
@abstractmethod
def train(
self,
common_features: Any,
items_features: Iterable[Any],
labels: Iterable[float],
weights: Iterable[Optional[float]],
debug_info: Any = None
) -> None:
"""
Transform features, label and weight into VW line format and send it to Vowpal.
:param common_features: Features common for all items
:param items_features: Iterable with features for each item
:param labels: Iterable with same length as items features with label for each item
:param weights: Iterable with same length as items features with optional weight for each item
:param debug_info: Some object that can be filled by information useful for debugging
"""
raise NotImplementedError()
@abstractmethod
def explain_vw_line(self, vw_line: str, link_function: bool = False):
"""
Uses VW audit mode to inspect weights used for prediction. Audit mode has to be turned on
by passing ``audit_mode=True`` to constructor.
:param vw_line: String in VW line format
:param link_function: If your model use link function, pass True
:return: (raw prediction without use of link function, explanation string)
"""
raise NotImplementedError()
| 45.415033 | 120 | 0.593941 |
16c92621e4747e30ba53fb4c1306c488f52ef2ef | 1,290 | py | Python | textGen.py | JackToaster/Reassuring-Parable-Generator | 50a86793dfe81337c457a2ee373cfeb71af98c4a | ["MIT"] | 47 | 2017-06-12T03:51:15.000Z | 2021-06-15T04:59:55.000Z | textGen.py | bigDonJuan/Reassuring-Parable-Generator | 50a86793dfe81337c457a2ee373cfeb71af98c4a | ["MIT"] | 2 | 2017-07-11T18:56:06.000Z | 2017-07-26T02:44:39.000Z | textGen.py | bigDonJuan/Reassuring-Parable-Generator | 50a86793dfe81337c457a2ee373cfeb71af98c4a | ["MIT"] | 5 | 2017-06-12T07:17:40.000Z | 2021-03-14T00:11:50.000Z |
import time
from config import config
def fixFormat(input_string):
return input_string
#evaluate and replace a string
def evaluatePhrase(inputString, config):
if inputString.find('{') == -1:
return inputString
else:
index1 = inputString.find('{')
index2 = inputString.find('}')
key = inputString[index1 + 1:index2]
#if the key is !, it's a subject
if key == '!':
phrase = config.get_subject()
else:
phrase = config.get_phrase(key)
inputString = inputString[:index1] + phrase + inputString[index2 + 1:]
inputString = fixFormat(inputString)
return evaluatePhrase(inputString, config)
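# Illustrative example (not from the original project): assuming a hypothetical config
# where get_phrase('starter') returns 'Do not worry, {!} will {action}.',
# get_subject() returns 'the compiler' and get_phrase('action') returns 'forgive you',
# evaluatePhrase expands the placeholders one at a time:
#   'Do not worry, {!} will {action}.'
#   -> 'Do not worry, the compiler will {action}.'
#   -> 'Do not worry, the compiler will forgive you.'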
#generate a phrase
def gen_phrase(config):
output_string = config.get_phrase('starter')
return evaluatePhrase(output_string, config)
filename = input("Enter config filename to use for text generation:")
loaded_config = config(filename)
number_of_outputs = int(input('Enter number of strings to generate:'))
current_time = time.time()
for i in range(0,number_of_outputs):
loaded_config.create_subjects()
print(gen_phrase(loaded_config))
time_dif = time.time() - current_time
print('Generated ' + str(number_of_outputs) + ' strings in ' + str(time_dif) + " seconds.")
| 29.318182 | 91 | 0.68062 |
c63b4459240ef0c1227f2dbef865bd2309e344cb | 1,606 | py | Python | workspacePrior/ParsedTwitterScripts/AddRetweetsBotCSV.py | sai6kiran/TwitterBotFarms | cf6bfddda9fac1e27477186fd4f4b086ac711781 | ["MIT"] | null | null | null | workspacePrior/ParsedTwitterScripts/AddRetweetsBotCSV.py | sai6kiran/TwitterBotFarms | cf6bfddda9fac1e27477186fd4f4b086ac711781 | ["MIT"] | null | null | null | workspacePrior/ParsedTwitterScripts/AddRetweetsBotCSV.py | sai6kiran/TwitterBotFarms | cf6bfddda9fac1e27477186fd4f4b086ac711781 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import csv
import pandas as pd
import time
import pdb #Debugger Library
#Load main csv file into a pandas dataframe. The dataframe is essentially an array containing sub dataframes. The sub dataframes contain 2000 tweets [rows] each. This is done using "chunksize" = 2000.
df = pd.read_csv("/root/.encrypted/.pythonSai/ira_tweets_csv_hashed.csv", sep=",", header=None, usecols=[0,1,18,19,20], chunksize=2000, skiprows=1, names=["tweetid","userid","is_retweet","retweet_userid","retweet_tweetid"])
#Create a new dataframe that will only contain the tweets [rows] that were retweets from the main file. Each tweet [row] has the originator tweet's tweet id, the originator tweet's userid, its own tweet id, its own userid, and is_retweet, a boolean [True, False] indicating whether the tweet was a retweet.
df_lst = pd.DataFrame(columns=["tweetid","userid","is_retweet","retweet_userid","retweet_tweetid"])
pd.set_option('display.max_columns', 100)
#Iterating through each sub dataframe inside main dataframe.
for df_ in df:
#pdb.set_trace() #To start debugger
#Log start time of parsing a sub dataframe of 2000 tweets.
t0 = time.time()
#Map all tweets that were retweets to new dataframe.
    df_lst = df_.loc[df_["is_retweet"].map(lambda x: x == True)]  # keep only the rows that are retweets
#Write the new dataframe to a csv file.
df_lst.to_csv('my_parsed.csv', mode='a', header=False)
#Log end time of parsing the sub dataframe of 2000 tweets.
t1 = time.time()
#print total time completed to run iteration.
print(t1-t0)
| 51.806452 | 298 | 0.729141 |
ca31d138d1d8a59348afc47059befc8b1266f420 | 9,247 | py | Python | tests/pytests/test_issues.py | bhagathsagar/RediSearch | 6baec33dc4cc594947c074768991bcf353fe07db | ["MIT", "Ruby", "Apache-2.0", "BSD-3-Clause"] | null | null | null | tests/pytests/test_issues.py | bhagathsagar/RediSearch | 6baec33dc4cc594947c074768991bcf353fe07db | ["MIT", "Ruby", "Apache-2.0", "BSD-3-Clause"] | null | null | null | tests/pytests/test_issues.py | bhagathsagar/RediSearch | 6baec33dc4cc594947c074768991bcf353fe07db | ["MIT", "Ruby", "Apache-2.0", "BSD-3-Clause"] | null | null | null |
from common import getConnectionByEnv, waitForIndex, sortedResults, toSortedFlatList
def test_1282(env):
env.expect('FT.CREATE idx ON HASH SCHEMA txt1 TEXT').equal('OK')
env.expect('FT.ADD idx doc1 1.0 FIELDS txt1 foo').equal('OK')
# optional search for new word would crash server
env.expect('FT.SEARCH idx', '~foo').equal([1L, 'doc1', ['txt1', 'foo']])
env.expect('FT.SEARCH idx', '~bar ~foo').equal([1L, 'doc1', ['txt1', 'foo']])
def test_1304(env):
env.expect('FT.CREATE idx SCHEMA txt1 TEXT').equal('OK')
env.expect('FT.EXPLAIN idx -20*').equal('PREFIX{-20*}\n')
env.expect('FT.EXPLAIN idx -\\20*').equal('NOT{\n PREFIX{20*}\n}\n')
def test_1414(env):
env.skipOnCluster()
env.expect('FT.CREATE idx SCHEMA txt1 TEXT').equal('OK')
env.expect('ft.add idx doc 1 fields foo hello bar world').ok()
env.expect('ft.search idx * limit 0 1234567').error().contains('LIMIT exceeds maximum of 1000000')
env.expect('FT.CONFIG set MAXSEARCHRESULTS -1').equal('OK')
env.expect('ft.search idx * limit 0 1234567').equal([1L, 'doc', ['foo', 'hello', 'bar', 'world']])
def test_1502(env):
conn = getConnectionByEnv(env)
conn.execute_command('HSET', 'a', 'bar', 'hello')
env.expect('FT.CREATE idx1 SKIPINITIALSCAN SCHEMA foo TEXT').ok()
env.expect('FT.CREATE idx2 SKIPINITIALSCAN SCHEMA foo TEXT').ok()
env.expect('ft.search idx1 *').equal([0L])
env.expect('ft.search idx2 *').equal([0L])
env.expect('FT.ALTER idx1 SKIPINITIALSCAN SCHEMA ADD bar TEXT').ok()
env.expect('FT.ALTER idx2 SCHEMA ADD bar TEXT').ok()
waitForIndex(env, 'idx2')
env.expect('ft.search idx1 *').equal([0L])
env.expect('ft.search idx2 *').equal([1L, 'a', ['bar', 'hello']])
def test_1601(env):
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx:movie', 'SCHEMA', 'title', 'TEXT')
conn.execute_command('HSET', 'movie:1', 'title', 'Star Wars: Episode I - The Phantom Menace')
conn.execute_command('HSET', 'movie:2', 'title', 'Star Wars: Episodes II - Attack of the Clones')
conn.execute_command('HSET', 'movie:3', 'title', 'Star Wars: Episode III - Revenge of the Sith')
res = env.cmd('ft.search idx:movie @title:(episode) withscores nocontent')
env.assertEqual(res[0], 3L)
def testMultiSortby(env):
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't1', 'TEXT', 'SORTABLE', 't2', 'TEXT', 'SORTABLE', 't3', 'TEXT', 'SORTABLE')
conn.execute_command('FT.ADD', 'idx', '1', '1', 'FIELDS', 't1', 'foo', 't2', 'bar', 't3', 'baz')
conn.execute_command('FT.ADD', 'idx', '2', '1', 'FIELDS', 't1', 'bar', 't2', 'foo', 't3', 'baz')
sortby_t1 = [2L, '2', '1']
sortby_t2 = [2L, '1', '2']
env.expect('ft.search idx foo nocontent sortby t1 asc').equal(sortby_t1)
env.expect('ft.search idx foo nocontent sortby t2 asc').equal(sortby_t2)
env.expect('ft.search idx foo nocontent sortby t1 sortby t3').error()\
.contains('Multiple SORTBY steps are not allowed. Sort multiple fields in a single step')
#TODO: allow multiple sortby steps
#env.expect('ft.search idx foo nocontent sortby t1 sortby t3').equal(sortby_t1)
#env.expect('ft.search idx foo nocontent sortby t2 sortby t3').equal(sortby_t2)
def test_1667(env):
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'tag', 'TAG', 'text', 'TEXT')
env.expect('ft.search idx @tag:{a}').equal([0L])
env.expect('ft.search idx @tag:{b}').equal([0L])
conn.execute_command('HSET', 'doc', 'tag', 'a,b')
conn.execute_command('HSET', 'doc1', 'tag', 'abc')
# test single stopword
env.expect('ft.search idx @tag:{a}').equal([1L, 'doc', ['tag', 'a,b']])
env.expect('ft.search idx @tag:{b}').equal([1L, 'doc', ['tag', 'a,b']])
env.expect('ft.search idx @tag:{c}').equal([0L])
# test stopword in list
env.expect('ft.search idx @tag:{a|c}').equal([1L, 'doc', ['tag', 'a,b']])
env.expect('ft.search idx @tag:{c|a}').equal([1L, 'doc', ['tag', 'a,b']])
env.expect('ft.search idx @tag:{c|a|c}').equal([1L, 'doc', ['tag', 'a,b']])
# test stopword with prefix
env.expect('ft.search idx @tag:{ab*}').equal([1L, 'doc1', ['tag', 'abc']])
env.expect('ft.search idx @tag:{abc*}').equal([1L, 'doc1', ['tag', 'abc']])
# ensure regular text field
conn.execute_command('HSET', 'doc_a', 'text', 'a')
conn.execute_command('HSET', 'doc_b', 'text', 'b')
env.expect('ft.search idx a').equal([0L])
env.expect('ft.search idx b').equal([1L, 'doc_b', ['text', 'b']])
def test_MOD_865(env):
conn = getConnectionByEnv(env)
args_list = ['FT.CREATE', 'idx', 'SCHEMA']
for i in range(1025):
args_list.extend([i, 'NUMERIC', 'SORTABLE'])
env.expect(*args_list).error().contains('Schema is limited to 1024 fields')
env.expect('FT.DROPINDEX', 'idx')
args_list = ['FT.CREATE', 'idx', 'SCHEMA']
for i in range(129):
args_list.extend([i, 'TEXT'])
env.expect(*args_list).error().contains('Schema is limited to 128 TEXT fields')
env.expect('FT.DROPINDEX', 'idx')
args_list = ['FT.CREATE', 'idx', 'SCHEMA']
for i in range(2):
args_list.extend(['txt', 'TEXT'])
env.expect(*args_list).error().contains('Duplicate field in schema - txt')
env.expect('FT.DROPINDEX', 'idx')
def test_issue1826(env):
# Stopword query is case sensitive.
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT')
conn.execute_command('HSET', 'doc', 't', 'boy with glasses')
env.expect('FT.SEARCH', 'idx', 'boy with glasses').equal([1L, 'doc', ['t', 'boy with glasses']])
env.expect('FT.SEARCH', 'idx', 'boy With glasses').equal([1L, 'doc', ['t', 'boy with glasses']])
def test_issue1834(env):
# Stopword query is case sensitive.
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT')
conn.execute_command('HSET', 'doc', 't', 'hell hello')
env.expect('FT.SEARCH', 'idx', 'hell|hello', 'HIGHLIGHT').equal([1L, 'doc', ['t', '<b>hell</b> <b>hello</b>']])
def test_issue1880(env):
    # the order of child iterators in an intersection is optimized by a cost function
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CONFIG', 'SET', '_PRINT_PROFILE_CLOCK', 'false')
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT')
conn.execute_command('HSET', 'doc1', 't', 'hello world')
conn.execute_command('HSET', 'doc2', 't', 'hello')
    expected_res = ['Type', 'INTERSECT', 'Counter', 1L, 'Child iterators',
['Type', 'TEXT', 'Term', 'world', 'Counter', 1L, 'Size', 1L],
['Type', 'TEXT', 'Term', 'hello', 'Counter', 1L, 'Size', 2L]]
res1 = env.cmd('FT.PROFILE', 'idx', 'SEARCH', 'QUERY', 'hello world')
res2 = env.cmd('FT.PROFILE', 'idx', 'SEARCH', 'QUERY', 'world hello')
# both queries return `world` iterator before `hello`
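    # (the child with the smaller 'Size', i.e. fewer matching docs, is placed
    # first so the intersection scans as few candidates as possible)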
    env.assertEqual(res1[1][3][1], expected_res)
    env.assertEqual(res2[1][3][1], expected_res)
# test with a term which does not exist
    expected_res = ['Type', 'INTERSECT', 'Counter', 0L, 'Child iterators',
None,
['Type', 'TEXT', 'Term', 'world', 'Counter', 0L, 'Size', 1L],
['Type', 'TEXT', 'Term', 'hello', 'Counter', 0L, 'Size', 2L]]
res3 = env.cmd('FT.PROFILE', 'idx', 'SEARCH', 'QUERY', 'hello new world')
    env.assertEqual(res3[1][3][1], expected_res)
def test_issue1932(env):
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT')
env.expect('FT.AGGREGATE', 'idx', '*', 'LIMIT', '100000000000000000', '100000000000', 'SORTBY', '1', '@t').error() \
.contains('OFFSET exceeds maximum of 1000000')
def test_issue1988(env):
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT')
conn.execute_command('HSET', 'doc1', 't', 'foo')
env.expect('FT.SEARCH', 'idx', 'foo').equal([1L, 'doc1', ['t', 'foo']])
env.expect('FT.SEARCH', 'idx', 'foo', 'WITHSCORES').equal([1L, 'doc1', '1', ['t', 'foo']])
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY' , 't').equal([1L, 'doc1', ['t', 'foo']])
env.expect('FT.SEARCH', 'idx', 'foo', 'WITHSCORES', 'SORTBY' , 't').equal([1L, 'doc1', '1', ['t', 'foo']])
def test_MOD1266(env):
# Test parsing failure
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'n1', 'NUMERIC', 'SORTABLE', 'n2', 'NUMERIC', 'SORTABLE')
conn.execute_command('HSET', 'doc1', 'n1', '1', 'n2', '1')
conn.execute_command('HSET', 'doc2', 'n1', '2', 'n2', '2')
conn.execute_command('HSET', 'doc2', 'n1', 'foo', 'n2', '-999')
conn.execute_command('HSET', 'doc3', 'n1', '3', 'n2', '3')
env.expect('FT.SEARCH', 'idx', '*', 'sortby', 'n2', 'DESC', 'RETURN', '1', 'n2') \
.equal([2L, 'doc3', ['n2', '3'], 'doc1', ['n2', '1']])
# Test fetching failure. An object cannot be indexed
conn.execute_command('FT.CREATE', 'jsonidx', 'ON', 'JSON', 'SCHEMA', '$.t', 'TEXT')
conn.execute_command('JSON.SET', '1', '$', r'{"t":"Redis"}')
env.expect('FT.SEARCH', 'jsonidx', '*').equal([1L, '1', ['$', '{"t":"Redis"}']])
env.expect('FT.SEARCH', 'jsonidx', 'redis').equal([1L, '1', ['$', '{"t":"Redis"}']])
conn.execute_command('JSON.SET', '1', '$.t', r'{"inner_t":"Redis"}')
env.expect('FT.SEARCH', 'jsonidx', '*').equal([0L])
| 48.161458 | 130 | 0.622364 |
14fc46c8980b1dbc95155fe5277b9f23aed5bd3c | 6,304 | py | Python | acs_test_suites/OTC/libs/testlib/base/base_step.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | ["Apache-2.0"] | 8 | 2018-09-14T01:34:01.000Z | 2021-07-01T02:00:23.000Z | acs_test_suites/OTC/libs/testlib/base/base_step.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | ["Apache-2.0"] | 3 | 2019-09-10T11:39:50.000Z | 2019-10-10T08:26:22.000Z | acs_test_suites/OTC/libs/testlib/base/base_step.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | ["Apache-2.0"] | 9 | 2018-10-11T15:14:03.000Z | 2021-02-17T11:37:20.000Z |
#!/usr/bin/env python
"""
Copyright (C) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import os
import sys
from testlib.base.base_utils import Resolution, BlockingError, FailedError, TimeoutError
from testlib.utils import logger
# Resolution, BlockingError, FailedError are moved to testlib.base.base_util.py for maintenance
class step(object):
"""
Generic class for a test step
Should be extended by each test step
Members:
        blocking -- step is blocking or not. Defaults to False
critical -- test step importance. Defaults to True
if True Test will stop at this step
passm -- message printed to STDOUT if step is Passed
errorm -- message printed to STDOUT in case of error
step_data -- used to pass data to next steps
will be returned by execute_step method
resolution -- one of PASS, FAILED, BLOCKED, TIMEOUT
verbose -- verbose mode for script issues investigation.
Defaults to False
"""
blocking = False
critical = True
step_data = None
resolution = None
verbose = False
logger = None
with_flush = False
def __init__(self, debug_info=True, **kwargs):
super(step, self).__init__()
self.passm = self.__class__.__name__ + " - [PASSED]"
self.errorm = self.__class__.__name__ + " - [FAILED]"
self.debug_info = debug_info
if "blocking" in kwargs:
self.blocking = kwargs['blocking']
if "critical" in kwargs:
self.critical = kwargs['critical']
if "print_error" in kwargs:
self.errorm = self.errorm + " -> " + kwargs['print_error']
if "print_pass" in kwargs:
self.passm = kwargs['print_pass']
if "verbose" in kwargs:
self.verbose = kwargs['verbose']
if "with_flush" in kwargs:
self.with_flush = kwargs['with_flush']
if "logger" in kwargs:
self.logger = kwargs['logger']
else:
if "LOG_PATH" in os.environ:
self.testlib_log_path = os.environ["LOG_PATH"]
else:
import testlib
self.testlib_log_path = os.path.dirname(testlib.__file__) + "/logs/"
self.logger = logger.testlib_log(log_path=self.testlib_log_path, log_name="testlib_default")
def __call__(self, **kwargs):
"""will execute step"""
return self.execute_step()
def execute_step(self):
"""
method to be called for each step
check is optional
"""
try:
self.do()
self.check()
except TimeoutError, e:
self.resolution = Resolution.TIMEOUT
raise e
except Exception:
if self.debug_info:
self.log_error_info()
raise
return self.step_data
def do(self):
"""must overwrite this method to implement an action"""
raise NotImplementedError('Must implement "do" method for each step')
def check(self):
"""
it does the verification defined in check_condition
on verification failure -->
- if blocking it will raise a BlockingError
- if failed it will:
- raise a FailedError for critical steps
- print an error message for non critical steps
"""
check_result = self.check_condition()
if check_result is not None:
if check_result:
self.resolution = Resolution.PASS
if self.logger:
self.logger.info(self.passm)
else:
print self.passm
else:
if self.blocking:
self.resolution = Resolution.BLOCKED
if self.logger:
self.logger.error(self.errorm)
raise BlockingError(self.errorm)
else:
self.resolution = Resolution.FAIL
if self.critical:
if self.logger:
self.logger.error(self.errorm)
raise FailedError(self.errorm)
else:
if self.debug_info:
self.log_error_info()
if self.logger:
self.logger.error(self.errorm)
else:
print self.errorm
if self.with_flush:
sys.stdout.flush()
else:
self.resolution = Resolution.PASS
self.passm = "[ Unchecked ] " + self.passm
if self.logger:
self.logger.info(self.passm)
else:
print self.passm
def log_error_info(self):
"""
Overwrite this method to save test artifacts in case of failed tests.
"""
return None
def check_condition(self):
"""
overwrite this method to return test step specific verification
should return True or False
if not overwritten check will not be performed
"""
return None
def set_passm(self, pass_string):
"""
Helps you customize pass message
Example:
step.set_passm("OK")
"""
self.passm = self.__class__.__name__ + " {0} - [PASSED]".format(pass_string)
def set_errorm(self, particular_string, error_string):
"""
Helps you customize error message
Example:
step.set_errorm("OK", "Pressing OK button failed")
"""
self.errorm = self.__class__.__name__ + " {0} - [FAILED] -> {1}".format(particular_string, error_string)
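# Illustrative sketch (not part of the original module): a minimal concrete step.
# Only the `step` base class above is assumed; the directory path is made up.
#
#     class dir_exists_step(step):
#         """Passes when the given directory exists on the host."""
#         def __init__(self, dir_path, **kwargs):
#             self.dir_path = dir_path
#             super(dir_exists_step, self).__init__(**kwargs)
#             self.set_errorm(dir_path, "directory is missing")
#         def do(self):
#             self.step_data = os.path.isdir(self.dir_path)
#         def check_condition(self):
#             return self.step_data
#
#     dir_exists_step("/tmp", critical=True)()  # raises FailedError if /tmp is missing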
| 34.637363 | 112 | 0.577253 |
48436779d8b00b815c6ec79abd982f3b01a3dc18 | 17,797 | py | Python | marconi/queues/storage/base.py | andrew-vant/marconi | 9afe7ba27efe597cca8baac77fd0aa387c75001a | ["Apache-2.0"] | null | null | null | marconi/queues/storage/base.py | andrew-vant/marconi | 9afe7ba27efe597cca8baac77fd0aa387c75001a | ["Apache-2.0"] | null | null | null | marconi/queues/storage/base.py | andrew-vant/marconi | 9afe7ba27efe597cca8baac77fd0aa387c75001a | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the DriverBase abstract class for Marconi storage drivers."""
import abc
import six
DEFAULT_QUEUES_PER_PAGE = 10
DEFAULT_MESSAGES_PER_PAGE = 10
DEFAULT_SHARDS_PER_PAGE = 10
DEFAULT_MESSAGES_PER_CLAIM = 10
@six.add_metaclass(abc.ABCMeta)
class DriverBase(object):
"""Base class for both data and control plane drivers
:param conf: Configuration containing options for this driver.
:type conf: `oslo.config.ConfigOpts`
:param cache: Cache instance to use for reducing latency
for certain lookups.
:type cache: `marconi.common.cache.backends.BaseCache`
"""
def __init__(self, conf, cache):
self.conf = conf
self.cache = cache
@six.add_metaclass(abc.ABCMeta)
class DataDriverBase(DriverBase):
"""Interface definition for storage drivers.
Data plane storage drivers are responsible for implementing the
core functionality of the system.
Connection information and driver-specific options are
loaded from the config file or the shard catalog.
:param conf: Configuration containing options for this driver.
:type conf: `oslo.config.ConfigOpts`
:param cache: Cache instance to use for reducing latency
for certain lookups.
:type cache: `marconi.common.cache.backends.BaseCache`
"""
def __init__(self, conf, cache):
super(DataDriverBase, self).__init__(conf, cache)
@abc.abstractmethod
def is_alive(self):
"""Check whether the storage is ready."""
raise NotImplementedError
@abc.abstractproperty
def queue_controller(self):
"""Returns the driver's queue controller."""
raise NotImplementedError
@abc.abstractproperty
def message_controller(self):
"""Returns the driver's message controller."""
raise NotImplementedError
@abc.abstractproperty
def claim_controller(self):
"""Returns the driver's claim controller."""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class ControlDriverBase(DriverBase):
"""Interface definition for control plane storage drivers.
Storage drivers that work at the control plane layer allow one to
modify aspects of the functionality of the system. This is ideal
for administrative purposes.
Allows access to the shard registry through a catalogue and a
shard controller.
:param conf: Configuration containing options for this driver.
:type conf: `oslo.config.ConfigOpts`
:param cache: Cache instance to use for reducing latency
for certain lookups.
:type cache: `marconi.common.cache.backends.BaseCache`
"""
@abc.abstractproperty
def catalogue_controller(self):
"""Returns the driver's catalogue controller."""
raise NotImplementedError
@abc.abstractproperty
def shards_controller(self):
"""Returns storage's shard management controller."""
raise NotImplementedError
class ControllerBase(object):
"""Top-level class for controllers.
:param driver: Instance of the driver
instantiating this controller.
"""
def __init__(self, driver):
self.driver = driver
@six.add_metaclass(abc.ABCMeta)
class Queue(ControllerBase):
"""This class is responsible for managing queues.
Queue operations include CRUD, monitoring, etc.
Storage driver implementations of this class should
be capable of handling high workloads and huge
numbers of queues.
"""
@abc.abstractmethod
def list(self, project=None, marker=None,
limit=DEFAULT_QUEUES_PER_PAGE, detailed=False):
"""Base method for listing queues.
:param project: Project id
:param marker: The last queue name
:param limit: (Default 10) Max number of queues to return
:param detailed: Whether metadata is included
:returns: An iterator giving a sequence of queues
and the marker of the next page.
"""
raise NotImplementedError
@abc.abstractmethod
def get_metadata(self, name, project=None):
"""Base method for queue metadata retrieval.
:param name: The queue name
:param project: Project id
:returns: Dictionary containing queue metadata
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def create(self, name, project=None):
"""Base method for queue creation.
:param name: The queue name
:param project: Project id
:returns: True if a queue was created and False
if it was updated.
"""
raise NotImplementedError
@abc.abstractmethod
def exists(self, name, project=None):
"""Base method for testing queue existence.
:param name: The queue name
:param project: Project id
:returns: True if a queue exists and False
if it does not.
"""
raise NotImplementedError
@abc.abstractmethod
def set_metadata(self, name, metadata, project=None):
"""Base method for updating a queue metadata.
:param name: The queue name
:param metadata: Queue metadata as a dict
:param project: Project id
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, name, project=None):
"""Base method for deleting a queue.
:param name: The queue name
:param project: Project id
"""
raise NotImplementedError
@abc.abstractmethod
def stats(self, name, project=None):
"""Base method for queue stats.
:param name: The queue name
:param project: Project id
:returns: Dictionary with the
queue stats
"""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class Message(ControllerBase):
"""This class is responsible for managing message CRUD."""
@abc.abstractmethod
def list(self, queue, project=None, marker=None,
limit=DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None,
include_claimed=False):
"""Base method for listing messages.
:param queue: Name of the queue to get the
message from.
:param project: Project id
:param marker: Tail identifier
:param limit: (Default 10) Max number of messages to return.
:type limit: Maybe int
:param echo: (Default False) Boolean expressing whether
or not this client should receive its own messages.
:param client_uuid: A UUID object. Required when echo=False.
:param include_claimed: omit claimed messages from listing?
:type include_claimed: bool
:returns: An iterator giving a sequence of messages and
the marker of the next page.
"""
raise NotImplementedError
@abc.abstractmethod
def first(self, queue, project=None, sort=1):
"""Get first message in the queue (including claimed).
:param queue: Name of the queue to list
:param sort: (Default 1) Sort order for the listing. Pass 1 for
ascending (oldest message first), or -1 for descending (newest
message first).
:returns: First message in the queue, or None if the queue is
empty
"""
raise NotImplementedError
@abc.abstractmethod
def get(self, queue, message_id, project=None):
"""Base method for getting a message.
:param queue: Name of the queue to get the
message from.
:param project: Project id
:param message_id: Message ID
:returns: Dictionary containing message data
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def bulk_get(self, queue, message_ids, project=None):
"""Base method for getting multiple messages.
:param queue: Name of the queue to get the
message from.
:param project: Project id
:param message_ids: A sequence of message IDs.
:returns: An iterable, yielding dicts containing
message details
"""
raise NotImplementedError
@abc.abstractmethod
def post(self, queue, messages, client_uuid, project=None):
"""Base method for posting one or more messages.
Implementations of this method should guarantee
and preserve the order, in the returned list, of
incoming messages.
:param queue: Name of the queue to post message to.
:param messages: Messages to post to queue, an iterable
yielding 1 or more elements. An empty iterable
results in undefined behavior.
:param client_uuid: A UUID object.
:param project: Project id
:returns: List of message ids
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, queue, message_id, project=None, claim=None):
"""Base method for deleting a single message.
:param queue: Name of the queue to post
message to.
:param message_id: Message to be deleted
:param project: Project id
:param claim: Claim this message
belongs to. When specified, claim must
be valid and message_id must belong to
it.
"""
raise NotImplementedError
@abc.abstractmethod
def bulk_delete(self, queue, message_ids, project=None):
"""Base method for deleting multiple messages.
:param queue: Name of the queue to post
message to.
:param message_ids: A sequence of message IDs
to be deleted.
:param project: Project id
"""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class Claim(ControllerBase):
@abc.abstractmethod
def get(self, queue, claim_id, project=None):
"""Base method for getting a claim.
:param queue: Name of the queue this
claim belongs to.
:param claim_id: The claim id
:param project: Project id
:returns: (Claim's metadata, claimed messages)
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def create(self, queue, metadata, project=None,
limit=DEFAULT_MESSAGES_PER_CLAIM):
"""Base method for creating a claim.
:param queue: Name of the queue this
claim belongs to.
:param metadata: Claim's parameters
to be stored.
:param project: Project id
:param limit: (Default 10) Max number
of messages to claim.
:returns: (Claim ID, claimed messages)
"""
raise NotImplementedError
@abc.abstractmethod
def update(self, queue, claim_id, metadata, project=None):
"""Base method for updating a claim.
:param queue: Name of the queue this
claim belongs to.
:param claim_id: Claim to be updated
:param metadata: Claim's parameters
to be updated.
:param project: Project id
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, queue, claim_id, project=None):
"""Base method for deleting a claim.
:param queue: Name of the queue this
claim belongs to.
:param claim_id: Claim to be deleted
:param project: Project id
"""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class ShardsBase(ControllerBase):
"""A controller for managing shards."""
@abc.abstractmethod
def list(self, marker=None, limit=DEFAULT_SHARDS_PER_PAGE,
detailed=False):
"""Lists all registered shards.
:param marker: used to determine which shard to start with
:type marker: six.text_type
:param limit: (Default 10) Max number of results to return
:type limit: int
:param detailed: whether to include options
:type detailed: bool
:returns: A list of shards - name, weight, uri
:rtype: [{}]
"""
raise NotImplementedError
@abc.abstractmethod
def create(self, name, weight, uri, options=None):
"""Registers a shard entry.
:param name: The name of this shard
:type name: six.text_type
:param weight: the likelihood that this shard will be used
:type weight: int
:param uri: A URI that can be used by a storage client
(e.g., pymongo) to access this shard.
:type uri: six.text_type
:param options: Options used to configure this shard
:type options: dict
"""
raise NotImplementedError
@abc.abstractmethod
def get(self, name, detailed=False):
"""Returns a single shard entry.
:param name: The name of this shard
:type name: six.text_type
:param detailed: Should the options data be included?
:type detailed: bool
:returns: weight, uri, and options for this shard
:rtype: {}
:raises: ShardDoesNotExist if not found
"""
raise NotImplementedError
@abc.abstractmethod
def exists(self, name):
"""Returns a single shard entry.
:param name: The name of this shard
:type name: six.text_type
:returns: True if the shard exists
:rtype: bool
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, name):
"""Removes a shard entry.
:param name: The name of this shard
:type name: six.text_type
:rtype: None
"""
raise NotImplementedError
@abc.abstractmethod
def update(self, name, **kwargs):
"""Updates the weight, uris, and/or options of this shard
:param name: Name of the shard
:type name: text
:param kwargs: one of: `uri`, `weight`, `options`
:type kwargs: dict
:raises: ShardDoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def drop_all(self):
"""Deletes all shards from storage."""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class CatalogueBase(ControllerBase):
"""A controller for managing the catalogue. The catalogue is
    responsible for maintaining a mapping between project.queue
    entries and their shards.
"""
@abc.abstractmethod
def list(self, project):
"""Returns a list of queue entries from the catalogue associated with
this project.
:param project: The project to use when filtering through queue
entries.
:type project: six.text_type
:returns: [{'project': ..., 'queue': ..., 'shard': ...},]
:rtype: [dict]
"""
raise NotImplementedError
@abc.abstractmethod
def get(self, project, queue):
"""Returns the shard identifier for the queue registered under this
project.
:param project: Namespace to search for the given queue
:type project: six.text_type
:param queue: The name of the queue to search for
:type queue: six.text_type
:returns: {'shard': ...}
:rtype: dict
:raises: QueueNotMapped
"""
raise NotImplementedError
@abc.abstractmethod
def exists(self, project, queue):
"""Determines whether the given queue exists under project.
:param project: Namespace to check.
:type project: six.text_type
:param queue: str - Particular queue to check for
:type queue: six.text_type
:return: True if the queue exists under this project
:rtype: bool
"""
@abc.abstractmethod
def insert(self, project, queue, shard):
"""Creates a new catalogue entry, or updates it if it already existed.
:param project: str - Namespace to insert the given queue into
:type project: six.text_type
:param queue: str - The name of the queue to insert
:type queue: six.text_type
:param shard: shard identifier to associate this queue with
:type shard: six.text_type
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, project, queue):
"""Removes this entry from the catalogue.
:param project: The namespace to search for this queue
:type project: six.text_type
:param queue: The queue name to remove
:type queue: six.text_type
"""
raise NotImplementedError
@abc.abstractmethod
def update(self, project, queue, shards=None):
"""Updates the shard identifier for this queue
:param project: Namespace to search
:type project: six.text_type
:param queue: The name of the queue
:type queue: six.text_type
:param shards: The name of the shard where this project/queue lives.
:type shards: six.text_type
:raises: QueueNotMapped
"""
raise NotImplementedError
@abc.abstractmethod
def drop_all(self):
"""Drops all catalogue entries from storage."""
raise NotImplementedError
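# Illustrative sketch (not part of the original module): a minimal in-memory
# implementation of the Queue controller interface above. It only shows how the
# abstract methods map onto a trivial backing dict; no real Marconi driver works
# this way and the class name is made up.
class _InMemoryQueue(Queue):

    def __init__(self, driver):
        super(_InMemoryQueue, self).__init__(driver)
        self._queues = {}  # (project, name) -> metadata dict

    def list(self, project=None, marker=None,
             limit=DEFAULT_QUEUES_PER_PAGE, detailed=False):
        names = sorted(n for (p, n) in self._queues if p == project)
        if marker is not None:
            names = [n for n in names if n > marker]
        page = names[:limit]
        queues = ({'name': n} for n in page)
        # mimic the documented contract: a sequence of queues plus the next marker
        return iter([queues, page[-1] if page else None])

    def get_metadata(self, name, project=None):
        return self._queues[(project, name)]

    def create(self, name, project=None):
        created = (project, name) not in self._queues
        self._queues.setdefault((project, name), {})
        return created

    def exists(self, name, project=None):
        return (project, name) in self._queues

    def set_metadata(self, name, metadata, project=None):
        self._queues[(project, name)] = metadata

    def delete(self, name, project=None):
        self._queues.pop((project, name), None)

    def stats(self, name, project=None):
        return {'messages': {'free': 0, 'claimed': 0, 'total': 0}}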
| 31.222807 | 78 | 0.643704 |
1a09aaa9e9d157b76bb9c97209f91769a6afb1db | 1,759 | py | Python | wlanpi_commands/speedtest.py | nigelbowden/wlanpi-chat-bot | cedab0e83d6e33d47f66c1a3be202564f95ac408 | ["MIT"] | null | null | null | wlanpi_commands/speedtest.py | nigelbowden/wlanpi-chat-bot | cedab0e83d6e33d47f66c1a3be202564f95ac408 | ["MIT"] | null | null | null | wlanpi_commands/speedtest.py | nigelbowden/wlanpi-chat-bot | cedab0e83d6e33d47f66c1a3be202564f95ac408 | ["MIT"] | null | null | null |
from .command import Command
import os
import subprocess
from utils.emojis import graph, hour_glass
class Speedtest(Command):
def __init__(self, telegram_object, conf_obj):
super().__init__(telegram_object, conf_obj)
self.command_name = "speedtest"
def run(self, args_list):
# send status msg
chat_id = self.telegram_object.chat_id
self.telegram_object.send_msg("{} Running speedtest...please wait {}".format(graph(), hour_glass()), chat_id)
# perform speedtest
speedtest_info = []
speedtest_cmd = "speedtest | egrep -w \"Testing from|Download|Upload\" | sed -r 's/Testing from.*?\(/My IP: /g; s/\)\.\.\.//g; s/Download/D/g; s/Upload/U/g; s/bit\/s/bps/g'"
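        # The egrep/sed pipeline above keeps only the "Testing from", "Download" and
        # "Upload" lines of the speedtest CLI output and shortens them, so
        # speedtest_info typically ends up looking like (illustrative values only):
        #     ['My IP: 203.0.113.10', 'D: 94.50 Mbps', 'U: 19.80 Mbps']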
try:
speedtest_output = subprocess.check_output(speedtest_cmd, shell=True).decode().strip()
speedtest_info = speedtest_output.split('\n')
except subprocess.CalledProcessError as exc:
output = exc.output.decode()
error = ["Err: Speedtest error", output]
print(error)
return False
if len(speedtest_info) == 0:
speedtest_info.append("No output sorry")
return self._render(speedtest_info)
def help(self):
"""
Return the help page for this command
"""
short_msg = "Run an Ookla speedtest from probe."
long_msg = """Speedtest: This command runs a spedtest to the Ookla speedtest service from the probe.
It provides the upload and download speed from the probe to the Internet.
(This test requires your probe to have Internet access)"""
if self.display_mode == "compact":
return short_msg
else:
return long_msg
| 34.490196 | 181 | 0.630472 |
88edd213aca8b71e3060d9d6dd952f7b197bfab3 | 3,500 | py | Python | src/zsl/application/modules/web/web_context_module.py | AtteqCom/zsl | 0d418ef957c9780263b1031dbc59482cd974bc04 | ["MIT"] | 2 | 2017-05-17T08:08:52.000Z | 2019-03-25T00:24:51.000Z | src/zsl/application/modules/web/web_context_module.py | AtteqCom/zsl | 0d418ef957c9780263b1031dbc59482cd974bc04 | ["MIT"] | 100 | 2017-01-11T13:43:11.000Z | 2022-02-10T09:27:18.000Z | src/zsl/application/modules/web/web_context_module.py | AtteqCom/zsl | 0d418ef957c9780263b1031dbc59482cd974bc04 | ["MIT"] | 1 | 2017-05-10T10:27:01.000Z | 2017-05-10T10:27:01.000Z |
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import logging
from typing import Any
import click
from injector import Binder, provides, singleton
from zsl import Config, Zsl, inject
from zsl.application.initialization_context import InitializationContext
from zsl.application.modules.cli_module import ZslCli
from zsl.application.modules.context_module import DefaultContextModule, default_initializers
from zsl.application.modules.web.cors import CORS_CONFIGURATION_NAME, CORSConfiguration
from zsl.interface.web.performers.task import create_task_mapping
from zsl.utils.injection_helper import simple_bind
class WebInitializer(object):
"""Initialize the web application."""
@staticmethod
def initialize():
"""
        The imports are done inside this method on purpose, to avoid unwanted immediate
        initialization of the application objects. This way the initialization runs only
        when it is actually needed and has all the required resources available.
"""
from zsl.interface.web.performers.default import create_not_found_mapping
from zsl.interface.web.performers.resource import create_resource_mapping
create_not_found_mapping()
create_resource_mapping()
#: Initializers used in unit web applications
web_initializers = default_initializers + (WebInitializer,)
class WebCli(object):
@inject(zsl_cli=ZslCli)
def __init__(self, zsl_cli):
# type: (ZslCli) -> None
@zsl_cli.cli.group(help='Web related tasks.')
def web():
pass
@web.command(help="Run web server and serve the application")
@click.option('--host', '-h', help="host to bind to", default='127.0.0.1')
@click.option('--port', '-p', help="port to bind to", default=5000)
@inject(web_handler=WebHandler)
def run(web_handler, host, port):
# type: (WebHandler, str, int)->None
web_handler.run_web(host=host, port=port)
self._web = web
@property
def web(self):
return self._web
class WebHandler(object):
@inject(flask=Zsl)
def run_web(self, flask, host='127.0.0.1', port=5000, **options):
# type: (Zsl, str, int, **Any)->None
"""Alias for Flask.run"""
return flask.run(
host=flask.config.get('FLASK_HOST', host),
port=flask.config.get('FLASK_PORT', port),
debug=flask.config.get('DEBUG', False),
**options
)
class WebContextModule(DefaultContextModule):
"""Adds web application context to current configuration."""
def _create_context(self):
logging.getLogger(__name__).debug("Creating web context.")
return InitializationContext(initializers=web_initializers)
@provides(interface=WebCli, scope=singleton)
def provide_web_cli(self):
return WebCli()
@provides(interface=WebHandler, scope=singleton)
def provide_web_handler(self):
return WebHandler()
@provides(interface=CORSConfiguration, scope=singleton)
@inject(config=Config)
def provide_cors_configuration(self, config):
# type: (Config)->CORSConfiguration
return config.get(CORS_CONFIGURATION_NAME, CORSConfiguration())
def configure(self, binder):
# type: (Binder) -> None
super(WebContextModule, self).configure(binder)
simple_bind(binder, WebCli, singleton)
create_task_mapping()
| 33.980583 | 119 | 0.697143 |
fd5981b17d5f559e447a63bf1ddade523794b2db | 5,077 | py | Python | geosnap/tests/test_clusters.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | ["BSD-3-Clause"] | 14 | 2018-09-19T22:34:44.000Z | 2019-04-03T17:18:22.000Z | geosnap/tests/test_clusters.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | ["BSD-3-Clause"] | 55 | 2018-10-01T18:31:25.000Z | 2019-04-08T16:23:46.000Z | geosnap/tests/test_clusters.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | ["BSD-3-Clause"] | 5 | 2018-10-02T21:41:46.000Z | 2019-01-25T02:59:16.000Z |
from geosnap import Community
import numpy as np
from geosnap import DataStore
from numpy.testing import assert_array_equal, assert_array_almost_equal
reno = Community.from_census(msa_fips="39900", datastore=DataStore())
columns = [
"median_household_income",
"p_poverty_rate",
"p_unemployment_rate",
]
# Aspatial Clusters
def test_gm():
r = reno.cluster(columns=columns, method="gaussian_mixture", best_model=True)
assert len(r.gdf.gaussian_mixture.unique()) >= 5
def test_ward():
r = reno.cluster(columns=columns, method="ward")
assert len(r.gdf.ward.unique()) == 7
def test_spectral():
r = reno.cluster(columns=columns, method="spectral")
assert len(r.gdf.spectral.unique()) == 7
def test_kmeans():
r = reno.cluster(columns=columns, method="kmeans")
assert len(r.gdf.kmeans.unique()) == 7
def test_aff_prop():
r = reno.cluster(
columns=columns,
method="affinity_propagation",
cluster_kwargs=dict(preference=-100),
)
assert len(r.gdf.affinity_propagation.unique()) == 3
def test_hdbscan():
r = reno.cluster(columns=columns, method="hdbscan")
assert len(r.gdf.hdbscan.unique()) >= 4
def test_ward_pooling_unique():
r = reno.cluster(
columns=columns, method="ward", pooling="unique", model_colname="ward_unique"
)
labels = r.gdf.ward_unique.dropna().astype(str).values
assert_array_equal(
labels,
np.array(['5', '2', '5', '0', '0', '0', '0', '4', '4', '5', '4', '2', '0',
'0', '1', '5', '2', '5', '2', '0', '0', '1', '1', '0', '2', '1',
'1', '1', '0', '1', '1', '5', '5', '1', '4', '3', '3', '1', '1',
'4', '1', '5', '0', '0', '4', '4', '2', '4', '0', '0', '2', '1',
'0', '0', '5', '0', '1', '1', '0', '1', '0', '3', '1', '1', '5',
'4', '0', '5', '0', '0', '0', '0', '1', '2', '2', '1', '0', '0',
'1', '0', '4', '1', '4', '4', '4', '4', '0', '2', '1', '2', '4',
'4', '4', '4', '1', '0', '4', '0', '5', '5', '5', '2', '2', '0',
'2', '5', '0', '1', '1', '0', '4', '1', '0', '5', '0', '5', '5',
'1', '4', '5', '0', '3', '2', '5', '1', '3', '4', '4', '1', '1',
'0', '0', '2', '1', '4', '4', '3', '4', '4', '1', '2', '2', '1',
'3', '4', '1', '0', '0', '1', '0', '1', '4', '4', '2', '4', '3',
'5', '0', '4', '3', '5', '0', '1', '1', '1', '1', '5', '0', '1',
'1', '0', '0', '5', '1', '0', '1', '1', '2', '5', '1', '4', '4',
'3', '1', '3', '0', '0', '1', '3', '1', '2', '0', '0', '2', '2',
'1', '4', '2', '2', '2', '4', '5', '0', '1', '1', '1', '3', '0',
'1', '0', '0', '4', '3', '0', '0', '0', '4', '0', '4', '3', '1',
'0', '4', '0', '3'], dtype=object)
)
# Spatial Clusters
def test_spenc():
r = reno.regionalize(columns=columns, method="spenc")
assert len(r.gdf.spenc.unique()) >= 6
def test_maxp_count():
r = reno.regionalize(
columns=columns, method="max_p", region_kwargs=dict(initial=10)
)
assert len(r.gdf.max_p.unique()) >= 8
def test_maxp_thresh():
r = reno.regionalize(
columns=columns,
method="max_p",
region_kwargs=dict(initial=10),
threshold_variable="n_total_pop",
threshold=10000,
)
assert len(r.gdf.max_p.unique()) >= 8
def test_ward_spatial():
r = reno.regionalize(columns=columns, method="ward_spatial", n_clusters=7)
assert len(r.gdf.ward_spatial.unique()) == 8
def test_skater():
r = reno.regionalize(columns=columns, method="skater", n_clusters=10)
assert len(r.gdf.skater.unique()) == 11
def test_azp():
r = reno.regionalize(columns=columns, method="azp", n_clusters=7)
assert len(r.gdf.azp.unique()) == 8
# Test seeding
def test_seed():
# Ward is deterministic
np.random.seed(12345)
r = reno.cluster(columns=columns, method="ward")
card = r.gdf.groupby("ward").count()["geoid"].values
np.testing.assert_array_equal(card, [27, 83, 19, 51, 38, 7])
def test_random_state():
# no seeds
reno = Community.from_census(msa_fips="39900", datastore=DataStore())
r1 = reno.cluster(columns=columns, method="kmeans", n_clusters=5)
r2 = reno.cluster(columns=columns, method="kmeans", n_clusters=5)
card1 = r1.gdf.groupby("kmeans").count()["geoid"].values
card1.sort()
card2 = r2.gdf.groupby("kmeans").count()["geoid"].values
card2.sort()
# test that the cardinalities are different
np.testing.assert_raises(
AssertionError, np.testing.assert_array_equal, card1, card2
)
# seeds
reno = Community.from_census(msa_fips="39900", datastore=DataStore())
seed = 10
r1 = reno.cluster(columns=columns, method="kmeans", n_clusters=5, random_state=seed)
r2 = reno.cluster(columns=columns, method="kmeans", n_clusters=5, random_state=seed)
card1 = r1.gdf.groupby("kmeans").count()["geoid"].values
card1.sort()
card2 = r2.gdf.groupby("kmeans").count()["geoid"].values
card2.sort()
# test that the cardinalities are identical
np.testing.assert_array_equal(card1, card2)
| 30.584337 | 88 | 0.553673 |
5a1c1de93f29ecbb1fd93c684fcf1cef51cfa52e | 15,215 | py | Python | private/templates/CRMT/controllers.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | ["MIT"] | null | null | null | private/templates/CRMT/controllers.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | ["MIT"] | null | null | null | private/templates/CRMT/controllers.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from os import path
from urllib import urlencode
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import current, URL
from gluon.html import *
#from gluon.storage import Storage
from s3.s3filter import S3FilterForm, S3FilterString, S3OptionsFilter
from s3.s3resource import S3FieldSelector, S3URLQuery
from s3.s3summary import S3Summary
from s3.s3utils import s3_auth_user_represent_name, S3CustomController
THEME = "CRMT"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
T = current.T
db = current.db
s3db = current.s3db
request = current.request
response = current.response
s3 = response.s3
output = {}
output["title"] = response.title = current.deployment_settings.get_system_name()
# Map
auth = current.auth
is_logged_in = auth.is_logged_in()
callback = None
if is_logged_in:
# Show the User's Coalition's Polygon
org_group_id = auth.user.org_group_id
if org_group_id:
# Lookup Coalition Name
table = s3db.org_group
row = db(table.id == org_group_id).select(table.name,
limitby=(0, 1)
).first()
if row:
callback = '''S3.gis.show_map();
var layer,layers=S3.gis.maps.default_map.layers;
for(var i=0,len=layers.length;i<len;i++){
layer=layers[i];
if(layer.name=='%s'){layer.setVisibility(true)}}''' % row.name
if not callback:
# Show all Coalition Polygons
callback = '''S3.gis.show_map();
var layer,layers=S3.gis.maps.default_map.layers;
for(var i=0,len=layers.length;i<len;i++){
layer=layers[i];
if(layer.name=='All Coalitions'){layer.setVisibility(true)}}
'''
gis = current.gis
config = gis.get_config()
config.zoom = 8
map = gis.show_map(width=770,
height=295,
callback=callback,
catalogue_layers=True,
collapsed=True,
save=False,
)
output["map"] = map
# Description of available data
from s3db.cms import S3CMS
for item in response.menu:
item["cms"] = S3CMS.resource_content(module = item["c"],
resource = item["f"])
# Site Activity Log
resource = s3db.resource("s3_audit")
resource.add_filter(S3FieldSelector("~.method") != "delete")
orderby = "s3_audit.timestmp desc"
list_fields = ["id",
"method",
"user_id",
"tablename",
"record_id",
]
#current.deployment_settings.ui.customize_s3_audit()
db.s3_audit.user_id.represent = s3_auth_user_represent_name
list_id = "log"
datalist, numrows, ids = resource.datalist(fields=list_fields,
start=None,
limit=4,
list_id=list_id,
orderby=orderby,
layout=s3.render_log)
# Placeholder
filter_form = DIV(_class="filter_form")
if numrows == 0:
# Empty table or just no match?
from s3.s3crud import S3CRUD
table = resource.table
if "deleted" in table:
available_records = db(table.deleted != True)
else:
available_records = db(table._id > 0)
if available_records.select(table._id,
limitby=(0, 1)).first():
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_no_match"),
_class="empty")
else:
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_list_empty"),
_class="empty")
data = msg
else:
# Render the list
ajaxurl = URL(c="default", f="audit", args="datalist_f.dl")
popup_url = URL(c="default", f="audit", args="datalist.popup")
dl = datalist.html(ajaxurl=ajaxurl,
pagesize=4,
popup_url=popup_url,
popup_title=T("Updates"),
)
data = dl
if is_logged_in and org_group_id:
# Add a Filter
filter_widgets = [S3OptionsFilter("user_id$org_group_id",
label = "",
# Can't just use "" as this is then omitted from rendering
options = {"*": T("All"),
org_group_id: T("My Community"),
},
multiple = False,
),
]
filter_submit_url = URL(c="default", f="index")
filter_ajax_url = URL(c="default", f="audit", args=["filter.options"])
filter_form = S3FilterForm(filter_widgets,
filter_manager = False,
formstyle = filter_formstyle,
clear = False,
submit = True,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "%s-filter-form" % list_id
)
filter_form = filter_form.html(resource,
request.get_vars,
target=list_id,
)
output["updates"] = data
output["filter_form"] = filter_form
# Add JavaScript
appname = request.application
debug = s3.debug
scripts_append = s3.scripts.append
if debug:
# Infinite Scroll doesn't make sense here, but currently required by dataLists.js
scripts_append("/%s/static/scripts/jquery.infinitescroll.js" % appname)
scripts_append("/%s/static/scripts/jquery.viewport.js" % appname)
scripts_append("/%s/static/scripts/S3/s3.dataLists.js" % appname)
else:
scripts_append("/%s/static/scripts/S3/s3.dataLists.min.js" % appname)
self._view(THEME, "index.html")
return output
# =============================================================================
def filter_formstyle(row_id, label, widget, comment, hidden=False):
"""
Custom Formstyle for FilterForm
@param row_id: HTML id for the row
@param label: the label
@param widget: the form widget
@param comment: the comment
@param hidden: whether the row should initially be hidden or not
"""
if hidden:
_class = "advanced hide"
else:
_class= ""
if label:
return DIV(label, widget, _id=row_id, _class=_class)
else:
return DIV(widget, _id=row_id, _class=_class)
# =============================================================================
class filters(S3CustomController):
""" Custom controller to manage saved filters """
def __call__(self):
""" Main entry point """
# Authorization (user must be logged in)
auth = current.auth
permissions = auth.permission
if not auth.user:
permissions.fail()
fmt = permissions.format
if current.request.env.request_method == "POST" and fmt != "dl":
return self.update()
pe_id = auth.user.pe_id
s3 = current.response.s3
# Filter
f = S3FieldSelector("pe_id") == pe_id
s3.filter = f
# List Fields
current.s3db.configure("pr_filter",
list_fields = ["title",
"resource",
"url",
"query"],
list_layout = self.render_filter,
orderby = "resource")
# Page length
s3.dl_pagelength = 10
# Data list
current.request.args = ["datalist.%s" % fmt]
output = current.rest_controller("pr", "filter",
list_ajaxurl = URL(f="index",
args="filters.dl"))
# Title and view
T = current.T
if fmt != "dl":
output["title"] = T("Saved Filters")
self._view(THEME, "filters.html")
# Script for inline-editing of filter title
options = {"cssclass": "jeditable-input",
"tooltip": str(T("Click to edit"))}
script = '''$('.jeditable').editable('%s',%s)''' % \
(URL(args="filters"), json.dumps(options))
s3.jquery_ready.append(script)
return output
# -------------------------------------------------------------------------
@classmethod
def render_filter(cls, list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for 'Saved Filters'
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["pr_filter.id"]
item_class = "thumbnail"
raw = record._row
resource_name = raw["pr_filter.resource"]
resource = current.s3db.resource(resource_name)
T = current.T
# Resource title
crud_strings = current.response.s3.crud_strings.get(resource.tablename)
if crud_strings:
resource_name = crud_strings.title_list
else:
resource_name = string.capwords(resource.name, "_")
# Filter title
title = record["pr_filter.title"]
# Filter Query and Summary URLs
fstring = S3FilterString(resource, raw["pr_filter.query"])
query = fstring.represent()
links = cls.summary_urls(resource,
raw["pr_filter.url"],
fstring.get_vars)
actions = []
if links:
if "map" in links:
actions.append(A(I(" ", _class="icon icon-globe"),
_title=T("Open Map"),
_href=links["map"]))
if "table" in links:
actions.append(A(I(" ", _class="icon icon-list"),
_title=T("Open Table"),
_href=links["table"]))
if "chart" in links:
actions.append(A(I(" ", _class="icon icon-list"),
_title=T("Open Chart"),
_href=links["chart"]))
# Render the item
item = DIV(DIV(DIV(actions,
_class="action-bar fleft"),
SPAN(T("%(resource)s Filter") % \
dict(resource=resource_name),
_class="card-title"),
DIV(A(I(" ", _class="icon icon-remove-sign"),
_title=T("Delete this Filter"),
_class="dl-item-delete"),
_class="edit-bar fright"),
_class="card-header"),
DIV(DIV(H5(title,
_id="filter-title-%s" % record_id,
_class="media-heading jeditable"),
DIV(query),
_class="media-body"),
_class="media"),
_class=item_class,
_id=item_id)
return item
# -------------------------------------------------------------------------
def update(self):
""" Simple ajax method to update a saved filter title """
post_vars = current.request.post_vars
record_id = post_vars["id"].rsplit("-", 1)[-1]
new_title = post_vars["value"]
if new_title:
ftable = current.s3db.pr_filter
success = current.db(ftable.id==record_id) \
.update(title=new_title)
else:
success = False
if success:
return new_title
else:
raise HTTP(400)
# -------------------------------------------------------------------------
@staticmethod
def summary_urls(resource, url, filters):
"""
Construct the summary tabs URLs for a saved filter.
@param resource: the S3Resource
@param url: the filter page URL
@param filters: the filter GET vars
"""
links = {}
if not url:
return links
get_vars = S3URLQuery.parse_url(url)
get_vars.pop("t", None)
get_vars.pop("w", None)
get_vars.update(filters)
list_vars = []
for (k, v) in get_vars.items():
if v is None:
continue
values = v if type(v) is list else [v]
for value in values:
if value is not None:
list_vars.append((k, value))
base_url = url.split("?", 1)[0]
summary_config = S3Summary._get_config(resource)
tab_idx = 0
for section in summary_config:
if section.get("common"):
continue
section_id = section["name"]
tab_vars = list_vars + [("t", str(tab_idx))]
            links[section_id] = "%s?%s" % (base_url, urlencode(tab_vars))
tab_idx += 1
return links
# END =========================================================================
| 37.383292
| 108
| 0.455012
|
5a888c40b74a3b4e893bb69ea1069e0f5cec5f25
| 2,084
|
py
|
Python
|
src/raiden_libs/contract_info.py
|
karlb/raiden-services
|
6f77784395b15ffb612b47bd4c88268cdff01da2
|
[
"MIT"
] | null | null | null |
src/raiden_libs/contract_info.py
|
karlb/raiden-services
|
6f77784395b15ffb612b47bd4c88268cdff01da2
|
[
"MIT"
] | null | null | null |
src/raiden_libs/contract_info.py
|
karlb/raiden-services
|
6f77784395b15ffb612b47bd4c88268cdff01da2
|
[
"MIT"
] | null | null | null |
import sys
from typing import Dict, List, Optional, Tuple
import structlog
from eth_utils import decode_hex
from raiden.utils.typing import Address, BlockNumber, ChainID
from raiden_contracts.contract_manager import (
ContractManager,
contracts_precompiled_path,
get_contracts_deployment_info,
)
log = structlog.get_logger(__name__)
CONTRACT_MANAGER = ContractManager(contracts_precompiled_path())
def get_contract_addresses_and_start_block(
chain_id: ChainID,
contracts: List[str],
address_overwrites: Dict[str, Address],
    contracts_version: Optional[str] = None,
) -> Tuple[Dict[str, Address], BlockNumber]:
""" Returns contract addresses and start query block for a given chain and contracts version.
The default contracts can be overwritten by the additional parameters.
Args:
chain_id: The chain id to look for deployed contracts.
contracts: The list of contracts which should be considered
address_overwrites: Dict of addresses which should be used instead of
the ones in the requested deployment.
contracts_version: The version of the contracts to use.
Returns: A dictionary with the contract addresses and start block for the given information
"""
contract_data = get_contracts_deployment_info(chain_id=chain_id, version=contracts_version)
if not contract_data:
log.error(
"No deployed contracts were found at the default registry",
contracts_version=contracts_version,
)
sys.exit(1)
# Get deployed addresses for those contracts which have no overwrites
addresses = {
c: address_overwrites.get(c, Address(decode_hex(contract_data["contracts"][c]["address"])))
for c in contracts
}
# Set start block to zero if any contract addresses are overwritten
if any(address_overwrites.values()):
start_block = BlockNumber(0)
else:
start_block = BlockNumber(
max(0, min(contract_data["contracts"][c]["block_number"] for c in contracts))
)
return addresses, start_block
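# A minimal usage sketch (not part of the original module). The chain id and
# contract name below are illustrative assumptions; real callers would pass the
# constants provided by raiden_contracts:
#
#     addresses, start_block = get_contract_addresses_and_start_block(
#         chain_id=ChainID(5),
#         contracts=["TokenNetworkRegistry"],
#         address_overwrites={},
#     )
#
# Note that if any entry of ``address_overwrites`` is set, ``start_block`` is
# forced to 0, since the deployment block of an overwritten address is unknown.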
| 34.733333
| 99
| 0.721209
|
1fd6fd94690cc893e6df64d26e6776ddc66866b1
| 5,451
|
py
|
Python
|
selectable/base.py
|
zeehio/django-selectable
|
40c44559320a98229091b56c034e145eb77e24af
|
[
"BSD-2-Clause"
] | null | null | null |
selectable/base.py
|
zeehio/django-selectable
|
40c44559320a98229091b56c034e145eb77e24af
|
[
"BSD-2-Clause"
] | null | null | null |
selectable/base.py
|
zeehio/django-selectable
|
40c44559320a98229091b56c034e145eb77e24af
|
[
"BSD-2-Clause"
] | null | null | null |
"Base classes for lookup creation."
from __future__ import unicode_literals
import json
import operator
import re
from functools import reduce
from django.conf import settings
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.db.models import Q
from django.utils.encoding import smart_text
from django.utils.html import conditional_escape
from django.utils.translation import ugettext as _
from selectable.forms import BaseLookupForm
__all__ = (
'LookupBase',
'ModelLookup',
)
class JsonResponse(HttpResponse):
"HttpResponse subclass for returning JSON data."
def __init__(self, *args, **kwargs):
kwargs['content_type'] = 'application/json'
super(JsonResponse, self).__init__(*args, **kwargs)
class LookupBase(object):
"Base class for all django-selectable lookups."
form = BaseLookupForm
response = JsonResponse
def _name(cls):
app_name = cls.__module__.split('.')[-2].lower()
class_name = cls.__name__.lower()
name = '%s-%s' % (app_name, class_name)
return name
name = classmethod(_name)
def _url(cls):
return reverse('selectable-lookup', args=[cls.name()])
url = classmethod(_url)
def get_query(self, request, term):
return []
def get_item_label(self, item):
return smart_text(item)
def get_item_id(self, item):
return smart_text(item)
def get_item_value(self, item):
return smart_text(item)
def get_item(self, value):
return value
def create_item(self, value):
        raise NotImplementedError()
def format_item(self, item):
"Construct result dictionary for the match item."
result = {
'id': self.get_item_id(item),
'value': self.get_item_value(item),
'label': self.get_item_label(item),
}
for key in settings.SELECTABLE_ESCAPED_KEYS:
if key in result:
result[key] = conditional_escape(result[key])
return result
def paginate_results(self, results, options):
"Return a django.core.paginator.Page of results."
limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT)
paginator = Paginator(results, limit)
page = options.get('page', 1)
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
return results
def results(self, request):
"Match results to given term and return the serialized HttpResponse."
results = {}
form = self.form(request.GET)
if form.is_valid():
options = form.cleaned_data
term = options.get('term', '')
raw_data = self.get_query(request, term)
results = self.format_results(raw_data, options)
content = self.serialize_results(results)
return self.response(content)
def format_results(self, raw_data, options):
'''
Returns a python structure that later gets serialized.
raw_data
full list of objects matching the search term
options
a dictionary of the given options
'''
page_data = self.paginate_results(raw_data, options)
results = {}
meta = options.copy()
meta['more'] = _('Show more results')
if page_data and page_data.has_next():
meta['next_page'] = page_data.next_page_number()
if page_data and page_data.has_previous():
meta['prev_page'] = page_data.previous_page_number()
results['data'] = [self.format_item(item) for item in page_data.object_list]
results['meta'] = meta
return results
def serialize_results(self, results):
"Returns serialized results for sending via http."
return json.dumps(results, cls=DjangoJSONEncoder, ensure_ascii=False)
class ModelLookup(LookupBase):
"Lookup class for easily defining lookups based on Django models."
model = None
filters = {}
search_fields = ()
def get_query(self, request, term):
qs = self.get_queryset()
if term:
search_filters = []
if self.search_fields:
for field in self.search_fields:
search_filters.append(Q(**{field: term}))
qs = qs.filter(reduce(operator.or_, search_filters))
return qs
def get_queryset(self):
try:
qs = self.model._default_manager.get_queryset()
except AttributeError: # Django <= 1.5.
qs = self.model._default_manager.get_query_set()
if self.filters:
qs = qs.filter(**self.filters)
return qs
def get_item_id(self, item):
return item.pk
def get_item(self, value):
item = None
if value:
try:
item = self.get_queryset().get(pk=value)
except (ValueError, self.model.DoesNotExist):
item = None
return item
def create_item(self, value):
data = {}
if self.search_fields:
field_name = re.sub(r'__\w+$', '', self.search_fields[0])
if field_name:
data = {field_name: value}
return self.model(**data)
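# A minimal subclass sketch (illustrative only; ``Book`` and its fields are
# assumptions, not part of this library):
#
#     class BookLookup(ModelLookup):
#         model = Book
#         search_fields = ('title__icontains', 'author__name__icontains')
#
# Each entry in ``search_fields`` is a Django field lookup; ``get_query`` above
# OR-combines them into a single queryset filter for the search term.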
| 30.971591
| 84
| 0.629242
|
7e5a23d85e5f844ea598e7a01274c0dc95b781aa
| 293
|
py
|
Python
|
domain/model/remote_account/remote_account.py
|
agiledragon/transfer-money-python
|
a8f7520c37f75e098623e58e269d3fccf11cb205
|
[
"MIT"
] | 5
|
2019-08-07T05:53:39.000Z
|
2021-11-18T07:02:02.000Z
|
domain/model/remote_account/remote_account.py
|
agiledragon/transfer-money-python
|
a8f7520c37f75e098623e58e269d3fccf11cb205
|
[
"MIT"
] | null | null | null |
domain/model/remote_account/remote_account.py
|
agiledragon/transfer-money-python
|
a8f7520c37f75e098623e58e269d3fccf11cb205
|
[
"MIT"
] | 5
|
2019-08-07T05:53:53.000Z
|
2020-05-18T16:45:54.000Z
|
from domain.model.base.aggregate_root import AggregateRoot
from domain.model.comm_role.account_info import AccountInfo
class RemoteAccount(AggregateRoot):
def __init__(self, account_id):
AggregateRoot.__init__(self, account_id)
self.account_info = AccountInfo(account_id)
| 36.625
| 59
| 0.795222
|
4aeac305a6f98a219baa617d4da0a5583920d685
| 1,803
|
py
|
Python
|
hiector/ssrdd/utils/box/bbox_np.py
|
sentinel-hub/hiector
|
95102c1fcfa63d127a389262e9d569e3aa3495cc
|
[
"MIT"
] | 3
|
2022-03-15T11:19:27.000Z
|
2022-03-24T15:59:49.000Z
|
hiector/ssrdd/utils/box/bbox_np.py
|
sentinel-hub/hiector
|
95102c1fcfa63d127a389262e9d569e3aa3495cc
|
[
"MIT"
] | null | null | null |
hiector/ssrdd/utils/box/bbox_np.py
|
sentinel-hub/hiector
|
95102c1fcfa63d127a389262e9d569e3aa3495cc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# File : bbox_np.py
# Author : Kai Ao
# Email : capino627@163.com
# Date : 2020/12/12 11:08
#
# This file is part of Rotation-Decoupled Detector.
# https://github.com/Capino512/pytorch-rotation-decoupled-detector
# Distributed under MIT License.
import numpy as np
def bbox_switch(bbox, in_type, out_type): # 'xyxy', 'xywh'
if in_type == "xyxy" and out_type == "xywh":
bbox = np.concatenate([(bbox[..., 0:2] + bbox[..., 2:4]) / 2, bbox[..., 2:4] - bbox[..., 0:2]], axis=-1)
elif in_type == "xywh" and out_type == "xyxy":
bbox = np.concatenate([bbox[..., 0:2] - bbox[..., 2:4] / 2, bbox[..., 0:2] + bbox[..., 2:4] / 2], axis=-1)
return bbox
def xywha2xy4(xywha): # a represents the angle(degree), clockwise, a=0 along the X axis
x, y, w, h, a = xywha
corner = np.array([[-w / 2, -h / 2], [w / 2, -h / 2], [w / 2, h / 2], [-w / 2, h / 2]])
a = np.deg2rad(a)
transform = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])
return transform.dot(corner.T).T + [x, y]
def xy42xywha(xy4, flag=0): # bbox(4x2) represents a rectangle
# flag=0, 0 <= a < 180
# flag=1, 0 <= a < 180, w >= h
# flag=2, -45 <= a < 45
x, y = np.mean(xy4, axis=0)
diff01 = xy4[0] - xy4[1]
diff03 = xy4[0] - xy4[3]
w = np.sqrt(np.square(diff01).sum())
h = np.sqrt(np.square(diff03).sum())
if w >= h:
a = np.rad2deg(np.arctan2(diff01[1], diff01[0]))
else:
a = np.rad2deg(np.arctan2(diff03[1], diff03[0])) + 90
if flag > 0:
if w < h:
w, h = h, w
a += 90
a = (a % 180 + 180) % 180
if flag > 1:
if 45 <= a < 135:
w, h = h, w
a -= 90
elif a >= 135:
a -= 180
return np.stack([x, y, w, h, a])
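# Quick round-trip sketch (added for illustration; the sample box values are
# arbitrary): convert an (x, y, w, h, angle) box to its four corners and back,
# expecting to recover roughly the same parameters with flag=1.
if __name__ == "__main__":
    sample = np.array([50.0, 40.0, 30.0, 10.0, 20.0])
    corners = xywha2xy4(sample)             # 4x2 array of corner coordinates
    recovered = xy42xywha(corners, flag=1)  # approximately [50, 40, 30, 10, 20]
    print(corners)
    print(recovered)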
| 32.781818
| 114
| 0.511925
|
852bd277c89bf5d8ea47e58b3428828d7ae66c42
| 2,561
|
py
|
Python
|
utilities/writer.py
|
Nniy/lark
|
09a9ee45006ebf07ad8abe6f03384cac7f407cc1
|
[
"Apache-2.0"
] | 1
|
2018-11-11T23:05:34.000Z
|
2018-11-11T23:05:34.000Z
|
utilities/writer.py
|
Nniy/lark
|
09a9ee45006ebf07ad8abe6f03384cac7f407cc1
|
[
"Apache-2.0"
] | null | null | null |
utilities/writer.py
|
Nniy/lark
|
09a9ee45006ebf07ad8abe6f03384cac7f407cc1
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os.path
import pretty_midi as pm
import numpy as np
import matplotlib.pyplot as plt
import h5py
np.set_printoptions(threshold=sys.maxsize)
def save_midi(piano_roll, filename):
midi_output = pm.PrettyMIDI()
guitar = pm.Instrument(program=25)
bass = pm.Instrument(program=33)
string = pm.Instrument(program=41)
instruments = [guitar, bass, string]
for i in range(piano_roll.shape[0]):
for j in range(piano_roll.shape[1]):
if piano_roll[i][j] < 0.8:
piano_roll[i][j] = 0
else:
piano_roll[i][j] = 1
piano_roll = np.array(np.hsplit(piano_roll, 3), dtype=np.int8)
# print(piano_roll[0])
# plt.imshow(piano_roll[0], cmap='gray')
# plt.show()
for i, track in enumerate(piano_roll):
for t, tick in enumerate(track):
for k, key in enumerate(tick):
if key == 1:
key_hold = 1
start_time = t
end_time = start_time + key_hold
while t + key_hold < track.shape[0]:
print(t + key_hold)
if track[t + key_hold][key] == 1:
key_hold += 1
end_time += 1
else:
break
# zero_interval = 0
# while (t + key_hold) < track.shape[0]:
# if track[t + key_hold][key] == 1:
# track[t + key_hold][key] = 0
# key_hold += 1
# end_time += 1
# elif track[t + key_hold][key] == 0 and zero_interval < 50 and end_time > 2:
# key_hold += 1
# end_time += 1
# zero_interval += 1
# else:
# break
note = pm.Note(velocity=100,
pitch=k,
start=start_time,
end=end_time)
instruments[i].notes.append(note)
midi_output.instruments.append(instruments[i])
midi_output.write(filename)
hf = h5py.File('../data/test_data.h5', 'r')
piano_roll_samples = np.array(hf.get('test'))
hf.close()
for p in range(piano_roll_samples.shape[0]):
save_midi(piano_roll_samples[p], '../midi%d.mid' % p)
| 33.697368
| 101
| 0.4631
|
a8ba74364bc2649f230de77ac57da9bb46893cf7
| 13,025
|
py
|
Python
|
tools/clusterfuzz/v8_foozzie_test.py
|
EXHades/v8
|
5fe0aa3bc79c0a9d3ad546b79211f07105f09585
|
[
"BSD-3-Clause"
] | 20,995
|
2015-01-01T05:12:40.000Z
|
2022-03-31T21:39:18.000Z
|
tools/clusterfuzz/v8_foozzie_test.py
|
Andrea-MariaDB-2/v8
|
a0f0ebd7a876e8cb2210115adbfcffe900e99540
|
[
"BSD-3-Clause"
] | 333
|
2020-07-15T17:06:05.000Z
|
2021-03-15T12:13:09.000Z
|
tools/clusterfuzz/v8_foozzie_test.py
|
Andrea-MariaDB-2/v8
|
a0f0ebd7a876e8cb2210115adbfcffe900e99540
|
[
"BSD-3-Clause"
] | 4,523
|
2015-01-01T15:12:34.000Z
|
2022-03-28T06:23:41.000Z
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import random
import subprocess
import sys
import unittest
import v8_commands
import v8_foozzie
import v8_fuzz_config
import v8_suppressions
try:
basestring
except NameError:
basestring = str
PYTHON3 = sys.version_info >= (3, 0)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
TEST_DATA = os.path.join(BASE_DIR, 'testdata')
KNOWN_BUILDS = [
'd8',
'clang_x86/d8',
'clang_x86_v8_arm/d8',
'clang_x64_v8_arm64/d8',
'clang_x64_pointer_compression/d8',
]
class ConfigTest(unittest.TestCase):
def testExperiments(self):
"""Test integrity of probabilities and configs."""
CONFIGS = v8_foozzie.CONFIGS
EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
FLAGS = v8_fuzz_config.ADDITIONAL_FLAGS
# Probabilities add up to 100%.
first_is_int = lambda x: type(x[0]) == int
assert all(map(first_is_int, EXPERIMENTS))
assert sum(x[0] for x in EXPERIMENTS) == 100
# Configs used in experiments are defined.
assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))
assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))
# The last config item points to a known build configuration.
assert all(map(lambda x: x[3] in KNOWN_BUILDS, EXPERIMENTS))
# All flags have a probability.
first_is_float = lambda x: type(x[0]) == float
assert all(map(first_is_float, FLAGS))
first_between_0_and_1 = lambda x: x[0] > 0 and x[0] < 1
assert all(map(first_between_0_and_1, FLAGS))
# Test consistent flags.
second_is_string = lambda x: isinstance(x[1], basestring)
assert all(map(second_is_string, FLAGS))
# We allow spaces to separate more flags. We don't allow spaces in the flag
# value.
is_flag = lambda x: x.startswith('--')
all_parts_are_flags = lambda x: all(map(is_flag, x[1].split()))
assert all(map(all_parts_are_flags, FLAGS))
def testConfig(self):
"""Smoke test how to choose experiments."""
config = v8_fuzz_config.Config('foo', random.Random(42))
experiments = [
[25, 'ignition', 'jitless', 'd8'],
[75, 'ignition', 'ignition', 'clang_x86/d8'],
]
flags = [
[0.1, '--flag'],
[0.3, '--baz'],
[0.3, '--foo --bar'],
]
self.assertEqual(
[
'--first-config=ignition',
'--second-config=jitless',
'--second-d8=d8',
'--second-config-extra-flags=--baz',
'--second-config-extra-flags=--foo',
'--second-config-extra-flags=--bar',
],
config.choose_foozzie_flags(experiments, flags),
)
self.assertEqual(
[
'--first-config=ignition',
'--second-config=jitless',
'--second-d8=d8',
],
config.choose_foozzie_flags(experiments, flags),
)
class UnitTest(unittest.TestCase):
def testCluster(self):
crash_test_example_path = 'CrashTests/path/to/file.js'
self.assertEqual(
v8_foozzie.ORIGINAL_SOURCE_DEFAULT,
v8_foozzie.cluster_failures(''))
self.assertEqual(
v8_foozzie.ORIGINAL_SOURCE_CRASHTESTS,
v8_foozzie.cluster_failures(crash_test_example_path))
self.assertEqual(
'_o_O_',
v8_foozzie.cluster_failures(
crash_test_example_path,
known_failures={crash_test_example_path: '_o_O_'}))
self.assertEqual(
'980',
v8_foozzie.cluster_failures('v8/test/mjsunit/apply.js'))
def testDiff(self):
def diff_fun(one, two, skip=False):
suppress = v8_suppressions.get_suppression(skip)
return suppress.diff_lines(one.splitlines(), two.splitlines())
one = ''
two = ''
diff = None, None
self.assertEqual(diff, diff_fun(one, two))
one = 'a \n b\nc();'
two = 'a \n b\nc();'
diff = None, None
self.assertEqual(diff, diff_fun(one, two))
# Ignore line before caret and caret position.
one = """
undefined
weird stuff
^
somefile.js: TypeError: suppressed message
undefined
"""
two = """
undefined
other weird stuff
^
somefile.js: TypeError: suppressed message
undefined
"""
diff = None, None
self.assertEqual(diff, diff_fun(one, two))
one = """
Still equal
Extra line
"""
two = """
Still equal
"""
diff = '- Extra line', None
self.assertEqual(diff, diff_fun(one, two))
one = """
Still equal
"""
two = """
Still equal
Extra line
"""
diff = '+ Extra line', None
self.assertEqual(diff, diff_fun(one, two))
one = """
undefined
somefile.js: TypeError: undefined is not a constructor
"""
two = """
undefined
otherfile.js: TypeError: undefined is not a constructor
"""
diff = """- somefile.js: TypeError: undefined is not a constructor
+ otherfile.js: TypeError: undefined is not a constructor""", None
self.assertEqual(diff, diff_fun(one, two))
# Test that skipping suppressions works.
one = """
v8-foozzie source: foo
weird stuff
^
"""
two = """
v8-foozzie source: foo
other weird stuff
^
"""
self.assertEqual((None, 'foo'), diff_fun(one, two))
diff = ('- ^\n+ ^', 'foo')
self.assertEqual(diff, diff_fun(one, two, skip=True))
def testOutputCapping(self):
def output(stdout, is_crash):
exit_code = -1 if is_crash else 0
return v8_commands.Output(exit_code=exit_code, stdout=stdout, pid=0)
def check(stdout1, stdout2, is_crash1, is_crash2, capped_lines1,
capped_lines2):
output1 = output(stdout1, is_crash1)
output2 = output(stdout2, is_crash2)
self.assertEqual(
(capped_lines1, capped_lines2),
v8_suppressions.get_output_capped(output1, output2))
# No capping, already equal.
check('1\n2', '1\n2', True, True, '1\n2', '1\n2')
# No crash, no capping.
check('1\n2', '1\n2\n3', False, False, '1\n2', '1\n2\n3')
check('1\n2\n3', '1\n2', False, False, '1\n2\n3', '1\n2')
# Cap smallest if all runs crash.
check('1\n2', '1\n2\n3', True, True, '1\n2', '1\n2')
check('1\n2\n3', '1\n2', True, True, '1\n2', '1\n2')
check('1\n2', '1\n23', True, True, '1\n2', '1\n2')
check('1\n23', '1\n2', True, True, '1\n2', '1\n2')
# Cap the non-crashy run.
check('1\n2\n3', '1\n2', False, True, '1\n2', '1\n2')
check('1\n2', '1\n2\n3', True, False, '1\n2', '1\n2')
check('1\n23', '1\n2', False, True, '1\n2', '1\n2')
check('1\n2', '1\n23', True, False, '1\n2', '1\n2')
# The crashy run has more output.
check('1\n2\n3', '1\n2', True, False, '1\n2\n3', '1\n2')
check('1\n2', '1\n2\n3', False, True, '1\n2', '1\n2\n3')
check('1\n23', '1\n2', True, False, '1\n23', '1\n2')
check('1\n2', '1\n23', False, True, '1\n2', '1\n23')
# Keep output difference when capping.
check('1\n2', '3\n4\n5', True, True, '1\n2', '3\n4')
check('1\n2\n3', '4\n5', True, True, '1\n2', '4\n5')
check('12', '345', True, True, '12', '34')
check('123', '45', True, True, '12', '45')
def cut_verbose_output(stdout, n_comp):
# This removes the first lines containing d8 commands of `n_comp` comparison
# runs.
return '\n'.join(stdout.split('\n')[n_comp * 2:])
def run_foozzie(second_d8_dir, *extra_flags, **kwargs):
second_config = 'ignition_turbo'
if 'second_config' in kwargs:
second_config = 'jitless'
kwargs = {}
if PYTHON3:
kwargs['text'] = True
return subprocess.check_output([
sys.executable, FOOZZIE,
'--random-seed', '12345',
'--first-d8', os.path.join(TEST_DATA, 'baseline', 'd8.py'),
'--second-d8', os.path.join(TEST_DATA, second_d8_dir, 'd8.py'),
'--first-config', 'ignition',
'--second-config', second_config,
os.path.join(TEST_DATA, 'fuzz-123.js'),
] + list(extra_flags), **kwargs)
class SystemTest(unittest.TestCase):
"""This tests the whole correctness-fuzzing harness with fake build
artifacts.
Overview of fakes:
baseline: Example foozzie output including a syntax error.
build1: Difference to baseline is a stack trace difference expected to
be suppressed.
build2: Difference to baseline is a non-suppressed output difference
causing the script to fail.
build3: As build1 but with an architecture difference as well.
"""
def testSyntaxErrorDiffPass(self):
stdout = run_foozzie('build1', '--skip-smoke-tests')
self.assertEqual('# V8 correctness - pass\n',
cut_verbose_output(stdout, 3))
# Default comparison includes suppressions.
self.assertIn('v8_suppressions.js', stdout)
# Default comparison doesn't include any specific mock files.
self.assertNotIn('v8_mock_archs.js', stdout)
self.assertNotIn('v8_mock_webassembly.js', stdout)
def testDifferentOutputFail(self):
with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
expected_output = f.read()
with self.assertRaises(subprocess.CalledProcessError) as ctx:
run_foozzie('build2', '--skip-smoke-tests',
'--first-config-extra-flags=--flag1',
'--first-config-extra-flags=--flag2=0',
'--second-config-extra-flags=--flag3')
e = ctx.exception
self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEqual(expected_output, cut_verbose_output(e.output, 2))
def testSmokeTest(self):
with open(os.path.join(TEST_DATA, 'smoke_test_output.txt')) as f:
expected_output = f.read()
with self.assertRaises(subprocess.CalledProcessError) as ctx:
run_foozzie('build2')
e = ctx.exception
self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEqual(expected_output, e.output)
def testDifferentArch(self):
"""Test that the architecture-specific mocks are passed to both runs when
we use executables with different architectures.
"""
# Build 3 simulates x86, while the baseline is x64.
stdout = run_foozzie('build3', '--skip-smoke-tests')
lines = stdout.split('\n')
# TODO(machenbach): Don't depend on the command-lines being printed in
# particular lines.
self.assertIn('v8_mock_archs.js', lines[1])
self.assertIn('v8_mock_archs.js', lines[3])
def testDifferentArchFailFirst(self):
"""Test that we re-test against x64. This tests the path that also fails
on x64 and then reports the error as x64.
"""
with open(os.path.join(TEST_DATA, 'failure_output_arch.txt')) as f:
expected_output = f.read()
# Build 3 simulates x86 and produces a difference on --bad-flag, but
# the baseline build shows the same difference when --bad-flag is passed.
with self.assertRaises(subprocess.CalledProcessError) as ctx:
run_foozzie('build3', '--skip-smoke-tests',
'--second-config-extra-flags=--bad-flag')
e = ctx.exception
self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEqual(expected_output, cut_verbose_output(e.output, 3))
def testDifferentArchFailSecond(self):
"""As above, but we test the path that only fails in the second (ia32)
run and not with x64 and then reports the error as ia32.
"""
with open(os.path.join(TEST_DATA, 'failure_output_second.txt')) as f:
expected_output = f.read()
# Build 3 simulates x86 and produces a difference on --very-bad-flag,
# which the baseline build doesn't.
with self.assertRaises(subprocess.CalledProcessError) as ctx:
run_foozzie('build3', '--skip-smoke-tests',
'--second-config-extra-flags=--very-bad-flag')
e = ctx.exception
self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEqual(expected_output, cut_verbose_output(e.output, 3))
def testJitless(self):
"""Test that webassembly is mocked out when comparing with jitless."""
stdout = run_foozzie(
'build1', '--skip-smoke-tests', second_config='jitless')
lines = stdout.split('\n')
# TODO(machenbach): Don't depend on the command-lines being printed in
# particular lines.
self.assertIn('v8_mock_webassembly.js', lines[1])
self.assertIn('v8_mock_webassembly.js', lines[3])
def testSkipSuppressions(self):
"""Test that the suppressions file is not passed when skipping
suppressions.
"""
# Compare baseline with baseline. This passes as there is no difference.
stdout = run_foozzie(
'baseline', '--skip-smoke-tests', '--skip-suppressions')
self.assertNotIn('v8_suppressions.js', stdout)
# Compare with a build that usually suppresses a difference. Now we fail
# since we skip suppressions.
with self.assertRaises(subprocess.CalledProcessError) as ctx:
run_foozzie(
'build1', '--skip-smoke-tests', '--skip-suppressions')
e = ctx.exception
self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertNotIn('v8_suppressions.js', e.output)
if __name__ == '__main__':
unittest.main()
| 34.640957
| 79
| 0.655125
|
959c74a05efc151fb41ded6466bcb7c4002dc3f9
| 1,669
|
py
|
Python
|
Funky Sentence Gen/Funky Sentence Generator.py
|
Software-Cat/Python-Mini-Projects
|
3e8a150758eddfc2b21a5e8c665fd6eca850e55d
|
[
"MIT"
] | null | null | null |
Funky Sentence Gen/Funky Sentence Generator.py
|
Software-Cat/Python-Mini-Projects
|
3e8a150758eddfc2b21a5e8c665fd6eca850e55d
|
[
"MIT"
] | null | null | null |
Funky Sentence Gen/Funky Sentence Generator.py
|
Software-Cat/Python-Mini-Projects
|
3e8a150758eddfc2b21a5e8c665fd6eca850e55d
|
[
"MIT"
] | null | null | null |
import random
adjectives = ["abandoned", "baffled", "cringy", "dazzling", "eccentric", "fancy", "generous", "happy", "ill", "jocose", "kind", "lazy", "magical", "naked",
"obstinate", "patriotic", "queasy", "raging", "savage", "talented", "unlucky", "vegetarian", "white", "xenophobic", "yawning", "zippy"]
executerNouns = ["apple", "banana", "cat", "dog", "elephatnt", "flamingo", "giraffe", "hippo", "iguana", "jellyfish", "kangaroo", "ladybug", "mammoth", "numbat",
"octopus", "panda", "quail", "rabbit", "snake", "teacher", "umpire", "vocalist", "whale", "xylophone", "yoga instructor", "zoologist"]
adverbs = ["accidentally", "beneficially", "chaotically", "doubtfully", "efficiently", "fearfullly", "gently", "hypocritically", "impulsively", "jealously", "keenly",
"loudly", "mysteriously", "naively", "obediently", "passionately", "quietly", "rationally", "sadly", "telepathically", "uncontrollably", "viciously",
"wildly", "xenophobically", "youthfully", "zealously"]
verbs = ["ate", "bent", "cleaned", "danced", "educated", "fabricated",
"grew", "hacked", "immobilized", "jumbled", "kicked"]
genders = ["his", "hers"]
subjectNouns = ["aquarium", "bandana", "cabbage"]
prepositions = ["with", "without", "in front of",
"behind", "next to", "under", "over"]
objects = ["aeroplane", "broom"]
print("The " + random.choice(adjectives) + " " + random.choice(executerNouns) + " " + random.choice(adverbs) + " " + random.choice(verbs) + " " +
random.choice(genders) + " " + random.choice(subjectNouns) + " " + random.choice(prepositions) + " a/an " + random.choice(objects) + ".")
| 75.863636
| 166
| 0.615339
|
98f9e6e2736f68e5ba5898d7aba94a096b0231eb
| 1,471
|
py
|
Python
|
bloggitt/core/urls.py
|
SubhradeepSS/Bloggitt
|
fb9131765519e55a19164b2637ad9d4821dbac5e
|
[
"MIT"
] | 33
|
2020-11-22T11:25:11.000Z
|
2022-01-06T20:05:14.000Z
|
bloggitt/core/urls.py
|
SubhradeepSS/Bloggitt
|
fb9131765519e55a19164b2637ad9d4821dbac5e
|
[
"MIT"
] | 72
|
2020-11-22T20:31:00.000Z
|
2021-08-16T00:47:17.000Z
|
bloggitt/core/urls.py
|
SubhradeepSS/Bloggitt
|
fb9131765519e55a19164b2637ad9d4821dbac5e
|
[
"MIT"
] | 39
|
2020-11-22T17:57:06.000Z
|
2021-05-31T05:13:26.000Z
|
from . import views
from django.urls import path
from .views import PostLikeToggle, PostLikeAPIToggle, ProfileUpdateView, ProfileView, PostUpdateView
urlpatterns = [
path('login/', views.loginUser, name='login'),
path('signup/', views.signup, name='signup'),
path('logout/', views.logoutUser, name='logout'),
path('favorites/', views.favorites, name='favorites'),
path('', views.postlist, name='home'),
path('blog/create', views.PostCreateView.as_view(), name='create_blog'),
path('post/like/<slug:slug>/', PostLikeToggle.as_view(), name='like-toggle'),
path('api/like/<slug:slug>/', PostLikeAPIToggle.as_view(), name='like-api-toggle'),
path('detail/<slug:slug>/', views.postdetail, name='post_detail'),
path('detail/<slug:slug>/Favourites', views.Favorites, name='Favorites'),
path('detail/<slug:slug>/update/', PostUpdateView.as_view(), name='post-update'),
path('fetch', views.fetch, name="fetch"),
path('profile-update/', ProfileUpdateView.as_view(), name='profile-update'),
path('profile/', ProfileView.as_view(), name='profile'),
path('about/',views.about,name='about'),
path('search/',views.search,name='search'),
path('tags/<slug:slug>/', views.posts_by_tag, name='posts_by_tag'),
]
from django.conf import settings
from django.conf.urls.static import static
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 50.724138
| 98
| 0.687288
|
3e54c328cb59e4a6c5c0c47007d72bbd0e99b973
| 15,659
|
py
|
Python
|
tests/test_overlap.py
|
opendatacube/odc-geo
|
4f9004720d899dff4bf0b208e4fe602790d055d7
|
[
"Apache-2.0"
] | 5
|
2021-12-22T01:32:24.000Z
|
2022-03-10T07:50:34.000Z
|
tests/test_overlap.py
|
opendatacube/odc-geo
|
4f9004720d899dff4bf0b208e4fe602790d055d7
|
[
"Apache-2.0"
] | 29
|
2022-01-11T08:48:23.000Z
|
2022-03-29T09:03:42.000Z
|
tests/test_overlap.py
|
opendatacube/odc-geo
|
4f9004720d899dff4bf0b208e4fe602790d055d7
|
[
"Apache-2.0"
] | 2
|
2022-01-26T23:20:34.000Z
|
2022-03-21T16:54:00.000Z
|
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import math
from random import uniform
import numpy as np
import pytest
from affine import Affine
from odc.geo import CRS, geom, resyx_, wh_, xy_
from odc.geo.geobox import GeoBox, scaled_down_geobox
from odc.geo.gridspec import GridSpec
from odc.geo.math import affine_from_pts, decompose_rws, is_affine_st, stack_xy
from odc.geo.overlap import (
LinearPointTransform,
ReprojectInfo,
_can_paste,
compute_axis_overlap,
compute_output_geobox,
compute_reproject_roi,
get_scale_at_point,
native_pix_transform,
)
from odc.geo.roi import (
roi_is_empty,
roi_normalise,
roi_shape,
scaled_down_roi,
scaled_up_roi,
)
from odc.geo.testutils import AlbersGS, epsg3577, epsg3857, epsg4326, mkA
def diff_affine(A: Affine, B: Affine) -> float:
return math.sqrt(sum((a - b) ** 2 for a, b in zip(A, B)))
def test_affine_checks():
assert is_affine_st(mkA(scale=(1, 2), translation=(3, -10))) is True
assert is_affine_st(mkA(scale=(1, -2), translation=(-3, -10))) is True
assert is_affine_st(mkA(rot=0.1)) is False
assert is_affine_st(mkA(shear=0.4)) is False
def test_affine_rsw():
def run_test(a, scale, shear=0, translation=(0, 0), tol=1e-8):
A = mkA(a, scale=scale, shear=shear, translation=translation)
R, W, S = decompose_rws(A)
assert diff_affine(A, R * W * S) < tol
assert diff_affine(S, mkA(0, scale)) < tol
assert diff_affine(R, mkA(a, translation=translation)) < tol
for a in (0, 12, 45, 33, 67, 89, 90, 120, 170):
run_test(a, (1, 1))
run_test(a, (0.5, 2))
run_test(-a, (0.5, 2))
run_test(a, (1, 2))
run_test(-a, (1, 2))
run_test(a, (2, -1))
run_test(-a, (2, -1))
run_test(0, (3, 4), 10)
run_test(-33, (3, -1), 10, translation=(100, -333))
def test_fit():
def run_test(A, n, tol=1e-5):
X = [xy_(uniform(0, 1), uniform(0, 1)) for _ in range(n)]
Y = [xy_(A * pt.xy) for pt in X]
A_ = affine_from_pts(X, Y)
assert diff_affine(A, A_) < tol
A = mkA(13, scale=(3, 4), shear=3, translation=(100, -3000))
run_test(A, 3)
run_test(A, 10)
run_test(mkA(), 3)
run_test(mkA(), 10)
def test_scale_at_point():
def mk_transform(sx, sy):
A = mkA(37, scale=(sx, sy), translation=(2127, 93891))
return LinearPointTransform(A)
tol = 1e-4
pt = xy_(0, 0)
for sx, sy in [(3, 4), (0.4, 0.333)]:
tr = mk_transform(sx, sy)
sx_, sy_ = get_scale_at_point(pt, tr).xy
assert abs(sx - sx_) < tol
assert abs(sy - sy_) < tol
sx_, sy_ = get_scale_at_point(pt, tr, 0.1).xy
assert abs(sx - sx_) < tol
assert abs(sy - sy_) < tol
def test_pix_transform():
pt = tuple(
int(x / 10) * 10
for x in geom.point(145, -35, epsg4326).to_crs(epsg3577).coords[0]
)
A = mkA(scale=(20, -20), translation=pt)
src = GeoBox((512, 1024), A, epsg3577)
dst = GeoBox.from_geopolygon(src.geographic_extent, resyx_(0.0001, -0.0001))
tr = native_pix_transform(src, dst)
pts_src = [xy_(0, 0), xy_(10, 20), xy_(300, 200)]
pts_dst = tr(pts_src)
pts_src_ = tr.back(pts_dst)
np.testing.assert_almost_equal(stack_xy(pts_src), stack_xy(pts_src_))
assert tr.linear is None
assert repr(tr).startswith("GbxPointTransform(")
# check identity transform
tr = native_pix_transform(src, src)
pts_src = [xy_(0, 0), xy_(10, 20), xy_(300, 200)]
pts_dst = tr(pts_src)
pts_src_ = tr.back(pts_dst)
np.testing.assert_almost_equal(stack_xy(pts_src), stack_xy(pts_src_))
np.testing.assert_almost_equal(stack_xy(pts_src), stack_xy(pts_dst))
assert tr.linear is not None
assert tr.back.linear is not None
assert tr.back.back is tr
assert repr(tr).startswith("LinearPointTransform(")
# check scale only change
tr = native_pix_transform(src, scaled_down_geobox(src, 2))
pts_dst = tr(pts_src)
pts_src_ = tr.back(pts_dst)
assert tr.linear is not None
assert tr.back.linear is not None
assert tr.back.back is tr
np.testing.assert_almost_equal(
stack_xy(pts_dst), [(pt.x / 2, pt.y / 2) for pt in pts_src]
)
np.testing.assert_almost_equal(stack_xy(pts_src), stack_xy(pts_src_))
def test_compute_reproject_roi():
src = AlbersGS.tile_geobox((15, -40))
dst = GeoBox.from_geopolygon(
src.extent.to_crs(epsg3857).buffer(10), resolution=src.resolution
)
rr = compute_reproject_roi(src, dst)
assert rr.roi_src == np.s_[0 : src.height, 0 : src.width]
assert 0 < rr.scale < 1
assert rr.transform.linear is None
assert rr.transform.back is not None
assert rr.transform.back.linear is None
# check pure translation case
roi_ = np.s_[113:-100, 33:-10]
rr = compute_reproject_roi(src, src[roi_])
assert rr.roi_src == roi_normalise(roi_, src.shape)
assert rr.scale == 1
rr = compute_reproject_roi(src, src[roi_], padding=0, align=0)
assert rr.roi_src == roi_normalise(roi_, src.shape)
assert rr.scale == 1
# check pure translation case
roi_ = np.s_[113:-100, 33:-10]
rr = compute_reproject_roi(src, src[roi_], align=256)
assert rr.roi_src == np.s_[0 : src.height, 0 : src.width]
assert rr.scale == 1
roi_ = np.s_[113:-100, 33:-10]
rr = compute_reproject_roi(src, src[roi_])
assert rr.scale == 1
assert roi_shape(rr.roi_src) == roi_shape(rr.roi_dst)
assert roi_shape(rr.roi_dst) == src[roi_].shape
# check pasteable zoom_out
dst = src.zoom_out(2)
rr = compute_reproject_roi(src, dst)
assert rr.paste_ok is True
assert rr.read_shrink == 2
assert roi_shape(rr.roi_src) == src.shape
assert roi_shape(rr.roi_dst) == dst.shape
# check non-pasteable zoom_out
dst = src[1:, :].zoom_out(2)
rr = compute_reproject_roi(src, dst)
assert rr.paste_ok is False
assert rr.read_shrink == 2
def test_compute_reproject_roi_paste():
src = GeoBox(
wh_(1000, 2000),
mkA(scale=(10, -10), translation=(10 * 123, -10 * 230)),
epsg3857,
)
def _check(src: GeoBox, dst: GeoBox, rr: ReprojectInfo):
assert rr.read_shrink >= 1
if roi_is_empty(rr.roi_src):
assert roi_is_empty(rr.roi_dst)
return
if rr.paste_ok:
if rr.read_shrink == 1:
assert roi_shape(rr.roi_src) == roi_shape(rr.roi_dst)
assert src[rr.roi_src].shape == dst[rr.roi_dst].shape
else:
# roi source must align to read scale
# => round-triping roi to overview and back should not change roi
assert (
scaled_up_roi(
scaled_down_roi(rr.roi_src, rr.read_shrink), rr.read_shrink
)
== rr.roi_src
)
src_ = src[rr.roi_src].zoom_out(rr.read_shrink)
assert src_.shape == dst[rr.roi_dst].shape
if rr.read_shrink == 1:
assert rr.scale <= 1.1
else:
assert rr.scale >= rr.read_shrink
if src.crs == dst.crs:
_src = src[rr.roi_src].extent
_dst = dst[rr.roi_dst].extent
else:
_src = src[rr.roi_src].geographic_extent
_dst = dst[rr.roi_dst].geographic_extent
assert _src.intersection(_dst).area > 0
def _yes(src: GeoBox, dst: GeoBox, **kw):
rr = compute_reproject_roi(src, dst, **kw)
assert rr.paste_ok is True
_check(src, dst, rr)
def _no_(src: GeoBox, dst: GeoBox, **kw):
rr = compute_reproject_roi(src, dst, **kw)
assert rr.paste_ok is False
_check(src, dst, rr)
t_ = Affine.translation
s_ = Affine.scale
# plain pixel aligned translation
_yes(src, src)
_yes(src, src[10:, 29:])
_yes(src[10:, 29:], src)
# subpixel translation below threshhold
_no_(src, src * t_(0.3, 0.3))
_yes(src, src * t_(0.3, 0.3), ttol=0.5)
_no_(src, src * t_(0.0, 0.1))
_yes(src, src * t_(0.0, 0.1), ttol=0.15)
_no_(src, src * t_(-0.1, 0.0))
_yes(src, src * t_(-0.1, 0.0), ttol=0.15)
# tiny scale deviations
_no_(src, src[20:, :30] * s_(1.003, 1.003))
_yes(src, src[20:, :30] * s_(1.003, 1.003), stol=0.01)
# integer shrink
_no_(src, src.zoom_out(2.3))
_yes(src, src.zoom_out(2))
_yes(src, src.zoom_out(3))
_yes(src, src.zoom_out(2 + 1e-5)) # rounding issues should not matter
_no_(src.zoom_out(3), src)
_no_(src.zoom_out(2), src)
# integer shrink but with sub-pixel translation after shrinking
_yes(src, src[4:, 8:].zoom_out(4))
_no_(src, src[2:, 8:].zoom_out(4))
_no_(src, src[8:, 3:].zoom_out(4))
_yes(src, src[8:, 3:].zoom_out(4), ttol=0.5)
def test_compute_reproject_roi_issue647():
"""In some scenarios non-overlapping geoboxes will result in non-empty
`roi_dst` even though `roi_src` is empty.
Test this case separately.
"""
src = GeoBox(
(10980, 10980), Affine(10, 0, 300000, 0, -10, 5900020), CRS("epsg:32756")
)
dst = GeoBox((976, 976), Affine(10, 0, 1730240, 0, -10, -4170240), CRS("EPSG:3577"))
assert src.extent.overlaps(dst.extent.to_crs(src.crs)) is False
rr = compute_reproject_roi(src, dst)
assert roi_is_empty(rr.roi_src)
assert roi_is_empty(rr.roi_dst)
def test_compute_reproject_roi_issue1047():
"""`compute_reproject_roi(geobox, geobox[roi])` sometimes returns
`src_roi != roi`, when `geobox` has (1) tiny pixels and (2) oddly
sized `alignment`.
Test this issue is resolved.
"""
geobox = GeoBox(
(3000, 3000),
Affine(
0.00027778, 0.0, 148.72673054908861, 0.0, -0.00027778, -34.98825802556622
),
"EPSG:4326",
)
src_roi = np.s_[2800:2810, 10:30]
rr = compute_reproject_roi(geobox, geobox[src_roi])
assert rr.paste_ok is True
assert rr.roi_src == src_roi
assert rr.roi_dst == np.s_[0:10, 0:20]
def test_compute_reproject_roi_overhang():
"""
Images with global coverage in epsg:4326 often have slightly
wrong georegistration that causes image boundaries to reach outside
of the [-180, -90, 180, 90] bounding box.
Reproject roi introduces clipping to deal with that issue.
"""
tol = 1e-3
src_geobox = GeoBox.from_bbox(
(-180 - tol, -90 - tol, 180 + tol, 90 + tol),
epsg4326,
shape=wh_(2000, 1000),
tight=True,
)
assert src_geobox.shape.wh == (2000, 1000)
assert src_geobox.extent.boundingbox[0] < -180
assert src_geobox.extent.boundingbox[1] < -90
assert src_geobox.extent.boundingbox[2] > +180
assert src_geobox.extent.boundingbox[3] > +90
dst_geobox = GridSpec.web_tiles(0)[0, 0]
rr = compute_reproject_roi(src_geobox, dst_geobox)
assert rr.paste_ok is False
assert dst_geobox[rr.roi_dst] == dst_geobox
def test_axis_overlap():
s_ = np.s_
# Source overlaps destination fully
#
# S: |<--------------->|
# D: |<----->|
assert compute_axis_overlap(100, 20, 1, 10) == s_[10:30, 0:20]
assert compute_axis_overlap(100, 20, 2, 10) == s_[10:50, 0:20]
assert compute_axis_overlap(100, 20, 0.25, 10) == s_[10:15, 0:20]
assert compute_axis_overlap(100, 20, -1, 80) == s_[60:80, 0:20]
assert compute_axis_overlap(100, 20, -0.5, 50) == s_[40:50, 0:20]
assert compute_axis_overlap(100, 20, -2, 90) == s_[50:90, 0:20]
# Destination overlaps source fully
#
# S: |<-------->|
# D: |<----------------->|
assert compute_axis_overlap(10, 100, 1, -10) == s_[0:10, 10:20]
assert compute_axis_overlap(10, 100, 2, -10) == s_[0:10, 5:10]
assert compute_axis_overlap(10, 100, 0.5, -10) == s_[0:10, 20:40]
assert compute_axis_overlap(10, 100, -1, 11) == s_[0:10, 1:11]
# Partial overlaps
#
# S: |<----------->|
# D: |<----------->|
assert compute_axis_overlap(10, 10, 1, 3) == s_[3:10, 0:7]
assert compute_axis_overlap(10, 15, 1, 3) == s_[3:10, 0:7]
# S: |<----------->|
# D: |<----------->|
assert compute_axis_overlap(10, 10, 1, -5) == s_[0:5, 5:10]
assert compute_axis_overlap(50, 10, 1, -5) == s_[0:5, 5:10]
# No overlaps
# S: |<--->|
# D: |<--->|
assert compute_axis_overlap(10, 10, 1, 11) == s_[10:10, 0:0]
assert compute_axis_overlap(10, 40, 1, 11) == s_[10:10, 0:0]
# S: |<--->|
# D: |<--->|
assert compute_axis_overlap(10, 10, 1, -11) == s_[0:0, 10:10]
assert compute_axis_overlap(40, 10, 1, -11) == s_[0:0, 10:10]
def test_can_paste():
assert _can_paste(mkA(translation=(10, -20))) == (True, None)
assert _can_paste(mkA(scale=(10, 10))) == (True, None)
assert _can_paste(mkA(scale=(-10, 10), translation=(0, -4 * 10))) == (True, None)
assert _can_paste(mkA(shear=0.3)) == (False, "has rotation or shear")
assert _can_paste(mkA(rot=30)) == (False, "has rotation or shear")
assert _can_paste(mkA(scale=(-11.1, 11.1))) == (False, "non-integer scale")
assert _can_paste(mkA(scale=(0.5, 0.5))) == (False, "non-integer scale")
assert _can_paste(mkA(scale=(2, 3))) == (False, "sx!=sy, probably")
assert _can_paste(mkA(scale=(-10, 10), translation=(0, -4))) == (
False,
"sub-pixel translation",
)
assert _can_paste(mkA(scale=(-10, 10), translation=(-4, 10))) == (
False,
"sub-pixel translation",
)
assert _can_paste(mkA(translation=(0, 0.4))) == (False, "sub-pixel translation")
assert _can_paste(mkA(translation=(0.4, 0))) == (False, "sub-pixel translation")
def test_compute_output_geobox():
# sentinel2 over Gibraltar strait
src = GeoBox.from_bbox(
[199980, 3890220, 309780, 4000020], "EPSG:32630", resolution=10
)
# just copy resolution since both in meters
dst = compute_output_geobox(src, "epsg:6933")
assert dst.crs.units == src.crs.units
assert dst.crs == "epsg:6933"
assert dst.resolution == src.resolution
assert dst.geographic_extent.contains(src.geographic_extent)
assert compute_output_geobox(src, "epsg:6933") == src.to_crs("epsg:6933")
assert compute_output_geobox(
src, "epsg:6933", resolution="auto"
) == compute_output_geobox(src, "epsg:6933", resolution="same")
# force estimation of new resolution
dst = compute_output_geobox(src, "epsg:6933", resolution="fit")
assert dst.crs == "epsg:6933"
assert dst.resolution != src.resolution
assert dst.resolution.x == -dst.resolution.y
assert dst.geographic_extent.contains(src.geographic_extent)
# check conversion to lon/lat
dst = compute_output_geobox(src, "epsg:4326")
assert dst.crs == "epsg:4326"
assert dst.resolution != src.resolution
assert dst.resolution.x == -dst.resolution.y
assert dst.geographic_extent.contains(src.geographic_extent)
npix_change = (src.shape[0] * src.shape[1]) / (dst.shape[0] * dst.shape[1])
assert 0.8 < npix_change < 1.1
# go back from 4326
_src = dst
dst = compute_output_geobox(_src, src.crs)
npix_change = (_src.shape[0] * _src.shape[1]) / (dst.shape[0] * dst.shape[1])
assert 0.8 < npix_change < 1.1
assert dst.geographic_extent.contains(_src.geographic_extent)
# test bad input
with pytest.raises(ValueError):
_ = compute_output_geobox(src, "epsg:6933", resolution="bad-one")
| 32.154004
| 92
| 0.618494
|
f7e0bdf4dd0c4a996fd33c5a8938691ee792308f
| 397
|
py
|
Python
|
pcts_users/asgi.py
|
fga-eps-mds/2021.1-PCTs-Users
|
627114bfe10e943a30dda0c54f4d57ba927d6cc1
|
[
"MIT"
] | null | null | null |
pcts_users/asgi.py
|
fga-eps-mds/2021.1-PCTs-Users
|
627114bfe10e943a30dda0c54f4d57ba927d6cc1
|
[
"MIT"
] | 9
|
2021-09-04T14:30:51.000Z
|
2021-11-07T21:46:38.000Z
|
pcts_users/asgi.py
|
fga-eps-mds/2021.1-PCTs-Users
|
627114bfe10e943a30dda0c54f4d57ba927d6cc1
|
[
"MIT"
] | null | null | null |
"""
ASGI config for pcts_users project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pcts_users.settings')
application = get_asgi_application()
| 23.352941
| 78
| 0.788413
|
703954ba1d64e684839af55e869a1aaab4162322
| 432
|
py
|
Python
|
Ex019.py
|
devwill77/Python
|
585d6197ab561c9c8813fa87682b3ff4f141319e
|
[
"MIT"
] | null | null | null |
Ex019.py
|
devwill77/Python
|
585d6197ab561c9c8813fa87682b3ff4f141319e
|
[
"MIT"
] | null | null | null |
Ex019.py
|
devwill77/Python
|
585d6197ab561c9c8813fa87682b3ff4f141319e
|
[
"MIT"
] | null | null | null |
'''
- CHALLENGE 019
- A teacher wants to randomly pick one of his four students to erase the board.
  Write a program that helps him by reading their names and printing the chosen one's name.
'''
from random import choice
a1 = str(input('Aluno 1: '))
a2 = str(input('Aluno 2: '))
a3 = str(input('Aluno 3: '))
a4 = str(input('Aluno 4: '))
lista = [a1, a2, a3, a4]
escolhido = choice(lista)
print('O escolhido foi {}'.format(escolhido))
| 18.782609
| 85
| 0.668981
|
a7e0211905005a1d1d2975f1b8296e8ebbf0892c
| 65,161
|
py
|
Python
|
anchorecli/cli/utils.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 110
|
2017-09-14T02:15:15.000Z
|
2022-03-30T20:14:21.000Z
|
anchorecli/cli/utils.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 115
|
2017-09-22T12:15:30.000Z
|
2022-01-17T12:31:21.000Z
|
anchorecli/cli/utils.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 56
|
2017-09-22T11:26:25.000Z
|
2022-03-03T14:14:58.000Z
|
import os
import re
import sys
import copy
import json
import yaml
import logging
import dateutil.parser
import textwrap
import base64
try:
from urllib.parse import quote_plus, unquote_plus
except ImportError:
from urllib import quote_plus, unquote_plus
from prettytable import PrettyTable, PLAIN_COLUMNS, ALL
from collections import OrderedDict
import anchorecli.clients.apiexternal
_logger = logging.getLogger(__name__)
def setup_config(cli_opts):
ret = {
"config": None,
"user": None,
"pass": None,
"url": "http://localhost:8228/v1",
"hub-url": "https://hub.anchore.io/",
"api-version": None,
"ssl_verify": True,
"jsonmode": False,
"debug": False,
"as_account": None,
}
settings = {}
# load environment if present
for e in [
"ANCHORE_CLI_USER",
"ANCHORE_CLI_PASS",
"ANCHORE_CLI_URL",
"ANCHORE_CLI_HUB_URL",
"ANCHORE_CLI_API_VERSION",
"ANCHORE_CLI_SSL_VERIFY",
"ANCHORE_CLI_JSON",
"ANCHORE_CLI_DEBUG",
"ANCHORE_CLI_ACCOUNT",
"ANCHORE_CLI_CONFIG",
]:
if e in os.environ:
settings[e] = os.environ[e]
# load up credentials file if present
try:
if "ANCHORE_CLI_CONFIG" in settings:
credential_file = settings["ANCHORE_CLI_CONFIG"]
else:
home = os.path.expanduser("~")
credential_file = os.path.join(home, ".anchore", "credentials.yaml")
if os.path.exists(credential_file):
ydata = {}
with open(credential_file, "r") as FH:
try:
ydata = yaml.safe_load(FH)
except Exception as err:
raise Exception("YAML load failed: " + str(err))
if ydata:
try:
if type(ydata) != type(dict()):
raise Exception("invalid credentials file format")
default_creds = ydata.get("default", {})
for e in [
"ANCHORE_CLI_USER",
"ANCHORE_CLI_PASS",
"ANCHORE_CLI_URL",
"ANCHORE_CLI_HUB_URL",
"ANCHORE_CLI_API_VERSION",
"ANCHORE_CLI_SSL_VERIFY",
]:
if e in default_creds:
settings[e] = default_creds[e]
except Exception as err:
raise Exception(
"credentials file exists and has data, but cannot parse: "
+ str(err)
)
except Exception as err:
raise Exception(
"error while processing credentials file, please check format and read permissions - exception: "
+ str(err)
)
# load cmdline options
if cli_opts["config"]:
settings["ANCHORE_CLI_CONFIG"] = cli_opts["config"]
if cli_opts["u"]:
settings["ANCHORE_CLI_USER"] = cli_opts["u"]
if cli_opts["p"]:
settings["ANCHORE_CLI_PASS"] = cli_opts["p"]
if cli_opts["url"]:
settings["ANCHORE_CLI_URL"] = cli_opts["url"]
if cli_opts["hub-url"]:
settings["ANCHORE_CLI_HUB_URL"] = cli_opts["hub-url"]
if cli_opts["api-version"]:
settings["ANCHORE_CLI_API_VERSION"] = cli_opts["api-version"]
if cli_opts["insecure"]:
settings["ANCHORE_CLI_SSL_VERIFY"] = "n"
if cli_opts["json"]:
settings["ANCHORE_CLI_JSON"] = "y"
if cli_opts["debug"]:
settings["ANCHORE_CLI_DEBUG"] = "y"
if cli_opts.get("as_account") is not None:
settings["ANCHORE_CLI_ACCOUNT"] = cli_opts["as_account"]
if "ANCHORE_CLI_CONFIG" in settings:
ret["config"] = settings["ANCHORE_CLI_CONFIG"]
if "ANCHORE_CLI_USER" in settings:
ret["user"] = settings["ANCHORE_CLI_USER"]
if "ANCHORE_CLI_PASS" in settings:
ret["pass"] = settings["ANCHORE_CLI_PASS"]
if "ANCHORE_CLI_URL" in settings:
ret["url"] = settings["ANCHORE_CLI_URL"]
if "ANCHORE_CLI_HUB_URL" in settings:
ret["hub-url"] = settings["ANCHORE_CLI_HUB_URL"]
if "ANCHORE_CLI_API_VERSION" in settings:
ret["api-version"] = settings["ANCHORE_CLI_API_VERSION"]
if "ANCHORE_CLI_SSL_VERIFY" in settings:
if settings["ANCHORE_CLI_SSL_VERIFY"].lower() == "n":
ret["ssl_verify"] = False
if "ANCHORE_CLI_JSON" in settings:
if settings["ANCHORE_CLI_JSON"].lower() == "y":
ret["jsonmode"] = True
if "ANCHORE_CLI_DEBUG" in settings:
if settings["ANCHORE_CLI_DEBUG"].lower() == "y":
ret["debug"] = True
if "ANCHORE_CLI_ACCOUNT" in settings:
ret["as_account"] = settings["ANCHORE_CLI_ACCOUNT"]
return ret
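# Note on precedence as implemented above: values read from the credentials file
# overwrite any matching ANCHORE_CLI_* environment variables, and explicit
# command-line options overwrite both; anything still unset falls back to the
# defaults initialised in ``ret``.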
def doexit(ecode):
if not os.environ.get("ANCHORE_CLI_NO_FDS_CLEANUP"):
try:
sys.stdout.close()
except Exception:
pass
try:
sys.stderr.close()
except Exception:
pass
sys.exit(ecode)
def group_list_of_dicts(indict, bykey):
ret = []
gdict = {}
for el in indict:
elkey = el[bykey]
if elkey not in gdict:
gdict[elkey] = []
gdict[elkey].append(el)
for k in list(gdict.keys()):
for el in gdict[k]:
ret.append(el)
return ret
def format_error_output(config, op, params, payload):
try:
errdata = json.loads(str(payload))
except ValueError:
errdata = {"message": str(payload)}
if config["jsonmode"]:
return json.dumps(errdata, indent=4, sort_keys=True)
obuf = ""
outdict = OrderedDict()
if "message" in errdata:
outdict["Error"] = str(errdata["message"])
if "httpcode" in errdata:
outdict["HTTP Code"] = str(errdata["httpcode"])
if "detail" in errdata and errdata["detail"]:
outdict["Detail"] = str(errdata["detail"])
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
if not obuf:
obuf = str(payload)
hint = create_hint(outdict.get("Detail"))
if hint:
obuf = obuf + hint
# operation-specific output postfixes
if op in ["account_delete"]:
if "Invalid account state change requested" in errdata.get("message", ""):
obuf = (
obuf
+ "\nNOTE: accounts must be disabled (anchore-cli account disable <account>) in order to be deleted\n"
)
return obuf
def create_hint(error_message):
"""
Apply some heuristics to determine if the message is a validation failure
complaining about missing keys; if so, attempt to extract what may be the
missing key, and craft a message that indicates how that might look inside
a JSON object.
:returns: multiline string on success, ``None`` on failure.
"""
# when validation fails, the message already has something we can depend on
# skip processing otherwise
try:
if "is a required property" not in error_message:
return
except TypeError:
return
pattern = re.compile(r"'(?P<key>.*?)'")
search = re.search(pattern, error_message)
if not search:
return
parsed = search.groupdict()
key = parsed.get("key")
if key:
msg = (
'Hint: The "{key}" key is not present in the JSON file, make sure it exists:\n\n'
" {{\n"
' "{key}": <value>\n'
" ...\n"
" }}\n"
)
return msg.format(key=key)
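# Illustrative sketch (not part of the original CLI): shows the kind of hint
# create_hint() produces for a jsonschema-style validation message. The helper
# name and the sample error text below are hypothetical.
def _example_create_hint():
    sample_error = "'policy_id' is a required property"
    # Expected: a multiline string suggesting where "policy_id" belongs in the
    # JSON document; unrelated messages (or non-strings) return None.
    return create_hint(sample_error)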
def plain_column_table(header, align="l"):
table = PrettyTable(header)
table.set_style(PLAIN_COLUMNS)
table.align = align
return table
def format_output(config, op, params, payload):
if config["jsonmode"]:
try:
ret = json.dumps(payload, indent=4, sort_keys=True)
# XXX catch json exception explicitly here
except Exception:
ret = json.dumps({"payload": str(payload)}, indent=4, sort_keys=True)
return ret
ret = ""
try:
if op == "image_list":
if params["show_all"]:
filtered_records = payload
else:
# build a filtered list containing only the latest image record for each tag found
latest_tag_details = {}
latest_records = {}
for image_record in payload:
for image_detail in image_record["image_detail"]:
fulltag = image_detail["fulltag"]
tagts = dateutil.parser.parse(image_detail["created_at"])
if fulltag not in latest_tag_details:
latest_tag_details[fulltag] = image_detail
latest_records[fulltag] = image_record
else:
lasttagts = dateutil.parser.parse(
latest_tag_details[fulltag]["created_at"]
)
if tagts >= lasttagts:
latest_tag_details[fulltag] = image_detail
latest_records[fulltag] = image_record
filtered_records = list(latest_records.values())
if params["full"]:
header = ["Full Tag", "Image Digest", "Analysis Status", "Image ID"]
else:
header = ["Full Tag", "Image Digest", "Analysis Status"]
t = plain_column_table(header)
add_rows = []
for image_record in filtered_records:
for image_detail_record in image_record["image_detail"]:
image_detail = copy.deepcopy(image_detail_record)
imageId = fulltag = "None"
imageId = image_detail.pop("imageId", "None")
fulltag = (
image_detail.pop("registry", "None")
+ "/"
+ image_detail.pop("repo", "None")
+ ":"
+ image_detail.pop("tag", "None")
)
if params["full"]:
row = [
fulltag,
image_record["imageDigest"],
image_record["analysis_status"],
imageId,
]
else:
row = [
fulltag,
image_record["imageDigest"],
image_record["analysis_status"],
]
if row not in add_rows:
add_rows.append(row)
for row in add_rows:
t.add_row(row)
ret = t.get_string(sortby="Full Tag")
elif op == "image_vuln":
ret = format_vulnerabilities(payload, params)
elif op in ["image_content", "image_metadata"]:
obuf = ""
if "query_type" not in params or not params["query_type"]:
outdict = OrderedDict()
for t in payload:
outdict[t] = "available"
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
else:
if params["query_type"] == "os":
header = ["Package", "Version", "Licenses"]
t = plain_column_table(header)
for el in payload["content"]:
licenses = el.get("licenses", [el.get("license")])
row = [el["package"], el["version"], " ".join(licenses)]
t.add_row(row)
obuf = obuf + t.get_string(sortby="Package")
elif params["query_type"] == "files":
header = ["Filename", "Size"]
t = plain_column_table(header)
for el in payload["content"]:
row = [el["filename"], el["size"]]
t.add_row(row)
obuf = obuf + t.get_string(sortby="Size", reversesort=True)
elif params["query_type"] in ["npm", "gem", "python"]:
header = ["Package", "Version", "Location"]
t = plain_column_table(header)
for el in payload["content"]:
row = [el["package"], el["version"], el["location"]]
t.add_row(row)
obuf = obuf + t.get_string(sortby="Package")
elif params["query_type"] in ["java"]:
header = [
"Package",
"Specification-Version",
"Implementation-Version",
"Location",
]
t = plain_column_table(header)
for el in payload["content"]:
row = [
el["package"],
el["specification-version"],
el["implementation-version"],
el["location"],
]
t.add_row(row)
obuf = obuf + t.get_string(sortby="Package")
elif params["query_type"] in [
"manifest",
"dockerfile",
"docker_history",
]:
if op == "image_content":
obuf = format_content_query(payload)
else:
# Metadata Query. Note: The design of this whole method is bad, just doing the change in place
# to reduce changes for now, but should refactor this thing later
obuf = format_metadata_query(payload)
elif params["query_type"] in ["malware"]:
obuf = format_malware_scans(payload, params)
else:
try:
if payload["content"]:
el = payload["content"][0]
if (
el.get("package", None)
and el.get("version", None)
and el.get("location", None)
):
header = ["Package", "Version", "Location"]
t = plain_column_table(header)
for el in payload["content"]:
row = [el["package"], el["version"], el["location"]]
t.add_row(row)
obuf = obuf + t.get_string(sortby="Package")
else:
header = list(el.keys())
t = PrettyTable(header)
t.set_style(PLAIN_COLUMNS)
t.align = "l"
for el in payload["content"]:
row = []
for k in header:
row.append(el[k])
t.add_row(row)
obuf = obuf + t.get_string()
except Exception as err:
raise Exception(
"could not parse content result - exception: " + str(err)
)
ret = obuf
elif op in ["image_add", "image_get", "image_import"]:
obuf = ""
for image_record in payload:
outdict = OrderedDict()
outdict["Image Digest"] = str(image_record["imageDigest"])
if image_record.get("parentDigest", None):
outdict["Parent Digest"] = str(image_record["parentDigest"])
outdict["Analysis Status"] = str(image_record["analysis_status"])
outdict["Image Type"] = str(image_record["image_type"])
outdict["Analyzed At"] = str(image_record["analyzed_at"])
image_detail = copy.deepcopy(image_record["image_detail"][0])
imageId = image_detail.pop("imageId", "None")
outdict["Image ID"] = str(imageId)
if "image_content" in image_record and image_record["image_content"]:
image_content = image_record["image_content"]
if "metadata" in image_content and image_content["metadata"]:
image_content_metadata = image_content["metadata"]
outdict["Dockerfile Mode"] = str(
image_content_metadata["dockerfile_mode"]
)
outdict["Distro"] = str(image_content_metadata["distro"])
outdict["Distro Version"] = str(
image_content_metadata["distro_version"]
)
outdict["Size"] = str(image_content_metadata["image_size"])
outdict["Architecture"] = str(image_content_metadata["arch"])
outdict["Layer Count"] = str(
image_content_metadata["layer_count"]
)
if "annotations" in image_record and image_record["annotations"]:
outdict["Annotations"] = ", ".join(
[
str(x) + "=" + str(y)
for x, y in list(image_record["annotations"].items())
]
)
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
for image_detail_record in image_record["image_detail"]:
image_detail = copy.deepcopy(image_detail_record)
outdict = OrderedDict()
outdict["Full Tag"] = str(image_detail.pop("fulltag", "None"))
outdict["Tag Detected At"] = str(
image_detail.pop("tag_detected_at", "None")
)
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
ret = obuf
elif op in ["registry_add", "registry_get", "registry_update"]:
obuf = ""
for registry_record in payload:
outdict = OrderedDict()
outdict["Registry"] = str(registry_record["registry"])
outdict["Name"] = str(registry_record.get("registry_name", "N/A"))
outdict["User"] = str(registry_record["registry_user"])
outdict["Type"] = str(registry_record["registry_type"])
outdict["Verify TLS"] = str(registry_record["registry_verify"])
outdict["Created"] = str(registry_record["created_at"])
outdict["Updated"] = str(registry_record["last_updated"])
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
ret = obuf
elif op == "registry_list":
header = ["Registry", "Name", "Type", "User"]
t = plain_column_table(header)
for registry_record in payload:
row = [
registry_record["registry"],
registry_record.get("registry_name", "N/A"),
registry_record["registry_type"],
registry_record["registry_user"],
]
t.add_row(row)
ret = t.get_string(sortby="Registry")
elif op in ["subscription_list", "subscription_get"]:
header = ["Tag", "Subscription Type", "Active"]
if params.get("full", ""):
header += ["Subscription ID"]
if op == "subscription_get":
header += ["User ID"]
t = plain_column_table(header)
for subscription_record in payload:
row = [
subscription_record["subscription_key"],
subscription_record["subscription_type"],
str(subscription_record["active"]),
]
if params.get("full", ""):
row.append(subscription_record.get("subscription_id", ""))
if op == "subscription_get":
row += [subscription_record.get("userId")]
t.add_row(row)
ret = t.get_string(sortby="Tag")
elif op == "repo_list":
header = ["Repository", "Watched", "TagCount"]
t = plain_column_table(header)
for subscription_record in payload:
try:
sval = json.loads(subscription_record["subscription_value"])
tagcount = str(sval["tagcount"])
except Exception:
tagcount = "N/A"
row = [
subscription_record["subscription_key"],
str(subscription_record["active"]),
str(tagcount),
]
t.add_row(row)
ret = t.get_string(sortby="Repository")
elif op in ["repo_get", "repo_watch", "repo_unwatch", "repo_add"]:
header = ["Repository", "Watched", "TagCount"]
t = PrettyTable(header)
t.set_style(PLAIN_COLUMNS)
t.align = "l"
for subscription_record in payload:
sval = json.loads(subscription_record["subscription_value"])
tagcount = str(sval.get("tagcount", "N/A"))
row = [
subscription_record["subscription_key"],
str(subscription_record["active"]),
tagcount,
]
t.add_row(row)
if params.get("dry_run", ""):
ret = "DRY RUN: Repository not added\n\n"
ret += t.get_string(sortby="Repository")
elif op in ["policy_add", "policy_get"]:
if "detail" in params and params["detail"]:
try:
ret = json.dumps(
payload[0]["policybundle"], indent=4, sort_keys=True
)
except Exception:
ret = json.dumps(payload, indent=4, sort_keys=True)
else:
obuf = ""
if op == "policy_add":
payload = [payload]
else:
pass
for policy_record in payload:
outdict = OrderedDict()
outdict["Policy ID"] = str(policy_record["policyId"])
outdict["Active"] = str(policy_record["active"])
outdict["Source"] = str(policy_record["policy_source"])
outdict["Created"] = str(policy_record["created_at"])
outdict["Updated"] = str(policy_record["last_updated"])
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
ret = obuf
elif op == "policy_list":
header = ["Policy ID", "Active", "Created", "Updated"]
t = plain_column_table(header)
for policy_record in payload:
row = [
policy_record["policyId"],
str(policy_record["active"]),
policy_record["created_at"],
policy_record["last_updated"],
]
t.add_row(row)
ret = t.get_string(sortby="Active", reversesort=True)
elif op == "policy_hub_list":
header = ["Name", "Description"]
t = plain_column_table(header)
for record in payload["content"]:
if record.get("type", None) == "bundle":
row = [
textwrap.fill(record["name"], width=40),
textwrap.fill(record["description"], width=60),
]
t.add_row(row)
ret = t.get_string(sortby="Name", reversesort=True)
elif op == "policy_hub_get":
obuf = ""
outdict = OrderedDict()
outdict["Policy Bundle ID"] = str(payload["id"])
outdict["Name"] = str(payload["name"])
outdict["Description"] = str(
payload.get("description", payload.get("comment", "N/A"))
)
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
id_to_name = {}
for record in payload["policies"]:
outdict = OrderedDict()
outdict["Policy Name"] = record["name"]
# outdict['Policy ID'] = record['id']
outdict["Policy Description"] = str(
record.get("description", record.get("comment", "N/A"))
)
id_to_name[record["id"]] = record["name"]
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
for record in payload["whitelists"]:
outdict = OrderedDict()
outdict["Whitelist Name"] = record["name"]
# outdict['Whitelist ID'] = record['id']
outdict["Whitelist Description"] = str(
record.get("description", record.get("comment", "N/A"))
)
id_to_name[record["id"]] = record["name"]
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
for record in payload["mappings"]:
outdict = OrderedDict()
outdict["Mapping Name"] = record["name"]
outdict["Mapping Rule"] = "{}/{}:{}".format(
record["registry"], record["repository"], record["image"]["value"]
)
pids = []
pid = record.get("policy_id", None)
if pid:
pids.append(pid)
pids = [str(id_to_name[x]) for x in pids + record.get("policy_ids", [])]
outdict["Mapping Policies"] = ",".join(pids)
wids = [str(id_to_name[x]) for x in record.get("whitelist_ids", [])]
outdict["Mapping Whitelists"] = ",".join(wids)
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
ret = obuf
# ret = json.dumps(payload, indent=4, sort_keys=True)
elif op == "evaluate_check":
obuf = ""
for eval_record in payload:
outdict = OrderedDict()
for imageDigest in list(eval_record.keys()):
for fulltag in eval_record[imageDigest]:
if not eval_record[imageDigest][fulltag]:
outdict["Image Digest"] = str(imageDigest)
outdict["Full Tag"] = str(fulltag)
outdict["Status"] = "no_eval_available"
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
else:
for evaldata in eval_record[imageDigest][fulltag]:
outdict["Image Digest"] = str(imageDigest)
outdict["Full Tag"] = str(fulltag)
if "detail" in params and params["detail"]:
evaldetail = evaldata["detail"]
outdict["Image ID"] = str(
evaldetail["result"]["image_id"]
)
outdict["Status"] = str(evaldata["status"])
outdict["Last Eval"] = str(evaldata["last_evaluation"])
outdict["Policy ID"] = str(evaldata["policyId"])
t = None
if "detail" in params and params["detail"]:
evaldetail = evaldata["detail"]
imageId = evaldetail["result"]["image_id"]
try:
outdict["Final Action"] = str(
evaldetail["result"]["final_action"]
)
outdict["Final Action Reason"] = str(
evaldetail["result"]["final_action_reason"]
)
except Exception:
pass
evalresults = evaldetail["result"]["result"][
imageId
]["result"]
header = ["Gate", "Trigger", "Detail", "Status"]
t = plain_column_table(header)
for row in evalresults["rows"]:
if "full" in params and params["full"]:
detailrow = row[5]
else:
detailrow = row[5]
status_detail = row[6]
try:
if row[7]:
eval_whitelist_detail = row[7]
status_detail = (
"whitelisted("
+ eval_whitelist_detail[
"whitelist_name"
]
+ ")"
)
except Exception:
status_detail = row[6]
newrow = [
row[3],
row[4],
detailrow,
status_detail,
]
t.add_row(newrow)
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
if t:
obuf = obuf + "\n"
obuf = obuf + t.get_string() + "\n"
ret = obuf
elif op == "policy_activate":
try:
ret = "Success: " + str(params["policyId"]) + " activated"
except Exception:
ret = "Success"
elif op == "system_status":
out_list = []
db_version = code_version = None
for service_record in payload.get("service_states", []):
if service_record.get("status", None):
service_status = "up"
else:
service_status = "down ({})".format(
service_record.get("status_message", "Status Unknown")
)
out_list.append(
"Service {} ({}, {}): {}".format(
service_record.get("servicename", "ServiceName Unknown"),
service_record.get("hostid", "HostID Unknown"),
service_record.get("base_url", "Base URL Unknown"),
str(service_status),
)
)
# This is a fallback mechanism to get the db & code versions from a non-api service
# (should there be no healthy api service available)
if not db_version:
db_version = service_record.get("service_detail", {}).get(
"db_version", None
)
if not code_version:
code_version = service_record.get("service_detail", {}).get(
"version", None
)
# Set the code & db versions with the details from the first discovered API service that is up
if (
service_record.get("servicename", "") == "apiext"
and service_status == "up"
):
service_detail = service_record.get("service_detail", {})
code_version = service_detail.get("version", None)
db_version = service_detail.get("db_version", None)
output_buffer = "\n".join(out_list)
output_buffer += "\n\nEngine DB Version: {}\n".format(
db_version or "Not Found"
)
output_buffer += "Engine Code Version: {}".format(
code_version or "Not Found"
)
ret = output_buffer
elif op == "event_delete":
if payload is not None and isinstance(payload, list):
ret = (
"Deleted {} events".format(len(payload))
if payload
else "No matching events found"
)
else:
ret = "Success"
elif op in ["describe_gates"]:
ret = _format_gates(payload, all=params.get("all", False))
elif op in ["describe_gate_triggers"]:
ret = _format_triggers(
payload, params.get("gate", "").lower(), all=params.get("all", False)
)
elif op in ["describe_gate_trigger_params"]:
ret = _format_trigger_params(
payload,
params.get("gate", "").lower(),
params.get("trigger", "").lower(),
all=params.get("all", False),
)
elif op in ["system_describe_error_codes"]:
header = ["Error Code", "Description"]
t = plain_column_table(header)
for el in payload:
error_name = el.get("name", "N/A")
error_description = textwrap.fill(
el.get("description", "N/A"), width=60
)
t.add_row([error_name, error_description])
ret = t.get_string(sortby="Error Code") + "\n"
elif op in ["system_feeds_list"]:
header = ["Feed", "Group", "LastSync", "RecordCount"]
t = plain_column_table(header)
for el in payload:
feed = el.get("name", "N/A")
feed_enabled = el.get("enabled", True)
if not feed_enabled:
feed = "{}(disabled)".format(feed)
for gel in el["groups"]:
group_enabled = gel.get("enabled", True)
last_sync = gel.get("last_sync", None)
if not last_sync:
if feed_enabled and group_enabled:
last_sync = "pending"
else:
last_sync = "-"
gname = gel.get("name", "N/A")
if not group_enabled:
gname = "{}(disabled)".format(gname)
t.add_row([feed, gname, last_sync, gel.get("record_count", "N/A")])
ret = t.get_string(sortby="Feed") + "\n"
elif op in ["system_feed_groups"]:
header = ["Group", "LastSync", "RecordCount"]
t = PrettyTable(header)
t.set_style(PLAIN_COLUMNS)
t.align = "l"
for gel in payload:
last_sync = gel.get("last_sync", None)
if not last_sync:
last_sync = "pending"
gname = gel.get("name", "N/A")
if not gel.get("enabled", True):
gname = "{}(disabled)".format(gname)
t.add_row([gname, last_sync, gel.get("record_count", "N/A")])
ret = t.get_string(sortby="Group") + "\n"
elif op in ["system_feeds_flush"]:
ret = "Success"
if isinstance(payload, list):
header = ["Feed", "Group", "Status", "Records Updated", "Sync Duration"]
t = plain_column_table(header)
for feed in payload:
for group in feed.get("groups"):
row = [
feed["feed"],
group["group"],
group["status"],
group["updated_record_count"],
"{:.2f}s".format(group["total_time_seconds"]),
]
t.add_row(row)
ret = t.get_string(sortby="Feed")
elif op == "event_list":
header = ["Timestamp", "Level", "Event", "Resource", "ID"]
t = plain_column_table(header)
for event_res in payload["results"]:
event = event_res["event"]
row = [
event["timestamp"],
event["level"],
event["type"],
event["resource"].get("id"),
event_res["generated_uuid"],
]
t.add_row(row)
ret = t.get_string()
elif op == "event_list_full":
header = [
"Timestamp",
"Level",
"Event",
"ResourceType",
"Resource",
"Service",
"Host",
"ID",
]
t = plain_column_table(header)
for event_res in payload["results"]:
event = event_res["event"]
row = [
event["timestamp"],
event["level"],
event["type"],
event["resource"].get("type"),
event["resource"].get("id"),
event["source"]["servicename"],
event["source"]["hostid"],
event_res["generated_uuid"],
]
t.add_row(row)
ret = t.get_string()
elif op == "event_get":
ret = yaml.safe_dump(payload["event"], default_flow_style=False)
elif op == "query_images_by_vulnerability":
header = [
"Full Tag",
"Severity",
"Package",
"Package Type",
"Namespace",
"Digest",
]
t = plain_column_table(header)
for record in payload.get("images", []):
for tag_record in record.get("image", {}).get("tag_history", []):
for package_record in record.get("vulnerable_packages", []):
row = [
tag_record.get("fulltag", "N/A"),
package_record.get("severity", "N/A"),
"{}-{}".format(
package_record.get("name"),
package_record.get("version"),
),
package_record.get("type"),
package_record.get("namespace", "N/A"),
record.get("image", {}).get("imageDigest", "N/A"),
]
t.add_row(row)
ret = t.get_string()
elif op == "query_images_by_package":
header = ["Full Tag", "Package", "Package Type", "Digest"]
t = plain_column_table(header)
for record in payload.get("images", []):
for tag_record in record.get("image", {}).get("tag_history", []):
for package_record in record.get("packages", []):
row = [
tag_record.get("fulltag", "N/A"),
"{}-{}".format(
package_record.get("name"),
package_record.get("version"),
),
package_record.get("type"),
record.get("image", {}).get("imageDigest", "N/A"),
]
t.add_row(row)
ret = t.get_string()
elif op == "account_whoami":
outdict = OrderedDict()
outdict["Username"] = str(payload.get("user", {}).get("username", "N/A"))
outdict["AccountName"] = str(payload.get("account", {}).get("name", "N/A"))
outdict["AccountEmail"] = str(
payload.get("account", {}).get("email", "N/A")
)
outdict["AccountType"] = str(payload.get("account", {}).get("type", "N/A"))
obuf = ""
for k in list(outdict.keys()):
obuf = obuf + k + ": " + outdict[k] + "\n"
obuf = obuf + "\n"
ret = obuf
elif op in ["account_add", "account_get"]:
outdict = OrderedDict()
outdict["Name"] = str(payload.get("name", "N/A"))
outdict["Email"] = str(payload.get("email", "N/A"))
outdict["Type"] = str(payload.get("type", "N/A"))
outdict["State"] = str(payload.get("state", "N/A"))
outdict["Created"] = str(payload.get("created_at", "N/A"))
obuf = ""
for k in list(outdict.keys()):
obuf = obuf + "{}: {}\n".format(k, outdict[k])
obuf = obuf + "\n"
ret = obuf
elif op in ["account_list"]:
header = ["Name", "Email", "Type", "State", "Created"]
t = plain_column_table(header)
for record in payload:
row = [
str(record.get("name", "N/A")),
str(record.get("email", "N/A")),
str(record.get("type", "N/A")),
str(record.get("state", "N/A")),
str(record.get("created_at", "N/A")),
]
t.add_row(row)
ret = t.get_string(sortby="Created") + "\n"
elif op in ["user_add", "user_get"]:
outdict = OrderedDict()
outdict["Name"] = str(payload.get("username", "N/A"))
outdict["Type"] = str(payload.get("type", "N/A"))
outdict["Source"] = str(payload.get("source", "N/A"))
outdict["Created"] = str(payload.get("created_at", "N/A"))
obuf = ""
for k in list(outdict.keys()):
obuf = obuf + "{}: {}\n".format(k, outdict[k])
obuf = obuf + "\n"
ret = obuf
elif op in ["user_list"]:
header = ["Name", "Type", "Source", "Created"]
t = plain_column_table(header)
for record in payload:
row = [
str(record.get("username", "N/A")),
str(record.get("type", "N/A")),
str(record.get("source", "N/A")),
str(record.get("created_at", "N/A")),
]
t.add_row(row)
ret = t.get_string(sortby="Created") + "\n"
elif op in ["user_setpassword"]:
ret = "Password (re)set success"
elif (
op in ["delete_system_service"]
or re.match(".*_delete$", op)
or re.match(".*_activate$", op)
or re.match(".*_deactivate$", op)
or re.match(".*_enable$", op)
or re.match(".*_disable$", op)
):
# NOTE: generic success output for *_delete/_activate/_deactivate/_enable/_disable ops; a broader catch-all with the same patterns also appears at the end of this chain
ret = "Success"
elif op in ["analysis_archive_list", "archived_analysis"]:
header = [
"Digest",
"Tags",
"Analyzed At",
"Archived At",
"Status",
"Archive Size Bytes",
]
t = plain_column_table(header)
for record in payload:
row = [
str(record["imageDigest"]),
str(
",".join(
[x["pullstring"] for x in record.get("image_detail", [])]
)
),
str(record["analyzed_at"]),
str(record["created_at"]),
str(record["status"]),
str(record["archive_size_bytes"]),
]
t.add_row(row)
ret = t.get_string(sortby="Archived At", reversesort=True) + "\n"
elif op in ["archive_analysis"]:
header = ["Image Digest", "Archive Status", "Details"]
t = plain_column_table(header)
for record in payload:
row = [
str(record["digest"]),
str(record["status"]),
str(record["detail"]),
]
t.add_row(row)
ret = t.get_string(sortby="Archive Status") + "\n"
elif op in ["transition_rules"]:
header = [
"Rule Id",
"Global",
"Transition",
"Analysis Age (Days)",
"Tag Versions Newer",
"Registry",
"Repository",
"Tag",
"Max Images",
"Registry Exclude",
"Repo Exclude",
"Tag Exclude",
"Exclude Exp Days",
"Last Updated",
]
t = plain_column_table(header)
if not isinstance(payload, list):
payload = [payload]
for record in payload:
row = [
str(record["rule_id"]),
str(record["system_global"]),
str(record["transition"]),
str(record["analysis_age_days"]),
str(record["tag_versions_newer"]),
str(record["selector"]["registry"]),
str(record["selector"]["repository"]),
str(record["selector"]["tag"]),
str(record["max_images_per_account"]),
str(record["exclude"]["selector"]["registry"]),
str(record["exclude"]["selector"]["repository"]),
str(record["exclude"]["selector"]["tag"]),
str(record["exclude"]["expiration_days"]),
str(record["last_updated"]),
]
t.add_row(row)
ret = t.get_string(sortby="Last Updated", reversesort=True) + "\n"
elif op in ["transition_rule_history"]:
header = ["Rule Id", "Image Digest", "Transition", "Transition Date"]
t = plain_column_table(header)
for record in payload:
row = [
str(record["rule_id"]),
str(record["imageDigest"]),
str(record["transition"]),
str(record["created_at"]),
]
t.add_row(row)
ret = t.get_string(sortby="Transition Date", reversesort=True) + "\n"
elif op in ["list_corrections"]:
header = ["ID", "Match", "Replace", "Created At", "Description"]
t = plain_column_table(header)
for record in payload:
row = [
str(record["uuid"]),
str(record["match"]),
str(record["replace"]),
str(record["created_at"]),
str(record["description"]),
]
t.add_row(row)
ret = t.get_string(sortby="Created At", reversesort=True) + "\n"
elif op in ["get_correction"]:
ret = (
"UUID: %s\nMatch: %s\nReplace: %s\nCreated At: %s\nDescription: %s\n"
% (
str(payload["uuid"]),
str(payload["match"]),
str(payload["replace"]),
str(payload["created_at"]),
str(payload["description"]),
)
)
elif (
op
in [
"delete_system_service",
"test_webhook",
"add_correction",
"delete_correction",
]
or re.match(".*_delete$", op)
or re.match(".*_activate$", op)
or re.match(".*_deactivate$", op)
or re.match(".*_enable$", op)
or re.match(".*_disable$", op)
):
# NOTE this should always be the last in the if/elif conditional
ret = "Success"
else:
raise Exception("no output handler for this operation ({})".format(op))
except Exception as err:
print(
"WARNING: failed to format output (returning raw output) - exception: "
+ str(err)
)
try:
ret = json.dumps(payload, indent=4, sort_keys=True)
# XXX catch json errors here
except Exception:
ret = str(payload)
return ret
def format_malware_scans(payload, params):
"""
Example response:
{
"content": [
{
"enabled": true,
"findings": [
{
"path": "/elf_payload1",
"signature": "Unix.Trojan.MSShellcode-40"
}
],
"metadata": {
"db_version": {
"bytecode": "331",
"daily": "25890",
"main": "59"
}
},
"scanner": "clamav"
}
],
"content_type": "malware",
"imageDigest": "sha256:0eb874fcad5414762a2ca5b2496db5291aad7d3b737700d05e45af43bad3ce4d"
}
:param payload:
:param params:
:return:
"""
obuf = ""
# No specific query type requested: list what is available for this image
if "query_type" not in params or not params["query_type"]:
# payload will be a list with what is available as a query for the
# given image
for query in payload:
obuf += "%s: available\n" % query
return obuf + "\n"
if params["query_type"] in ["malware"]:
header = ["Scanner", "Matched Signature", "Path"]
t = plain_column_table(header)
for el in payload["content"]:
scanner = el.get("scanner")
for row in [
[scanner, x.get("signature", "unknown"), x.get("path", "unknown")]
for x in el.get("findings", {})
]:
t.add_row(row)
obuf = obuf + t.get_string(sortby="Path")
return obuf
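# Illustrative sketch (hypothetical helper): feeds the docstring's example
# response shape through format_malware_scans(); digest and signature values
# are made up.
def _example_format_malware_scans():
    payload = {
        "content": [
            {
                "enabled": True,
                "findings": [
                    {"path": "/elf_payload1", "signature": "Unix.Trojan.MSShellcode-40"}
                ],
                "metadata": {},
                "scanner": "clamav",
            }
        ],
        "content_type": "malware",
        "imageDigest": "sha256:<hypothetical-digest>",
    }
    # Renders a plain three-column table (Scanner, Matched Signature, Path),
    # sorted by Path.
    return format_malware_scans(payload, {"query_type": "malware"})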
def format_vulnerabilities(payload, params):
obuf = ""
if "query_type" not in params or not params["query_type"]:
# payload will be a list with what is available as a query for the
# given image
for query in payload:
obuf += "%s: available\n" % query
return obuf + "\n"
if params["query_type"] in ["os", "non-os", "all"]:
header = [
"Vulnerability ID",
"Package",
"Severity",
"Fix",
"CVE Refs",
"Vulnerability URL",
"Type",
"Feed Group",
"Package Path",
]
t = plain_column_table(header)
for el in payload["vulnerabilities"]:
nvd_data = el.get("nvd_data")
cve_ids = []
for nvd_record in nvd_data:
_id = nvd_record.get("id")
if _id:
cve_ids.append(_id)
row = [
el["vuln"],
el["package"],
el["severity"],
el["fix"],
",".join(cve_ids),
el["url"],
el["package_type"],
el["feed_group"],
el["package_path"],
]
t.add_row(row)
obuf = obuf + t.get_string(sortby="Severity")
return obuf
def format_content_query(payload):
content = payload.get("content", "")
if not content:
return ""
if isinstance(content, list):
# In some situations the `content` key can be a list, not a string
content = "".join(content)
try:
return base64.b64decode(content).decode("utf-8")
except Exception:
# This broad exception catching is warranted here because there are all
# sorts of warts we would need to catch with utf-8 decoding and
# b64decode. The actual exception is not that relevant here
return ""
def format_metadata_query(payload):
ret = ""
if not payload:
return ret
image_digest = payload.get("imageDigest", "")
if image_digest:
ret += "Image Digest: {}\n".format(image_digest)
metadata = payload.get("metadata", "")
if metadata:
try:
ret += "Metadata: {}\n".format(base64.b64decode(metadata).decode("utf-8"))
except Exception:
_logger.warning("Failed to base64 decode Metadata")
pass
metadata_type = payload.get("metadata_type", "")
if metadata_type:
ret += "Metadata Type: {}\n".format(metadata_type)
return ret
def string_splitter(input_str, max_length=40):
"""
Return the input string with newline characters inserted roughly every
max_length characters, splitting only on spaces (so words longer than
max_length are kept intact).
:param input_str: string to wrap
:param max_length: approximate number of characters per line
:return: the wrapped string
"""
chunks = []
chunk = ""
pieces = input_str.split(" ")
for piece in pieces:
if len(chunk) + len(piece) < max_length:
chunk = " ".join([chunk, piece])
else:
chunks.append(chunk)
chunk = piece
chunks.append(chunk)
return "\n".join(chunks).strip()
def _format_gates(payload, all=False):
if not all:
header = ["Gate", "Description"]
else:
header = ["Gate", "Description", "State", "Superceded By"]
t = PrettyTable(header, hrules=ALL)
t.align = "l"
if payload:
for gate in payload:
desc = string_splitter(gate.get("description", ""), 60)
if all:
t.add_row(
[
gate["name"].lower(),
desc,
gate.get("state", ""),
gate.get("superceded_by", ""),
]
)
elif gate.get("state") in [None, "active"]:
t.add_row([gate["name"].lower(), desc])
return t.get_string(sortby="Gate", print_empty=True)
else:
return "No policy spec to parse"
def _format_triggers(payload, gate, all=False):
if not all:
header = ["Trigger", "Description", "Parameters"]
else:
header = ["Trigger", "Description", "Parameters", "State", "Superceded By"]
t = PrettyTable(header, hrules=ALL)
t.align = "l"
if payload:
for gate in [x for x in payload if x["name"].lower() == gate]:
for trigger_entry in gate.get("triggers", []):
desc = string_splitter(trigger_entry.get("description", ""))
param_str = string_splitter(
", ".join(
[x["name"].lower() for x in trigger_entry.get("parameters", [])]
),
max_length=20,
)
if all:
t.add_row(
[
trigger_entry["name"].lower(),
desc,
param_str,
trigger_entry.get("state", ""),
trigger_entry.get("superceded_by", ""),
]
)
elif trigger_entry.get("state") in [None, "active"]:
t.add_row([trigger_entry["name"].lower(), desc, param_str])
return t.get_string(sortby="Trigger", print_empty=True)
else:
return "No policy spec to parse"
def _format_trigger_params(payload, gate, trigger, all=False):
if all:
header = [
"Parameter",
"Description",
"Required",
"Example",
"State",
"Supereceded By",
]
else:
header = ["Parameter", "Description", "Required", "Example"]
t = PrettyTable(header, hrules=ALL)
t.align = "l"
if payload:
for gate in [x for x in payload if x["name"].lower() == gate]:
for trigger_entry in [
x for x in gate.get("triggers", []) if x["name"].lower() == trigger
]:
for p in trigger_entry.get("parameters", []):
desc = string_splitter(p.get("description", ""))
if all:
t.add_row(
[
p["name"].lower(),
desc,
p.get("required", True),
p.get("example", ""),
p.get("state", ""),
p.get("superceded_by", ""),
]
)
elif p.get("state") in [None, "active"]:
t.add_row(
[
p["name"].lower(),
desc,
p.get("required", True),
p.get("example", ""),
]
)
return t.get_string(sortby="Parameter", print_empty=True)
else:
return "No policy spec to parse"
def get_eval_ecode(evaldata, imageDigest):
# evaldata layout: evaldata[0][imageDigest][fulltag][0]["status"]
ret = 2
try:
fulltag = list(evaldata[0][imageDigest].keys())[0]
status = evaldata[0][imageDigest][fulltag][0]["status"].lower()
if status == "pass":
ret = 0
elif status == "fail":
ret = 1
else:
raise Exception("got unknown eval status result: " + str(status))
except Exception:
ret = 2
return ret
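# Illustrative sketch (hypothetical helper and values): shows the nested payload
# shape that get_eval_ecode() walks -- a one-element list keyed by image digest,
# then by full tag, holding a list of evaluation records.
def _example_get_eval_ecode():
    digest = "sha256:<hypothetical-digest>"
    evaldata = [{digest: {"docker.io/library/nginx:latest": [{"status": "pass"}]}}]
    # "pass" -> 0, "fail" -> 1, anything else or a malformed payload -> 2
    return get_eval_ecode(evaldata, digest)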
def get_ecode(response):
ecode = 2
try:
httpcode = response["httpcode"]
_logger.debug("fetched httpcode from response: %s", str(httpcode))
if httpcode in range(200, 299):
ecode = 0
elif httpcode in [401, 500]:
ecode = 2
else:
ecode = 1
except Exception:
pass
return ecode
def check_access(config):
# test the endpoint
try:
rc = anchorecli.clients.apiexternal.get_base_routes(config)
if not rc["success"]:
raise Exception(json.dumps(rc["error"], sort_keys=True))
except Exception as err:
if config["debug"]:
raise Exception(
"could not access anchore service (user="
+ str(config["user"])
+ " url="
+ str(config["url"])
+ "): "
+ str(err)
)
else:
raise Exception(
"could not access anchore service (user="
+ str(config["user"])
+ " url="
+ str(config["url"])
+ ")"
)
return True
def discover_inputimage_format(config, input_string):
itype = None
if re.match("(sha256|local):[0-9a-fA-F]{64}", input_string):
itype = "imageDigest"
elif re.match("[0-9a-fA-F]{64}", input_string):
itype = "imageid"
else:
itype = "tag"
return itype
def discover_inputimage(config, input_string):
patt = re.match("(.*@|^)(sha256:.*)", input_string)
if patt:
urldigest = quote_plus(patt.group(2))
return ("digest", input_string, urldigest)
try:
digest = unquote_plus(str(input_string))
patt = re.match("(.*@|^)(sha256:.*)", digest)
if patt:
return ("imageDigest", input_string, input_string)
patt = re.match("(.*@|^)(local:.*)", digest)
if patt:
return ("imageDigest", input_string, input_string)
except Exception:
pass
urldigest = None
ret_type = "tag"
try:
ret = anchorecli.clients.apiexternal.get_image(config, tag=input_string)
if ret["success"]:
urldigest = ret["payload"][0]["imageDigest"]
try:
image_record = ret["payload"][0]
for image_detail in image_record["image_detail"]:
if input_string == image_detail["imageId"]:
ret_type = "imageid"
break
except Exception:
pass
else:
pass
except Exception:
urldigest = None
return ret_type, input_string, urldigest
def parse_dockerimage_string(instr):
host = None
port = None
repo = None
tag = None
registry = None
repotag = None
fulltag = None
fulldigest = None
digest = None
imageId = None
if re.match("^sha256:.*", instr):
registry = "docker.io"
digest = instr
elif len(instr) == 64 and not re.findall("[^0-9a-fA-F]+", instr):
imageId = instr
else:
# get the host/port
patt = re.match("(.*?)/(.*)", instr)
if patt:
a = patt.group(1)
remain = patt.group(2)
patt = re.match("(.*?):(.*)", a)
if patt:
host = patt.group(1)
port = patt.group(2)
elif a == "docker.io":
host = "docker.io"
port = None
elif a in ["localhost", "localhost.localdomain", "localbuild"]:
host = a
port = None
else:
patt = re.match(".*\..*", a)
if patt:
host = a
else:
host = "docker.io"
remain = instr
port = None
else:
host = "docker.io"
port = None
remain = instr
# get the repo/tag
patt = re.match("(.*)@(.*)", remain)
if patt:
repo = patt.group(1)
digest = patt.group(2)
else:
patt = re.match("(.*):(.*)", remain)
if patt:
repo = patt.group(1)
tag = patt.group(2)
else:
repo = remain
tag = "latest"
if not tag:
tag = "latest"
if port:
registry = ":".join([host, port])
else:
registry = host
if digest:
repotag = "@".join([repo, digest])
else:
repotag = ":".join([repo, tag])
fulltag = "/".join([registry, repotag])
if not digest:
digest = None
else:
fulldigest = registry + "/" + repo + "@" + digest
tag = None
fulltag = None
repotag = None
ret = {}
ret["host"] = host
ret["port"] = port
ret["repo"] = repo
ret["tag"] = tag
ret["registry"] = registry
ret["repotag"] = repotag
ret["fulltag"] = fulltag
ret["digest"] = digest
ret["fulldigest"] = fulldigest
ret["imageId"] = imageId
if ret["fulldigest"]:
ret["pullstring"] = ret["fulldigest"]
elif ret["fulltag"]:
ret["pullstring"] = ret["fulltag"]
else:
ret["pullstring"] = None
return ret
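# Illustrative sketch (hypothetical helper): expected output of
# parse_dockerimage_string() for a plain tag reference, traced from the logic
# above.
def _example_parse_dockerimage_string():
    parsed = parse_dockerimage_string("docker.io/library/nginx:latest")
    # parsed["registry"]   -> "docker.io"
    # parsed["repo"]       -> "library/nginx"
    # parsed["tag"]        -> "latest"
    # parsed["fulltag"]    -> "docker.io/library/nginx:latest"
    # parsed["pullstring"] -> "docker.io/library/nginx:latest"
    return parsed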
| 37.578431
| 118
| 0.449517
|
85baf3a534d283793d04761bbe1c93ea3351fcd4
| 13,516
|
py
|
Python
|
tflearn/models/generator.py
|
AnshulBasia/tflearn
|
e6f1aa8c19f90cfb2d4b0b03133c434ee1dba376
|
[
"MIT"
] | 1
|
2017-05-12T07:26:14.000Z
|
2017-05-12T07:26:14.000Z
|
tflearn/models/generator.py
|
mathematiguy/tflearn
|
095cf976f4a79ef5da8a970fec7ab533707c6c59
|
[
"MIT"
] | null | null | null |
tflearn/models/generator.py
|
mathematiguy/tflearn
|
095cf976f4a79ef5da8a970fec7ab533707c6c59
|
[
"MIT"
] | 1
|
2018-12-25T07:18:57.000Z
|
2018-12-25T07:18:57.000Z
|
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
import tensorflow as tf
from ..helpers.trainer import Trainer, evaluate as eval
from ..helpers.evaluator import Evaluator
from ..utils import feed_dict_builder, is_none
class SequenceGenerator(object):
""" Sequence Generator Model.
A deep neural network model for generating sequences.
Arguments:
network: `Tensor`. Neural network to be used.
dictionary: `dict`. A dictionary associating each sample with a key (
usually integers). For example: {'a': 0, 'b': 1, 'c': 2, ...}.
seq_maxlen: `int`. The maximum length of a sequence.
tensorboard_verbose: `int`. Summary verbose level, it accepts
different levels of tensorboard logs:
```python
0 - Loss, Accuracy (Best Speed).
1 - Loss, Accuracy, Gradients.
2 - Loss, Accuracy, Gradients, Weights.
3 - Loss, Accuracy, Gradients, Weights, Activations, Sparsity.
(Best visualization)
```
tensorboard_dir: `str`. Directory to store tensorboard logs.
Default: "/tmp/tflearn_logs/"
checkpoint_path: `str`. Path to store model checkpoints. If None,
no model checkpoint will be saved. Default: None.
max_checkpoints: `int` or None. Maximum amount of checkpoints. If
None, no limit. Default: None.
session: `Session`. A session for running ops. If None, a new one will
be created. Note: When providing a session, variables must have been
initialized already, otherwise an error will be raised.
Attributes:
trainer: `Trainer`. Handle model training.
predictor: `Predictor`. Handle model prediction.
session: `Session`. The current model session.
"""
def __init__(self, network, dictionary=None, seq_maxlen=25,
clip_gradients=0.0, tensorboard_verbose=0,
tensorboard_dir="/tmp/tflearn_logs/",
checkpoint_path=None, max_checkpoints=None,
session=None):
assert isinstance(network, tf.Tensor), "'network' arg is not a Tensor!"
self.net = network
self.train_ops = tf.get_collection(tf.GraphKeys.TRAIN_OPS)
self.trainer = Trainer(self.train_ops,
clip_gradients=clip_gradients,
tensorboard_dir=tensorboard_dir,
tensorboard_verbose=tensorboard_verbose,
checkpoint_path=checkpoint_path,
max_checkpoints=max_checkpoints,
session=session)
self.session = self.trainer.session
self.inputs = tf.get_collection(tf.GraphKeys.INPUTS)
self.targets = tf.get_collection(tf.GraphKeys.TARGETS)
self.predictor = Evaluator([self.net],
session=self.session)
self.dic = dictionary
self.rev_dic = reverse_dictionary(dictionary)
self.seq_maxlen = seq_maxlen
def fit(self, X_inputs, Y_targets, n_epoch=10, validation_set=None,
show_metric=False, batch_size=None, shuffle=None,
snapshot_epoch=True, snapshot_step=None, excl_trainops=None,
run_id=None):
""" Fit.
Train model, feeding X_inputs and Y_targets to the network.
NOTE: When not feeding dicts, data assignment follows the
input/estimator layer creation order (for example, the second
input layer created will be fed the second value of the
X_inputs list).
Examples:
```python
model.fit(X, Y) # Single input and output
model.fit({'input1': X}, {'output1': Y}) # Single input and output
model.fit([X1, X2], Y) # Multiple inputs, single output
# validate with X_val and [Y1_val, Y2_val]
model.fit(X, [Y1, Y2], validation_set=(X_val, [Y1_val, Y2_val]))
# 10% of training data used for validation
model.fit(X, Y, validation_set=0.1)
```
Arguments:
X_inputs: array, `list` of array (if multiple inputs) or `dict`
(with inputs layer name as keys). Data to feed to train
model.
Y_targets: array, `list` of array (if multiple inputs) or `dict`
(with estimators layer name as keys). Targets (Labels) to
feed to train model. Usually set as the next element of a
sequence, i.e. for x[0] => y[0] = x[1].
n_epoch: `int`. Number of epochs to run. Default: 10.
validation_set: `tuple`. Represents data used for validation.
`tuple` holds data and targets (provided as same type as
X_inputs and Y_targets). Additionally, it also accepts
`float` (<1) to perform a data split over the training data.
show_metric: `bool`. Display or not accuracy at every step.
batch_size: `int` or None. If `int`, overrides all network
estimators 'batch_size' by this value.
shuffle: `bool` or None. If `bool`, overrides all network
estimators 'shuffle' by this value.
snapshot_epoch: `bool`. If True, the model is snapshotted at the end
of every epoch. (Snapshotting a model evaluates it on the validation
set and creates a checkpoint if 'checkpoint_path' is specified.)
snapshot_step: `int` or None. If `int`, the model is snapshotted
every 'snapshot_step' steps.
excl_trainops: `list` of `TrainOp`. A list of train ops to
exclude from the training process (TrainOps can be retrieved
through `tf.get_collection_ref(tf.GraphKeys.TRAIN_OPS)`).
run_id: `str`. Give a name for this run. (Useful for Tensorboard).
"""
if batch_size:
for train_op in self.train_ops:
train_op.batch_size = batch_size
valX, valY = None, None
if validation_set:
if isinstance(validation_set, float):
valX = validation_set
valY = validation_set
else:
valX = validation_set[0]
valY = validation_set[1]
# For simplicity we build the feed dict synchronously here, but
# Trainer supports asynchronous feed dict allocation
feed_dict = feed_dict_builder(X_inputs, Y_targets, self.inputs,
self.targets)
feed_dicts = [feed_dict for i in self.train_ops]
val_feed_dicts = None
if not (is_none(valX) or is_none(valY)):
if isinstance(valX, float):
val_feed_dicts = valX
else:
val_feed_dict = feed_dict_builder(valX, valY, self.inputs,
self.targets)
val_feed_dicts = [val_feed_dict for i in self.train_ops]
# Retrieve data preprocessing and augmentation
dprep_dict, daug_dict = {}, {}
dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
daug_collection = tf.get_collection(tf.GraphKeys.DATA_AUG)
for i in range(len(self.inputs)):
if dprep_collection[i] is not None:
dprep_dict[self.inputs[i]] = dprep_collection[i]
if daug_collection[i] is not None:
daug_dict[self.inputs[i]] = daug_collection[i]
self.trainer.fit(feed_dicts, val_feed_dicts=val_feed_dicts,
n_epoch=n_epoch,
show_metric=show_metric,
snapshot_step=snapshot_step,
snapshot_epoch=snapshot_epoch,
shuffle_all=shuffle,
dprep_dict=dprep_dict,
daug_dict=daug_dict,
excl_trainops=excl_trainops,
run_id=run_id)
self.predictor = Evaluator([self.net],
session=self.trainer.session)
def _predict(self, X):
feed_dict = feed_dict_builder(X, None, self.inputs, None)
return self.predictor.predict(feed_dict)
def generate(self, seq_length, temperature=0.5, seq_seed=None,
display=False):
""" Generate.
Generate a sequence. Temperature controls the novelty of the
generated sequence: a temperature near 0 yields sequences close to
the training samples, while higher temperatures increase novelty.
For best results, seed the generator with a random sample sequence
taken from the training dataset.
Arguments:
seq_length: `int`. The generated sequence length.
temperature: `float`. Novelty rate.
seq_seed: `sequence`. A sequence used as a seed to generate a
new sequence. Suggested to be a sequence from data used for
training.
display: `bool`. If True, print sequence as it is generated.
Returns:
The generated sequence.
"""
generated = seq_seed[:]
sequence = seq_seed[:]
whole_sequence = seq_seed[:]
if display: sys.stdout.write(str(generated))
for i in range(seq_length):
x = np.zeros((1, self.seq_maxlen, len(self.dic)))
for t, char in enumerate(sequence):
x[0, t, self.dic[char]] = 1.
preds = self._predict(x)[0]
next_index = _sample(preds, temperature)
next_char = self.rev_dic[next_index]
try: #Python 2
unicode_or_str = [str, unicode]
except NameError:  # Python 3
unicode_or_str = [str]
if type(sequence) in unicode_or_str:
generated += next_char
sequence = sequence[1:] + next_char
whole_sequence += next_char
else:
generated.append(next_char)
sequence = sequence[1:]
sequence.append(next_char)
whole_sequence.append(next_char)
if display:
sys.stdout.write(str(next_char))
sys.stdout.flush()
if display: print()
return whole_sequence
def save(self, model_file):
""" Save.
Save model weights.
Arguments:
model_file: `str`. Model path.
"""
self.trainer.save(model_file)
def load(self, model_file, **optargs):
""" Load.
Restore model weights.
Arguments:
model_file: `str`. Model path.
optargs: optional extra arguments for trainer.restore (see helpers/trainer.py)
These optional arguments may be used to limit the scope of
variables restored, and to control whether a new session is
created for the restored variables.
"""
self.trainer.restore(model_file, **optargs)
self.session = self.trainer.session
self.predictor = Evaluator([self.net],
session=self.session,
model=None)
for d in tf.get_collection(tf.GraphKeys.DATA_PREP):
if d: d.restore_params(self.session)
def get_weights(self, weight_tensor):
""" Get weights.
Get a variable weights.
Examples:
sgen = SequenceGenerator(...)
w = sgen.get_weights(denselayer.W) -- get a dense layer's weights
Arguments:
weight_tensor: `tf.Tensor`. A Variable.
Returns:
`np.array`. The provided variable weights.
"""
return weight_tensor.eval(self.trainer.session)
def set_weights(self, tensor, weights):
""" Set Weights.
Assign a tensor variable a given value.
Arguments:
tensor: `Tensor`. The tensor variable to assign value.
weights: The value to be assigned.
"""
op = tf.assign(tensor, weights)
self.trainer.session.run(op)
def evaluate(self, X, Y, batch_size=128):
""" Evaluate.
Evaluate model on given samples.
Arguments:
X: array, `list` of array (if multiple inputs) or `dict`
(with inputs layer name as keys). Data to feed to train
model.
Y: array, `list` of array (if multiple inputs) or `dict`
(with estimators layer name as keys). Targets (Labels) to
feed to train model. Usually set as the next element of a
sequence, i.e. for x[0] => y[0] = x[1].
batch_size: `int`. The batch size. Default: 128.
Returns:
The metric score.
"""
feed_dict = feed_dict_builder(X, Y, self.inputs, self.targets)
return eval(self.trainer.session, self.net, feed_dict, batch_size)
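# Minimal usage sketch (illustrative only, not from the original module): assumes
# the classic TF1-era tflearn layer helpers (input_data, lstm, fully_connected,
# regression); the tiny all-zeros training data is only there to show the
# expected one-hot shapes, not to train a meaningful model.
def _example_sequence_generator():
    import numpy as np
    import tflearn
    char_dic = {'a': 0, 'b': 1, 'c': 2}
    maxlen = 5
    net = tflearn.input_data(shape=[None, maxlen, len(char_dic)])
    net = tflearn.lstm(net, 32)
    net = tflearn.fully_connected(net, len(char_dic), activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
    model = SequenceGenerator(net, dictionary=char_dic, seq_maxlen=maxlen)
    # X: (n_samples, seq_maxlen, vocab) one-hot sequences; Y: (n_samples, vocab)
    # one-hot "next character" targets.
    X = np.zeros((4, maxlen, len(char_dic)), dtype=np.float32)
    Y = np.zeros((4, len(char_dic)), dtype=np.float32)
    X[:, :, 0] = 1.0
    Y[:, 1] = 1.0
    model.fit(X, Y, n_epoch=1)
    # Seed with a list of known characters; its length should not exceed seq_maxlen.
    return model.generate(10, temperature=0.8, seq_seed=list('abcab'))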
def reverse_dictionary(dic):
# Build reverse dict
rev_dic = {}
for key in dic:
rev_dic[dic[key]] = key
return rev_dic
def _sample(a, temperature=1.0):
# helper function to sample an index from a probability array
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
| 39.752941
| 90
| 0.580423
|
a6ce11cbc3c2ea782fc3581c0d2c70d18a0d1f30
| 2,078
|
py
|
Python
|
ip_generators.py
|
G4LD0R/spoofDos
|
c7302fb18aca6196f87c13d38b8f6559c5c4ef89
|
[
"MIT"
] | null | null | null |
ip_generators.py
|
G4LD0R/spoofDos
|
c7302fb18aca6196f87c13d38b8f6559c5c4ef89
|
[
"MIT"
] | null | null | null |
ip_generators.py
|
G4LD0R/spoofDos
|
c7302fb18aca6196f87c13d38b8f6559c5c4ef89
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#-*- coding: utf-8 -*-
#ip_generators.py
from bs4 import BeautifulSoup
import requests
import random
import socket
import os, sys
import re
class generator:
def __init__(self):
    self.data = None
# Generate a random IP address based on the given sample address and netmask
def random_ip(self, ipaddress="192.168.1.7", netmask="255.0.0.0"):
try:
a,b,c,d = ipaddress.split(".")
k = str(random.randint(0, 255))
l = str(random.randint(0, 255))
m = str(random.randint(0, 255))
if netmask == "255.255.255.0":
ip_address = "{0}.{1}.{2}.{3}".format(a, b, c, k)
elif netmask == "255.255.0.0":
ip_address = "{0}.{1}.{2}.{3}".format(a, b, k, l)
elif netmask == "255.0.0.0":
ip_address = "{0}.{1}.{2}.{3}".format(a, k, l, m)
else:
ip_address = False
return ip_address
except Exception:
return False
# Return this host's own IP address
def own_ip(self):
try:
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
return ip_address
except Exception:
return False
# Read IP addresses from a wordlist file and return one at random
def read_from_wordlist(self, path):
ip_list = []
if os.path.exists(path):
with open(path, "r") as wordlist:
dataCaptured = wordlist.readlines()
for row in dataCaptured:
try:
a, b = row.split("\n")
ip_list.append(a)
except Exception:
pass
ip_address = random.choice(ip_list)
return ip_address
else:
return False
# Fetch real IP addresses from free proxy list portals
def fetch_actual_ips(self):
try:
proxy_list = ["https://www.us-proxy.org/", "https://www.sslproxies.org/", "https://free-proxy-list.net/"]
code = -1
while code != 200:
url = random.choice(proxy_list)
response = requests.get(url)
code = response.status_code
soup = BeautifulSoup(response.content,"lxml")
ipList = []
trs = soup.find("table", attrs={"id":"proxylisttable"}).find("tbody").find_all("tr")
for tr in trs:
ip = tr.find("td").text
ipList.append(ip)
return ipList
except Exception:
return False
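# Illustrative usage sketch (hypothetical helper; sample addresses are made up).
# fetch_actual_ips() performs real HTTP requests, so it is intentionally not
# called here.
def _example_generator_usage():
    gen = generator()
    spoofed = gen.random_ip("192.168.1.7", "255.255.0.0")  # -> "192.168.x.y"
    own = gen.own_ip()  # -> this host's address, or False on failure
    return spoofed, own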
| 21.42268
| 108
| 0.630414
|
c92811fe253586cdc3b5ec07843671845bb1a6c7
| 1,277
|
py
|
Python
|
Proper/proper/examples/talbot.py
|
RupertDodkins/medis
|
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
|
[
"MIT"
] | 1
|
2021-06-25T17:35:56.000Z
|
2021-06-25T17:35:56.000Z
|
Proper/proper/examples/talbot.py
|
RupertDodkins/medis
|
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
|
[
"MIT"
] | null | null | null |
Proper/proper/examples/talbot.py
|
RupertDodkins/medis
|
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
|
[
"MIT"
] | 2
|
2018-12-08T15:05:13.000Z
|
2019-08-08T17:28:24.000Z
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import proper
import numpy as np
def talbot(wavelength, gridsize, PASSVALUE = {'period': 0., 'diam': 0., 'dist': 0.}):
talbot_length = 2. * PASSVALUE['period']**2 / wavelength
wfo = proper.prop_begin(PASSVALUE['diam'], wavelength, gridsize)
# create 1-D grating pattern
m = 0.2
x = (np.arange(gridsize, dtype = np.float64) - gridsize/2) \
* proper.prop_get_sampling(wfo)
grating = 0.5 * (1 + m * np.cos(2*np.pi*x/PASSVALUE['period']))
# create 2-D amplitude grating pattern
grating = np.dot(grating.reshape(gridsize,1), np.ones([1,gridsize], dtype = np.float64))
proper.prop_multiply(wfo, grating)
proper.prop_define_entrance(wfo)
proper.prop_propagate(wfo, PASSVALUE['dist'], TO_PLANE = True)
(wfo, sampling) = proper.prop_end(wfo, NOABS = True)
return (wfo, sampling)
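# Illustrative sketch (not part of the original example): calls the prescription
# directly with made-up parameters, propagating by a quarter of the Talbot
# length. In a typical PROPER workflow a prescription like this is executed via
# proper.prop_run rather than being called directly.
def _example_talbot():
    wavelength = 0.5e-6   # meters (hypothetical)
    period = 0.04         # grating period in meters (hypothetical)
    z_talbot = 2. * period**2 / wavelength
    passvalue = {'period': period, 'diam': 0.1, 'dist': z_talbot / 4.}
    wavefront, sampling = talbot(wavelength, 256, PASSVALUE=passvalue)
    return wavefront, sampling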
| 32.74359
| 92
| 0.661707
|
79db95ba61c2e66c512d1f44e65dba6d1d783045
| 7,590
|
py
|
Python
|
desdeo_tools/interaction/validators.py
|
aswinmurali-io/desdeo-tools
|
86d4792ded3383f43658832d7f5dca69f236d61b
|
[
"MIT"
] | 1
|
2021-04-26T13:59:19.000Z
|
2021-04-26T13:59:19.000Z
|
desdeo_tools/interaction/validators.py
|
aswinmurali-io/desdeo-tools
|
86d4792ded3383f43658832d7f5dca69f236d61b
|
[
"MIT"
] | 13
|
2020-05-11T13:05:22.000Z
|
2022-03-12T01:02:01.000Z
|
desdeo_tools/interaction/validators.py
|
aswinmurali-io/desdeo-tools
|
86d4792ded3383f43658832d7f5dca69f236d61b
|
[
"MIT"
] | 9
|
2019-11-05T14:30:26.000Z
|
2021-08-28T11:57:43.000Z
|
import pandas as pd
import numpy as np
class ValidationError(Exception):
"""Raised when an error related to the validation is encountered.
"""
def validate_ref_point_with_ideal_and_nadir(
dimensions_data: pd.DataFrame, reference_point: pd.DataFrame
):
validate_ref_point_dimensions(dimensions_data, reference_point)
validate_ref_point_data_type(reference_point)
validate_ref_point_with_ideal(dimensions_data, reference_point)
validate_with_ref_point_nadir(dimensions_data, reference_point)
def validate_ref_point_with_ideal(
dimensions_data: pd.DataFrame, reference_point: pd.DataFrame
):
validate_ref_point_dimensions(dimensions_data, reference_point)
ideal_fitness = dimensions_data.loc["ideal"] * dimensions_data.loc["minimize"]
ref_point_fitness = reference_point * dimensions_data.loc["minimize"]
if not (ideal_fitness <= ref_point_fitness).all(axis=None):
problematic_columns = ideal_fitness.index[
(ideal_fitness > ref_point_fitness).values.tolist()[0]
].values
msg = (
f"Reference point should be worse than or equal to the ideal point\n"
f"The following columns have problematic values: {problematic_columns}"
)
raise ValidationError(msg)
def validate_with_ref_point_nadir(
dimensions_data: pd.DataFrame, reference_point: pd.DataFrame
):
validate_ref_point_dimensions(dimensions_data, reference_point)
nadir_fitness = dimensions_data.loc["nadir"] * dimensions_data.loc["minimize"]
ref_point_fitness = reference_point * dimensions_data.loc["minimize"]
if not (ref_point_fitness <= nadir_fitness).all(axis=None):
problematic_columns = nadir_fitness.index[
(nadir_fitness < ref_point_fitness).values.tolist()[0]
].values
msg = (
f"Reference point should be better than or equal to the nadir point\n"
f"The following columns have problematic values: {problematic_columns}"
)
raise ValidationError(msg)
def validate_ref_point_dimensions(
dimensions_data: pd.DataFrame, reference_point: pd.DataFrame
):
if not dimensions_data.shape[1] == reference_point.shape[1]:
msg = (
f"There is a mismatch in the number of columns of the dataframes.\n"
f"Columns in dimensions data: {dimensions_data.columns}\n"
f"Columns in the reference point provided: {reference_point.columns}"
)
raise ValidationError(msg)
if not all(dimensions_data.columns == reference_point.columns):
msg = (
f"There is a mismatch in the column names of the dataframes.\n"
f"Columns in dimensions data: {dimensions_data.columns}\n"
f"Columns in the reference point provided: {reference_point.columns}"
)
raise ValidationError(msg)
def validate_ref_point_data_type(reference_point: pd.DataFrame):
for dtype in reference_point.dtypes:
if not pd.api.types.is_numeric_dtype(dtype):
msg = (
f"Type of data in reference point dataframe should be numeric.\n"
f"Provided datatype: {dtype}"
)
raise ValidationError(msg)
def validate_specified_solutions(indices: np.ndarray, n_solutions: int) -> None:
"""Validate the Decision maker's choice of preferred/non-preferred solutions.
Args:
indices (np.ndarray): Index/indices of preferred solutions specified by the Decision maker.
n_solutions (int): Number of solutions in total.
Returns:
Raises:
ValidationError: In case the preference is invalid.
"""
if not isinstance(indices, (np.ndarray, list)):
    raise ValidationError("Please specify index/indices of (non-)preferred solutions in a list, even if there is only "
                          "one.")
if len(indices) < 1:
    raise ValidationError("Please specify at least one (non-)preferred solution.")
if not all(0 <= i <= (n_solutions - 1) for i in indices):
msg = "indices of (non-)preferred solutions should be between 0 and {}. Current indices are {}." \
.format(n_solutions - 1, indices)
raise ValidationError(msg)
def validate_bounds(dimensions_data: pd.DataFrame, bounds: np.ndarray, n_objectives: int) -> None:
"""Validate the Decision maker's desired lower and upper bounds for objective values.
Args:
dimensions_data (pd.DataFrame): DataFrame including information whether an objective is minimized or
maximized, for each objective. In addition, includes ideal and nadir vectors.
bounds (np.ndarray): Desired lower and upper bounds for each objective.
n_objectives (int): Number of objectives in problem.
Returns:
Raises:
ValidationError: In case desired bounds are invalid.
"""
if not isinstance(bounds, np.ndarray):
msg = "Please specify bounds as a numpy array. Current type: {}.".format(type(bounds))
raise ValidationError(msg)
if len(bounds) != n_objectives:
msg = "Length of 'bounds' ({}) must be the same as number of objectives ({}).".format(len(bounds), n_objectives)
raise ValidationError(msg)
if not all(isinstance(b, (np.ndarray, list)) for b in bounds):
msg = "Please give bounds for each objective in a list."
raise ValidationError(msg)
if any(len(b) != 2 for b in bounds):
msg = "Length of each item of 'bounds' must 2, containing the lower and upper bound for an objective."
raise ValidationError(msg)
if any(b[0] > b[1] for b in bounds):
msg = "Lower bound cannot be greater than upper bound. Please specify lower bound first, then upper bound."
raise ValidationError(msg)
# check that bounds are within ideal and nadir points for each objective
for i, b in enumerate(bounds):
if dimensions_data.loc['minimize'].values.tolist()[i] == 1: # minimized objectives
if dimensions_data.loc['ideal'].values.tolist()[i] is not None:
if b[0] < dimensions_data.loc['ideal'].values.tolist()[i]:
msg = "Lower bound cannot be lower than ideal value for objective. Ideal vector: {}." \
.format(dimensions_data.loc['ideal'].values.tolist())
raise ValidationError(msg)
if dimensions_data.loc['nadir'].values.tolist()[i] is not None:
if b[1] > dimensions_data.loc['nadir'].values.tolist()[i]:
msg = "Upper bound cannot be higher than nadir value for objective. Nadir vector: {}." \
.format(dimensions_data.loc['nadir'].values.tolist())
raise ValidationError(msg)
else: # maximized objectives:
if dimensions_data.loc['ideal'].values.tolist()[i] is not None:
if b[1] > dimensions_data.loc['ideal'].values.tolist()[i]:
msg = "Upper bound cannot be higher than ideal value for objective. Ideal vector: {}." \
.format(dimensions_data.loc['ideal'].values.tolist())
raise ValidationError(msg)
if dimensions_data.loc['nadir'].values.tolist()[i] is not None:
if b[0] < dimensions_data.loc['nadir'].values.tolist()[i]:
msg = "Lower bound cannot be lower than nadir value for objective. Nadir vector: {}." \
.format(dimensions_data.loc['nadir'].values.tolist())
raise ValidationError(msg)
| 46.564417 | 123 | 0.66166 |
40253dbb09ff601dee43edf16be33a3d5289bf8c | 511 | py | Python | bbs/migrations/0006_userprofile_friends.py | luo1fly/s10day12bbs | ae927f0f3d9f5a09863e06bf894ac53cac83a61f | ["Apache-2.0"] | null | null | null | bbs/migrations/0006_userprofile_friends.py | luo1fly/s10day12bbs | ae927f0f3d9f5a09863e06bf894ac53cac83a61f | ["Apache-2.0"] | null | null | null | bbs/migrations/0006_userprofile_friends.py | luo1fly/s10day12bbs | ae927f0f3d9f5a09863e06bf894ac53cac83a61f | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-04 06:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bbs', '0005_auto_20160804_0405'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='friends',
            field=models.ManyToManyField(blank=True, related_name='_userprofile_friends_+', to='bbs.UserProfile'),
),
]
| 24.333333 | 111 | 0.637965 |
373b0547bc90a6e757715731adfb8c34c6b31bc4 | 175 | py | Python | Chapter02/musicterminal/pytify/auth/authorization.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | ["MIT"] | 72 | 2017-12-19T09:19:40.000Z | 2021-11-08T13:13:34.000Z | Chapter02/musicterminal/pytify/auth/authorization.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | ["MIT"] | 20 | 2018-03-21T01:15:27.000Z | 2021-09-08T00:59:40.000Z | Chapter02/musicterminal/pytify/auth/authorization.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | ["MIT"] | 53 | 2017-12-19T09:19:42.000Z | 2022-03-06T02:21:10.000Z |
from collections import namedtuple
Authorization = namedtuple('Authorization', [
'access_token',
'token_type',
'expires_in',
'scope',
'refresh_token',
])
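# Illustrative sketch (not part of the original module): building an
# Authorization from a token-response dict; the field values are placeholders.
def _example_authorization():
    response = {
        'access_token': 'BQD-example-token',
        'token_type': 'Bearer',
        'expires_in': 3600,
        'scope': 'user-library-read',
        'refresh_token': 'AQC-example-refresh',
    }
    return Authorization(**response)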
| 17.5 | 45 | 0.668571 |
54895cac576ba3204649cfc456309d2fd8787518 | 2,139 | py | Python | setup.py | raghavven/molssi_bestpractices_2021 | e43bd7e269ed0f75f44107736bc5111fd5e21aaf | ["BSD-3-Clause"] | null | null | null | setup.py | raghavven/molssi_bestpractices_2021 | e43bd7e269ed0f75f44107736bc5111fd5e21aaf | ["BSD-3-Clause"] | null | null | null | setup.py | raghavven/molssi_bestpractices_2021 | e43bd7e269ed0f75f44107736bc5111fd5e21aaf | ["BSD-3-Clause"] | null | null | null |
"""
molecool
Visualization
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
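# Illustrative note: for "python setup.py test", sys.argv is
# ['setup.py', 'test'], so the intersection above is {'test'} (truthy) and
# pytest-runner is added to setup_requires; for e.g. "python setup.py sdist"
# the intersection is empty and pytest_runner stays [].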
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='molecool',
author='Raghav',
author_email='raghav011986@gmail.com',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
| 35.65 | 118 | 0.69799 |
45f5a2edaf4165fd8feac8e72c172a520bfa0b33 | 384 | py | Python | 01.py | Shourov1702040/hpylearners_Python | aa13010318995af8088a60bff321950ce60c2a9b | ["MIT"] | 7 | 2021-10-02T06:46:35.000Z | 2022-02-11T01:13:28.000Z | 01.py | Mohadeb-Kumar/hpylearners_Python | 5f2d2b0a14a357a0a2538196586f34e721ee097f | ["MIT"] | null | null | null | 01.py | Mohadeb-Kumar/hpylearners_Python | 5f2d2b0a14a357a0a2538196586f34e721ee097f | ["MIT"] | 6 | 2021-05-11T20:06:23.000Z | 2022-02-14T23:31:14.000Z |
# Python program to display the Fibonacci sequence
def recur_fibo(n):
if n <= 1:
return n
else:
return(recur_fibo(n-1) + recur_fibo(n-2))
nterms = 10
# check if the number of terms is valid
if nterms <= 0:
print("Plese enter a positive integer")
else:
print("Fibonacci sequence:")
for i in range(nterms):
print(recur_fibo(i))
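# A memoized variant -- an illustrative sketch, not part of the original
# program; caching turns the exponential recursion above into linear time.
from functools import lru_cache
@lru_cache(maxsize=None)
def recur_fibo_cached(n):
    if n <= 1:
        return n
    return recur_fibo_cached(n - 1) + recur_fibo_cached(n - 2)
# e.g. [recur_fibo_cached(i) for i in range(10)] -> [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]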
| 20.210526 | 51 | 0.619792 |
d8d365543e6c683b47f2e8ae1e88a320b32c85e5 | 18,037 | py | Python | pyspeckit/spectrum/measurements.py | glangsto/pyspeckit | 346b24fb828d1d33c7891cdde7609723e51af34c | ["MIT"] | null | null | null | pyspeckit/spectrum/measurements.py | glangsto/pyspeckit | 346b24fb828d1d33c7891cdde7609723e51af34c | ["MIT"] | 1 | 2021-05-14T19:17:41.000Z | 2021-05-14T19:17:41.000Z | pyspeckit/spectrum/measurements.py | glangsto/pyspeckit | 346b24fb828d1d33c7891cdde7609723e51af34c | ["MIT"] | 1 | 2018-10-02T15:11:17.000Z | 2018-10-02T15:11:17.000Z |
from __future__ import print_function
import numpy as np
from astropy.extern.six.moves import xrange
import itertools
from . import cosmology
from collections import OrderedDict
"""
To test:
import spectrum
spec = spectrum.Spectrum('sample_sdss.txt')
spec.plotter(xmin = 6400, xmax = 6800)
spec.specfit(guesses = [20, 6718.29, 5, 100, 6564.614, 20, 50, 6585.27, 20, 20, 6732.67, 5, 50, 6549.86, 5])
spec.measure()
"""
cm_per_mpc = 3.08568e+24
class Measurements(object):
def __init__(self, Spectrum, z=None, d=None, fluxnorm=None,
miscline=None, misctol=10., ignore=None, derive=True, debug=False,
restframe=False, ptol=2, sort=False):
"""
This can be called after a fit is run. It will inherit the specfit
object and derive as much as it can from modelpars. Just do:
spec.measure(z, xunits, fluxnorm)
Notes: If z (redshift) or d (distance) are present, we can compute
integrated line luminosities rather than just fluxes. Provide distance
in cm.
Only works with Gaussians. To generalize:
1. make sure we manipulate modelpars correctly, i.e. read in
entries corresponding to wavelength/frequency/whatever correctly.
Parameters
----------
z: float or None
redshift
d: float or None
distance in cm (used for luminosities)
fluxnorm: bool
Normalize the fluxes?
miscline: dictionary
            miscline = [{'name': 'H_alpha', 'wavelength': 6565}]
misctol: tolerance (in Angstroms) for identifying an unmatched line
to the line(s) we specify in miscline dictionary.
sort: bool
Sort the entries in order of observed wavelength (or velocity or
frequency)
"""
self.debug = debug
self.restframe = restframe
# Inherit specfit object
self.specfit = Spectrum.specfit
self.speclines = Spectrum.speclines
# Bit of a hack - help identifying unmatched lines
self.miscline = miscline
self.misctol = misctol
# Flux units in case we are interested in line luminosities or just having real flux units
if fluxnorm is not None:
self.fluxnorm = fluxnorm
else:
self.fluxnorm = 1
# This is where we'll keep our results
self.lines = OrderedDict()
# Read in observed wavelengths
tmp1 = np.reshape(self.specfit.modelpars, (int(len(self.specfit.modelpars) / 3), 3))
tmp2 = np.reshape(self.specfit.modelerrs, (int(len(self.specfit.modelerrs) / 3), 3))
if ignore is not None:
tmp1 = np.delete(tmp1, ignore, 0)
tmp2 = np.delete(tmp2, ignore, 0)
# each tmp1 contains amplitude,wavelength,width
# (Assumes gaussians)
wavelengths = tmp1[:,1]
# sort by wavelength
if sort:
order = np.argsort(wavelengths)
self.obspos = wavelengths[order]
else:
order = np.arange(wavelengths.size)
self.obspos = wavelengths
self.Nlines = wavelengths.size
# Read in modelpars and modelerrs, re-organize so they are 2D arrays sorted by ascending wavelength
self.modelpars = np.zeros_like(tmp1)
self.modelerrs = np.zeros_like(tmp2)
for i, element in enumerate(order):
self.modelpars[i] = tmp1[element]
self.modelerrs[i] = tmp2[element]
# Read in appropriate list of reference wavelengths/frequencies/whatever
self.reflines = self.speclines.optical.get_optical_lines()
self.refpos = self.reflines['xarr']
self.refname = self.reflines['name']
# Redshift reference lines if restframe = True
if self.restframe and z is not None:
self.refpos *= (1.0 + z)
# If distance or redshift has been provided, we can compute luminosities from fluxes
if d is not None:
self.d = d
else:
self.d = None
if z is not None:
self.cosmology = cosmology.Cosmology()
self.d = self.cosmology.LuminosityDistance(z) * cm_per_mpc
self.unmatched = self.identify_by_position(ptol=ptol)
#if np.sum(unmatched) >= 2:
# self.identify_by_spacing(unmatched)
if derive:
self.derive()
def identify_by_position(self, ptol):
"""
Match observed lines to nearest reference line. Don't use spacing at all.
ptol = tolerance (in angstroms) to accept positional match
"""
if not hasattr(self, 'lines'):
self.lines = OrderedDict()
# Fill lines dictionary
unmatched = np.zeros_like(self.obspos)
for i, pos in enumerate(self.obspos):
# Check miscline directory for match
matched = False
if self.miscline is not None:
for line in self.miscline:
if abs(pos - line['wavelength']) > ptol:
continue
matched = True
name = line['name']
break
if not matched:
diff = np.abs(pos - self.refpos)
loc = np.argmin(diff)
if diff[loc] <= ptol:
matched = True
name = self.refname[loc]
if name in self.lines.keys():
name += '_1'
num = int(name[-1])
while name in self.lines.keys():
num += 1
name = '%s_%i' % (self.refname[loc], num)
if matched:
self.lines[name] = {}
self.lines[name]['modelpars'] = list(self.modelpars[i])
self.lines[name]['modelerrs'] = list(self.modelerrs[i])
else:
name = 'unknown_1'
num = 1
while name in self.lines.keys():
num += 1
name = 'unknown_%i' % num
self.lines[name] = {}
self.lines[name]['modelpars'] = list(self.modelpars[i])
self.lines[name]['modelerrs'] = list(self.modelerrs[i])
unmatched[i] = 1
return unmatched
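    # Note (illustrative example of the matching above): with ptol=2, an
    # observed line at 6562.5 would match a reference entry at 6562.8 if one
    # is present (|diff| = 0.3 <= ptol), while an observed line more than 2
    # Angstroms from every reference entry is stored under an 'unknown_*' key.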
def identify_by_spacing(self):
"""
Determine identity of lines in self.modelpars. Fill entries of self.lines dictionary.
Note: This method will be infinitely slow for more than 10 or so lines.
"""
if self.unmatched is None:
self.unmatched = np.ones_like(self.obspos)
# Remove lines that were already identified
obspos = self.obspos[self.unmatched == 1]
# Spacing between observed lines (odiff) and reference lines (rdiff)
self.odiff = np.abs(np.diff(obspos))
self.rdiff = np.abs(np.diff(self.refpos))
# Don't try to identify lines with separations smaller than the smallest
# separation in our reference library
self.rdmin = 0.99 * min(self.rdiff)
# If lines have multiple components (i.e. spacing much closer than ref lines),
# delete them from ID list.
        if np.any(self.odiff < self.rdmin):
where = np.ravel(np.argwhere(self.odiff < self.rdmin))
odiff = np.delete(self.odiff, where)
multi = True
else:
where = 0
odiff = self.odiff
multi = False
refpos = self.refpos
refname = self.refname
# Don't include elements of reference array that are far away from the observed lines (speeds things up)
condition = (refpos >= 0.99 * min(self.obspos)) & (refpos <= 1.01 * max(self.obspos))
refpos = refpos[condition]
refname = refname[condition]
if len(refpos) == 0:
print('WARNING: No reference lines in this wavelength regime.')
elif len(refpos) < self.Nlines:
print('WARNING: More observed lines than reference lines in this band.')
# Construct all possible (N-element) combos of reference lines
combos = itertools.combinations(refpos, min(self.Nlines, len(refpos)))
# List to keep track of line identification. Each entry is (cost, (line1, line2, line3,...))
self.IDresults = []
for i, combo in enumerate(combos):
rdiff = np.diff(combo)
if len(odiff) == len(rdiff):
result = (np.sum(np.abs(odiff - rdiff)), combo)
self.IDresults.append(result)
else: # If more/less observed lines than reference lines, try excluding observed lines one at a time
if len(odiff) > len(rdiff):
subcombos = itertools.combinations(odiff, len(rdiff))
for subcombo in subcombos:
result = (np.sum(np.abs(subcombo - rdiff)), combo)
self.IDresults.append(result)
else:
subcombos = itertools.combinations(rdiff, len(odiff))
for subcombo in subcombos:
result = (np.sum(np.abs(odiff - subcombo)), combo)
self.IDresults.append(result)
# Pick best solution
        best = np.argmin([result[0] for result in self.IDresults]) # Location of best solution
ALLloc = [] # x-values of best fit lines in reference dictionary
# Determine indices of matched reference lines
for element in self.IDresults[best][1]:
ALLloc.append(np.argmin(np.abs(refpos - element)))
# Fill lines dictionary
for i, element in enumerate(ALLloc):
line = refname[element]
self.lines[line] = {}
loc = np.argmin(np.abs(self.obspos - refpos[element]))
self.lines[line]['modelpars'] = list(self.modelpars[loc])
self.lines[line]['modelerrs'] = list(self.modelerrs[loc])
# Track down odd lines (i.e. broad components of lines already identified)
# This won't yet work for lines that are truly unidentified
if len(ALLloc) < self.Nlines:
# Figure out which modelpars/errs that belong to lines that were already identified
mpars = self.modelpars.copy()
merrs = self.modelerrs.copy()
for line in self.lines:
                wavelengths = mpars[:, 1]
                i = np.argmin(np.abs(wavelengths - self.lines[line]['modelpars'][1]))
mpars = np.delete(mpars, i, 0)
merrs = np.delete(merrs, i, 0)
# Loop over unmatched modelpars/errs, find name of unmatched line, extend corresponding dict entry
if self.miscline is None:
                for i, x in enumerate(mpars[:, 1]):
self.lines['unknown%i' % i] = {}
self.lines['unknown%i' % i]['modelpars'] = mpars[i]
self.lines['unknown%i' % i]['modelerrs'] = merrs[i]
# If we've know a-priori which lines the unmatched lines are likely to be, use that information
else:
print(self.miscline)
for i, miscline in enumerate(self.miscline):
                    for j, x in enumerate(mpars[:, 1]):
if abs(x - miscline['wavelength']) < self.misctol:
name = miscline['name']
else:
name = 'unknown%i' % j
self.lines[name] = {}
self.lines[name]['modelpars'] = mpars[j]
self.lines[name]['modelerrs'] = merrs[j]
self.separate()
def derive(self):
"""
Calculate luminosity and FWHM for all spectral lines.
"""
for line in self.lines.keys():
if self.debug:
print("Computing parameters for line %s" % line)
self.lines[line]['fwhm'] = self.compute_fwhm(self.lines[line]['modelpars'])
self.lines[line]['flux'] = self.compute_flux(self.lines[line]['modelpars'])
self.lines[line]['amp'] = self.compute_amplitude(self.lines[line]['modelpars'])
self.lines[line]['pos'] = self.lines[line]['modelpars'][1]
if self.d is not None:
self.lines[line]['lum'] = self.compute_luminosity(self.lines[line]['modelpars'])
def separate(self):
"""
For multicomponent lines, separate into broad and narrow components (assume only one of components is narrow).
"""
        for key in list(self.lines.keys()):
modpars = self.lines[key]['modelpars']
moderrs = self.lines[key]['modelerrs']
if len(modpars) > 3:
                modpars2d = np.reshape(modpars, (len(modpars) // 3, 3))
                moderrs2d = np.reshape(moderrs, (len(moderrs) // 3, 3))
                sigma = modpars2d[:, 2]
minsigma = min(np.abs(sigma))
i_narrow = list(np.abs(sigma)).index(minsigma)
else:
continue
self.lines["{0}_N".format(key)] = {}
self.lines["{0}_N".format(key)]['modelpars'] = []
self.lines["{0}_N".format(key)]['modelerrs'] = []
self.lines["{0}_B".format(key)] = {}
self.lines["{0}_B".format(key)]['modelpars'] = []
self.lines["{0}_B".format(key)]['modelerrs'] = []
for i, arr in enumerate(modpars2d):
if i == i_narrow:
self.lines["{0}_N".format(key)]['modelpars'] = arr
self.lines["{0}_N".format(key)]['modelerrs'] = moderrs2d[i]
else:
self.lines["{0}_B".format(key)]['modelpars'].extend(arr)
self.lines["{0}_B".format(key)]['modelerrs'].extend(moderrs2d[i])
def compute_flux(self, pars):
"""
Calculate integrated flux of emission line. Works for multi-component fits too. Unnormalized.
"""
flux = 0
niter = (len(pars) / 3)
assert niter == int(niter)
for i in xrange(int(niter)):
flux += np.sqrt(2. * np.pi) * pars[3 * i] * abs(pars[2 + 3 * i])
return flux * self.fluxnorm
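    # Note (illustrative): each Gaussian component A*exp(-(x-mu)^2/(2*sigma^2))
    # integrates to A*|sigma|*sqrt(2*pi), which is the sqrt(2*pi)*amp*|sigma|
    # term summed above; e.g. A=10, sigma=2 gives ~50.13 before fluxnorm scaling.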
def compute_amplitude(self, pars):
"""
Calculate amplitude of emission line. Should be easy - add multiple components if they exist.
Currently assumes multiple components have the same centroid.
"""
amp = 0
niter = (len(pars) / 3)
for i in xrange(int(niter)):
amp += pars[3 * i]
return amp * self.fluxnorm
def compute_luminosity(self, pars):
"""
Determine luminosity of line (need distance and flux units).
"""
        # compute_flux already sums over all components, so no per-component loop is needed here
        lum = self.compute_flux(pars) * 4. * np.pi * self.d**2
        return lum
def compute_fwhm(self, pars):
"""
Determine full-width at half maximum for multi-component fit numerically, or analytically if line
has only a single component. Uses bisection technique for the former with absolute tolerance of 1e-4.
"""
if len(pars) == 3:
return 2. * np.sqrt(2. * np.log(2.)) * abs(pars[2])
else:
atol = 1e-4
niter = (len(pars) / 3)
pars2d = np.reshape(pars, (int(niter), 3))
            start = pars2d[0, 1] # start at central wavelength of first component
            # If the centroids are exactly the same for all components, we know the peak, and peak position
            if np.allclose(pars2d[:, 1], pars2d[0, 1], atol=atol):
                fmax = np.sum(pars2d[:, 0])
# Otherwise, we have to figure out where the multicomponent peak is
else:
f = lambda x: self.specfit.fitter.slope(x)
xfmax = self.bisection(f, start)
fmax = self.specfit.fitter.n_modelfunc(pars)(np.array([xfmax, xfmax]))[0]
hmax = 0.5 * fmax
# current height relative to half max - we want to minimize this function. Could be asymmetric.
f = lambda x: self.specfit.fitter.n_modelfunc(pars)(np.array([x])) - hmax
xhmax1 = self.bisection(f, start)
xhmax2 = self.bisection(f, start + (start - xhmax1))
return abs(xhmax2 - xhmax1)
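    # Note (illustrative): the analytic single-component branch above uses
    # FWHM = 2*sqrt(2*ln(2))*sigma ~= 2.3548*sigma, e.g. sigma=5 -> FWHM ~= 11.77.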
def bisection(self, f, x_guess):
"""
Find root of function using bisection method. Absolute tolerance of 1e-4 is being used.
"""
x1, x2 = self.bracket_root(f, x_guess)
# Narrow bracketed range with bisection until tolerance is met
while abs(x2 - x1) > 1e-4:
midpt = np.mean([x1, x2])
fmid = f(midpt)
if np.sign(fmid) < 0: x1 = midpt
else: x2 = midpt
if fmid == 0.0: break
return x2
def bracket_root(self, f, x_guess, atol = 1e-4):
"""
Bracket root by finding points where function goes from positive to negative.
"""
f1 = f(x_guess)
f2 = f(x_guess + 1)
df = f2 - f1
# Determine whether increasing or decreasing x_guess will lead us to zero
if (f1 > 0 and df < 0) or (f1 < 0 and df > 0): sign = 1
else: sign = -1
# Find root bracketing points
xpre = x_guess
xnow = x_guess + sign
fpre = f1
fnow = f(xnow)
while (np.sign(fnow) == np.sign(fpre)):
xpre = xnow
xnow += sign * 0.1
fpre = f(xpre)
fnow = f(xnow)
x1 = min(xnow, xpre)
x2 = max(xnow, xpre)
if not np.all([np.sign(fpre), np.sign(fnow)]):
x1 -= 1e-4
x2 += 1e-4
return x1, x2
def to_tex(self):
"""
Write out fit results to tex format.
"""
pass
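# Illustrative sketch (not part of pyspeckit): the single-Gaussian flux and
# FWHM formulas used by compute_flux/compute_fwhm, applied directly to one
# [amplitude, centroid, sigma] triple without needing a Spectrum object.
def _gaussian_flux_and_fwhm(amplitude, sigma):
    flux = np.sqrt(2. * np.pi) * amplitude * abs(sigma)
    fwhm = 2. * np.sqrt(2. * np.log(2.)) * abs(sigma)
    return flux, fwhm
# e.g. _gaussian_flux_and_fwhm(100., 5.) -> (~1253.3, ~11.77)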
| 36.961066 | 118 | 0.550202 |
6169c77d05545f1f532003d1f71c2b3e32d7c56d | 1,145 | py | Python | polkadot_prometheus_exporter/_utils.py | w3f-community/polkadot-prometheus-exporter | b1ee5608572533a27a2aa6d9523a84a05249073c | ["Apache-2.0"] | 8 | 2019-06-10T08:38:56.000Z | 2021-04-16T17:52:54.000Z | polkadot_prometheus_exporter/_utils.py | w3f-community/polkadot-prometheus-exporter | b1ee5608572533a27a2aa6d9523a84a05249073c | ["Apache-2.0"] | null | null | null | polkadot_prometheus_exporter/_utils.py | w3f-community/polkadot-prometheus-exporter | b1ee5608572533a27a2aa6d9523a84a05249073c | ["Apache-2.0"] | 1 | 2021-03-03T18:23:38.000Z | 2021-03-03T18:23:38.000Z |
# Copyright (C) 2019 MixBytes, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND (express or implied).
from abc import ABC, abstractmethod
from time import time
class PeriodicTask(ABC):
"""
    Utility class which runs _perform() at most once every period_seconds seconds.
"""
def __init__(self, period_seconds):
self.period_seconds = period_seconds
self.__last_invocation_time = None
@abstractmethod
def _perform(self):
raise NotImplementedError()
def run(self):
now = time()
if self.__last_invocation_time is not None and self.__last_invocation_time + self.period_seconds > now:
return
self.__last_invocation_time = now
self._perform()
def check(condition, error_msg=None):
if not condition:
raise (RuntimeError() if error_msg is None else RuntimeError(error_msg))
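# Illustrative sketch (not part of the original module): a minimal
# PeriodicTask subclass; the class name and the 5-second period are made up.
class _ExampleHeartbeat(PeriodicTask):
    def _perform(self):
        print('still alive')
# Intended usage from a frequent polling loop:
#     task = _ExampleHeartbeat(period_seconds=5)
#     while True:
#         task.run()   # prints at most once every 5 seconds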
| 28.625 | 111 | 0.704803 |
400eb450484d415ed23eea86cc637a288cf7cd62 | 230 | py | Python | polymetis/polymetis/python/torchcontrol/utils/__init__.py | ali-senguel/fairo | 1ec5d8ecbdfc782de63a92aad9bf8534110ce762 | ["MIT"] | 669 | 2020-11-21T01:20:20.000Z | 2021-09-13T13:25:16.000Z | polymetis/polymetis/python/torchcontrol/utils/__init__.py | ali-senguel/fairo | 1ec5d8ecbdfc782de63a92aad9bf8534110ce762 | ["MIT"] | 324 | 2020-12-07T18:20:34.000Z | 2021-09-14T17:17:18.000Z | polymetis/polymetis/python/torchcontrol/utils/__init__.py | ali-senguel/fairo | 1ec5d8ecbdfc782de63a92aad9bf8534110ce762 | ["MIT"] | 56 | 2021-01-04T19:57:40.000Z | 2021-09-13T21:20:08.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .tensor_utils import *
from .time_utils import *
| 32.857143 | 65 | 0.765217 |
d75c2df82fdb86d324a6ad0f066d09e8f491cbd9 | 1,509 | py | Python | tpdatasrc/co8infra/scr/Spell192 - Gentle Repose.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | ["MIT"] | 69 | 2015-05-05T14:09:25.000Z | 2022-02-15T06:13:04.000Z | tpdatasrc/co8infra/scr/Spell192 - Gentle Repose.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | ["MIT"] | 457 | 2015-05-01T22:07:45.000Z | 2022-03-31T02:19:10.000Z | tpdatasrc/co8infra/scr/Spell192 - Gentle Repose.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | ["MIT"] | 25 | 2016-02-04T21:19:53.000Z | 2021-11-15T23:14:51.000Z |
from toee import *
def OnBeginSpellCast( spell ):
print "Gentle Repose OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-necromancy-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Gentle Repose OnSpellEffect"
target = spell.target_list[0]
if not target.obj.is_friendly( spell.caster ):
if not target.obj.saving_throw_spell( spell.dc, D20_Save_Will, D20STD_F_NONE, spell.caster, spell.id ):
# saving throw unsuccessful
target.obj.float_mesfile_line( 'mes\\spell.mes', 30002 )
game.particles( 'sp-Inflict Light Wounds', target.obj )
target.obj.float_mesfile_line( 'mes\\spell.mes', 192 )
x = target.obj.obj_get_int(obj_f_critter_flags2)
x = x | 64
target.obj.obj_set_int(obj_f_critter_flags2, x)
game.global_vars[900] = target.obj.obj_get_int(obj_f_critter_flags2)
else:
# saving throw successful
target.obj.float_mesfile_line( 'mes\\spell.mes', 30001 )
game.particles( 'Fizzle', target.obj )
else:
game.particles( 'sp-Inflict Light Wounds', target.obj )
target.obj.float_mesfile_line( 'mes\\spell.mes', 192 )
x = target.obj.obj_get_int(obj_f_critter_flags2)
x = x | 64
target.obj.obj_set_int(obj_f_critter_flags2, x)
spell.target_list.remove_target( target.obj )
spell.spell_end( spell.id )
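# Note (illustrative): obj_f_critter_flags2 is treated as a bit field above;
# "x = x | 64" switches on the flag bit with value 64 without disturbing any
# other bits, and is a no-op if that bit was already set.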
def OnBeginRound( spell ):
print "Gentle Repose OnBeginRound"
def OnEndSpellCast( spell ):
print "Gentle Repose OnEndSpellCast"
| 32.106383 | 105 | 0.743539 |
d0d7a67d80a981d4a1937e5c8ed4c1243eb68765 | 40,245 | py | Python | ansible/roles/lib_openshift_3.2/library/oc_serviceaccount.py | rhdedgar/openshift-tools | 3c077d905688c3d4477bbd4ca48977b705f7e779 | ["Apache-2.0"] | null | null | null | ansible/roles/lib_openshift_3.2/library/oc_serviceaccount.py | rhdedgar/openshift-tools | 3c077d905688c3d4477bbd4ca48977b705f7e779 | ["Apache-2.0"] | 3 | 2016-12-01T23:01:36.000Z | 2016-12-02T00:16:48.000Z | ansible/roles/lib_openshift_3.2/library/oc_serviceaccount.py | rhdedgar/openshift-tools | 3c077d905688c3d4477bbd4ca48977b705f7e779 | ["Apache-2.0"] | 1 | 2018-01-30T05:44:59.000Z | 2018-01-30T05:44:59.000Z |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
        '''replace a resource from a file '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
        '''create a resource from content via a temporary file '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
        '''create a resource from a file '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
        '''delete a resource by name '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template, optionally creating the resulting objects '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
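    # Illustrative examples of the key notation handled above (default '.' separator):
    #     Yedit.get_entry({'a': {'b': 'c'}}, 'a.b')               -> 'c'
    #     Yedit.get_entry({'a': [{'b': 1}, {'b': 2}]}, 'a[1].b')  -> 2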
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
class ServiceAccountConfig(object):
'''Service account config class
This class stores the options and returns a default service account
'''
# pylint: disable=too-many-arguments
def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
self.name = sname
self.kubeconfig = kubeconfig
self.namespace = namespace
self.secrets = secrets or []
self.image_pull_secrets = image_pull_secrets or []
self.data = {}
self.create_dict()
def create_dict(self):
''' return a properly structured volume '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['secrets'] = []
if self.secrets:
for sec in self.secrets:
self.data['secrets'].append({"name": sec})
self.data['imagePullSecrets'] = []
if self.image_pull_secrets:
for sec in self.image_pull_secrets:
self.data['imagePullSecrets'].append({"name": sec})
# pylint: disable=too-many-public-methods
class ServiceAccount(Yedit):
''' Class to wrap the oc command line tools '''
image_pull_secrets_path = "imagePullSecrets"
secrets_path = "secrets"
def __init__(self, content):
'''ServiceAccount constructor'''
super(ServiceAccount, self).__init__(content=content)
self._secrets = None
self._image_pull_secrets = None
@property
def image_pull_secrets(self):
''' property for image_pull_secrets '''
if self._image_pull_secrets == None:
self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, secrets):
        ''' property setter for image_pull_secrets '''
self._image_pull_secrets = secrets
@property
def secrets(self):
''' property for secrets '''
print "Getting secrets property"
if not self._secrets:
self._secrets = self.get(ServiceAccount.secrets_path) or []
return self._secrets
@secrets.setter
def secrets(self, secrets):
''' property for secrets '''
self._secrets = secrets
def delete_secret(self, inc_secret):
''' remove a secret '''
remove_idx = None
for idx, sec in enumerate(self.secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
        if remove_idx is not None:
del self.secrets[remove_idx]
return True
return False
def delete_image_pull_secret(self, inc_secret):
''' remove a image_pull_secret '''
remove_idx = None
for idx, sec in enumerate(self.image_pull_secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
        if remove_idx is not None:
del self.image_pull_secrets[remove_idx]
return True
return False
def find_secret(self, inc_secret):
'''find secret'''
for secret in self.secrets:
if secret['name'] == inc_secret:
return secret
return None
def find_image_pull_secret(self, inc_secret):
'''find secret'''
for secret in self.image_pull_secrets:
if secret['name'] == inc_secret:
return secret
return None
def add_secret(self, inc_secret):
'''add secret'''
if self.secrets:
self.secrets.append({"name": inc_secret})
else:
self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
def add_image_pull_secret(self, inc_secret):
'''add image_pull_secret'''
if self.image_pull_secrets:
self.image_pull_secrets.append({"name": inc_secret})
else:
self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# pylint: disable=too-many-instance-attributes
class OCServiceAccount(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'sa'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
verbose=False):
        ''' Constructor for OCServiceAccount '''
super(OCServiceAccount, self).__init__(config.namespace, config.kubeconfig)
self.config = config
self.namespace = config.namespace
self._service_account = None
@property
def service_account(self):
''' property function service'''
if not self._service_account:
self.get()
return self._service_account
@service_account.setter
def service_account(self, data):
''' setter function for yedit var '''
self._service_account = data
def exists(self):
        ''' return whether the service account exists '''
if self.service_account:
return True
return False
def get(self):
        '''return service account information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.service_account = ServiceAccount(content=result['results'][0])
elif '\"%s\" not found' % self.config.name in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
        # add any secrets and image pull secrets that are missing from the service account
for secret in self.config.secrets:
result = self.service_account.find_secret(secret)
if not result:
self.service_account.add_secret(secret)
for secret in self.config.image_pull_secrets:
result = self.service_account.find_image_pull_secret(secret)
if not result:
self.service_account.add_image_pull_secret(secret)
return self._replace_content(self.kind, self.config.name, self.config.data)
def needs_update(self):
''' verify an update is needed '''
        # since creating a service account generates secrets and imagepullsecrets
# check_def_equal will not work
# Instead, verify all secrets passed are in the list
for secret in self.config.secrets:
result = self.service_account.find_secret(secret)
if not result:
return True
for secret in self.config.image_pull_secrets:
result = self.service_account.find_image_pull_secret(secret)
if not result:
return True
return False
def main():
'''
    ansible oc module for serviceaccount
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
secrets=dict(default=None, type='list'),
image_pull_secrets=dict(default=None, type='list'),
),
supports_check_mode=True,
)
rconfig = ServiceAccountConfig(module.params['name'],
module.params['namespace'],
module.params['kubeconfig'],
module.params['secrets'],
module.params['image_pull_secrets'],
)
oc_sa = OCServiceAccount(rconfig,
verbose=module.params['debug'])
state = module.params['state']
api_rval = oc_sa.get()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if oc_sa.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = oc_sa.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not oc_sa.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = oc_sa.create()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
# return the created object
api_rval = oc_sa.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
if oc_sa.needs_update():
api_rval = oc_sa.update()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
# return the created object
api_rval = oc_sa.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
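# Illustrative playbook usage (a sketch; names and values are placeholders):
#
#   - oc_serviceaccount:
#       name: registry-editor
#       namespace: default
#       secrets:
#       - registry-secret
#       image_pull_secrets:
#       - registry-pull-secret
#       state: present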
| 32.960688 | 118 | 0.543422 |
db9c0e81e8be7893916f9a25f4259ff582cf47ba | 1,725 | py | Python | hashes/enigma_machine.py | sourcery-ai-bot/Python | f1444aca73068e0c55f60ec63b12c53b402e54dd | ["MIT"] | null | null | null | hashes/enigma_machine.py | sourcery-ai-bot/Python | f1444aca73068e0c55f60ec63b12c53b402e54dd | ["MIT"] | null | null | null | hashes/enigma_machine.py | sourcery-ai-bot/Python | f1444aca73068e0c55f60ec63b12c53b402e54dd | ["MIT"] | null | null | null |
alphabets = [chr(i) for i in range(32, 126)]
gear_one = [i for i in range(len(alphabets))]
gear_two = [i for i in range(len(alphabets))]
gear_three = [i for i in range(len(alphabets))]
reflector = [i for i in reversed(range(len(alphabets)))]
code = []
gear_one_pos = gear_two_pos = gear_three_pos = 0
def rotator():
global gear_one_pos
global gear_two_pos
global gear_three_pos
i = gear_one[0]
gear_one.append(i)
del gear_one[0]
gear_one_pos += 1
if gear_one_pos % int(len(alphabets)) == 0:
i = gear_two[0]
gear_two.append(i)
del gear_two[0]
gear_two_pos += 1
if gear_two_pos % int(len(alphabets)) == 0:
i = gear_three[0]
gear_three.append(i)
del gear_three[0]
gear_three_pos += 1
def engine(input_character):
target = alphabets.index(input_character)
target = gear_one[target]
target = gear_two[target]
target = gear_three[target]
target = reflector[target]
target = gear_three.index(target)
target = gear_two.index(target)
target = gear_one.index(target)
code.append(alphabets[target])
rotator()
if __name__ == "__main__":
decode = input("Type your message:\n")
decode = list(decode)
while True:
try:
token = int(input("Please set token:(must be only digits)\n"))
break
except Exception as error:
print(error)
for _ in range(token):
rotator()
for i in decode:
engine(i)
print("\n" + "".join(code))
print(
f"\nYour Token is {token} please write it down.\nIf you want to decode "
f"this message again you should input same digits as token!"
)
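# Added sketch (not part of the original script): because the reflector above is its
# own inverse, the whole transformation is an involution, so running the ciphertext
# through the machine again with the same starting token reproduces the plaintext.
# A self-contained re-implementation of the same gears/reflector shows the round trip:
def _enigma(message, token):
    alpha = [chr(i) for i in range(32, 126)]
    g1 = list(range(len(alpha)))
    g2 = list(range(len(alpha)))
    g3 = list(range(len(alpha)))
    refl = list(reversed(range(len(alpha))))
    pos = [0, 0, 0]

    def rotate():
        # same stepping rule as rotator() above
        g1.append(g1.pop(0))
        pos[0] += 1
        if pos[0] % len(alpha) == 0:
            g2.append(g2.pop(0))
            pos[1] += 1
            if pos[1] % len(alpha) == 0:
                g3.append(g3.pop(0))
                pos[2] += 1

    for _ in range(token):
        rotate()
    out = []
    for ch in message:
        # forward through the gears, bounce off the reflector, then back out
        t = refl[g3[g2[g1[alpha.index(ch)]]]]
        out.append(alpha[g1.index(g2.index(g3.index(t)))])
        rotate()
    return "".join(out)

assert _enigma(_enigma("Hello, Enigma!", 42), 42) == "Hello, Enigma!"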
| 28.278689
| 80
| 0.615652
|
fc97eefbe4c3977895035b4f16235ebc3369fe71
| 3,953
|
py
|
Python
|
awacs/dms.py
|
michael-k/awacs
|
ed3dc822d268f10b0cd83feb90fd279277e54ed4
|
[
"BSD-2-Clause"
] | 358
|
2015-01-01T05:11:05.000Z
|
2022-03-20T14:11:39.000Z
|
awacs/dms.py
|
cloudtools/awacs
|
f66550a812073f4e3ebd545279a5a1e6856cf39d
|
[
"BSD-2-Clause"
] | 171
|
2015-01-17T00:32:48.000Z
|
2022-03-28T02:02:57.000Z
|
awacs/dms.py
|
michael-k/awacs
|
ed3dc822d268f10b0cd83feb90fd279277e54ed4
|
[
"BSD-2-Clause"
] | 100
|
2015-01-04T16:34:34.000Z
|
2022-02-21T06:17:17.000Z
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Database Migration Service"
prefix = "dms"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
AddTagsToResource = Action("AddTagsToResource")
ApplyPendingMaintenanceAction = Action("ApplyPendingMaintenanceAction")
CancelReplicationTaskAssessmentRun = Action("CancelReplicationTaskAssessmentRun")
CreateEndpoint = Action("CreateEndpoint")
CreateEventSubscription = Action("CreateEventSubscription")
CreateReplicationInstance = Action("CreateReplicationInstance")
CreateReplicationSubnetGroup = Action("CreateReplicationSubnetGroup")
CreateReplicationTask = Action("CreateReplicationTask")
DeleteCertificate = Action("DeleteCertificate")
DeleteConnection = Action("DeleteConnection")
DeleteEndpoint = Action("DeleteEndpoint")
DeleteEventSubscription = Action("DeleteEventSubscription")
DeleteReplicationInstance = Action("DeleteReplicationInstance")
DeleteReplicationSubnetGroup = Action("DeleteReplicationSubnetGroup")
DeleteReplicationTask = Action("DeleteReplicationTask")
DeleteReplicationTaskAssessmentRun = Action("DeleteReplicationTaskAssessmentRun")
DescribeAccountAttributes = Action("DescribeAccountAttributes")
DescribeApplicableIndividualAssessments = Action(
"DescribeApplicableIndividualAssessments"
)
DescribeCertificates = Action("DescribeCertificates")
DescribeConnections = Action("DescribeConnections")
DescribeEndpointSettings = Action("DescribeEndpointSettings")
DescribeEndpointTypes = Action("DescribeEndpointTypes")
DescribeEndpoints = Action("DescribeEndpoints")
DescribeEventCategories = Action("DescribeEventCategories")
DescribeEventSubscriptions = Action("DescribeEventSubscriptions")
DescribeEvents = Action("DescribeEvents")
DescribeOrderableReplicationInstances = Action("DescribeOrderableReplicationInstances")
DescribeRefreshSchemasStatus = Action("DescribeRefreshSchemasStatus")
DescribeReplicationInstanceTaskLogs = Action("DescribeReplicationInstanceTaskLogs")
DescribeReplicationInstances = Action("DescribeReplicationInstances")
DescribeReplicationSubnetGroups = Action("DescribeReplicationSubnetGroups")
DescribeReplicationTaskAssessmentResults = Action(
"DescribeReplicationTaskAssessmentResults"
)
DescribeReplicationTaskAssessmentRuns = Action("DescribeReplicationTaskAssessmentRuns")
DescribeReplicationTaskIndividualAssessments = Action(
"DescribeReplicationTaskIndividualAssessments"
)
DescribeReplicationTasks = Action("DescribeReplicationTasks")
DescribeSchemas = Action("DescribeSchemas")
DescribeTableStatistics = Action("DescribeTableStatistics")
ImportCertificate = Action("ImportCertificate")
ListTagsForResource = Action("ListTagsForResource")
ModifyEndpoint = Action("ModifyEndpoint")
ModifyEventSubscription = Action("ModifyEventSubscription")
ModifyReplicationInstance = Action("ModifyReplicationInstance")
ModifyReplicationSubnetGroup = Action("ModifyReplicationSubnetGroup")
ModifyReplicationTask = Action("ModifyReplicationTask")
MoveReplicationTask = Action("MoveReplicationTask")
RebootReplicationInstance = Action("RebootReplicationInstance")
RefreshSchemas = Action("RefreshSchemas")
ReloadTables = Action("ReloadTables")
RemoveTagsFromResource = Action("RemoveTagsFromResource")
StartReplicationTask = Action("StartReplicationTask")
StartReplicationTaskAssessment = Action("StartReplicationTaskAssessment")
StartReplicationTaskAssessmentRun = Action("StartReplicationTaskAssessmentRun")
StopReplicationTask = Action("StopReplicationTask")
TestConnection = Action("TestConnection")
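# Added usage sketch (not part of the generated module): combining the actions above
# into an IAM policy document, assuming awacs.aws exposes Allow, Statement,
# PolicyDocument and to_json() as shown in the awacs README; Resource="*" is only a
# placeholder and should be scoped down in real policies.
from awacs.aws import Allow, PolicyDocument, Statement

dms_read_write = PolicyDocument(
    Version="2012-10-17",
    Statement=[
        Statement(
            Effect=Allow,
            Action=[
                DescribeReplicationTasks,
                StartReplicationTask,
                StopReplicationTask,
            ],
            Resource=["*"],
        )
    ],
)
print(dms_read_write.to_json())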
| 46.505882
| 88
| 0.835568
|
42825043ccd94812fab101d89114fc62554fbf22
| 13,660
|
py
|
Python
|
coherence/backends/lolcats_storage.py
|
crass/coherence
|
d1cf150f5fa4a4bd75c7ec682ef2a2783ccf50ca
|
[
"MIT"
] | null | null | null |
coherence/backends/lolcats_storage.py
|
crass/coherence
|
d1cf150f5fa4a4bd75c7ec682ef2a2783ccf50ca
|
[
"MIT"
] | null | null | null |
coherence/backends/lolcats_storage.py
|
crass/coherence
|
d1cf150f5fa4a4bd75c7ec682ef2a2783ccf50ca
|
[
"MIT"
] | 1
|
2019-09-18T05:33:39.000Z
|
2019-09-18T05:33:39.000Z
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Benjamin Kampmann <ben.kampmann@googlemail.com>
"""
This is a Media Backend that allows you to access the cool and cute pictures
from lolcats.com. This is mainly meant as a Sample Media Backend to learn how to
write a Media Backend.
So. You are still reading which allows me to assume that you want to learn how
to write a Media Backend for Coherence. NICE :) .
Once again: This is a SIMPLE Media Backend. It does not contain any big
requests, searches or even transcoding. The only thing we want to do in this
simple example, is to fetch an RSS feed on startup, parse it, save it and restart
the process one hour later again. Well, on top of this, we also want to provide
this information as a Media Server in the UPnP/DLNA Network of course ;) .
Wow. You are still reading. You must be really interested. Then let's go.
"""
########## NOTE:
# Please don't complain about the coding style of this document - I know. It is
# just this way to make it easier to document and to understand.
########## The imports
# The entry point for each kind of Backend is a 'BackendStore'. The BackendStore
# is the instance that does everything Usually. In this Example it can be
# understood as the 'Server', the object retrieving and serving the data.
from coherence.backend import BackendStore
# The data itself is stored in BackendItems. They are also the first things we
# are going to create.
from coherence.backend import BackendItem
# To make the data 'renderable' we need to define the DIDLite-Class of the Media
# we are providing. For that we have a bunch of helpers that we also want to
# import
from coherence.upnp.core import DIDLLite
# Coherence relies on the Twisted backend. I hope you are familiar with the
# concept of deferreds. If not please read:
# http://twistedmatrix.com/projects/core/documentation/howto/async.html
#
# It is a basic concept that you need to understand to understand the following
# code. But why am I talking about it? Oh, right, because we use a http-client
# based on the twisted.web.client module to do our requests.
from coherence.upnp.core.utils import getPage
# And we also import the reactor, that allows us to specify an action to happen
# later
from twisted.internet import reactor
# And to parse the RSS-Data (which is XML), we use the coherence helper
from coherence.extern.et import parse_xml
########## The models
# After the download and parsing of the data is done, we want to save it. In
# this case, we want to fetch the images and store their URL and the title of
# the image. That is the LolcatsImage class:
class LolcatsImage(BackendItem):
# We inherit from BackendItem as it already contains a lot of helper methods
# and implementations. For this simple example, we only have to fill the
# item with data.
def __init__(self, parent_id, id, title, url):
self.parentid = parent_id # used to be able to 'go back'
self.update_id = 0
self.id = id # each item has its own and unique id
self.location = url # the url of the picture
self.name = title # the title of the picture. Inside
# coherence this is called 'name'
# Item.item is a special thing. This is used to explain the client what
# kind of data this is. For e.g. A VideoItem or a MusicTrack. In our
# case, we have an image.
self.item = DIDLLite.ImageItem(id, parent_id, self.name)
# each Item.item has to have one or more Resource objects
# these hold detailed information about the media data
# and can represent variants of it (different sizes, transcoded formats)
res = DIDLLite.Resource(self.location, 'http-get:*:image/jpeg:*')
res.size = None #FIXME: we should have a size here
# and a resolution entry would be nice too
self.item.res.append(res)
class LolcatsContainer(BackendItem):
# The LolcatsContainer will hold the reference to all our LolcatsImages. This
# kind of BackendItem is a bit different from the normal BackendItem,
# because it has 'children' (the lolcatsimages). Because of that we have
# some more stuff to do in here.
def __init__(self, parent_id, id):
# the ids as above
self.parent_id = parent_id
self.id = id
# we never have a different name anyway
self.name = 'LOLCats'
# but we need to set it to a certain mimetype to explain it, that we
# contain 'children'.
self.mimetype = 'directory'
# As we are updating our data periodically, we increase this value so
# that our clients can check more easily whether something has changed since their
# last request.
self.update_id = 0
# that is where we hold the children
self.children = []
# and we need to give a DIDLLite again. This time we want to be
# understood as 'Container'.
self.item = DIDLLite.Container(id, parent_id, self.name)
self.item.childCount = None # will be set as soon as we have images
def get_children(self, start=0, end=0):
# This is the only important implementation thing: we have to return our
# list of children
if end != 0:
return self.children[start:end]
return self.children[start:]
# there is nothing special in here
# FIXME: move it to a base BackendContainer class
def get_child_count(self):
return len(self.children)
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
########## The server
# As already said before, the implementation of the server is done in a
# subclass of BackendStore. This is where the real code happens (usually).
# In our case this would be: downloading the page, parsing the content, saving
# it in the models and returning them on request.
class LolcatsStore(BackendStore):
# this *must* be set. Because the (most used) MediaServer Coherence also
# allows other kinds of Backends (like remote lights).
implements = ['MediaServer']
# this is only for this implementation: the http link to the lolcats rss
# feed that we want to read and parse:
rss_url = "http://feeds.feedburner.com/ICanHasCheezburger?format=xml"
# as we are going to build a (very small) tree with the items, we need to
# define the first (the root) item:
ROOT_ID = 0
def __init__(self, server, *args, **kwargs):
# first we initialize our parent class
BackendStore.__init__(self,server,**kwargs)
# When a Backend is initialized, the configuration is given as keyword
# arguments to the initialization. We receive it here as a dictionary
# and allow some values to be set:
# the name of the MediaServer as it appears in the network
self.name = kwargs.get('name', 'Lolcats')
# timeout between updates in hours:
self.refresh = int(kwargs.get('refresh', 1)) * (60 *60)
# the UPnP device that's hosting that backend, that's already done
# in the BackendStore.__init__, just left here for the sake of completeness
self.server = server
# internally used to have a new id for each item
self.next_id = 1000
# we store the last update from the rss feed so that we know if we have
# to parse again, or not:
self.last_updated = None
# initialize our lolcats container (no parent, this is the root)
self.container = LolcatsContainer(None, self.ROOT_ID)
# but as we also have to return them on 'get_by_id', we have our local
# store of images per id:
self.images = {}
# we tell that if an XBox sends a request for images we'll
# map the WMC id of that request to our local one
self.wmc_mapping = {'16': 0}
# and trigger an update of the data
dfr = self.update_data()
# So, even though the initialization is mostly done, Coherence does not yet
# announce our Media Server.
# Coherence waits for a signal sent by us saying that we are ready.
# And we don't want that to happen before we have succeeded
# in fetching some first data, so we delay this signaling until the update is done:
dfr.addCallback(self.init_completed)
dfr.addCallback(self.queue_update)
def get_by_id(self, id):
print "asked for", id, type(id)
# what ever we are asked for, we want to return the container only
if isinstance(id, basestring):
id = id.split('@',1)
id = id[0]
if int(id) == self.ROOT_ID:
return self.container
return self.images.get(int(id), None)
def upnp_init(self):
# after the signal was triggered, this method is called by coherence and
# from now on self.server exists and we can do
# the necessary setup here
# that allows us to specify our server options in more detail.
# here we define what kind of media content we do provide
# mostly needed to make some naughty DLNA devices behave
# will probably move into Coherence internals one day
self.server.connection_manager_server.set_variable( \
0, 'SourceProtocolInfo', ['http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:*'])
# and as it was done after we fetched the data the first time
# we want to take care about the server wide updates as well
self._update_container()
def _update_container(self, result=None):
# we need to inform Coherence about these changes
# again this is something that will probably move
# into Coherence internals one day
if self.server:
self.server.content_directory_server.set_variable(0,
'SystemUpdateID', self.update_id)
value = (self.ROOT_ID,self.container.update_id)
self.server.content_directory_server.set_variable(0,
'ContainerUpdateIDs', value)
return result
def update_loop(self):
# in the loop we want to call update_data
dfr = self.update_data()
# after it is done we want to take care of updating
# the container
dfr.addCallback(self._update_container)
# in ANY case queue an update of the data
dfr.addBoth(self.queue_update)
def update_data(self):
# trigger an update of the data
# fetch the rss
dfr = getPage(self.rss_url)
# push it through our xml parser
dfr.addCallback(parse_xml)
# then parse the data into our models
dfr.addCallback(self.parse_data)
return dfr
def parse_data(self, xml_data):
# So. xml_data is a cElementTree Element now. We can read our data from
# it now.
# each xml has a root element
root = xml_data.getroot()
# from there, we look for the newest update and compare it with the one
# we have saved. If they are the same, we don't need to go on:
pub_date = root.find('./channel/lastBuildDate').text
if pub_date == self.last_updated:
return
# not the case, set this as the last update and continue
self.last_updated = pub_date
# and reset the container's children list and the local storage
self.container.children = []
self.images = {}
# Attention, as this is an example, this code is meant to be as simple
# as possible and not as efficient as possible. IMHO the following code
# pretty much sucks, because it is totally blocking (even though we have
# 'only' 20 elements)
# we go through our entries and do something specific to the
# lolcats-rss-feed to fetch the data out of it
url_item = './{http://search.yahoo.com/mrss/}content'
for item in root.findall('./channel/item'):
title = item.find('./title').text
try:
url = item.findall(url_item)[1].get('url', None)
except IndexError:
continue
if url is None:
continue
image = LolcatsImage(self.ROOT_ID, self.next_id, title, url)
self.container.children.append(image)
self.images[self.next_id] = image
# increase the next_id entry every time
self.next_id += 1
# and increase the container update id and the system update id
# so that the clients can refresh with the new data
self.container.update_id += 1
self.update_id += 1
def queue_update(self, error_or_failure):
# We use the reactor to queue another update of our data
print(error_or_failure)
reactor.callLater(self.refresh, self.update_loop)
| 39.94152
| 146
| 0.658199
|
91bb65fbfc70390417fc8db21c253c4b9f4a0765
| 821
|
py
|
Python
|
client/verta/verta/_swagger/_public/artifactstore/model/ArtifactstoreStoreArtifactWithStreamResponse.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 835
|
2017-02-08T20:14:24.000Z
|
2020-03-12T17:37:49.000Z
|
client/verta/verta/_swagger/_public/artifactstore/model/ArtifactstoreStoreArtifactWithStreamResponse.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 651
|
2019-04-18T12:55:07.000Z
|
2022-03-31T23:45:09.000Z
|
client/verta/verta/_swagger/_public/artifactstore/model/ArtifactstoreStoreArtifactWithStreamResponse.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 170
|
2017-02-13T14:49:22.000Z
|
2020-02-19T17:59:12.000Z
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ArtifactstoreStoreArtifactWithStreamResponse(BaseType):
def __init__(self, cloud_file_key=None, cloud_file_path=None):
required = {
"cloud_file_key": False,
"cloud_file_path": False,
}
self.cloud_file_key = cloud_file_key
self.cloud_file_path = cloud_file_path
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('cloud_file_key', None)
if tmp is not None:
d['cloud_file_key'] = tmp
tmp = d.get('cloud_file_path', None)
if tmp is not None:
d['cloud_file_path'] = tmp
return ArtifactstoreStoreArtifactWithStreamResponse(**d)
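# Added usage sketch (not part of the auto-generated file above): round-tripping a
# response dict through from_json(); the key and path values are made up.
response = ArtifactstoreStoreArtifactWithStreamResponse.from_json({
    "cloud_file_key": "abc123",
    "cloud_file_path": "s3://bucket/artifacts/abc123",
})
print(response.cloud_file_key, response.cloud_file_path)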
| 27.366667
| 64
| 0.686967
|
0ff6e7bb30fc0f6021605e9cb3d2bc5aae0259d6
| 7,604
|
py
|
Python
|
python-real-time-application/feature_extractor.py
|
italogsfernandes/emg-moviments-classifier
|
7a58f162fa6c5bc1a2906c539dac3e0210115522
|
[
"MIT"
] | 2
|
2018-12-16T16:48:17.000Z
|
2019-02-09T15:03:30.000Z
|
python-real-time-application/feature_extractor.py
|
italogsfernandes/emg-moviments-classifier
|
7a58f162fa6c5bc1a2906c539dac3e0210115522
|
[
"MIT"
] | 3
|
2018-05-16T20:12:15.000Z
|
2018-05-16T20:12:57.000Z
|
python-real-time-application/feature_extractor.py
|
italogsfernandes/emg-moviments-classifier
|
7a58f162fa6c5bc1a2906c539dac3e0210115522
|
[
"MIT"
] | 1
|
2019-01-17T15:23:33.000Z
|
2019-01-17T15:23:33.000Z
|
# coded in a hurry by @italogsfernandes on December 13th at 5:53 in the morning
# while listening to:
# The Spinners - The Rubberband Man,
# The staple singers - Chica Boom
# None of us are free - Solomon Burke
# Right after a playlist of Sweatting bullets
# Detailled Description:
# RMS:
# sqrt(mean(square(vetor)))
# ZC:
# a = [ 1, 2, 1, 1, -3, -4, 7, 8, 9, 10, -2, 1, -3, 5, 6, 7,-10]
# sign() = [ 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1]
# diff() = [ 0, 0, 0, -2, 0, 2, 0, 0, 0, -2, 2, -2, 2, 0, 0, -2]
# where() = (array([ 3, 5, 9, 10, 11, 12, 15]),)
# where()[0].shape[0] = 7
# The number of zero crossing should be 7, but because sign()
# returns 0 if 0 is passed, 1 for positive, and -1 for negative values,
# diff() will count the transition containing zero twice.
#
# SSC:
# It uses diff to derivate the signal and obtain the slope
# So it verifies how many times the slope has changed from a positive
# number to a negative one.
# Try uncommenting the next lines and verify:
# ttt = np.linspace(0,1,1000)
# xxx = np.sin(2*np.pi*10*ttt) + 0.8*np.sin(2*np.pi*15*ttt) + 0.2*np.sin(2*np.pi*1*ttt)
#
# ssc_ = np.diff(np.sign(np.diff(xxx)))
# ssc_ = np.append(ssc_, [0,0])
# plt.plot(ttt,xxx)
# plt.plot(ttt,ssc_)
# ssc_ = np.where(ssc_)[0].shape[0]
import numpy as np # handling numerical data
import pandas as pd
def get_features(input_array,
features_list = ['RMS', 'ZC', 'MAV', 'VAR', 'WL', 'SSC'],
output_obj=None):
"""Get all the features listed in the features list
Features list made using as reference:
Nazmi N, Abdul Rahman MA, Yamamoto S-I, Ahmad SA, Zamzuri H, Mazlan SA.
A Review of Classification Techniques of EMG Signals during Isotonic and
Isometric Contractions. Postolache OA, Casson A, Mukhopadhyay S, eds.
Sensors (Basel, Switzerland). 2016;16(8):1304. doi:10.3390/s16081304.
Parameters
----------
input_array : type
Description of parameter `input_array`.
features_list : list
Available features:
* 'RMS' : Root Mean Square
* 'ZC' : Zero Crossing
* 'MAV' : Mean Absolute Value
* 'VAR' : Variance
* 'WL' : Wave Length
* 'SSC' : Slope Signal Change
Default value set to all available features
output_obj : type of object to be returned
suggestion -> you can utilize a pandas DataFrame
to improve performance in some cases
Returns
-------
dict
dict with a key for each feature (in lowercase)
"""
output_obj = {} if output_obj is None else output_obj
features_list = [f.lower() for f in features_list]
if 'rms' in features_list:
output_obj['rms'] = get_rms(input_array)
if 'zc' in features_list:
output_obj['zc'] = get_zc(input_array)
if 'mav' in features_list:
output_obj['mav'] = get_mav(input_array)
if 'var' in features_list:
output_obj['var'] = get_var(input_array)
if 'wl' in features_list:
output_obj['wl'] = get_wl(input_array)
if 'ssc' in features_list:
output_obj['ssc'] = get_ssc(input_array)
return output_obj
def get_features_DataFrame(input_array,
features_list = ['RMS', 'ZC', 'MAV', 'VAR', 'WL', 'SSC']):
"""Get all the features listed in the features list
Features list made using as reference:
Nazmi N, Abdul Rahman MA, Yamamoto S-I, Ahmad SA, Zamzuri H, Mazlan SA.
A Review of Classification Techniques of EMG Signals during Isotonic and
Isometric Contractions. Postolache OA, Casson A, Mukhopadhyay S, eds.
Sensors (Basel, Switzerland). 2016;16(8):1304. doi:10.3390/s16081304.
Parameters
----------
input_array : type
Description of parameter `input_array`.
features_list : list
Available features:
* 'RMS' : Root Mean Square
* 'ZC' : Zero Crossing
* 'MAV' : Mean Absolute Value
* 'VAR' : Variance
* 'WL' : Wave Length
* 'SSC' : Slope Signal Change
Default value set to all available features
Returns
-------
pd.DataFrame
DataFrame with a column for each feature (in lowercase)
"""
features_list = [f.lower() for f in features_list]
output_obj = pd.DataFrame(columns=features_list)
if 'rms' in features_list:
output_obj['rms'] = get_rms(input_array)
if 'zc' in features_list:
output_obj['zc'] = get_zc(input_array)
if 'mav' in features_list:
output_obj['mav'] = get_mav(input_array)
if 'var' in features_list:
output_obj['var'] = get_var(input_array)
if 'wl' in features_list:
output_obj['wl'] = get_wl(input_array)
if 'ssc' in features_list:
output_obj['ssc'] = get_ssc(input_array)
return output_obj
def get_rms(input_array):
"""Root Mean Square (RMS)
Parameters
----------
input_array : numpy type array
One-dimensional numpy array
May work with multi-dimensional arrays, but this wasn't tested
Returns
-------
numpy float64 number
The result of this operation in the input array.
"""
return np.sqrt(np.mean(np.square(
input_array)))
def get_zc(input_array):
"""Zero Crossing (ZC)
Parameters
----------
input_array : numpy type array
One-dimensional numpy array
May work with multi-dimensional arrays, but this wasn't tested
Returns
-------
numpy float64 number
The result of this operation in the input array.
"""
s3= np.sign(
input_array)
s3[s3==0] = -1 # replace zeros with -1
return (np.where(np.diff(s3)))[0].shape[0]
def get_mav(input_array):
"""Mean Absolute Value (MAV)
Parameters
----------
input_array : numpy type array
One-dimensional numpy array
May work with multi-dimensional arrays, but this wasn't tested
Returns
-------
numpy float64 number
The result of this operation in the input array.
"""
return np.mean(np.abs(
input_array))
def get_var(input_array):
"""Variance (VAR)
Parameters
----------
input_array : numpy type array
One-dimensional numpy array
May work with multi-dimensional arrays, but this wasn't tested
Returns
-------
numpy float64 number
The result of this operation in the input array.
"""
return np.var(
input_array)
def get_wl(input_array):
"""Wave Lenght (WL)
Parameters
----------
input_array : numpy type array
One-dimensional numpy array
May work with multi-dimensional arrays, but this wasn't tested
Returns
-------
numpy float64 number
The result of this operation in the input array.
"""
return np.sum(np.abs(np.diff(
input_array)))
def get_ssc(input_array):
"""Signal Slop Changes (SSC)
Parameters
----------
input_array : numpy type array
One-dimensional numpy array
May work with multi-dimensional arrays, but this wasn't tested
Returns
-------
numpy float64 number
The result of this operation in the input array.
"""
return np.where(np.diff(np.sign(np.diff(
input_array))))[0].shape[0]
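# Added usage sketch (not part of the original module): extracting every feature from
# a short synthetic signal, the same two-sinusoid example used in the SSC notes above.
if __name__ == "__main__":
    ttt = np.linspace(0, 1, 1000)
    xxx = np.sin(2 * np.pi * 10 * ttt) + 0.8 * np.sin(2 * np.pi * 15 * ttt) + 0.2 * np.sin(2 * np.pi * 1 * ttt)
    print(get_features(xxx))  # dict with rms, zc, mav, var, wl and ssc keys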
| 29.937008
| 95
| 0.600605
|
c34cefc0ae9538174866b5c17604ba2b30139f05
| 4,568
|
py
|
Python
|
hb/build/build.py
|
openharmony-sig-ci/build_lite
|
833511a697ab640c444055c279822e16b6cbde5f
|
[
"Apache-2.0"
] | null | null | null |
hb/build/build.py
|
openharmony-sig-ci/build_lite
|
833511a697ab640c444055c279822e16b6cbde5f
|
[
"Apache-2.0"
] | null | null | null |
hb/build/build.py
|
openharmony-sig-ci/build_lite
|
833511a697ab640c444055c279822e16b6cbde5f
|
[
"Apache-2.0"
] | 1
|
2021-09-13T11:57:24.000Z
|
2021-09-13T11:57:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from hb.build.build_process import Build
from hb.set.set import set_product
from hb.common.utils import get_current_time
from hb.common.utils import OHOSException
def add_options(parser):
parser.add_argument('component', help='name of the component', nargs='*',
default=None)
parser.add_argument('-b', '--build_type', help='release or debug version',
nargs=1, default=['debug'])
parser.add_argument('-c', '--compiler', help='specify compiler',
nargs=1, default=['clang'])
parser.add_argument('-t', '--test', help='compile test suit', nargs='*')
parser.add_argument('--dmverity', help='Enable dmverity',
action="store_true")
parser.add_argument('--tee', help='Enable tee',
action="store_true")
parser.add_argument('-p', '--product', help='build a specified product '
'with {product_name}@{company}, eg: camera@huawei',
nargs=1, default=[])
parser.add_argument('-f', '--full',
help='full code compilation', action='store_true')
parser.add_argument('-n', '--ndk', help='compile ndk',
action='store_true')
parser.add_argument('-T', '--target', help='Compile single target',
nargs='*', default=[])
parser.add_argument('-v', '--verbose',
help='show all command lines while building',
action='store_true')
parser.add_argument('-shs', '--sign_haps_by_server',
help='sign haps by server', action='store_true')
parser.add_argument('--patch', help='apply product patch before compiling',
action='store_true')
parser.add_argument('--gn-args', nargs=1, default='',
help='specifies gn build arguments, '
'eg: --gn-args="foo="bar" enable=true blah=7"')
def exec_command(args):
if len(args.product):
product, company = args.product[0].split('@')
set_product(product_name=product, company=company)
build = Build(args.component)
cmd_args = defaultdict(list)
build.register_args('ohos_build_type', args.build_type[0])
# Get the compilation time in timestamp and human readable format
build.register_args('ohos_build_time', get_current_time(type='timestamp'))
build.register_args('ohos_build_datetime',
get_current_time(type='datetime'))
if args.test is not None:
build.test = args.test
if args.dmverity:
build.register_args('enable_ohos_security_dmverity',
'true',
quota=False)
build.config.fs_attr.add('dmverity_enable')
if args.tee:
build.register_args('tee_enable', 'true', quota=False)
build.config.fs_attr.add('tee_enable')
if args.ndk:
build.register_args('ohos_build_ndk', 'true', quota=False)
if hasattr(args, 'target') and len(args.target):
build.register_args('ohos_build_target', args.target)
if hasattr(args, 'verbose') and args.verbose:
cmd_args['gn'].append('-v')
cmd_args['ninja'].append('-v')
if hasattr(args, 'ninja'):
return build.build(args.full, ninja=args.ninja)
if args.sign_haps_by_server:
build.register_args('ohos_sign_haps_by_server',
'true',
quota=False)
if len(args.gn_args):
for gn_arg in args.gn_args[0].split(' '):
try:
variable, value = gn_arg.split('=')
build.register_args(variable, value)
except ValueError:
raise OHOSException(f'Invalid gn args: {gn_arg}')
return build.build(args.full, patch=args.patch, cmd_args=cmd_args)
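# Added sketch (not part of the original file): wiring add_options() into a standalone
# argparse parser to preview the parsed arguments. 'hb build' normally registers these
# options through its own CLI entry point, and exec_command(args) needs a full hb
# environment (product config, gn/ninja) to actually build anything.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(prog='hb build')
    add_options(parser)
    args = parser.parse_args(['-b', 'release', '-f', '--gn-args', 'foo="bar" enable=true'])
    print(args.build_type, args.full, args.gn_args)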
| 40.070175
| 79
| 0.610333
|
ccddcdfbf9225f776f44934ac11a429110c00f58
| 467
|
py
|
Python
|
backend/api/models/icbc_upload_date.py
|
kuanfan99/zeva
|
57b506a108fe57438506569d5503c90c52216b2f
|
[
"Apache-2.0"
] | 3
|
2020-03-25T03:06:20.000Z
|
2021-01-20T23:36:03.000Z
|
backend/api/models/icbc_upload_date.py
|
kuanfan99/zeva
|
57b506a108fe57438506569d5503c90c52216b2f
|
[
"Apache-2.0"
] | 740
|
2019-12-16T15:53:39.000Z
|
2022-03-26T08:25:10.000Z
|
backend/api/models/icbc_upload_date.py
|
kuanfan99/zeva
|
57b506a108fe57438506569d5503c90c52216b2f
|
[
"Apache-2.0"
] | 11
|
2019-11-28T20:39:15.000Z
|
2022-01-31T17:53:31.000Z
|
from django.db import models
from auditable.models import Auditable
class IcbcUploadDate(Auditable):
upload_date = models.DateField(
blank=False,
db_comment="the date the icbc data is current to",
null=False,
auto_now=False)
class Meta:
db_table = 'icbc_upload_date'
db_table_comment = "contains a record for each time that the icbc file is \
uploaded, with the date current to as specified by the user"
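# Added usage sketch (not part of the original file): a hypothetical helper showing how
# the most recent "current to" date would typically be looked up with the Django ORM.
def get_most_recent_upload_date():
    """Return the latest IcbcUploadDate record, or None if nothing has been uploaded."""
    return IcbcUploadDate.objects.order_by('-upload_date').first()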
| 29.1875
| 79
| 0.691649
|
cce9ca9cf6bdcc43e18b16cd370236fc3b15c484
| 5,250
|
py
|
Python
|
moldynquick/namd.py
|
akey7/moldynquick
|
1cfe78f331a47ff6a31226dc8f3cf8f30bc694c4
|
[
"MIT"
] | null | null | null |
moldynquick/namd.py
|
akey7/moldynquick
|
1cfe78f331a47ff6a31226dc8f3cf8f30bc694c4
|
[
"MIT"
] | 8
|
2020-04-01T03:25:28.000Z
|
2020-04-21T23:14:34.000Z
|
moldynquick/namd.py
|
akey7/moldynquick
|
1cfe78f331a47ff6a31226dc8f3cf8f30bc694c4
|
[
"MIT"
] | null | null | null |
"""
This module extracts data from NAMD formatted files
"""
from typing import List, Dict, Any
import pandas as pd
import MDAnalysis as mda
from MDAnalysis.analysis import align, rms
import numpy as np
class NAMDLog:
"""
This class extracts data from a NAMD log file.
"""
def __init__(self, log_filename: str):
"""
This creates the instance attributes needed to parse the log file.
Parameters
----------
log_filename: str
The name of the logfile to extract.
"""
self.log_filename = log_filename
def extract_energies_wide(self) -> pd.DataFrame:
"""
This extracts the energies from the log file and returns a dataframe
that has those energies in it.
Returns
-------
pd.DataFrame
A dataframe of the energies in wide (pivoted) format.
"""
wide: List[Dict[str, Any]] = []
with open(self.log_filename, "r") as file:
for line in file.readlines():
if line.startswith("ENERGY:"):
values = [m for m in [l.strip() for l in line.split(" ")][1:] if len(m) > 0]
timestep = int(values[0])
wide_row = {
"timestep": timestep,
"bond [kcal/mol]": float(values[1]),
"angle [kcal/mol]": float(values[2]),
"dihedral [kcal/mol]": float(values[3]),
"improper [kcal/mol]": float(values[4]),
"electrostatic [kcal/mol]": float(values[5]),
"VDW [kcal/mol]": float(values[6])
}
wide.append(wide_row)
df: pd.DataFrame = pd.DataFrame(wide)
return df
def extract_energies_tall(self) -> pd.DataFrame:
"""
Extracts the energies in tall (narrow) format into a Pandas dataframe.
Yes this does cause the log file to be read twice. But fixing that
will need to wait until another version.
Returns
-------
pd.DataFrame
The dataframe that contains the rows in tall format.
"""
tall: List[Dict[str, Any]] = []
wide: pd.DataFrame = self.extract_energies_wide()
for _, wide_row in wide.iterrows():
timestep: int = wide_row["timestep"]
for key, value in wide_row.items():
if key != "timestep":
tall_row = {
"timestep": timestep,
"measurement": key,
"value": value
}
tall.append(tall_row)
df: pd.DataFrame = pd.DataFrame(tall)
return df
def extract_temperatures(self) -> pd.DataFrame:
"""
Extracts the temperatures
Returns
-------
pd.DataFrame
The temperatures.
"""
rows: List[Dict[str, Any]] = []
with open(self.log_filename, "r") as file:
for line in file.readlines():
if line.startswith("ENERGY:"):
values = [m for m in [l.strip() for l in line.split(" ")][1:] if len(m) > 0]
timestep = int(values[0])
row = {
"timestep": timestep,
"temp [K]": float(values[11]),
"tempavg [K]": float(values[14])
}
rows.append(row)
df = pd.DataFrame(rows)
return df
class NAMDTrajectory:
"""
This extracts trajectory information from the .dcd log file.
"""
def __init__(self, psf_filename: str, dcd_filename: str):
"""
Instantiates with the right filenames to extract trajectory information
Parameters
----------
psf_filename: str
The filename to the PSF file.
dcd_filename: str
The trajectory DCD file.
"""
self.psf_filename = psf_filename
self.dcd_filename = dcd_filename
def rmsd_from_first_frame(self, selected_atoms: str = "(protein) and name C CA N") -> pd.DataFrame:
"""
This calculates the RMSD for every frame from the first frame.
Parameters
----------
selected_atoms: str
The selection string to use for the atoms being aligned in
the trajectory. Defaults to the protein backbone atoms (C, CA, N).
Returns
-------
pd.DataFrame
Dataframe with the columns of frame and RMSD [Å]
"""
mobile = mda.Universe(self.psf_filename, self.dcd_filename)
ref = mda.Universe(self.psf_filename, self.dcd_filename)
# These two lines appear to have no effect, but in reality
# they set the positions in the trajectory.
mobile.trajectory[-1]
ref.trajectory[0]
mobile_ca = mobile.select_atoms(selected_atoms)
ref_ca = ref.select_atoms(selected_atoms)
aligner = align.AlignTraj(mobile, ref, select=selected_atoms, in_memory=True).run()
df = pd.DataFrame(data={"frame": np.arange(len(aligner.rmsd)), "RMSD [Å]": aligner.rmsd})
return df
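# Added usage sketch (not part of the original module): the file names below are
# hypothetical stand-ins for a NAMD log, a PSF topology and a DCD trajectory.
if __name__ == "__main__":
    log = NAMDLog("equilibration.log")
    print(log.extract_energies_wide().head())
    print(log.extract_temperatures().head())

    trajectory = NAMDTrajectory("system.psf", "trajectory.dcd")
    print(trajectory.rmsd_from_first_frame().head())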
| 31.437126
| 103
| 0.532
|
3e1974a913d437c7d8b3fda1354ab5d72f763074
| 40,277
|
py
|
Python
|
auth-api/src/auth_api/services/org.py
|
mengdong19/sbc-auth
|
66fbd94a79d6de18102d3db29743ffeab89ea161
|
[
"Apache-2.0"
] | null | null | null |
auth-api/src/auth_api/services/org.py
|
mengdong19/sbc-auth
|
66fbd94a79d6de18102d3db29743ffeab89ea161
|
[
"Apache-2.0"
] | null | null | null |
auth-api/src/auth_api/services/org.py
|
mengdong19/sbc-auth
|
66fbd94a79d6de18102d3db29743ffeab89ea161
|
[
"Apache-2.0"
] | 1
|
2019-07-25T18:20:41.000Z
|
2019-07-25T18:20:41.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service for managing Organization data."""
import json
from datetime import datetime
from typing import Dict, Tuple
from flask import current_app
from jinja2 import Environment, FileSystemLoader
from sbc_common_components.tracing.service_tracing import ServiceTracing # noqa: I001
from auth_api import status as http_status
from auth_api.exceptions import BusinessException, CustomException
from auth_api.exceptions.errors import Error
from auth_api.models import AccountLoginOptions as AccountLoginOptionsModel
from auth_api.models import Affiliation as AffiliationModel
from auth_api.models import Contact as ContactModel
from auth_api.models import ContactLink as ContactLinkModel
from auth_api.models import Membership as MembershipModel
from auth_api.models import Org as OrgModel
from auth_api.models import User as UserModel
from auth_api.models.affidavit import Affidavit as AffidavitModel
from auth_api.schemas import ContactSchema, OrgSchema, InvitationSchema
from auth_api.utils.enums import (
AccessType, ChangeType, LoginSource, OrgStatus, OrgType, PaymentMethod,
Status, PaymentAccountStatus, TaskRelationshipType, TaskType, TaskStatus)
from auth_api.utils.roles import ADMIN, VALID_STATUSES, Role, STAFF, EXCLUDED_FIELDS
from auth_api.utils.util import camelback2snake
from .affidavit import Affidavit as AffidavitService
from .authorization import check_auth
from .contact import Contact as ContactService
from .keycloak import KeycloakService
from .notification import send_email
from .products import Product as ProductService
from .rest_service import RestService
from .task import Task as TaskService
ENV = Environment(loader=FileSystemLoader('.'), autoescape=True)
class Org: # pylint: disable=too-many-public-methods
"""Manages all aspects of Org data.
This service manages creating, updating, and retrieving Org data via the Org model.
"""
def __init__(self, model):
"""Return an Org Service."""
self._model = model
@ServiceTracing.disable_tracing
def as_dict(self):
"""Return the internal Org model as a dictionary.
None fields are not included.
"""
org_schema = OrgSchema()
obj = org_schema.dump(self._model, many=False)
return obj
@staticmethod
def create_org(org_info: dict, user_id, # pylint: disable=too-many-locals, too-many-statements, too-many-branches
token_info: Dict = None, bearer_token: str = None, origin_url: str = None):
"""Create a new organization."""
current_app.logger.debug('<create_org ')
# bcol is treated like an access type as well; so it's outside the schema
bcol_credential = org_info.pop('bcOnlineCredential', None)
mailing_address = org_info.pop('mailingAddress', None)
payment_info = org_info.pop('paymentInfo', {})
selected_payment_method = payment_info.get('paymentMethod', None)
org_type = org_info.get('typeCode', OrgType.BASIC.value)
branch_name = org_info.get('branchName', None)
# If the account is created using a BCOL credential, verify it's a valid BC Online account
if bcol_credential:
bcol_response = Org.get_bcol_details(bcol_credential, bearer_token).json()
Org._map_response_to_org(bcol_response, org_info)
is_staff_admin = token_info and Role.STAFF_CREATE_ACCOUNTS.value in token_info.get('realm_access').get('roles')
is_bceid_user = token_info and token_info.get('loginSource', None) == LoginSource.BCEID.value
Org.validate_account_limit(is_staff_admin, user_id)
access_type = Org.validate_access_type(is_bceid_user, is_staff_admin, org_info)
# Always check duplicated name for all type of account.
Org.raise_error_if_duplicate_name(org_info['name'], branch_name)
# set premium for GOVM accounts. TODO: remove this logic if not needed
if access_type == AccessType.GOVM.value:
org_type = OrgType.PREMIUM.value
org_info.update({'typeCode': OrgType.PREMIUM.value})
org = OrgModel.create_from_dict(camelback2snake(org_info))
org.access_type = access_type
# If the account is anonymous or govm set the billable value as False else True
org.billable = access_type not in [AccessType.ANONYMOUS.value, AccessType.GOVM.value]
# Set the status based on access type
# Check if the user is APPROVED else set the org status to PENDING
# Send an email to staff to remind review the pending account
if access_type in (AccessType.EXTRA_PROVINCIAL.value, AccessType.REGULAR_BCEID.value) \
and not AffidavitModel.find_approved_by_user_id(user_id=user_id):
Org._handle_bceid_status_and_notification(org, origin_url, token_info)
if access_type == AccessType.GOVM.value:
org.status_code = OrgStatus.PENDING_INVITE_ACCEPT.value
# If mailing address is provided, save it
if mailing_address:
Org.add_contact_to_org(mailing_address, org)
# create the membership record for this user if its not created by staff and access_type is anonymous
Org.create_membership(access_type, is_staff_admin, org, user_id)
# dir search doesn't need default products
if access_type not in (AccessType.ANONYMOUS.value,):
ProductService.create_default_product_subscriptions(org, is_new_transaction=False)
payment_method = Org._validate_and_get_payment_method(selected_payment_method, OrgType[org_type],
access_type=access_type)
user_name = ''
if payment_method == PaymentMethod.PAD.value: # to get the pad accepted date
user: UserModel = UserModel.find_by_jwt_token(token=token_info)
user_name = user.username
Org._create_payment_settings(org, payment_info, payment_method, mailing_address, user_name, True)
# TODO do we have to check anything like this below?
# if payment_account_status == PaymentAccountStatus.FAILED:
# raise BusinessException(Error.ACCOUNT_CREATION_FAILED_IN_PAY, None)
org.commit()
current_app.logger.info(f'<created_org org_id:{org.id}')
return Org(org)
@staticmethod
def _handle_bceid_status_and_notification(org, origin_url, token_info):
org.status_code = OrgStatus.PENDING_STAFF_REVIEW.value
user = UserModel.find_by_jwt_token(token=token_info)
Org.send_staff_review_account_reminder(user, org.id, origin_url)
# create a staff review task for this account
task_info = {'name': org.name,
'relationshipId': org.id,
'relatedTo': user.id,
'dateSubmitted': datetime.today(),
'relationshipType': TaskRelationshipType.ORG.value,
'type': TaskType.PENDING_STAFF_REVIEW.value,
'status': TaskStatus.OPEN.value
}
TaskService.create_task(task_info, do_commit=False)
@staticmethod
def _validate_and_get_payment_method(selected_payment_type: str, org_type: OrgType, access_type=None) -> str:
# TODO whats a better place for this
org_payment_method_mapping = {
OrgType.BASIC: (
PaymentMethod.CREDIT_CARD.value, PaymentMethod.DIRECT_PAY.value, PaymentMethod.ONLINE_BANKING.value),
OrgType.PREMIUM: (
PaymentMethod.CREDIT_CARD.value, PaymentMethod.DIRECT_PAY.value,
PaymentMethod.PAD.value, PaymentMethod.BCOL.value)
}
if access_type == AccessType.GOVM.value:
payment_type = PaymentMethod.EJV.value
elif selected_payment_type:
valid_types = org_payment_method_mapping.get(org_type, [])
if selected_payment_type in valid_types:
payment_type = selected_payment_type
else:
raise BusinessException(Error.INVALID_INPUT, None)
else:
payment_type = PaymentMethod.BCOL.value if \
org_type == OrgType.PREMIUM else Org._get_default_payment_method_for_creditcard()
return payment_type
@staticmethod
def create_membership(access_type, is_staff_admin, org, user_id):
"""Create membership account."""
if not is_staff_admin and access_type != AccessType.ANONYMOUS.value:
membership = MembershipModel(org_id=org.id, user_id=user_id, membership_type_code='ADMIN',
membership_type_status=Status.ACTIVE.value)
membership.add_to_session()
# Add the user to account_holders group
KeycloakService.join_account_holders_group()
@staticmethod
def validate_account_limit(is_staff_admin, user_id):
"""Validate account limit."""
if not is_staff_admin: # staff can create any number of orgs
count = OrgModel.get_count_of_org_created_by_user_id(user_id)
if count >= current_app.config.get('MAX_NUMBER_OF_ORGS'):
raise BusinessException(Error.MAX_NUMBER_OF_ORGS_LIMIT, None)
@staticmethod
def validate_access_type(is_bceid_user, is_staff_admin, org_info):
"""Validate and return access type."""
access_type: str = org_info.get('accessType', None)
if access_type:
if is_staff_admin and not access_type:
raise BusinessException(Error.ACCCESS_TYPE_MANDATORY, None)
if not is_staff_admin and access_type in AccessType.ANONYMOUS.value:
raise BusinessException(Error.USER_CANT_CREATE_ANONYMOUS_ORG, None)
if not is_staff_admin and access_type in AccessType.GOVM.value:
raise BusinessException(Error.USER_CANT_CREATE_GOVM_ORG, None)
if not is_bceid_user and access_type in (AccessType.EXTRA_PROVINCIAL.value, AccessType.REGULAR_BCEID.value):
raise BusinessException(Error.USER_CANT_CREATE_EXTRA_PROVINCIAL_ORG, None)
if is_bceid_user and access_type not in (AccessType.EXTRA_PROVINCIAL.value, AccessType.REGULAR_BCEID.value):
raise BusinessException(Error.USER_CANT_CREATE_REGULAR_ORG, None)
else:
# If access type is not provided, add default value based on user
if is_staff_admin:
pass
elif is_bceid_user:
access_type = AccessType.EXTRA_PROVINCIAL.value
else:
access_type = AccessType.REGULAR.value
return access_type
@staticmethod
def raise_error_if_duplicate_name(name, branch_name=None):
"""Raise error if there is duplicate org name already."""
existing_similar__org = OrgModel.find_similar_org_by_name(name, branch_name=branch_name)
if existing_similar__org is not None:
raise BusinessException(Error.DATA_CONFLICT, None)
@staticmethod
def _create_payment_settings(org_model: OrgModel, payment_info: dict, # pylint: disable=too-many-arguments
payment_method: str, mailing_address=None, username: str = None,
is_new_org: bool = True) -> PaymentAccountStatus:
"""Add payment settings for the org."""
pay_url = current_app.config.get('PAY_API_URL')
org_name_for_pay = f'{org_model.name}-{org_model.branch_name}' if org_model.branch_name else org_model.name
pay_request = {
'accountId': org_model.id,
# pay needs the most unique identifier, so combine the name and branch name
'accountName': org_name_for_pay,
'paymentInfo': {
'methodOfPayment': payment_method,
'billable': org_model.billable
}
}
if mailing_address:
pay_request['contactInfo'] = mailing_address
if org_model.bcol_account_id:
pay_request['bcolAccountNumber'] = org_model.bcol_account_id
pay_request['bcolUserId'] = org_model.bcol_user_id
if (revenue_account := payment_info.get('revenueAccount')) is not None:
pay_request['paymentInfo']['revenueAccount'] = revenue_account
if payment_method == PaymentMethod.PAD.value: # PAD has bank related details
pay_request['paymentInfo']['bankTransitNumber'] = payment_info.get('bankTransitNumber', None)
pay_request['paymentInfo']['bankInstitutionNumber'] = payment_info.get('bankInstitutionNumber', None)
pay_request['paymentInfo']['bankAccountNumber'] = payment_info.get('bankAccountNumber', None)
pay_request['padTosAcceptedBy'] = username
# invoke pay-api
token = RestService.get_service_account_token()
if is_new_org:
response = RestService.post(endpoint=f'{pay_url}/accounts',
data=pay_request, token=token, raise_for_status=True)
else:
response = RestService.put(endpoint=f'{pay_url}/accounts/{org_model.id}',
data=pay_request, token=token, raise_for_status=True)
if response == http_status.HTTP_200_OK:
payment_account_status = PaymentAccountStatus.CREATED
elif response == http_status.HTTP_202_ACCEPTED:
payment_account_status = PaymentAccountStatus.PENDING
else:
payment_account_status = PaymentAccountStatus.FAILED
return payment_account_status
@staticmethod
def _get_default_payment_method_for_creditcard():
return PaymentMethod.DIRECT_PAY.value if current_app.config.get(
'DIRECT_PAY_ENABLED') else PaymentMethod.CREDIT_CARD.value
@staticmethod
def get_bcol_details(bcol_credential: Dict, bearer_token: str = None, org_id=None):
"""Retrieve and validate BC Online credentials."""
bcol_response = None
if bcol_credential:
bcol_response = RestService.post(endpoint=current_app.config.get('BCOL_API_URL') + '/profiles',
data=bcol_credential, token=bearer_token, raise_for_status=False)
if bcol_response.status_code != http_status.HTTP_200_OK:
error = json.loads(bcol_response.text)
raise BusinessException(CustomException(error['detail'], bcol_response.status_code), None)
bcol_account_number = bcol_response.json().get('accountNumber')
if Org.bcol_account_link_check(bcol_account_number, org_id):
raise BusinessException(Error.BCOL_ACCOUNT_ALREADY_LINKED, None)
return bcol_response
def change_org_ype(self, org_info, action=None, bearer_token: str = None):
"""Update the passed organization with the new info.
if Upgrade:
//TODO .Missing RULES
1.do bcol verification
2.attach mailing
3.change the org with bcol org name
If downgrade:
//TODO .Missing RULES
1.remove contact
2.deactivate payment settings
3.add new payment settings for cc
4.change the org with user passed org name
"""
if self._model.access_type == AccessType.ANONYMOUS.value:
raise BusinessException(Error.INVALID_INPUT, None)
bcol_credential = org_info.pop('bcOnlineCredential', None)
mailing_address = org_info.pop('mailingAddress', None)
current_app.logger.debug('<update_org ', action)
if action == ChangeType.DOWNGRADE.value:
if org_info.get('typeCode') != OrgType.BASIC.value:
raise BusinessException(Error.INVALID_INPUT, None)
# if they have not changed the name, they can keep it and we don't check for duplicates; otherwise check for duplicates
if org_info.get('name') != self._model.name:
Org.raise_error_if_duplicate_name(org_info['name'])
# remove the bcol payment details from payment table
org_info['bcol_account_id'] = ''
org_info['bcol_user_id'] = ''
org_info['bcol_account_name'] = ''
payment_type = Org._get_default_payment_method_for_creditcard()
# TODO Add the pay-api call here
Org.__delete_contact(self._model)
if action == ChangeType.UPGRADE.value:
if org_info.get('typeCode') != OrgType.PREMIUM.value or bcol_credential is None:
raise BusinessException(Error.INVALID_INPUT, None)
bcol_response = Org.get_bcol_details(bcol_credential, bearer_token, self._model.id).json()
Org._map_response_to_org(bcol_response, org_info)
payment_type = PaymentMethod.BCOL.value
# If mailing address is provided, save it
if mailing_address:
self.add_contact_to_org(mailing_address, self._model)
self._model.update_org_from_dict(camelback2snake(org_info), exclude=('status_code'))
# TODO pass username instead of blanks
Org._create_payment_settings(self._model, {}, payment_type, mailing_address, '', False)
return self
@staticmethod
def _map_response_to_org(bcol_response, org_info):
org_info.update({
'bcol_account_id': bcol_response.get('accountNumber'),
'bcol_user_id': bcol_response.get('userId'),
'bcol_account_name': bcol_response.get('orgName')
})
# New org who linked to BCOL account will use BCOL account name as default name
# Existing account keep their account name to avoid payment info change.
if not org_info.get('name'):
org_info.update({'name': bcol_response.get('orgName')})
@staticmethod
def add_contact_to_org(mailing_address, org):
"""Update the passed organization with the mailing address."""
contact = ContactModel(**camelback2snake(mailing_address))
contact = contact.add_to_session()
contact_link = ContactLinkModel()
contact_link.contact = contact
contact_link.org = org
contact_link.add_to_session()
def update_org(self, org_info, token_info: Dict = None, # pylint: disable=too-many-locals
bearer_token: str = None):
"""Update the passed organization with the new info."""
current_app.logger.debug('<update_org ')
has_org_updates: bool = False # update the org table if this variable is set true
has_status_changing: bool = False
org_model: OrgModel = self._model
# to enforce necessary details for govm account creation
is_govm_account = org_model.access_type == AccessType.GOVM.value
is_govm_account_creation = \
is_govm_account and org_model.status_code == OrgStatus.PENDING_INVITE_ACCEPT.value
# govm name is not being updated now
is_name_getting_updated = 'name' in org_info and not is_govm_account
if is_name_getting_updated:
existing_similar__org = OrgModel.find_similar_org_by_name(org_info['name'], self._model.id)
if existing_similar__org is not None:
raise BusinessException(Error.DATA_CONFLICT, None)
has_org_updates = True
# If the account is created using a BCOL credential, verify it's a valid BC Online account
# If it's a valid account disable the current one and add a new one
if bcol_credential := org_info.pop('bcOnlineCredential', None):
bcol_response = Org.get_bcol_details(bcol_credential, bearer_token, self._model.id).json()
Org._map_response_to_org(bcol_response, org_info)
has_org_updates = True
product_subscriptions = org_info.pop('productSubscriptions', None)
mailing_address = org_info.pop('mailingAddress', None)
payment_info = org_info.pop('paymentInfo', {})
if is_govm_account_creation and (mailing_address is None or payment_info.get('revenueAccount') is None):
raise BusinessException(Error.GOVM_ACCOUNT_DATA_MISSING, None)
if is_govm_account_creation:
has_org_updates = True
org_info['statusCode'] = OrgStatus.PENDING_STAFF_REVIEW.value
has_status_changing = True
if product_subscriptions is not None:
subscription_data = {'subscriptions': product_subscriptions}
ProductService.create_product_subscription(self._model.id, subscription_data=subscription_data,
skip_auth=True, token_info=token_info)
# Update mailing address Or create new one
if mailing_address:
contacts = self._model.contacts
if len(contacts) > 0:
contact = self._model.contacts[0].contact
contact.update_from_dict(**camelback2snake(mailing_address))
contact.save()
else:
Org.add_contact_to_org(mailing_address, self._model)
if has_org_updates:
excluded = ('type_code',) if has_status_changing else EXCLUDED_FIELDS
self._model.update_org_from_dict(camelback2snake(org_info), exclude=excluded)
if payment_info:
selected_payment_method = payment_info.get('paymentMethod', None)
payment_type = Org._validate_and_get_payment_method(selected_payment_method, OrgType[self._model.type_code],
self._model.access_type)
user: UserModel = UserModel.find_by_jwt_token(token=token_info)
# TODO when updating the bank info , dont pass user.username as tos updated by..handle this
Org._create_payment_settings(self._model, payment_info, payment_type, mailing_address, user.username, False)
current_app.logger.debug('>update_org ')
return self
@staticmethod
def delete_org(org_id, token_info: Dict = None, ):
"""Soft-Deletes an Org.
It should not be deletable if there are members or business associated with the org
"""
# Check authorization for the user
current_app.logger.debug('<org Inactivated')
check_auth(token_info, one_of_roles=(ADMIN, STAFF), org_id=org_id)
org: OrgModel = OrgModel.find_by_org_id(org_id)
if not org:
raise BusinessException(Error.DATA_NOT_FOUND, None)
count_members = len([member for member in org.members if member.status in VALID_STATUSES])
if count_members > 1 or len(org.affiliated_entities) >= 1:
raise BusinessException(Error.ORG_CANNOT_BE_DISSOLVED, None)
org.status_code = OrgStatus.INACTIVE.value
org.save()
# Don't remove the account holders group membership if it's staff who deactivates the org.
is_staff_admin = token_info and Role.STAFF_CREATE_ACCOUNTS.value in token_info.get('realm_access').get('roles')
if not is_staff_admin:
# Remove the user from the group if the user doesn't have any other org memberships
user = UserModel.find_by_jwt_token(token=token_info)
if len(MembershipModel.find_orgs_for_user(user.id)) == 0:
KeycloakService.remove_from_account_holders_group(user.keycloak_guid)
current_app.logger.debug('org Inactivated>')
def get_payment_info(self):
"""Return the Payment Details for an org by calling Pay API."""
pay_url = current_app.config.get('PAY_API_URL')
# invoke pay-api
token = RestService.get_service_account_token()
response = RestService.get(endpoint=f'{pay_url}/accounts/{self._model.id}', token=token, retry_on_failure=True)
return response.json()
@staticmethod
def find_by_org_id(org_id, token_info: Dict = None, allowed_roles: Tuple = None):
"""Find and return an existing organization with the provided id."""
if org_id is None:
return None
org_model = OrgModel.find_by_org_id(org_id)
if not org_model:
return None
# Check authorization for the user
check_auth(token_info, one_of_roles=allowed_roles, org_id=org_id)
return Org(org_model)
@staticmethod
def find_by_org_name(org_name):
"""Find and return an existing organization with the provided name."""
if org_name is None:
return None
org_model = OrgModel.find_by_org_name(org_name)
if not org_model:
return None
orgs = {'orgs': []}
for org in org_model:
orgs['orgs'].append(Org(org).as_dict())
return orgs
@staticmethod
def get_login_options_for_org(org_id, token_info: Dict = None, allowed_roles: Tuple = None):
"""Get the payment settings for the given org."""
        current_app.logger.debug('>get_login_options')
org = OrgModel.find_by_org_id(org_id)
if org is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
# Check authorization for the user
check_auth(token_info, one_of_roles=allowed_roles, org_id=org_id)
return AccountLoginOptionsModel.find_active_by_org_id(org_id)
@staticmethod
def add_login_option(org_id, login_source, token_info: Dict = None):
"""Create a new contact for this org."""
# check for existing contact (only one contact per org for now)
current_app.logger.debug('>add_login_option')
org = OrgModel.find_by_org_id(org_id)
if org is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
check_auth(token_info, one_of_roles=ADMIN, org_id=org_id)
login_option = AccountLoginOptionsModel(login_source=login_source, org_id=org_id)
login_option.save()
return login_option
@staticmethod
def update_login_option(org_id, login_source, token_info: Dict = None):
"""Create a new contact for this org."""
# check for existing contact (only one contact per org for now)
current_app.logger.debug('>update_login_option')
org = OrgModel.find_by_org_id(org_id)
if org is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
check_auth(token_info, one_of_roles=ADMIN, org_id=org_id)
existing_login_option = AccountLoginOptionsModel.find_active_by_org_id(org_id)
if existing_login_option is not None:
existing_login_option.is_active = False
existing_login_option.add_to_session()
login_option = AccountLoginOptionsModel(login_source=login_source, org_id=org_id)
login_option.save()
return login_option
@staticmethod
def get_contacts(org_id):
"""Get the contacts for the given org."""
current_app.logger.debug('get_contacts>')
org = OrgModel.find_by_org_id(org_id)
if org is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
collection = []
for contact_link in org.contacts:
collection.append(ContactService(contact_link.contact).as_dict())
return {'contacts': collection}
@staticmethod
def add_contact(org_id, contact_info):
"""Create a new contact for this org."""
# check for existing contact (only one contact per org for now)
current_app.logger.debug('>add_contact')
org = OrgModel.find_by_org_id(org_id)
if org is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
contact_link = ContactLinkModel.find_by_org_id(org_id)
if contact_link is not None:
raise BusinessException(Error.DATA_ALREADY_EXISTS, None)
contact = ContactModel(**camelback2snake(contact_info))
contact = contact.flush()
contact_link = ContactLinkModel()
contact_link.contact = contact
contact_link.org = org
contact_link.save()
current_app.logger.debug('<add_contact')
return ContactService(contact)
@staticmethod
def update_contact(org_id, contact_info):
"""Update the existing contact for this org."""
current_app.logger.debug('>update_contact ')
org = OrgModel.find_by_org_id(org_id)
if org is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
# find the contact link for this org
contact_link = ContactLinkModel.find_by_org_id(org_id)
if contact_link is None or contact_link.contact is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
contact = contact_link.contact
contact.update_from_dict(**camelback2snake(contact_info))
contact.save()
current_app.logger.debug('<update_contact ')
# return the updated contact
return ContactService(contact)
@staticmethod
def delete_contact(org_id):
"""Delete the contact for this org."""
current_app.logger.debug('>delete_contact ')
org = OrgModel.find_by_org_id(org_id)
if not org or not org.contacts:
raise BusinessException(Error.DATA_NOT_FOUND, None)
deleted_contact = Org.__delete_contact(org)
current_app.logger.debug('<delete_contact ')
return ContactService(deleted_contact)
@staticmethod
def __delete_contact(org):
# unlink the org from its contact
contact_link = ContactLinkModel.find_by_org_id(org.id)
if contact_link:
del contact_link.org
contact_link.commit()
# clean up any orphaned contacts and links
if not contact_link.has_links():
contact = contact_link.contact
contact_link.delete()
contact.delete()
return contact
return None
def get_owner_count(self):
"""Get the number of owners for the specified org."""
return len([x for x in self._model.members if x.membership_type_code == ADMIN])
@staticmethod
def get_orgs(user_id, valid_statuses=VALID_STATUSES):
"""Return the orgs associated with this user."""
return MembershipModel.find_orgs_for_user(user_id, valid_statuses)
@staticmethod
def search_orgs(**kwargs): # pylint: disable=too-many-locals
"""Search for orgs based on input parameters."""
orgs = {'orgs': []}
if kwargs.get('business_identifier', None):
affiliation: AffiliationModel = AffiliationModel. \
find_affiliations_by_business_identifier(kwargs.get('business_identifier'))
if affiliation:
orgs['orgs'].append(Org(OrgModel.find_by_org_id(affiliation.org_id)).as_dict())
else:
include_invitations: bool = False
page: int = int(kwargs.get('page'))
limit: int = int(kwargs.get('limit'))
statuses: str = kwargs.get('statuses', None)
name: str = kwargs.get('name', None)
# https://github.com/bcgov/entity/issues/4786
access_type, is_staff_admin = Org.refine_access_type(kwargs.get('access_type', None),
kwargs.get('token', None))
search_args = (access_type,
name,
statuses,
kwargs.get('bcol_account_id', None),
page, limit)
if statuses and OrgStatus.PENDING_ACTIVATION.value in statuses:
# only staff admin can see director search accounts
# https://github.com/bcgov/entity/issues/4786
if not is_staff_admin:
raise BusinessException(Error.INVALID_USER_CREDENTIALS, None)
org_models, total = OrgModel.search_pending_activation_orgs(name)
include_invitations = True
else:
org_models, total = OrgModel.search_org(*search_args)
for org in org_models:
org_dict = Org(org).as_dict()
org_dict['contacts'] = []
org_dict['invitations'] = []
if org.contacts:
org_dict['contacts'].append(
ContactSchema(exclude=('links',)).dump(org.contacts[0].contact, many=False))
if include_invitations and org.invitations:
org_dict['invitations'].append(
InvitationSchema(exclude=('membership',)).dump(org.invitations[0].invitation, many=False))
orgs['orgs'].append(org_dict)
orgs['total'] = total
orgs['page'] = page
orgs['limit'] = limit
return orgs
@staticmethod
def refine_access_type(access_type_str, token_info):
"""Find Access Type."""
roles = token_info.get('realm_access').get('roles')
is_staff_admin = token_info and (Role.STAFF_CREATE_ACCOUNTS.value in roles or
Role.STAFF_MANAGE_ACCOUNTS.value in roles)
access_type = [] if not access_type_str else access_type_str.split(',')
if not is_staff_admin:
if len(access_type) < 1:
# pass everything except DIRECTOR SEARCH
access_type = [item.value for item in AccessType if item != AccessType.ANONYMOUS]
else:
access_type.remove(AccessType.ANONYMOUS.value)
return access_type, is_staff_admin
@staticmethod
def bcol_account_link_check(bcol_account_id, org_id=None):
"""Validate the BCOL id is linked or not. If already linked, return True."""
if current_app.config.get('BCOL_ACCOUNT_LINK_CHECK'):
org = OrgModel.find_by_bcol_id(bcol_account_id)
if org and org.id != org_id: # check if already taken up by different org
return True
return False
@staticmethod
def change_org_status(org_id: int, status_code, suspension_reason_code, token_info: Dict = None):
"""Update the status of the org.
        Used now for suspending/activating an account.
        1) check access - only staff can do it now
        2) check org status/eligibility
3) suspend it
"""
current_app.logger.debug('<change_org_status ')
org_model: OrgModel = OrgModel.find_by_org_id(org_id)
user: UserModel = UserModel.find_by_jwt_token(token=token_info)
current_app.logger.debug('<setting org status to ')
org_model.status_code = status_code
org_model.decision_made_by = user.username # not sure if a new field is needed for this.
if status_code == OrgStatus.SUSPENDED.value:
org_model.suspended_on = datetime.today()
org_model.suspension_reason_code = suspension_reason_code
org_model.save()
current_app.logger.debug('change_org_status>')
return Org(org_model)
@staticmethod
def approve_or_reject(org_id: int, is_approved: bool, token_info: Dict, origin_url: str = None):
"""Mark the affidavit as approved or rejected."""
current_app.logger.debug('<find_affidavit_by_org_id ')
# Get the org and check what's the current status
org: OrgModel = OrgModel.find_by_org_id(org_id)
# Current User
user: UserModel = UserModel.find_by_jwt_token(token=token_info)
# If status is PENDING_STAFF_REVIEW handle affidavit approve process, else raise error
if org.status_code == OrgStatus.PENDING_STAFF_REVIEW.value:
AffidavitService.approve_or_reject(org_id, is_approved, user)
else:
raise BusinessException(Error.INVALID_INPUT, None)
if is_approved:
org.status_code = OrgStatus.ACTIVE.value
else:
org.status_code = OrgStatus.REJECTED.value
org.decision_made_by = user.username
org.decision_made_on = datetime.now()
# TODO Publish to activity stream
org.save()
# Find admin email address
admin_email = ContactLinkModel.find_by_user_id(org.members[0].user.id).contact.email
Org.send_approved_rejected_notification(admin_email, org.name, org.status_code, origin_url)
current_app.logger.debug('>find_affidavit_by_org_id ')
return Org(org)
@staticmethod
def send_staff_review_account_reminder(user, org_id, origin_url):
"""Send staff review account reminder notification."""
current_app.logger.debug('<send_staff_review_account_reminder')
subject = '[BC Registries and Online Services] An out of province account needs to be approved.'
sender = current_app.config.get('MAIL_FROM_ID')
recipient = current_app.config.get('STAFF_ADMIN_EMAIL')
template = ENV.get_template('email_templates/staff_review_account_email.html')
context_path = f'review-account/{org_id}'
app_url = '{}/{}'.format(origin_url, current_app.config.get('AUTH_WEB_TOKEN_CONFIRM_PATH'))
review_url = '{}/{}'.format(app_url, context_path)
logo_url = f'{app_url}/{current_app.config.get("REGISTRIES_LOGO_IMAGE_NAME")}'
try:
sent_response = send_email(subject, sender, recipient,
template.render(url=review_url, user=user, logo_url=logo_url))
current_app.logger.debug('<send_staff_review_account_reminder')
if not sent_response:
current_app.logger.error('<send_staff_review_account_reminder failed')
raise BusinessException(Error.FAILED_NOTIFICATION, None)
        except Exception:  # pylint:disable=broad-except
current_app.logger.error('<send_staff_review_account_reminder failed')
raise BusinessException(Error.FAILED_NOTIFICATION, None)
@staticmethod
def send_approved_rejected_notification(receipt_admin_email, org_name, org_status: OrgStatus, origin_url):
"""Send Approved/Rejected notification to the user."""
current_app.logger.debug('<send_approved_rejected_notification')
sender = current_app.config.get('MAIL_FROM_ID')
if org_status == OrgStatus.ACTIVE.value:
template = ENV.get_template('email_templates/nonbcsc_org_approved_notification_email.html')
subject = '[BC Registries and Online Services] APPROVED Business Registry Account'
elif org_status == OrgStatus.REJECTED.value:
template = ENV.get_template('email_templates/nonbcsc_org_rejected_notification_email.html')
subject = '[BC Registries and Online Services] YOUR ACTION REQUIRED: ' \
'Business Registry Account cannot be approved'
else:
return # dont send mail for any other status change
app_url = '{}/{}'.format(origin_url, current_app.config.get('AUTH_WEB_TOKEN_CONFIRM_PATH'))
logo_url = f'{app_url}/{current_app.config.get("REGISTRIES_LOGO_IMAGE_NAME")}'
params = {'org_name': org_name}
try:
sent_response = send_email(subject, sender, receipt_admin_email,
template.render(url=app_url, params=params, org_name=org_name,
logo_url=logo_url))
current_app.logger.debug('<send_approved_rejected_notification')
if not sent_response:
current_app.logger.error('<send_approved_rejected_notification failed')
raise BusinessException(Error.FAILED_NOTIFICATION, None)
        except Exception:  # pylint:disable=broad-except
current_app.logger.error('<send_approved_rejected_notification failed')
raise BusinessException(Error.FAILED_NOTIFICATION, None)
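

# Illustrative sketch only (not part of the original service): how a caller might
# drive Org.change_org_status() through the suspend/re-activate flow described in
# its docstring. The reason code and the shape of `token_info` are hypothetical
# placeholders, not values defined elsewhere in this module.
def _example_suspend_then_reactivate(org_id: int, suspension_reason_code: str, token_info: Dict = None):
    suspended = Org.change_org_status(org_id=org_id,
                                      status_code=OrgStatus.SUSPENDED.value,
                                      suspension_reason_code=suspension_reason_code,
                                      token_info=token_info)
    # The reason code is only persisted for SUSPENDED, so None is fine here.
    reactivated = Org.change_org_status(org_id=org_id,
                                        status_code=OrgStatus.ACTIVE.value,
                                        suspension_reason_code=None,
                                        token_info=token_info)
    return suspended, reactivated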
| 46.563006
| 120
| 0.66708
|
2c18a86fc7c2e479b6b9e067dd868cbd770aa1fc
| 9,985
|
py
|
Python
|
app/geobox/lib/context.py
|
omniscale/gbi-client
|
c8af68ede195150b2aca0516ac8e030fe4ba1f6d
|
[
"Apache-2.0"
] | 2
|
2018-10-24T06:32:42.000Z
|
2021-01-20T02:25:05.000Z
|
app/geobox/lib/context.py
|
omniscale/gbi-client
|
c8af68ede195150b2aca0516ac8e030fe4ba1f6d
|
[
"Apache-2.0"
] | null | null | null |
app/geobox/lib/context.py
|
omniscale/gbi-client
|
c8af68ede195150b2aca0516ac8e030fe4ba1f6d
|
[
"Apache-2.0"
] | 17
|
2018-10-24T06:32:45.000Z
|
2022-02-09T13:10:54.000Z
|
# This file is part of the GBI project.
# Copyright (C) 2012 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from shapely.geometry import asShape
import requests
import datetime
from geobox import model
from geobox.lib.couchdb import CouchDB
from werkzeug.exceptions import NotFound
class ContextError(Exception):
pass
class Context(object):
def __init__(self, doc):
self.doc = doc
def wmts_sources(self):
for lyr in self.doc.get('wmts_sources', []):
yield lyr
def wms_sources(self):
for lyr in self.doc.get('wms_sources', []):
yield lyr
def wfs_sources(self):
for lyr in self.doc.get('wfs_sources', []):
yield lyr
def logging_server(self):
return self.doc.get('logging', {}).get('url')
def update_coverage_url(self):
return self.doc.get('update_coverage', {}).get('url')
def couchdb_sources(self):
return self.doc.get('couchdb_sources', [])
def has_couchdb_sources(self):
return len(self.couchdb_sources()) > 0
def user(self):
return self.doc.get('user', {})
def prefix(self):
return self.doc.get('portal', {}).get('prefix').lower()
def version(self):
return self.doc.get('version')
def parcel_search_url(self):
return self.doc.get('parcel_search_url')
class ContextModelUpdater(object):
"""
Update the internal source/layer models from a new context.
"""
def __init__(self, session, version):
self.session = session
self.version = version
def sources_from_context(self, context):
prefix = context.doc.get('portal', {}).get('prefix').lower()
for source in context.wmts_sources():
yield self.source_for_conf(source, prefix, source_type='wmts')
for source in context.wms_sources():
yield self.source_for_conf(source, prefix, source_type='wms')
def source_for_conf(self, layer, prefix, source_type):
query = self.session.query(model.ExternalWMTSSource).filter_by(name=layer['name'])
query = query.filter_by(source_type=source_type)
if prefix:
query = query.filter_by(prefix=prefix)
source = query.all()
if source:
source = source[0]
else:
source = model.ExternalWMTSSource()
source.name = layer['name']
source.prefix = prefix
source.title = layer['title']
source.url = layer['url']
source.is_protected = layer.get('is_protected')
source.is_public = layer.get('is_public')
if source_type == 'wmts' and self.version == '0.1':
source.url = source.url + layer['layer'] + '/GoogleMapsCompatible-{TileMatrix}-{TileCol}-{TileRow}/tile'
if not source.is_protected:
source.username = layer.get('username')
source.password = layer.get('password')
source.format = layer['format']
source.is_overlay = layer['overlay']
source.background_layer = layer.get('is_background_layer', False)
source.max_tiles = layer.get('max_tiles')
if source_type == 'wms':
source.srs = layer['srs']
source.layer = layer['layer']
assert source_type in ('wmts', 'wms')
source.source_type = source_type
if 'view_restriction' in layer:
source.view_coverage = self.coverage_from_restriction(layer['view_restriction'])
source.view_level_start = layer['view_restriction'].get('zoom_level_start')
source.view_level_end = layer['view_restriction'].get('zoom_level_end')
else:
source.view_coverage = None
source.view_level_start = None
source.view_level_end = None
source.download_coverage = None
source.download_level_start = layer['download_restriction'].get('zoom_level_start')
source.download_level_end = layer['download_restriction'].get('zoom_level_end')
source.active = True
return source
def coverage_from_restriction(self, restriction):
geom = asShape(restriction['geometry'])
if geom.type not in ('Polygon', 'MultiPolygon'):
raise ContextError('unsupported geometry type %s' % geom.type)
return json.dumps(restriction['geometry'])
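

# Illustrative only: how update_raster_sources() further down drives the updater
# above -- build it from a session and the context's version, then iterate the
# source models it yields. `session` and `context` are assumed to be provided by
# the caller.
def _example_collect_sources(session, context):
    updater = ContextModelUpdater(session, context.version())
    return list(updater.sources_from_context(context))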
class AuthenticationError(Exception):
pass
def wfs_source_for_conf(session, layer, prefix):
query = session.query(model.ExternalWFSSource).filter_by(name=layer['name'])
if prefix:
query = query.filter_by(prefix=prefix)
source = query.all()
if source:
source = source[0]
else:
source = model.ExternalWFSSource()
source.prefix = prefix
source.id = layer['id']
source.name = layer['name']
source.layer = layer['layer']
source.host = layer['host']
source.url = layer['url']
source.srs = layer['srs']
source.geometry_field = layer['geometry_field']
source.feature_ns = layer['feature_ns']
source.typename = layer['typename']
source.search_property = layer.get('search_property')
source.is_protected = layer.get('is_protected')
if not source.is_protected:
source.username = layer.get('username')
source.password = layer.get('password')
source.active = True
return source
def load_context_document(gbi_server, db_session, user, password):
try:
result = requests.get(gbi_server.url, auth=(user, password))
except requests.exceptions.ConnectionError:
raise NotFound()
if result.status_code in (401, 403):
raise AuthenticationError()
context_doc = result.json()
context = Context(context_doc)
gbi_server.context = context
gbi_server.logging_url = context.logging_server()
gbi_server.update_coverage_url = context.update_coverage_url()
gbi_server.prefix = context.prefix()
gbi_server.home_server = context.has_couchdb_sources()
gbi_server.last_update = datetime.datetime.now()
if user is not None and user != '':
gbi_server.username = user
db_session.commit()
def test_context_document(url, user=None, password=None):
auth = (user, password)
auth = None if None in auth else auth
try:
        result = requests.get(url, auth=auth)
except requests.exceptions.ConnectionError:
raise NotFound()
if result.status_code in (401, 403):
raise AuthenticationError()
# check if we can load context document
try:
context_doc = result.json()
Context(context_doc)
except (ValueError, ContextError):
raise ContextError()
return True
def update_raster_sources(gbi_server, db_session):
updater = ContextModelUpdater(db_session, gbi_server.context.version())
for source in updater.sources_from_context(gbi_server.context):
source.gbi_server = gbi_server
db_session.add(source)
def update_wfs_sources(gbi_server, db_session):
for source in gbi_server.context.wfs_sources():
wfs_source = wfs_source_for_conf(db_session, source, gbi_server.prefix)
wfs_source.gbi_server = gbi_server
db_session.add(wfs_source)
def update_parcel_search_source(gbi_server, db_session):
parcel_search_url = gbi_server.context.parcel_search_url()
if gbi_server.parcel_search_source: # existing source
if parcel_search_url:
gbi_server.parcel_search_source.url = parcel_search_url
gbi_server.parcel_search_source.active = True
else:
gbi_server.parcel_search_source.active = False
else:
if parcel_search_url:
parcel_search_source = model.ParcelSearchSource(
url=parcel_search_url,
active=True,
gbi_server=gbi_server
)
db_session.add(parcel_search_source)
def update_couchdb_sources(gbi_server, app_state):
couchdb_port = app_state.config.get_int('couchdb', 'port')
couchdb = CouchDB('http://127.0.0.1:%d' % couchdb_port, '_replicator')
couchdb_sources = gbi_server.context.couchdb_sources()
for couchdb_source in couchdb_sources:
replicate_database(couchdb, couchdb_source, app_state,
gbi_server.prefix)
def source_couchdb_url(couchdb_source):
dburl = couchdb_source['url'] + '/' + couchdb_source['dbname']
if 'username' in couchdb_source:
schema, dburl = dburl.split('://')
dburl = '%s://%s:%s@%s' % (
schema,
couchdb_source['username'],
couchdb_source['password'],
dburl,
)
return dburl
def replicate_database(couchdb, couchdb_source, app_state, prefix=None):
dbname_user = couchdb_source['dbname_user']
if prefix is not None:
dbname_user = '%s_%s' % (prefix, dbname_user)
dburl = source_couchdb_url(couchdb_source)
couch_url = 'http://127.0.0.1:%d' % app_state.config.get_int(
'couchdb', 'port')
target_couchdb = CouchDB(couch_url, dbname_user)
target_couchdb.init_db()
couchdb.replication(
repl_id=couchdb_source['dbname'],
source=dburl,
target=dbname_user,
continuous=True,
)
if couchdb_source['writable']:
couchdb.replication(
repl_id=couchdb_source['dbname'] + '_push',
source=dbname_user,
target=dburl,
continuous=True,
)
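

# Illustrative only: what source_couchdb_url() produces for a couchdb_sources
# entry, as used by replicate_database() above. The dict below is a hypothetical
# example of the structure expected from the context document.
def _example_source_couchdb_url():
    couchdb_source = {
        'url': 'http://couch.example.org',
        'dbname': 'features',
        'username': 'alice',
        'password': 'secret',
    }
    # Returns 'http://alice:secret@couch.example.org/features'
    return source_couchdb_url(couchdb_source)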
| 32.313916
| 116
| 0.658988
|
ae4e65b50588649a40260a74e43166822822e6ad
| 4,398
|
py
|
Python
|
src/lr-mnist.py
|
kah109xq/MalteHBy
|
f7761c1c8ed31a2501612302bc1fd80ca14b2d22
|
[
"MIT"
] | 1
|
2021-02-07T17:44:11.000Z
|
2021-02-07T17:44:11.000Z
|
src/lr-mnist.py
|
MalteHB/visual_analytics_cds
|
e1f569e41260eecd8b460c43ee5d27952cd0fd7c
|
[
"MIT"
] | null | null | null |
src/lr-mnist.py
|
MalteHB/visual_analytics_cds
|
e1f569e41260eecd8b460c43ee5d27952cd0fd7c
|
[
"MIT"
] | null | null | null |
import argparse
from utils.utils import fetch_mnist
import numpy as np
# Import sklearn
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
def main(args):
print("Initiating some awesome logistic regression classification!")
# Importing arguments from the arguments parser
random_state = args.rs
test_size = args.ts
scaling = args.s
minmax = args.mm
lr_mnist = LogisticRegressionMNIST()
X_train, X_test, y_train, y_test = lr_mnist.split_and_preprocess_data(random_state=random_state,
test_size=test_size,
scaling=scaling,
minmax=minmax)
    clf_model = lr_mnist.train(X_train, y_train)
lr_mnist.print_eval_metrics(clf_model, X_test, y_test)
print("DONE! Have a nice day. :-)")
class LogisticRegressionMNIST:
def __init__(self):
self.X, self.y = fetch_mnist()
def split_and_preprocess_data(self, random_state=1, test_size=0.2, scaling=False, minmax=True):
"""Splits the data into a train/test-split
Args:
            random_state (int, optional): Random seed for the split. Defaults to 1.
            test_size (float or int, optional): Fraction (or absolute number) of samples in the test split. Defaults to 0.2.
            scaling (bool, optional): Divide pixel values by 255. Defaults to False.
            minmax (bool, optional): Min-max normalize pixel values. Defaults to True.
Returns:
X_train, X_test, y_train, y_test: Train/test-split of the data.
"""
self.X = np.array(self.X)
self.y = np.array(self.y)
if scaling:
X_norm = self.X / 255.0
elif minmax:
X_norm = (self.X - self.X.min()) / (self.X.max() - self.X.min())
else:
X_norm = self.X
X_train, X_test, y_train, y_test = train_test_split(X_norm,
self.y,
random_state=random_state,
test_size=test_size)
return X_train, X_test, y_train, y_test
def train(self, X_train, y_train):
"""Trains a the model.
Args:
X_train (np.array): Training images.
y_train (np.array): Training labels
Returns:
sklearn.model: Logistic Regression model
"""
clf_model = LogisticRegression(penalty='none',
tol=0.1,
solver='saga',
multi_class='multinomial').fit(X_train, y_train)
return clf_model
def print_eval_metrics(self, clf_model, X_test, y_test):
"""Prints the evaluation metrics to the terminal.
Args:
clf_model (sklearnModel): Logistic Regression model
X_test (np.array): Test images.
y_test (np.array): Test labels.
"""
y_pred = clf_model.predict(X_test)
cm = metrics.classification_report(y_test, y_pred)
print(cm)
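

# Illustrative only: using LogisticRegressionMNIST programmatically, mirroring
# what main() below does without going through argparse. The argument values are
# examples, not project defaults beyond those already in the signatures above.
def _example_run(random_state=1, test_size=0.2):
    lr_mnist = LogisticRegressionMNIST()
    X_train, X_test, y_train, y_test = lr_mnist.split_and_preprocess_data(random_state=random_state,
                                                                          test_size=test_size,
                                                                          scaling=False,
                                                                          minmax=True)
    clf_model = lr_mnist.train(X_train, y_train)
    lr_mnist.print_eval_metrics(clf_model, X_test, y_test)
    return clf_model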
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--rs',
metavar="Random State",
type=int,
                        help='Random state used for the train/test split.',
required=False,
default=1)
    parser.add_argument('--ts',
                        metavar="Test Size",
                        type=int,
                        help='Number of samples in the test split.',
                        required=False,
                        default=2500)
    parser.add_argument('--s',
                        metavar="Scaling",
                        type=lambda v: str(v).lower() in ('1', 'true', 'yes'),
                        help='Whether to scale the data (divide by 255) or not.',
                        required=False,
                        default=False)
    parser.add_argument('--mm',
                        metavar="MinMax",
                        type=lambda v: str(v).lower() in ('1', 'true', 'yes'),
                        help='Whether to MinMax normalize the data or not.',
                        required=False,
                        default=True)
main(parser.parse_args())
| 29.516779
| 100
| 0.510687
|
c0acacf94130e4c154fb991f218261717dac2497
| 7,420
|
py
|
Python
|
tests/test_regression.py
|
ogrisel/viztracer
|
4815724081b0dcb6416775962c9eb4638dc577be
|
[
"Apache-2.0"
] | null | null | null |
tests/test_regression.py
|
ogrisel/viztracer
|
4815724081b0dcb6416775962c9eb4638dc577be
|
[
"Apache-2.0"
] | null | null | null |
tests/test_regression.py
|
ogrisel/viztracer
|
4815724081b0dcb6416775962c9eb4638dc577be
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import viztracer
from viztracer import VizTracer, ignore_function
from .cmdline_tmpl import CmdlineTmpl
from .base_tmpl import BaseTmpl
class TestIssue1(BaseTmpl):
def test_datetime(self):
tracer = viztracer.VizTracer()
tracer.start()
from datetime import timedelta
timedelta(hours=5)
tracer.stop()
tracer.parse()
tracer.save(output_file="tmp.json")
tracer = viztracer.VizTracer()
tracer.start()
from datetime import timedelta
timedelta(hours=5)
tracer.stop()
tracer.parse()
tracer.save(output_file="tmp.json")
os.remove("tmp.json")
class TestStackOptimization(BaseTmpl):
    # There's an ordering issue in tracefunc when skipping the FEE log.
    # If the stack is empty (stack_top is NULL) and we enter an ignored
    # function, ignore_stack_depth is incremented. However, when the
    # corresponding exit comes, ignore_stack_depth won't be decremented,
    # because the function is skipped when the stack is empty and it's a
    # return event.
def test_instant(self):
def s():
return 0
tracer = VizTracer()
tracer.start()
# This is a library function which will be ignored, but
        # this could trick the system into an ignoring status
tracer.add_instant('name = {"a": 1}')
s()
s()
s()
tracer.stop()
entries = tracer.parse()
tracer.save()
self.assertEqual(entries, 4)
class TestSegFaultRegression(BaseTmpl):
# Without parsing, cleanup of C function had caused segfault
def test_cleanup(self):
tracer = VizTracer()
tracer.start()
_ = len([1, 2, 3])
_ = sum([2, 3, 4])
try:
raise Exception("lol")
except Exception:
pass
tracer.stop()
tracer.cleanup()
class TestFunctionArg(BaseTmpl):
def test_functionarg(self):
def f(n):
tracer.add_func_args("input", n)
if n < 2:
return 1
return f(n - 1) + f(n - 2)
tracer = VizTracer()
tracer.start()
f(5)
tracer.stop()
tracer.parse()
inputs = set()
for d in tracer.data["traceEvents"]:
if d["ph"] == "X":
inputs.add(d["args"]["input"])
self.assertEqual(inputs, set([0, 1, 2, 3, 4, 5]))
issue21_code = """
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--script_option", action="store_true")
parser.add_argument("-o", action="store_true")
options = parser.parse_args()
print(options)
if not options.script_option:
exit(1)
"""
class TestIssue21(CmdlineTmpl):
# viztracer --run my_script --script_option
# is not parsed correctly because the program gets confused
# about --script_option
def test_issue21(self):
self.template(["viztracer", "--include_files", "/", "--run", "cmdline_test.py", "--script_option"],
script=issue21_code)
self.template(["viztracer", "--include_files", "/", "--", "cmdline_test.py", "--script_option"],
script=issue21_code)
self.template(["viztracer", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "--run", "cmdline_test.py", "-o", "--script_option"], script=issue21_code)
self.template(["viztracer", "--", "cmdline_test.py", "-o", "--script_option"], script=issue21_code)
self.template(["viztracer", "--run"], script=issue21_code, success=False, expected_output_file=None)
self.template(["viztracer", "--"], script=issue21_code, success=False, expected_output_file=None)
term_code = """
import time
a = []
a.append(1)
for i in range(10):
time.sleep(1)
"""
class TestTermCaught(CmdlineTmpl):
def test_term(self):
if sys.platform == "win32":
return
self.build_script(term_code)
cmd = ["viztracer", "-o", "term.json", "cmdline_test.py"]
if os.getenv("COVERAGE_RUN"):
cmd = ["coverage", "run", "--parallel-mode", "--pylib", "-m"] + cmd
p = subprocess.Popen(cmd)
time.sleep(1.5)
p.terminate()
p.wait(timeout=10)
self.assertFileExists("term.json", 10)
self.cleanup(output_file="term.json")
class TestIssue42(BaseTmpl):
def test_issue42(self):
@ignore_function
def f():
lst = []
lst.append(1)
tracer = VizTracer()
tracer.start()
f()
tracer.stop()
tracer.parse()
self.assertEventNumber(tracer.data, 0)
issue47_code = """
import sys
import gc
class C:
def __init__(self):
self.data = bytearray()
def change(self):
b = memoryview(self.data).tobytes()
self.data += b"123123"
del self.data[:1]
c = C()
c.change()
"""
class TestIssue47(CmdlineTmpl):
def test_issue47(self):
self.template(["viztracer", "cmdline_test.py", "-o", "result.json"],
script=issue47_code,
expected_output_file="result.json",
expected_entries=7)
class TestIssue58(CmdlineTmpl):
def test_issue58(self):
if multiprocessing.get_start_method() == "fork":
self.template(["viztracer", "--log_multiprocess", "-m", "tests.modules.issue58"],
expected_output_file="result.json")
class TestIssue83(CmdlineTmpl):
def test_issue83(self):
self.template(["viztracer", "--quiet", "-m", "tests.modules.issue83"],
expected_stdout="__main__")
issue119_code = """
import os
import sys
import tempfile
os.chdir(sys.argv[1])
"""
class TestIssue119(CmdlineTmpl):
def test_issue119(self):
with tempfile.TemporaryDirectory() as name:
filepath = os.path.join(name, "result.json")
cwd = os.getcwd()
os.chdir(name)
with tempfile.TemporaryDirectory() as script_dir:
try:
self.template(
["viztracer", "-o", "result.json", "cmdline_test.py", script_dir],
script=issue119_code,
expected_output_file=filepath
)
finally:
os.chdir(cwd)
issue121_code = """
import atexit
def fib(n):
if n <= 2:
return 1
return fib(n - 1) + fib(n - 2)
atexit.register(fib, 6)
"""
class TestIssue121(CmdlineTmpl):
def test_issue119(self):
self.template(
["viztracer", "cmdline_test.py"],
script=issue121_code,
expected_entries=18
)
issue141_code = """
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
import time
def my_function(*args):
time.sleep(0.5)
if __name__ == '__main__':
e = ProcessPoolExecutor(max_workers=3)
e.map(my_function, range(1))
"""
class TestIssue141(CmdlineTmpl):
def test_issue141(self):
self.template(
["viztracer", "cmdline_test.py"],
script=issue141_code,
)
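

# Illustrative only (not one of the project's regression tests): the minimal
# start/stop/parse/save cycle that the tests above build on. The output file
# name is arbitrary.
def _example_basic_trace(output_file="example_trace.json"):
    tracer = VizTracer()
    tracer.start()
    sum(i * i for i in range(100))  # some traced work
    tracer.stop()
    tracer.parse()
    tracer.save(output_file=output_file)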
| 27.179487
| 110
| 0.5969
|
46bc8661df8cc14506220a61bef680975b106f21
| 388
|
py
|
Python
|
apps/brew/management/commands/ingredients/style.py
|
martync/zython
|
e008bbb33e212f0856e85b8594003402e0a635c0
|
[
"Beerware"
] | null | null | null |
apps/brew/management/commands/ingredients/style.py
|
martync/zython
|
e008bbb33e212f0856e85b8594003402e0a635c0
|
[
"Beerware"
] | 5
|
2020-06-05T21:26:16.000Z
|
2022-01-13T01:21:27.000Z
|
apps/brew/management/commands/ingredients/style.py
|
martync/zython
|
e008bbb33e212f0856e85b8594003402e0a635c0
|
[
"Beerware"
] | null | null | null |
from django.conf import settings
from brew.models import BeerStyle
from brew.helpers import xml_import
from brew.beer_xml import STYLE_FIELDS
def do_import():
xml_file = "%sapps/brew/fixtures/Style.xml" % settings.ROOT_PROJECT
model_class = BeerStyle
parent_loop = "STYLES"
item_loop = "STYLE"
xml_import(xml_file, model_class, parent_loop, item_loop, STYLE_FIELDS)
| 29.846154
| 75
| 0.770619
|
18538acaddea2930045c5afa0b134e385ffcd14d
| 1,779
|
py
|
Python
|
TraFlow/utils/loger.py
|
Herding/SeeST
|
6a0a9fea2e0abe91bc30785f769eefb1ccba07b3
|
[
"MIT"
] | null | null | null |
TraFlow/utils/loger.py
|
Herding/SeeST
|
6a0a9fea2e0abe91bc30785f769eefb1ccba07b3
|
[
"MIT"
] | null | null | null |
TraFlow/utils/loger.py
|
Herding/SeeST
|
6a0a9fea2e0abe91bc30785f769eefb1ccba07b3
|
[
"MIT"
] | null | null | null |
"""日志相关操作
"""
import logging
# https://www.cnblogs.com/qianyuliang/p/7234217.html
LEVER = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
class Loger():
"""日志管理
Attributes:
_loger: 生成可管理日志的对象
"""
def __init__(self,
model_name,
log_path,
lever='INFO',
fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datafmt="%Y/%m/%d %H:%M"
):
"""初始化,包括
日志存放位置,日志输出的格式,日志设置的等级
"""
file_name = log_path + model_name + '.txt'
fmter = logging.Formatter(fmt=fmt, datefmt=datafmt)
sh = logging.FileHandler(filename=file_name)
sh.setLevel(LEVER[lever])
sh.setFormatter(fmter)
console = logging.StreamHandler()
console.setLevel(LEVER[lever])
console.setFormatter(fmter)
self._loger = logging.getLogger(model_name)
self._loger.setLevel(LEVER[lever])
self._loger.addHandler(sh)
self._loger.addHandler(console)
@property
def loger(self):
return self._loger
def add_info(self, information, lever):
if lever == 'DEBUG':
self._loger.debug(information)
elif lever == 'INFO':
self._loger.info(information)
elif lever == 'WARNING':
self._loger.warning(information)
elif lever == 'ERROR':
self._loger.error(information)
elif lever == 'CRITICAL':
self._loger.critical(information)
else:
raise ValueError(f'{lever} is not defined in [DEBUG, INFO, WARNING, ERROR, CRITICAL]')
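

# Illustrative usage (not part of the original module): write to ./logs/demo.txt
# and to the console, then emit one INFO record. The path is a hypothetical
# example and the directory must already exist.
def _example_usage():
    loger = Loger(model_name='demo', log_path='./logs/', lever='INFO')
    loger.add_info('training started', 'INFO')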
| 26.954545
| 98
| 0.555368
|
ba3b2a49b109f1892d3b47eb767aaca6c47bb2cb
| 7,680
|
py
|
Python
|
nova/tests/unit/virt/libvirt/fake_imagebackend.py
|
cloud-zuiwanyuan/nova
|
0b59a2d9dc22e4fb172810019dba5ece09bb4526
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/libvirt/fake_imagebackend.py
|
cloud-zuiwanyuan/nova
|
0b59a2d9dc22e4fb172810019dba5ece09bb4526
|
[
"Apache-2.0"
] | 1
|
2016-04-04T18:41:59.000Z
|
2016-04-04T18:41:59.000Z
|
nova/tests/unit/virt/libvirt/fake_imagebackend.py
|
cloud-zuiwanyuan/nova
|
0b59a2d9dc22e4fb172810019dba5ece09bb4526
|
[
"Apache-2.0"
] | 2
|
2015-12-04T23:51:46.000Z
|
2016-06-07T20:01:59.000Z
|
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import fixtures
import functools
import mock
import os
import six
from nova.virt.libvirt import config
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
class ImageBackendFixture(fixtures.Fixture):
def __init__(self, got_files=None, imported_files=None, exists=None):
"""This fixture mocks imagebackend.Backend.backend, which is the
only entry point to libvirt.imagebackend from libvirt.driver.
:param got_files: A list of {'filename': path, 'size': size} for every
file which was created.
:param imported_files: A list of (local_filename, remote_filename) for
every invocation of import_file().
:param exists: An optional lambda which takes the disk name as an
argument, and returns True if the disk exists,
False otherwise.
"""
self.got_files = got_files
self.imported_files = imported_files
self.disks = collections.defaultdict(self._mock_disk)
"""A dict of name -> Mock image object. This is a defaultdict,
so tests may access it directly before a disk has been created."""
self._exists = exists
def setUp(self):
super(ImageBackendFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.Backend.backend',
self._mock_backend))
@property
def created_disks(self):
"""disks, filtered to contain only disks which were actually created
by calling a relevant method.
"""
# A disk was created iff either cache() or import_file() was called.
return {name: disk for name, disk in six.iteritems(self.disks)
if any([disk.cache.called, disk.import_file.called])}
def _mock_disk(self):
# This is the generator passed to the disks defaultdict. It returns
# a mocked Image object, but note that the returned object has not
# yet been 'constructed'. We don't know at this stage what arguments
# will be passed to the constructor, so we don't know, eg, its type
# or path.
#
# The reason for this 2 phase construction is to allow tests to
# manipulate mocks for disks before they have been created. eg a
# test can do the following before executing the method under test:
#
# disks['disk'].cache.side_effect = ImageNotFound...
#
# When the 'constructor' (image_init in _mock_backend) later runs,
# it will return the same object we created here, and when the
# caller calls cache() it will raise the requested exception.
disk = mock.create_autospec(imagebackend.Image)
        # NOTE(mdbooth): fake_cache and fake_import_file are for compatibility
# with existing tests which test got_files and imported_files. They
# should be removed when they have no remaining users.
disk.cache.side_effect = self._fake_cache
disk.import_file.side_effect = self._fake_import_file
# NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional
# output
disk.libvirt_info.side_effect = \
functools.partial(self._fake_libvirt_info, disk)
return disk
def _mock_backend(self, backend_self, image_type=None):
# This method mocks Backend.backend, which returns a subclass of Image
# (it returns a class, not an instance). This mocked method doesn't
# return a class; it returns a function which returns a Mock. IOW,
        # instead of getting a QCow2, the caller gets image_init,
# so instead of:
#
# QCow2(instance, disk_name='disk')
#
# the caller effectively does:
#
# image_init(instance, disk_name='disk')
#
# Therefore image_init() must have the same signature as an Image
# subclass constructor, and return a mocked Image object.
#
# The returned mocked Image object has the following additional
# properties which are useful for testing:
#
# * Calls with the same disk_name return the same object from
# self.disks. This means tests can assert on multiple calls for
# the same disk without worrying about whether they were also on
# the same object.
#
# * Mocked objects have an additional image_type attribute set to
# the image_type originally passed to Backend.backend() during
# their construction. Tests can use this to assert that disks were
# created of the expected type.
def image_init(instance=None, disk_name=None, path=None):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
path = os.path.join(
libvirt_utils.get_instance_path(instance), disk_name)
else:
disk_name = os.path.basename(path)
disk = self.disks[disk_name]
# Used directly by callers. These would have been set if called
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', False)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
# Used by tests to manipulate which disks exist.
if self._exists is not None:
# We don't just cache the return value here because the
# caller may want, eg, a test where the disk initially does not
# exist and later exists.
disk.exists.side_effect = lambda: self._exists(disk_name)
else:
disk.exists.return_value = True
return disk
return image_init
def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs):
# For legacy tests which use got_files
if self.got_files is not None:
self.got_files.append({'filename': filename, 'size': size})
def _fake_import_file(self, instance, local_filename, remote_filename):
# For legacy tests which use imported_files
if self.imported_files is not None:
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_bus, disk_dev, device_type,
cache_mode, extra_specs, hypervisor_version):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = device_type
info.target_bus = disk_bus
info.target_dev = disk_dev
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
return info
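

# Illustrative only (not part of the fixture): the usage pattern described in the
# comments above. `test_case` is assumed to be a fixtures-aware TestCase exposing
# useFixture(); the disk name 'disk' is just an example.
def _example_usage(test_case):
    backend = test_case.useFixture(ImageBackendFixture())
    # Manipulate the disk's mock before the code under test constructs it; the
    # same mock object will be handed back when image_init runs later.
    backend.disks['disk'].cache.side_effect = RuntimeError('hypothetical failure')
    return backend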
| 42.197802
| 79
| 0.649479
|
621e1201cc22d3d2426938ede9040621af195228
| 426
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/choroplethmapbox/_showscale.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/choroplethmapbox/_showscale.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/choroplethmapbox/_showscale.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showscale", parent_name="choroplethmapbox", **kwargs
):
super(ShowscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 30.428571
| 79
| 0.664319
|
c70131190d386bb470e376512237454dc57ad9b4
| 739
|
py
|
Python
|
events/api/urls.py
|
dynamicguy/treeio
|
4f674898cff2331711639a9b5f6812c874a2cb25
|
[
"MIT"
] | 2
|
2019-02-22T16:02:19.000Z
|
2019-02-23T19:27:34.000Z
|
events/api/urls.py
|
dewmal/treeio
|
6299fbe7826800d576f7ab68b4c1996b7194540f
|
[
"MIT"
] | null | null | null |
events/api/urls.py
|
dewmal/treeio
|
6299fbe7826800d576f7ab68b4c1996b7194540f
|
[
"MIT"
] | 1
|
2019-02-03T03:54:06.000Z
|
2019-02-03T03:54:06.000Z
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
import handlers
from django.conf.urls.defaults import *
from treeio.core.api.auth import auth_engine
from treeio.core.api.doc import documentation_view
from treeio.core.api.resource import CsrfExemptResource
ad = { 'authentication': auth_engine }
#events resources
eventResource = CsrfExemptResource(handler = handlers.EventHandler, **ad)
urlpatterns = patterns('',
#Events
url(r'^doc$', documentation_view, kwargs={'module': handlers}, name="api_events_doc"),
url(r'^events$', eventResource, name="api_events"),
url(r'^event/(?P<object_ptr>\d+)', eventResource, name="api_events"),
)
| 27.37037
| 90
| 0.737483
|
81f14fd292ee4ff20b53492f12715ee5b681c8b5
| 308
|
py
|
Python
|
xchainpy/xchainpy_thorchain/xchainpy_thorchain/cosmos/message.py
|
SLjavad/xchainpy-lib
|
e79b1fd341adaf9267964f3368500dd48de60917
|
[
"MIT"
] | 8
|
2021-02-16T23:14:14.000Z
|
2022-03-22T09:35:58.000Z
|
xchainpy/xchainpy_thorchain/xchainpy_thorchain/cosmos/message.py
|
SLjavad/xchainpy-lib
|
e79b1fd341adaf9267964f3368500dd48de60917
|
[
"MIT"
] | 12
|
2021-04-06T19:31:46.000Z
|
2022-03-22T14:34:13.000Z
|
xchainpy/xchainpy_thorchain/xchainpy_thorchain/cosmos/message.py
|
SLjavad/xchainpy-lib
|
e79b1fd341adaf9267964f3368500dd48de60917
|
[
"MIT"
] | 10
|
2021-03-04T05:45:31.000Z
|
2022-03-31T14:44:01.000Z
|
from typing import List
from .models.MsgCoin import MsgCoin
from .models.MsgNativeTx import MsgNativeTx
def msg_native_tx_from_json(coins: List[MsgCoin], memo: str, signer: str) -> MsgNativeTx:
    # return MsgNativeTx(coins, memo, frombech32(signer))
    return MsgNativeTx(coins, memo, signer)
| 34.222222
| 95
| 0.75
|
ed17df3bebf6688c1bbef92e9ecacebd3a38d332
| 6,412
|
py
|
Python
|
collection/collect2.py
|
aytimothy/3804ict-fandoms-connect
|
412e320030d62753ccffbaa82e71468e7b98d0ae
|
[
"MIT"
] | null | null | null |
collection/collect2.py
|
aytimothy/3804ict-fandoms-connect
|
412e320030d62753ccffbaa82e71468e7b98d0ae
|
[
"MIT"
] | null | null | null |
collection/collect2.py
|
aytimothy/3804ict-fandoms-connect
|
412e320030d62753ccffbaa82e71468e7b98d0ae
|
[
"MIT"
] | null | null | null |
# Import Configuration Variables (See config.py)
from config import *
# Import Libraries
import datetime
import mysql.connector
import praw
from praw.models import MoreComments
import pytz
import time
import traceback
import sys
'''
Basics of what it does:
1. Read existing data and work out who to start with.
2. Download all his comments and submissions, store on the server.
3. In all his submissions, create a list of users who have interacted (commented) on it.
4. Do Step 3, but for the submission each comment the user has made.
5. Go back to Step 1, but going through all the users in the list in Step 3.
'''
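

# Illustrative only: a condensed, hypothetical restatement of steps 2-4 above for
# a single user. The real loop at the bottom of this script wraps the same calls
# with logging, progress counters and error handling.
def _example_process_one_user(username):
    redditor = reddit.redditor(username)
    for comment in redditor.comments.new():
        store_comment(comment)
        store_submission(comment.submission)
    for submission in redditor.submissions.new():
        store_submission(submission)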
def output(string, end="\n"):
logfile = open("collect.log", "a")
print(str(string), end = end)
logfile.write(str(string) + end)
logfile.close()
reddit = praw.Reddit(client_id = reddit_appid, client_secret = reddit_secret, user_agent = "PRAW 6.3.1")
if reddit is None:
output("Could not connect to Reddit >:( \nAborting!")
sys.exit(0)
output("Successfully connected to Reddit.")
db_id = 0
db_conn = mysql.connector.connect(host = mysql_address, user = mysql_username[db_id], passwd = mysql_password, database = mysql_dbname[db_id])
if (db_conn is None):
output("Could not connect to the database >:( \nAborting!")
sys.exit(0)
db_cursor = db_conn.cursor()
output("Successfully connected to database.")
def storecompleteduser(username):
completedfile = open("completed.txt", "a")
completedfile.write(username + "\n")
completedfile.close()
def readcompletedusers():
completedfile = open("completed.txt", "r")
completed_users = [completeduser.rstrip() for completeduser in completedfile.readlines()]
return completed_users
def readuserqueue():
queuefile = open("queue.txt", "r")
queued_users = [queueduser.rstrip() for queueduser in queuefile.readlines()]
return queued_users
def store_comment(comment):
global processed_comments, db_conn, db_cursor
id = comment.id
parent = comment.parent_id[3:]
submission = comment.link_id[3:]
score = comment.score
author = comment.author
if author is None:
author = "[Deleted]"
else:
author = author.name
timestamp = datetime.datetime.fromtimestamp(comment.created_utc).isoformat()
body = comment.body
store_comment_query = "INSERT INTO `Comments` (`ID`, `Parent`, `Submission`, `Score`, `Author`, `Timestamp`, `Body`) VALUES ('" + id + "', '" + parent + "', '" + submission + "', '" + str(
score) + "', '" + author + "', '" + str(timestamp) + "', '" + body.replace("\"", "\\\"").replace("'",
"\\'") + "')"
try:
db_cursor.execute(store_comment_query)
except:
db_conn.rollback()
return False
db_conn.commit()
processed_comments += 1
return True
def store_submission(submission):
global db_conn, db_cursor
id = submission.id
subreddit = submission.subreddit.display_name
istext = submission.is_self
istext_num = 0
if istext is True:
istext_num = 1
title = submission.title
iscrosspost = False
iscrosspost_num = 0
if iscrosspost is True:
iscrosspost_num = 1
source = ""
link = submission.url
body = submission.selftext
score = submission.score
author = submission.author
if author is None:
author = "[Deleted]"
else:
author = author.name
    timestamp = datetime.datetime.fromtimestamp(submission.created_utc).isoformat()
    # Use a parameterized query so quotes in the title or body can't break the
    # statement; the driver handles escaping.
    store_comment_query = ("INSERT INTO `Submissions` "
                           "(`ID`, `Subreddit`, `IsText`, `Title`, `IsCrosspost`, `Source`, `Link`, `Body`, `Score`, `Author`, `Timestamp`, `Processed`) "
                           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0)")
    try:
        db_cursor.execute(store_comment_query,
                          (id, subreddit, istext_num, title, iscrosspost_num, source, link, body, score, author, timestamp))
except:
db_conn.rollback()
return False
db_conn.commit()
return True
def process_submission(submission):
global processed_submissions
store_submission(submission)
processed_submissions += 1
start_time = time.time()
processed_comments = 0
processed_submissions = 0
stored_comments = 0
stored_submissions = 0
submission_queue = []
# -----------------------------------
# Actual Script
# -----------------------------------
completed_users = readcompletedusers()
user_queue = readuserqueue()
userqueue_processed = 0
while len(user_queue) > 0 and userqueue_processed <= userqueue_maxprocessed:
try:
userqueue_processed += 1
username = user_queue.pop(0)
redditor = reddit.redditor(username)
if (redditor is None):
output("Error: Could not find /u/" + username)
continue
output("Processing /u/" + username + "...")
redditor_comments = redditor.comments.new()
redditor_submissions = redditor.submissions.new()
for comment in redditor_comments:
if store_comment(comment):
stored_comments += 1
processed_comments += 1
if store_submission(comment.submission):
stored_submissions += 1
processed_submissions += 1
output("Processed " + str(processed_comments) + " comment(s) and " + str(
processed_submissions) + " submission(s) after " + str(time.time() - start_time) + "...")
output("Stored " + str(stored_comments) + " comment(s) and " + str(
stored_submissions) + " submission(s) after " + str(time.time() - start_time) + "...")
for submission in redditor_submissions:
if store_submission(submission):
stored_submissions += 1
processed_submissions += 1
output("Processed " + str(processed_comments) + " comment(s) and " + str(
processed_submissions) + " submission(s) after " + str(time.time() - start_time) + "...")
output("Stored " + str(stored_comments) + " comment(s) and " + str(
stored_submissions) + " submission(s) after " + str(time.time() - start_time) + "...")
storecompleteduser(username)
except Exception as e:
output(e)
output(traceback.format_exc())
continue
| 36.022472
| 230
| 0.627417
|
76c6d8e74fd7165ce56b262fab9e33b085f80965
| 2,365
|
py
|
Python
|
solvebio/contrib/dash/tests/utils.py
|
PolinaBevad/solvebio-python
|
f6c736baa01b5a868a385cb0baf8f9dc2007cec3
|
[
"MIT"
] | 14
|
2015-01-07T15:31:00.000Z
|
2021-11-02T10:03:28.000Z
|
solvebio/contrib/dash/tests/utils.py
|
PolinaBevad/solvebio-python
|
f6c736baa01b5a868a385cb0baf8f9dc2007cec3
|
[
"MIT"
] | 200
|
2015-01-26T17:12:21.000Z
|
2022-01-14T08:59:30.000Z
|
solvebio/contrib/dash/tests/utils.py
|
PolinaBevad/solvebio-python
|
f6c736baa01b5a868a385cb0baf8f9dc2007cec3
|
[
"MIT"
] | 9
|
2015-02-18T22:49:28.000Z
|
2020-09-01T17:48:35.000Z
|
import time
TIMEOUT = 20 # Seconds
def clean_history(driver, domains):
temp = driver.get_location()
for domain in domains:
driver.open(domain)
driver.delete_all_visible_cookies()
driver.open(temp)
def invincible(func):
def wrap():
try:
return func()
except:
pass
return wrap
def switch_windows(driver):
new_window_handle = None
while not new_window_handle:
for handle in driver.window_handles:
if handle != driver.current_window_handle:
new_window_handle = handle
break
driver.switch_to.window(new_window_handle)
return new_window_handle
class WaitForTimeout(Exception):
"""This should only be raised inside the `wait_for` function."""
pass
def wait_for(condition_function, get_message=lambda: '', *args, **kwargs):
"""
Waits for condition_function to return True or raises WaitForTimeout.
:param (function) condition_function: Should return True on success.
:param args: Optional args to pass to condition_function.
:param kwargs: Optional kwargs to pass to condition_function.
if `timeout` is in kwargs, it will be used to override TIMEOUT
:raises: WaitForTimeout If condition_function doesn't return True in time.
Usage:
def get_element(selector):
# some code to get some element or return a `False`-y value.
selector = '.js-plotly-plot'
try:
wait_for(get_element, selector)
except WaitForTimeout:
self.fail('element never appeared...')
plot = get_element(selector) # we know it exists.
"""
def wrapped_condition_function():
"""We wrap this to alter the call base on the closure."""
if args and kwargs:
return condition_function(*args, **kwargs)
if args:
return condition_function(*args)
if kwargs:
return condition_function(**kwargs)
return condition_function()
if 'timeout' in kwargs:
timeout = kwargs['timeout']
del kwargs['timeout']
else:
timeout = TIMEOUT
start_time = time.time()
while time.time() < start_time + timeout:
if wrapped_condition_function():
return True
time.sleep(0.5)
raise WaitForTimeout(get_message())
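

# Illustrative only: wait_for() with the per-call `timeout` override described in
# its docstring. `driver` is assumed to be a Selenium 3 style WebDriver exposing
# find_elements_by_css_selector(); the selector value is just an example.
def _example_wait_for_element(driver, selector='.js-plotly-plot', timeout=5):
    def get_element():
        # Returns an empty (falsy) list until the element appears.
        return driver.find_elements_by_css_selector(selector)
    wait_for(get_element,
             lambda: 'element %r never appeared' % selector,
             timeout=timeout)
    return get_element()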
| 29.197531
| 78
| 0.640592
|
d99ba822561f5aeffc6903b0a7a381c266526d24
| 867
|
py
|
Python
|
setup.py
|
drizzi-novalabs/avatar2
|
503e94c61224e1c1619c97f544a205fd51e57afb
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
drizzi-novalabs/avatar2
|
503e94c61224e1c1619c97f544a205fd51e57afb
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
drizzi-novalabs/avatar2
|
503e94c61224e1c1619c97f544a205fd51e57afb
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
from sys import version_info
setup(
name='avatar2',
version='1.2.2',
packages=['avatar2',
'avatar2/archs',
'avatar2/targets',
'avatar2/protocols',
'avatar2/peripherals',
'avatar2/plugins',
'avatar2/plugins/arm',
'avatar2/installer'
],
install_requires=[
'pygdbmi>=0.7.3.1',
'intervaltree',
'posix_ipc>=1.0.0',
'capstone>=3.0.4',
'keystone-engine',
'parse',
'configparser',
'npyscreen',
'enum34',
'unicorn',
'pylink',
],
url='https://github.com/avatartwo/avatar2',
description='A Dynamic Multi-Target Orchestration Framework',
maintainer='Marius Muench',
maintainer_email='marius.muench@eurecom.fr'
)
| 24.771429
| 65
| 0.534025
|
37488a8e4993edd72d3c72e28e63b69986c5cf7c
| 39,398
|
py
|
Python
|
pandas/core/groupby/ops.py
|
Digres45/pandas
|
3aa2f3556a42371cf0fe0f102ab33226b0a5b7ab
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/groupby/ops.py
|
Digres45/pandas
|
3aa2f3556a42371cf0fe0f102ab33226b0a5b7ab
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/groupby/ops.py
|
Digres45/pandas
|
3aa2f3556a42371cf0fe0f102ab33226b0a5b7ab
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Generic,
Hashable,
Iterator,
Sequence,
final,
overload,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
F,
FrameOrSeries,
Shape,
npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_1d_only_ea_obj,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
Float64Dtype,
FloatingDtype,
)
from pandas.core.arrays.integer import (
Int64Dtype,
_IntegerDtype,
)
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
CategoricalIndex,
Index,
MultiIndex,
ensure_index,
)
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
"""
# Functions for which we do _not_ attempt to cast the cython result
# back to the original dtype.
cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
_MASKED_CYTHON_FUNCTIONS = {"cummin", "cummax"}
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Literal['object']")
elif dtype == object: # type: ignore[comparison-overlap]
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = getattr(libgroupby, f"group_{how}_float64")
return func, values
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "mean", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
@overload
def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
... # pragma: no cover
@overload
def _get_result_dtype(self, dtype: ExtensionDtype) -> ExtensionDtype:
... # pragma: no cover
def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : np.dtype or ExtensionDtype
Input dtype.
Returns
-------
np.dtype or ExtensionDtype
The desired dtype of the result.
"""
how = self.how
if how in ["add", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Int64Dtype()
elif how in ["mean", "median", "var"]:
if isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Float64Dtype()
elif is_float_dtype(dtype):
return dtype
elif is_numeric_dtype(dtype):
return np.dtype(np.float64)
return dtype
def uses_mask(self) -> bool:
return self.how in self._MASKED_CYTHON_FUNCTIONS
@final
def _ea_wrap_cython_operation(
self,
values: ExtensionArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate.
"""
# TODO: general case implementation overridable by EAs.
if isinstance(values, BaseMaskedArray) and self.uses_mask():
return self._masked_ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
npvalues = values._ndarray.view("M8[ns]")
elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):
# IntegerArray or BooleanArray
npvalues = values.to_numpy("float64", na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
# FloatingArray
npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
)
res_values = self._cython_op_ndim_compat(
npvalues,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
if self.how in ["rank"]:
# i.e. how in WrappedCythonOp.cast_blocklist, since
# other cast_blocklist methods dont go through cython_operation
return res_values
return self._reconstruct_ea_result(values, res_values)
def _reconstruct_ea_result(self, values, res_values):
"""
Construct an ExtensionArray result from an ndarray result.
"""
# TODO: allow EAs to override this logic
if isinstance(values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype)):
dtype = self._get_result_dtype(values.dtype)
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
elif needs_i8_conversion(values.dtype):
i8values = res_values.view("i8")
return type(values)(i8values, dtype=values.dtype)
raise NotImplementedError
@final
def _masked_ea_wrap_cython_operation(
self,
values: BaseMaskedArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> BaseMaskedArray:
"""
Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's
and cython algorithms which accept a mask.
"""
orig_values = values
# Copy to ensure input and result masks don't end up shared
mask = values._mask.copy()
arr = values._data
res_values = self._cython_op_ndim_compat(
arr,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
**kwargs,
)
dtype = self._get_result_dtype(orig_values.dtype)
assert isinstance(dtype, BaseMaskedDtype)
cls = dtype.construct_array_type()
return cls(res_values.astype(dtype.type, copy=False), mask)
@final
def _cython_op_ndim_compat(
self,
values: np.ndarray,
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: np.ndarray | None,
**kwargs,
) -> np.ndarray:
if values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
if mask is not None:
mask = mask[None, :]
res = self._call_cython_op(
values2d,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
return self._call_cython_op(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
**kwargs,
)
@final
def _call_cython_op(
self,
values: np.ndarray, # np.ndarray[ndim=2]
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: np.ndarray | None,
**kwargs,
) -> np.ndarray: # np.ndarray[ndim=2]
orig_values = values
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
is_datetimelike = needs_i8_conversion(dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
values = values.astype("int64")
elif is_integer_dtype(dtype):
# e.g. uint8 -> uint64, int16 -> int64
dtype_str = dtype.kind + "8"
values = values.astype(dtype_str, copy=False)
elif is_numeric:
if not is_complex_dtype(dtype):
values = ensure_float64(values)
values = values.T
if mask is not None:
mask = mask.T
out_shape = self._get_output_shape(ngroups, values)
func, values = self.get_cython_func_and_vals(values, is_numeric)
out_dtype = self.get_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if self.kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if self.how in ["min", "max"]:
func(
result,
counts,
values,
comp_ids,
min_count,
is_datetimelike=is_datetimelike,
)
else:
func(result, counts, values, comp_ids, min_count)
else:
# TODO: min_count
if self.uses_mask():
func(
result,
values,
comp_ids,
ngroups,
is_datetimelike,
mask=mask,
**kwargs,
)
else:
func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)
if self.kind == "aggregate":
# i.e. counts is defined. Locations where count<min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767
if is_integer_dtype(result.dtype) and not is_datetimelike:
cutoff = max(1, min_count)
empty_groups = counts < cutoff
if empty_groups.any():
# Note: this conversion could be lossy, see GH#40767
result = result.astype("float64")
result[empty_groups] = np.nan
result = result.T
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
res_dtype = self._get_result_dtype(orig_values.dtype)
op_result = maybe_downcast_to_dtype(result, res_dtype)
else:
op_result = result
# error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
# expected "ndarray")
return op_result # type: ignore[return-value]
@final
def cython_operation(
self,
*,
values: ArrayLike,
axis: int,
min_count: int = -1,
comp_ids: np.ndarray,
ngroups: int,
**kwargs,
) -> ArrayLike:
"""
Call our cython function, with appropriate pre- and post- processing.
"""
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
elif values.ndim == 2:
assert axis == 1, axis
elif not is_1d_only_ea_obj(values):
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 0
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
# can we do this operation with our cython functions
# if not raise NotImplementedError
self._disallow_invalid_ops(dtype, is_numeric)
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
return self._ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
return self._cython_op_ndim_compat(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : np.ndarray[np.intp], optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
axis: Index
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: npt.NDArray[np.intp] | None = None,
dropna: bool = True,
):
assert isinstance(axis, Index), axis
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self._sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: FrameOrSeries, axis: int = 0
) -> Iterator[tuple[Hashable, FrameOrSeries]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="groupby")
@final
def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned.
"""
ids, _, ngroups = self.group_info
return get_splitter(data, ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouping_vector
@final
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(ids, ngroups, self.levels, self.codes)
@final
def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
result_values = []
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
"""dict {group name -> group indices}"""
if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroups = self.group_info
if ngroups:
out = np.bincount(ids[ids != -1], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype="int64")
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
"""dict {group name -> group labels}"""
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_codes()
ngroups = len(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
@final
@cache_readonly
def codes_info(self) -> np.ndarray:
# return the codes of items in original grouped axis
ids, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((ids, self.indexer))
ids = ids[sorter]
return ids
@final
def _get_compressed_codes(self) -> tuple[np.ndarray, np.ndarray]:
if len(self.groupings) > 1:
group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self._sort)
ping = self.groupings[0]
return ping.codes, np.arange(len(ping.group_index))
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def reconstructed_codes(self) -> list[np.ndarray]:
codes = self.codes
ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
@cache_readonly
def result_arraylike(self) -> ArrayLike:
"""
Analogous to result_index, but returning an ndarray/ExtensionArray
allowing us to retain ExtensionDtypes not supported by Index.
"""
# TODO: once Index supports arbitrary EAs, this can be removed in favor
# of result_index
if len(self.groupings) == 1:
return self.groupings[0].group_arraylike
# result_index is MultiIndex
return self.result_index._values
@cache_readonly
def result_index(self) -> Index:
if len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def get_group_levels(self) -> list[ArrayLike]:
# Note: only called from _insert_inaxis_grouper_inplace, which
# is only called for BaseGrouper, never for BinGrouper
if len(self.groupings) == 1:
return [self.groupings[0].group_arraylike]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.group_arraylike.take(codes)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
@final
def _cython_operation(
self,
kind: str,
values,
how: str,
axis: int,
min_count: int = -1,
**kwargs,
) -> ArrayLike:
"""
Returns the values of a cython operation.
"""
assert kind in ["transform", "aggregate"]
cy_op = WrappedCythonOp(kind=kind, how=how)
ids, _, _ = self.group_info
ngroups = self.ngroups
return cy_op.cython_operation(
values=values,
axis=axis,
min_count=min_count,
comp_ids=ids,
ngroups=ngroups,
**kwargs,
)
@final
def agg_series(
self, obj: Series, func: F, preserve_dtype: bool = False
) -> ArrayLike:
"""
Parameters
----------
obj : Series
func : function taking a Series and returning a scalar-like
preserve_dtype : bool
Whether the aggregation is known to be dtype-preserving.
Returns
-------
np.ndarray or ExtensionArray
"""
# test_groupby_empty_with_category gets here with self.ngroups == 0
# and len(obj) > 0
if len(obj) == 0:
# SeriesGrouper would raise if we were to call _aggregate_series_fast
result = self._aggregate_series_pure_python(obj, func)
elif not isinstance(obj._values, np.ndarray):
# _aggregate_series_fast would raise TypeError when
# calling libreduction.Slider
# In the datetime64tz case it would incorrectly cast to tz-naive
# TODO: can we get a performant workaround for EAs backed by ndarray?
result = self._aggregate_series_pure_python(obj, func)
# we can preserve a little bit more aggressively with EA dtype
# because maybe_cast_pointwise_result will do a try/except
# with _from_sequence. NB we are assuming here that _from_sequence
# is sufficiently strict that it casts appropriately.
preserve_dtype = True
elif obj.index._has_complex_internals:
# Preempt TypeError in _aggregate_series_fast
result = self._aggregate_series_pure_python(obj, func)
elif isinstance(self, BinGrouper):
# Not yet able to remove the BaseGrouper aggregate_series_fast,
# as test_crosstab.test_categorical breaks without it
result = self._aggregate_series_pure_python(obj, func)
else:
result = self._aggregate_series_fast(obj, func)
npvalues = lib.maybe_convert_objects(result, try_float=False)
if preserve_dtype:
out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
else:
out = npvalues
return out
def _aggregate_series_fast(self, obj: Series, func: F) -> npt.NDArray[np.object_]:
# At this point we have already checked that
# - obj.index is not a MultiIndex
# - obj is backed by an ndarray, not ExtensionArray
# - len(obj) > 0
func = com.is_builtin_func(func)
ids, _, ngroups = self.group_info
# avoids object / Series creation overhead
indexer = get_group_index_sorter(ids, ngroups)
obj = obj.take(indexer)
ids = ids.take(indexer)
sgrouper = libreduction.SeriesGrouper(obj, func, ids, ngroups)
result, _ = sgrouper.get_result()
return result
@final
def _aggregate_series_pure_python(
self, obj: Series, func: F
) -> npt.NDArray[np.object_]:
ids, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = np.empty(ngroups, dtype="O")
initialized = False
# equiv: splitter = self._get_splitter(obj, axis=0)
splitter = get_splitter(obj, ids, ngroups, axis=0)
for i, group in enumerate(splitter):
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
res = func(group)
res = libreduction.extract_result(res)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(res, group.dtype)
initialized = True
counts[i] = group.shape[0]
result[i] = res
return result
class BinGrouper(BaseGrouper):
"""
This is an internal Grouper class
Parameters
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
mutated : bool, default False
indexer : np.ndarray[np.intp]
Examples
--------
bins: [2, 4, 6, 8, 10]
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
'2005-01-05', '2005-01-07', '2005-01-09'],
dtype='datetime64[ns]', freq='2D')
the group_info, which contains the label of each item in grouped
axis, the index of label in label list, group number, is
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
means that, the grouped axis has 10 items, can be grouped into 5
labels, the first and second items belong to the first label, the
third and forth items belong to the second label, and so on
"""
bins: np.ndarray # np.ndarray[np.int64]
binlabels: Index
mutated: bool
def __init__(
self,
bins,
binlabels,
mutated: bool = False,
indexer=None,
):
self.bins = ensure_int64(bins)
self.binlabels = ensure_index(binlabels)
self.mutated = mutated
self.indexer = indexer
# These lengths must match, otherwise we could call agg_series
# with empty self.bins, which would raise in libreduction.
assert len(self.binlabels) == len(self.bins)
@cache_readonly
def groups(self):
"""dict {group name -> group labels}"""
# this is mainly for compat
# GH 3881
result = {
key: value
for key, value in zip(self.binlabels, self.bins)
if key is not NaT
}
return result
@property
def nkeys(self) -> int:
# still matches len(self.groupings), but we can hard-code
return 1
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self
def get_iterator(self, data: FrameOrSeries, axis: int = 0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if axis == 0:
slicer = lambda start, edge: data.iloc[start:edge]
else:
slicer = lambda start, edge: data.iloc[:, start:edge]
length = len(data.axes[axis])
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups, dtype=np.int64)
rep = np.diff(np.r_[0, self.bins])
rep = ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return (
ensure_platform_int(comp_ids),
obs_group_ids,
ngroups,
)
@cache_readonly
def reconstructed_codes(self) -> list[np.ndarray]:
# get unique result indices, and prepend 0 as groupby starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self) -> list[Index]:
return [self.binlabels]
@property
def names(self) -> list[Hashable]:
return [self.binlabels.name]
@property
def groupings(self) -> list[grouper.Grouping]:
lev = self.binlabels
ping = grouper.Grouping(lev, lev, in_axis=False, level=None)
return [ping]
def _aggregate_series_fast(self, obj: Series, func: F) -> np.ndarray:
# -> np.ndarray[object]
raise NotImplementedError(
"This should not be reached; use _aggregate_series_pure_python"
)
def _is_indexed_like(obj, axes, axis: int) -> bool:
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.axes[axis].equals(axes[axis])
elif isinstance(obj, DataFrame):
return obj.axes[axis].equals(axes[axis])
return False
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(Generic[FrameOrSeries]):
def __init__(
self,
data: FrameOrSeries,
labels: npt.NDArray[np.intp],
ngroups: int,
axis: int = 0,
):
self.data = data
self.labels = ensure_platform_int(labels) # _should_ already be np.intp
self.ngroups = ngroups
self.axis = axis
assert isinstance(axis, int), axis
@cache_readonly
def slabels(self) -> npt.NDArray[np.intp]:
# Sorted labels
return self.labels.take(self._sort_idx)
@cache_readonly
def _sort_idx(self) -> npt.NDArray[np.intp]:
# Counting sort indexer
return get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self.sorted_data
if self.ngroups == 0:
# we are inside a generator, rather than raise StopIteration
# we merely return signal the end
return
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for start, end in zip(starts, ends):
yield self._chop(sdata, slice(start, end))
@cache_readonly
def sorted_data(self) -> FrameOrSeries:
return self.data.take(self._sort_idx, axis=self.axis)
def _chop(self, sdata, slice_obj: slice) -> NDFrame:
raise AbstractMethodError(self)
class SeriesSplitter(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
# fastpath equivalent to `sdata.iloc[slice_obj]`
mgr = sdata._mgr.get_slice(slice_obj)
# __finalize__ not called here, must be applied by caller if applicable
# fastpath equivalent to:
# `return sdata._constructor(mgr, name=sdata.name, fastpath=True)`
obj = type(sdata)._from_mgr(mgr)
object.__setattr__(obj, "_flags", sdata._flags)
object.__setattr__(obj, "_name", sdata._name)
return obj
class FrameSplitter(DataSplitter):
def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
# Fastpath equivalent to:
# if self.axis == 0:
# return sdata.iloc[slice_obj]
# else:
# return sdata.iloc[:, slice_obj]
mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
# __finalize__ not called here, must be applied by caller if applicable
# fastpath equivalent to `return sdata._constructor(mgr)`
obj = type(sdata)._from_mgr(mgr)
object.__setattr__(obj, "_flags", sdata._flags)
return obj
def get_splitter(
data: FrameOrSeries, labels: np.ndarray, ngroups: int, axis: int = 0
) -> DataSplitter:
if isinstance(data, Series):
klass: type[DataSplitter] = SeriesSplitter
else:
# i.e. DataFrame
klass = FrameSplitter
return klass(data, labels, ngroups, axis)
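
The classes above are internal plumbing, but their behaviour is observable through the public groupby API; a minimal hedged illustration (the .grouper attribute is present in the pandas versions this file comes from, and may differ in other releases):

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 5]})
gb = df.groupby("key")

# The "add"/group_add path of WrappedCythonOp backs the ordinary sum() aggregation.
print(gb["val"].sum())  # a -> 3, b -> 5

# The BaseGrouper documented above is reachable as .grouper in these versions.
print(type(gb.grouper).__name__, gb.grouper.ngroups)
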
avg_line_length: 31.2187 | max_line_length: 88 | alphanum_fraction: 0.588532

hexsha: fbceb01f7ef2b0c2b67ffdf02f48066e8e078922 | size: 7,055 | ext: py | lang: Python
path: sdg/schemas/SchemaInputBase.py | repo: Defra-Data-Science-Centre-of-Excellence/sdg-build | head_hexsha: 716a7597e5cb7de248915b21fa1a9e4a5cd1e0e0 | licenses: ["MIT"] (identical for the max_stars/max_issues/max_forks column groups)
max_stars_count: 7 (2019-02-25T03:50:17.000Z to 2022-03-31T12:58:42.000Z) | max_issues_count: 185 (2019-01-14T15:35:53.000Z to 2022-03-28T19:16:37.000Z) | max_forks_count: 18 (2019-01-14T11:20:05.000Z to 2022-02-08T14:10:17.000Z)
# -*- coding: utf-8 -*-
import os
import json
import jsonschema
from sdg import check_csv
from sdg.Loggable import Loggable
class SchemaInputBase(Loggable):
"""A base class for importing a schema, querying it, and validating with it.
This class assumes imported schema (self.schema) are valid JSON Schema."""
def __init__(self, schema_path='', logging=None, scope=None,
request_params=None, meta_suffix=None):
"""Create a new SchemaBase object
Parameters
----------
schema_path : string
A path to the schema file to input
scope : string
An optional 'scope' to apply to all metadata fields
meta_suffix : string
A suffix to add to each metadata key. Useful when using the same
schema for both global and national metadata, for example.
request_params : dict or None
Optional dict of parameters to be passed to remote file fetches.
Corresponds to the options passed to a urllib.request.Request.
@see https://docs.python.org/3/library/urllib.request.html#urllib.request.Request
"""
Loggable.__init__(self, logging=logging)
self.schema_path = schema_path
self.scope = scope
self.meta_suffix = meta_suffix
self.request_params = request_params
self.field_order = []
self.schema = None
self.load_schema()
self.load_validator()
def load_schema(self):
"""Load the schema. This should be overridden by a subclass."""
raise NotImplementedError
def load_validator(self):
"""Load the validator for this schema."""
try:
validator_class = jsonschema.validators.validator_for(self.schema)
validator_class.check_schema(self.schema)
self.validator = validator_class(self.schema)
except Exception as e:
print(e)
def validate(self, indicator):
"""Validate the data and/or metadata for an Indicator object.
Parameters
----------
indicator : Indicator
The instance of Indicator to validate
Returns
-------
boolean
True if validation passes, False otherwise
"""
status = True
if indicator.has_meta():
try:
self.validator.validate(indicator.meta)
except:
status = False
print('Validation errors for indicator ' + indicator.inid)
for error in self.validator.iter_errors(indicator.meta):
ignore = ['properties', 'type']
things = []
for thing in error.schema_path:
if thing not in ignore:
things.append(str(thing))
things = '/'.join(things)
error_title = error.schema['title'] if 'title' in error.schema else '...'
print('- ' + error_title + ' (' + things + '): ' + error.message)
if indicator.has_data():
df = indicator.data
inid = indicator.inid
# Apply these checks on the dataframe. These are common issues that
# can happen with CSVs, but are important regardless of the source.
status = status & check_csv.check_headers(df, inid)
status = status & check_csv.check_data_types(df, inid)
status = status & check_csv.check_leading_whitespace(df, inid)
status = status & check_csv.check_trailing_whitespace(df, inid)
status = status & check_csv.check_empty_rows(df, inid)
return status
def get(self, field, default=None, must_exist=False):
"""Fetch a field from the schema by key.
Parameters
----------
field : string
The name of a field to get
default : string or None
A default value if the field is not present
must_exist : boolean
If True, an exception will be raised if the field is not present
Return
------
mixed or None
The value of the field if present, otherwise None
"""
f = self.schema['properties'].get(field, default)
if must_exist and f is None:
raise ValueError(field + " doesn't exist in schema")
return f
def get_values(self, field):
"""Get the allowed values for a select or multiselect field.
Parameters
----------
field : string
The name of a field to get allowed values for
Returns
-------
list
List of allowed values
"""
options = self.get_allowed_options(field)
return [x['enum'][0] for x in options]
def get_allowed_options(self, field):
"""Return a list of allowed options for a field from the schema.
Parameters
----------
field : string
The name of a field to get allowed options for
Returns
-------
list
List of allowed options (dicts)
"""
field = self.get(field)
# In JSON Schema the options are in "anyOf", which can be in 2 places.
if 'anyOf' in field:
return field['anyOf']
elif 'items' in field and 'anyOf' in field['items']:
return field['items']['anyOf']
return []
def get_value_translation(self, field):
"""Get a map of values to 'translation_key' for schema field options.
Parameters
----------
field : string
The name of a field to get a value translation map for
Returns
-------
Dict
Dict of allowed values to translation keys for a particular field
"""
options = self.get_allowed_options(field)
if len(options) == 0:
raise ValueError(field + " field does not have options element")
return {x['enum'][0]: x['translation_key'] for x in options}
def add_item_to_field_order(self, field):
"""Add a field to the list, in case an output needs a field order.
Parameters
----------
field : string
The name of a field to add to the list
"""
self.field_order.append(field)
def get_field_order(self):
"""Get the list of fields in the preserved order.
Returns
-------
list
A list of field names in a particular order
"""
return self.field_order if len(self.field_order) > 0 else self.schema['properties'].keys()
def alter_key(self, key):
"""Make any changes to a key before adding it to the schema.
Parameters
----------
key : string
The key to alter
Returns
-------
string
The altered key
"""
if self.meta_suffix is not None:
key = key + self.meta_suffix
return key
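
load_schema() is intentionally abstract above; a minimal hypothetical subclass (not part of sdg-build) shows the contract it is expected to fulfil, namely populating self.schema with a JSON Schema dict before load_validator() runs. json is already imported at the top of this module.

class SchemaInputJsonFile(SchemaInputBase):
    """Hypothetical example subclass: read a JSON Schema straight from a local file."""

    def load_schema(self):
        with open(self.schema_path) as schema_file:
            self.schema = json.load(schema_file)

# Example usage (assumes 'metadata_schema.json' exists and holds a valid JSON Schema):
# schema = SchemaInputJsonFile(schema_path='metadata_schema.json')
# print(schema.get_field_order())
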
avg_line_length: 30.673913 | max_line_length: 98 | alphanum_fraction: 0.568816

hexsha: 8b39ed9e4cfe4d86b1e16224e4e55b1820c54173 | size: 888 | ext: py | lang: Python
path: bookchook/forms.py | repo: tombasche/BookChook | head_hexsha: 2b1b28c0156754da75e100e2f4ba9ada7b8be31d | licenses: ["MIT"] (identical for the max_stars/max_issues/max_forks column groups)
max_stars_count: 7 (2017-12-12T08:33:05.000Z to 2019-07-29T14:14:53.000Z) | max_issues_count: 20 (2016-10-16T03:43:15.000Z to 2018-10-29T00:14:23.000Z) | max_forks_count: 1 (2018-10-23T04:06:59.000Z to 2018-10-23T04:06:59.000Z)
from django import forms
from django.contrib.auth.models import User
from .models import Book, Series
from taggit.forms import *
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = ('name', 'author', 'number', 'series', 'location', 'comment')
widgets = {
'series' : forms.Select(attrs={"onChange":'displaySeriesNumber()', 'onLoad': 'displaySeriesNumber()', 'id': 'series-dropdown'}),
'number' : forms.NumberInput(attrs={"class": "series-number"})
}
class BookTagsForm(forms.Form):
tags = forms.CharField()
class SeriesForm(forms.ModelForm):
class Meta:
model = Series
fields = ('name',)
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email', 'password')
widgets = {
'password': forms.PasswordInput()
}
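
A hedged sketch of how BookForm would typically be wired into a Django view; the template name and the 'book-list' URL name are assumptions, not part of BookChook:

from django.shortcuts import redirect, render

def add_book(request):
    # Bind POST data when present, otherwise render an empty form.
    form = BookForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('book-list')  # assumed URL name
    return render(request, 'add_book.html', {'form': form})
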
avg_line_length: 28.645161 | max_line_length: 140 | alphanum_fraction: 0.61036

hexsha: 1f1337f7df33adfddbde2a50c6ac19e05b1e95a6 | size: 16,027 | ext: py | lang: Python
path: rb/cluster.py | repo: commonlims/rb | head_hexsha: e9ab7769760d05f1c9bbd2c983f94a22f52aa225 | licenses: ["Apache-2.0"] (identical for the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
from redis.client import Script
from redis.connection import ConnectionPool, UnixDomainSocketConnection
try:
from redis.connection import SSLConnection
except ImportError:
SSLConnection = None
import functools
from hashlib import sha1
from threading import Lock
from rb.router import PartitionRouter
from rb.clients import RoutingClient, LocalClient
import sys
if sys.version_info > (3,):
long = int
class HostInfo(object):
def __init__(self, host_id, host, port, unix_socket_path=None, db=0,
password=None, ssl=False, ssl_options=None):
self.host_id = host_id
self.host = host
self.unix_socket_path = unix_socket_path
self.port = port
self.db = db
self.password = password
self.ssl = ssl
self.ssl_options = ssl_options
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self.host_id == other.host_id
def __ne__(self, other):
rv = self.__eq__(other)
if rv is NotImplemented:
return NotImplemented
return not rv
def __hash__(self):
return self.host_id
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
' '.join('%s=%r' % x for x in sorted(self.__dict__.items())),
)
def _iter_hosts(iterable):
if isinstance(iterable, dict):
iterable = iterable.items() if sys.version_info > (3,) else iterable.iteritems()
for item in iterable:
if isinstance(item, tuple):
host_id, cfg = item
cfg = dict(cfg)
cfg['host_id'] = host_id
else:
cfg = item
yield cfg
class Cluster(object):
"""The cluster is the core object behind rb. It holds the connection
pools to the individual nodes and can be shared for the duration of
the application in a central location.
Basic example of a cluster over four redis instances with the default
router::
cluster = Cluster(hosts={
0: {'port': 6379},
1: {'port': 6380},
2: {'port': 6381},
3: {'port': 6382},
}, host_defaults={
'host': '127.0.0.1',
})
`hosts` is a dictionary of hosts which maps the number host IDs to
configuration parameters. The parameters correspond to the signature
of the :meth:`add_host` function. The defaults for these parameters
are pulled from `host_defaults`. To override the pool class the
`pool_cls` and `pool_options` parameters can be used. The same
applies to `router_cls` and `router_options` for the router. The pool
options are useful for setting socket timeouts and similar parameters.
"""
def __init__(self, hosts, host_defaults=None, pool_cls=None,
pool_options=None, router_cls=None, router_options=None):
if pool_cls is None:
pool_cls = ConnectionPool
if router_cls is None:
router_cls = PartitionRouter
self._lock = Lock()
self.pool_cls = pool_cls
self.pool_options = pool_options
self.router_cls = router_cls
self.router_options = router_options
self._pools = {}
self._router = None
self.hosts = {}
self._hosts_age = 0
self.host_defaults = host_defaults or {}
for host_config in _iter_hosts(hosts):
if self.host_defaults:
                for k, v in self.host_defaults.items():  # `six` is never imported here; .items() works on both Python 2 and 3
host_config.setdefault(k, v)
self.add_host(**host_config)
def add_host(self, host_id=None, host='localhost', port=6379,
unix_socket_path=None, db=0, password=None,
ssl=False, ssl_options=None):
"""Adds a new host to the cluster. This is only really useful for
unittests as normally hosts are added through the constructor and
changes after the cluster has been used for the first time are
unlikely to make sense.
"""
if host_id is None:
raise RuntimeError('Host ID is required')
elif not isinstance(host_id, (int, long)):
raise ValueError('The host ID has to be an integer')
host_id = int(host_id)
with self._lock:
if host_id in self.hosts:
raise TypeError('Two hosts share the same host id (%r)' %
(host_id,))
self.hosts[host_id] = HostInfo(host_id=host_id, host=host,
port=port, db=db,
unix_socket_path=unix_socket_path,
password=password, ssl=ssl,
ssl_options=ssl_options)
self._hosts_age += 1
def remove_host(self, host_id):
"""Removes a host from the client. This only really useful for
unittests.
"""
with self._lock:
            rv = self.hosts.pop(host_id, None) is not None  # the attribute is self.hosts (see __init__)
pool = self._pools.pop(host_id, None)
if pool is not None:
pool.disconnect()
self._hosts_age += 1
return rv
def disconnect_pools(self):
"""Disconnects all connections from the internal pools."""
with self._lock:
for pool in self._pools.values():
pool.disconnect()
self._pools.clear()
def get_router(self):
"""Returns the router for the cluster. If the cluster reconfigures
the router will be recreated. Usually you do not need to interface
with the router yourself as the cluster's routing client does that
automatically.
This returns an instance of :class:`BaseRouter`.
"""
cached_router = self._router
ref_age = self._hosts_age
if cached_router is not None:
router, router_age = cached_router
if router_age == ref_age:
return router
with self._lock:
router = self.router_cls(self, **(self.router_options or {}))
self._router = (router, ref_age)
return router
def get_pool_for_host(self, host_id):
"""Returns the connection pool for the given host.
This connection pool is used by the redis clients to make sure
that it does not have to reconnect constantly. If you want to use
a custom redis client you can pass this in as connection pool
manually.
"""
if isinstance(host_id, HostInfo):
host_info = host_id
host_id = host_info.host_id
else:
host_info = self.hosts.get(host_id)
if host_info is None:
raise LookupError('Host %r does not exist' % (host_id,))
rv = self._pools.get(host_id)
if rv is not None:
return rv
with self._lock:
rv = self._pools.get(host_id)
if rv is None:
opts = dict(self.pool_options or ())
opts['db'] = host_info.db
opts['password'] = host_info.password
if host_info.unix_socket_path is not None:
opts['path'] = host_info.unix_socket_path
opts['connection_class'] = UnixDomainSocketConnection
if host_info.ssl:
raise TypeError('SSL is not supported for unix '
'domain sockets.')
else:
opts['host'] = host_info.host
opts['port'] = host_info.port
if host_info.ssl:
if SSLConnection is None:
raise TypeError('This version of py-redis does '
'not support SSL connections.')
opts['connection_class'] = SSLConnection
opts.update(('ssl_' + k, v) for k, v in
(host_info.ssl_options or {}).items())
rv = self.pool_cls(**opts)
self._pools[host_id] = rv
return rv
def get_local_client(self, host_id):
"""Returns a localized client for a specific host ID. This client
works like a regular Python redis client and returns results
immediately.
"""
return LocalClient(
connection_pool=self.get_pool_for_host(host_id))
def get_local_client_for_key(self, key):
"""Similar to :meth:`get_local_client_for_key` but returns the
client based on what the router says the key destination is.
"""
return self.get_local_client(self.get_router().get_host_for_key(key))
def get_routing_client(self, auto_batch=True):
"""Returns a routing client. This client is able to automatically
route the requests to the individual hosts. It's thread safe and
can be used similar to the host local client but it will refused
to execute commands that cannot be directly routed to an
individual node.
The default behavior for the routing client is to attempt to batch
eligible commands into batch versions thereof. For instance multiple
`GET` commands routed to the same node can end up merged into an
`MGET` command. This behavior can be disabled by setting `auto_batch`
to `False`. This can be useful for debugging because `MONITOR` will
more accurately reflect the commands issued in code.
See :class:`RoutingClient` for more information.
"""
return RoutingClient(self, auto_batch=auto_batch)
def map(self, timeout=None, max_concurrency=64, auto_batch=True):
"""Shortcut context manager for getting a routing client, beginning
a map operation and joining over the result. `max_concurrency`
defines how many outstanding parallel queries can exist before an
implicit join takes place.
In the context manager the client available is a
:class:`MappingClient`. Example usage::
results = {}
with cluster.map() as client:
for key in keys_to_fetch:
results[key] = client.get(key)
for key, promise in results.iteritems():
print '%s => %s' % (key, promise.value)
"""
return self.get_routing_client(auto_batch).map(
timeout=timeout, max_concurrency=max_concurrency)
def fanout(self, hosts=None, timeout=None, max_concurrency=64,
auto_batch=True):
"""Shortcut context manager for getting a routing client, beginning
a fanout operation and joining over the result.
In the context manager the client available is a
:class:`FanoutClient`. Example usage::
with cluster.fanout(hosts='all') as client:
client.flushdb()
"""
return self.get_routing_client(auto_batch).fanout(
hosts=hosts, timeout=timeout, max_concurrency=max_concurrency)
def all(self, timeout=None, max_concurrency=64, auto_batch=True):
"""Fanout to all hosts. Works otherwise exactly like :meth:`fanout`.
Example::
with cluster.all() as client:
client.flushdb()
"""
return self.fanout('all', timeout=timeout,
max_concurrency=max_concurrency,
auto_batch=auto_batch)
def execute_commands(self, mapping, *args, **kwargs):
"""Concurrently executes a sequence of commands on a Redis cluster that
are associated with a routing key, returning a new mapping where
values are a list of results that correspond to the command in the same
position. For example::
>>> cluster.execute_commands({
... 'foo': [
... ('PING',),
... ('TIME',),
... ],
... 'bar': [
... ('CLIENT', 'GETNAME'),
... ],
... })
{'bar': [<Promise None>],
'foo': [<Promise True>, <Promise (1454446079, 418404)>]}
Commands that are instances of :class:`redis.client.Script` will first
be checked for their existence on the target nodes then loaded on the
targets before executing and can be interleaved with other commands::
>>> from redis.client import Script
>>> TestScript = Script(None, 'return {KEYS, ARGV}')
>>> cluster.execute_commands({
... 'foo': [
... (TestScript, ('key:1', 'key:2'), range(0, 3)),
... ],
... 'bar': [
... (TestScript, ('key:3', 'key:4'), range(3, 6)),
... ],
... })
{'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],
'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}
Internally, :class:`FanoutClient` is used for issuing commands.
"""
def is_script_command(command):
return isinstance(command[0], Script)
def check_script_load_result(script, result):
if script.sha != result:
raise AssertionError(
'Hash mismatch loading {!r}: expected {!r}, got {!r}'.format(
script,
script.sha,
result,
)
)
# Run through all the commands and check to see if there are any
# scripts, and whether or not they have been loaded onto the target
# hosts.
exists = {}
with self.fanout(*args, **kwargs) as client:
for key, commands in list(mapping.items()):
targeted = client.target_key(key)
for command in filter(is_script_command, commands):
script = command[0]
# Set the script hash if it hasn't already been set.
if not script.sha:
script.sha = sha1(script.script).hexdigest()
# Check if the script has been loaded on each host that it
# will be executed on.
for host in targeted._target_hosts:
if script not in exists.setdefault(host, {}):
exists[host][script] = targeted.execute_command('SCRIPT EXISTS', script.sha)
# Execute the pending commands, loading scripts onto servers where they
# do not already exist.
results = {}
with self.fanout(*args, **kwargs) as client:
for key, commands in list(mapping.items()):
results[key] = []
targeted = client.target_key(key)
for command in commands:
# If this command is a script, we need to check and see if
# it needs to be loaded before execution.
if is_script_command(command):
script = command[0]
for host in targeted._target_hosts:
if script in exists[host]:
result = exists[host].pop(script)
if not result.value[0]:
targeted.execute_command('SCRIPT LOAD', script.script).done(
on_success=functools.partial(check_script_load_result, script)
)
keys, arguments = command[1:]
parameters = list(keys) + list(arguments)
results[key].append(targeted.execute_command('EVALSHA', script.sha, len(keys), *parameters))
else:
results[key].append(targeted.execute_command(*command))
return results
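
Tying the pieces together, a hedged end-to-end sketch that mirrors the construction and map() examples from the docstrings above; it assumes two local Redis servers on the ports shown:

cluster = Cluster(hosts={
    0: {'port': 6379},
    1: {'port': 6380},
}, host_defaults={'host': '127.0.0.1'})

results = {}
with cluster.map() as client:
    for key in ('alpha', 'beta', 'gamma'):
        results[key] = client.get(key)  # each promise resolves when the map block joins

for key, promise in results.items():
    print(key, promise.value)
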
avg_line_length: 40.370277 | max_line_length: 116 | alphanum_fraction: 0.564173

hexsha: ac87290eec352e4f3b5b73f3da2d2d0d66946331 | size: 6,875 | ext: py | lang: Python
path: tests/test_http.py | repo: polyfemos/atavism | head_hexsha: d5c7cbb2d861ac800bbf19f7a1504cf966f0deef | licenses: ["Unlicense"] (identical for the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2022-02-01T10:26:45.000Z to 2022-02-01T10:26:45.000Z)
import os
import unittest
from atavism.http11.client import HttpClient
from atavism.http11.content import Content, FileContent
from atavism.http11.headers import Headers
from atavism.http11.objects import HttpRequest
class TestHeaders(unittest.TestCase):
def test_001_creation(self):
hb = Headers()
self.assertEqual(len(hb), 0)
self.assertEqual(len(hb.headers), 0)
self.assertFalse(hb.finished)
def test_002_input(self):
input = [
[
(b'GET / HTTP/1.1\r\n', 16, False),
(b'Host: 192.168.1', 15, False),
(b'.1\r\n\r\n', 6, True),
],
[
(b'GET / HTTP/1.1\r\n', 16, False),
(b'Host: 192.168.1', 15, False),
(b'.1\r\n', 4, False),
(b'\r\nHello World', 2, True)
]
]
for inp_set in input:
hb = Headers()
for inp in inp_set:
self.assertEqual(hb.read_content(inp[0]), inp[1],
"read_content('{}') should have returned {}".format(inp[0], inp[1]))
self.assertEqual(hb.finished, inp[2])
self.assertEqual(len(hb), 33)
self.assertEqual(hb.status_line, 'GET / HTTP/1.1')
self.assertEqual(len(hb.headers), 1)
def test_003_output(self):
hb = Headers()
hb.add_header('Host', '127.0.0.1')
self.assertEqual(len(hb.headers), 1)
self.assertIn("Host: 127.0.0.1\r\n\r\n", str(hb))
hb.add_headers({'User-Agent': 'Test/0.1',
'Accept-Encoding': 'identity'})
self.assertEqual(len(hb.headers), 4)
hdr_str = str(hb)
self.assertEqual(len(hdr_str.split("\r\n")), 6)
self.assertIn("Accept-Encoding: identity\r\n", hdr_str)
class TestContent(unittest.TestCase):
def test_001_creation(self):
ct = Content()
self.assertFalse(ct.finished)
self.assertIsNone(ct.content_type)
self.assertIsNone(ct.content_length)
ct2 = Content(content_length=12, content_type='text/plain')
self.assertFalse(ct2.finished)
self.assertEqual(ct2.content_length, 12)
def test_002(self):
cases = [
([b'Hello World!'], 12, 'text/plain', 'Hello World!'),
([b'{"origin"', b': "127.0.0.1"}'], 23, 'application/json', {'origin': '127.0.0.1'}),
]
for c in cases:
ct = Content(data=c[0][0], content_length=c[1], content_type=c[2])
self.assertEqual(ct.finished, True if len(c[0]) == 1 else False)
self.assertEqual(len(ct), len(c[0][0]))
for n in range(1, len(c[0])):
ct.read_content(c[0][n])
self.assertTrue(ct.finished)
self.assertEqual(ct.decoded_content(), c[3])
def test_003(self):
ct = Content(data=b'012345678901234567890')
self.assertEqual(len(ct), 21)
self.assertEqual(ct[0:2], b'01')
class TestFileContent(unittest.TestCase):
def test_001(self):
fc = FileContent('tests/test_http.py')
self.assertEqual(len(fc), os.path.getsize('tests/test_http.py'))
self.assertEqual(fc[0:10], b'import os\n')
def test_002(self):
fn = 'tests/hello_world.txt'
if os.path.exists(fn):
os.unlink(fn)
fc = FileContent(fn)
self.assertEqual(len(fc), 0)
fc.add_content(b'Hello World!')
self.assertEqual(len(fc), 12)
fc.write()
self.assertTrue(os.path.exists(fn))
self.assertEqual(os.path.getsize(fn), 12)
class HttpbinTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.http = HttpClient('httpbin.org')
def test_001_ip(self):
self.assertTrue(self.http.verify())
self.assertIsNotNone(self.http.simple_request('/ip'))
def test_002_headers(self):
self.http.user_agent = 'Test/0.1'
hdrs = self.http.simple_request('/headers')
self.assertIsInstance(hdrs, dict)
self.assertIn('headers', hdrs)
self.assertIn('User-Agent', hdrs['headers'])
self.assertEqual(hdrs['headers']['User-Agent'], 'Test/0.1')
def test_003_post(self):
resp = self.http.post_data('/post', {'a': 1, 'b': 2})
self.assertEqual(resp.code, 200)
self.assertEqual(resp.get('content-type'), 'application/json')
json_data = resp.decoded_content()
self.assertIn('data', json_data)
self.assertEqual(json_data['form'], {'a': '1', 'b': '2'})
self.assertEqual(json_data['headers']['Content-Length'], '7')
self.assertIn('files', json_data)
def test_004_stream(self):
lines = self.http.request('/stream/10')
self.assertEqual(lines.code, 200)
self.assertEqual(len(lines.decoded_content().split(b'\n')), 11)
def test_005_gzip(self):
gzip = self.http.request('/gzip')
data = gzip.decoded_content()
self.assertEqual(type(data), dict)
self.assertIn('gzipped', data)
self.assertTrue(data["gzipped"])
self.assertNotEqual(gzip._content.content_length, len(gzip._content))
def test_006_deflate(self):
obj = self.http.request('/deflate')
data = obj.decoded_content()
self.assertEqual(type(data), dict)
self.assertIn('deflated', data)
self.assertTrue(data["deflated"])
self.assertNotEqual(obj._content.content_length, len(obj._content))
def test_007_drip(self):
resp = self.http.request('/drip', {'numbytes': 1500,'duration': 5, 'code': 200})
self.assertEqual(resp.code, 200)
self.assertEqual(len(resp), 1500)
class RangeRequestTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.http = HttpClient('www.lysator.liu.se')
def test_001(self):
# http://www.lysator.liu.se/pinball/expo/
req = HttpRequest(path='/pinball/expo/')
req.add_range(0, 61)
resp = self.http.send_request(req)
self.assertEqual(resp.code, 206)
self.assertEqual(len(resp), 62)
self.assertEqual(resp.decoded_content(), '''<html>
<head>
<base="http://www.lysator.liu.se/pinball/expo/">''')
def test_002(self):
# http://www.lysator.liu.se/pinball/expo/
req = HttpRequest(method='GET', path='/pinball/expo/')
req.add_range(0, 61)
req.add_range(end=-10)
resp = self.http.send_request(req)
self.assertEqual(resp.code, 206)
parts = resp.decoded_content()
self.assertEqual(len(parts), 2)
self.assertIn('Content-Type', parts[0])
self.assertIn('Content-Range', parts[0])
self.assertEqual(parts[0]['content'], '''<html>
<head>
<base="http://www.lysator.liu.se/pinball/expo/">''')
self.assertEqual(parts[1]['content'], '''>
</html>''')
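
Most of these tests depend on live network endpoints (httpbin.org and www.lysator.liu.se); a hedged sketch for running just the offline-friendly cases, using only the unittest module already imported above:

if __name__ == '__main__':
    suite = unittest.TestSuite()
    for case in (TestHeaders, TestContent, TestFileContent):  # skip the network-bound classes
        suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))
    unittest.TextTestRunner(verbosity=2).run(suite)
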
avg_line_length: 35.807292 | max_line_length: 101 | alphanum_fraction: 0.588509