hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace82d976d0633a0bdf49e281c8cfe60f52b1117 | 42,299 | py | Python | distributed/utils_test.py | aktech/distributed | 09d9799e401da695981600fe98ab4d4de4ec419e | [
"BSD-3-Clause"
] | null | null | null | distributed/utils_test.py | aktech/distributed | 09d9799e401da695981600fe98ab4d4de4ec419e | [
"BSD-3-Clause"
] | null | null | null | distributed/utils_test.py | aktech/distributed | 09d9799e401da695981600fe98ab4d4de4ec419e | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import collections
import gc
from contextlib import contextmanager, suppress
import copy
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from tlz import merge, memoize, assoc
from tornado import gen
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError, Status
from .deploy import SpecCluster
from .metrics import time
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
TimeoutError,
)
from .worker import Worker
from .nanny import Nanny
from .diagnostics.plugin import WorkerPlugin
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
    """Session fixture: path to a small, syntactically valid Python script."""
    script = tmpdir_factory.mktemp("data").join("file.py")
    script.write("print('hello world!')")
    return script
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
    """Session fixture: script that creates a distributed Client and prints it."""
    source = "\n".join(
        [
            "from distributed import Client",
            "e = Client('127.0.0.1:8989')",
            "print(e)",
        ]
    )
    script = tmpdir_factory.mktemp("data").join("distributed_script.py")
    script.write(source)
    return script
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
    """Session fixture: script that fails when executed ('a' is undefined)."""
    script = tmpdir_factory.mktemp("data").join("file.py")
    script.write("a+1")
    return script
async def cleanup_global_workers():
    """Close every live Worker instance without reporting to its scheduler."""
    for instance in Worker._instances:
        await instance.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
    # Fixture: a fresh Tornado IOLoop per test.  start() is monkey-patched so
    # teardown can wait until the loop has actually stopped before
    # pristine_loop() closes it.
    with check_instances():
        with pristine_loop() as loop:
            # Monkey-patch IOLoop.start to wait for loop stop
            orig_start = loop.start
            is_stopped = threading.Event()
            is_stopped.set()

            def start():
                is_stopped.clear()
                try:
                    orig_start()
                finally:
                    is_stopped.set()

            loop.start = start

            yield loop

            # Stop the loop in case it's still running
            try:
                # Close any workers left behind, then request a loop stop
                sync(loop, cleanup_global_workers, callback_timeout=0.500)
                loop.add_callback(loop.stop)
            except RuntimeError as e:
                # The loop may already be closing/closed; re-raise anything else
                if not re.match("IOLoop is clos(ed|ing)", str(e)):
                    raise
            except TimeoutError:
                pass
            else:
                # Wait until the patched start() observed the loop stopping
                is_stopped.wait()
@pytest.fixture
def loop_in_thread():
    # Fixture: a pristine IOLoop running in a daemon background thread.
    # Yields only after the loop has confirmed it is running; asks the loop to
    # stop and joins the thread on teardown.
    with pristine_loop() as loop:
        thread = threading.Thread(target=loop.start, name="test IOLoop")
        thread.daemon = True
        thread.start()

        loop_started = threading.Event()
        loop.add_callback(loop_started.set)
        loop_started.wait()

        yield loop

        loop.add_callback(loop.stop)
        thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
    """Yield the process-global ZeroMQ context; destroy it on teardown."""
    import zmq

    context = zmq.Context.instance()
    yield context
    context.destroy(linger=0)
@contextmanager
def pristine_loop():
    # Yield a brand-new IOLoop installed as the current loop — clearing any
    # previously installed instance first — and fully close it afterwards.
    IOLoop.clear_instance()
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    assert IOLoop.current() is loop
    try:
        yield loop
    finally:
        try:
            loop.close(all_fds=True)
        except (KeyError, ValueError):
            # The loop (or some of its fds) may already have been closed
            pass
        IOLoop.clear_instance()
        IOLoop.clear_current()
@contextmanager
def mock_ipython():
    # Patch IPython.get_ipython (and our module's copy of it) to return a mock
    # interactive shell so IPython-integration code can run without IPython.
    from unittest import mock
    from distributed._ipython_utils import remote_magic

    ip = mock.Mock()
    ip.user_ns = {}
    ip.kernel = None

    def get_ip():
        return ip

    with mock.patch("IPython.get_ipython", get_ip), mock.patch(
        "distributed._ipython_utils.get_ipython", get_ip
    ):
        yield ip
    # cleanup remote_magic client cache
    for kc in remote_magic._clients.values():
        kc.stop_channels()
    remote_magic._clients.clear()
# Snapshot of the dask configuration taken at import time; used by
# reset_config() to restore a pristine configuration between tests.
original_config = copy.deepcopy(dask.config.config)


def reset_config():
    """Reset the global dask configuration to its import-time snapshot."""
    dask.config.config.clear()
    dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
    """
    A decorator to disable debug facilities during timing-sensitive tests.
    Warning: this doesn't affect already created IOLoops.
    """

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # Temporarily remove PYTHONASYNCIODEBUG, restoring it afterwards
        saved = os.environ.pop("PYTHONASYNCIODEBUG", None)
        try:
            return func(*args, **kwargs)
        finally:
            if saved is not None:
                os.environ["PYTHONASYNCIODEBUG"] = saved

    return wrapped
def nodebug_setup_module(module):
    """
    A setup_module() that you can install in a test module to disable
    debug facilities.
    """
    saved = os.environ.get("PYTHONASYNCIODEBUG")
    module._old_asyncio_debug = saved
    if saved is not None:
        os.environ.pop("PYTHONASYNCIODEBUG")
def nodebug_teardown_module(module):
    """
    A teardown_module() that you can install in a test module to reenable
    debug facilities.
    """
    saved = module._old_asyncio_debug
    if saved is not None:
        os.environ["PYTHONASYNCIODEBUG"] = saved
def inc(x):
    """Return *x* incremented by one."""
    result = x + 1
    return result
def dec(x):
    """Return *x* decremented by one."""
    result = x - 1
    return result
def mul(x, y):
    """Return the product of *x* and *y*."""
    product = x * y
    return product
def div(x, y):
    """Return the true-division quotient of *x* by *y*."""
    quotient = x / y
    return quotient
def deep(n):
    """Recurse *n* levels deep, then return True (stack-depth stressor)."""
    if n <= 0:
        return True
    return deep(n - 1)
def throws(x):
    """Unconditionally raise RuntimeError('hello!'); *x* is ignored."""
    raise RuntimeError("hello!")
def double(x):
    """Return *x* multiplied by two."""
    doubled = x * 2
    return doubled
def slowinc(x, delay=0.02):
    """Increment *x* after sleeping *delay* seconds."""
    sleep(delay)
    result = x + 1
    return result
def slowdec(x, delay=0.02):
    """Decrement *x* after sleeping *delay* seconds."""
    sleep(delay)
    result = x - 1
    return result
def slowdouble(x, delay=0.02):
    """Double *x* after sleeping *delay* seconds."""
    sleep(delay)
    doubled = 2 * x
    return doubled
def randominc(x, scale=1):
    """Increment *x* after sleeping a random fraction of *scale* seconds."""
    from random import random

    sleep(scale * random())
    return x + 1
def slowadd(x, y, delay=0.02):
    """Add *x* and *y* after sleeping *delay* seconds."""
    sleep(delay)
    total = x + y
    return total
def slowsum(seq, delay=0.02):
    """Sum the elements of *seq* after sleeping *delay* seconds."""
    sleep(delay)
    total = sum(seq)
    return total
def slowidentity(*args, **kwargs):
    """Sleep kwargs['delay'] (default 0.02s); return the sole positional
    argument if exactly one was given, otherwise the tuple of arguments."""
    sleep(kwargs.get("delay", 0.02))
    return args[0] if len(args) == 1 else args
def run_for(duration, timer=time):
    """
    Burn CPU for *duration* seconds.
    """
    end = timer() + duration
    while True:
        if timer() > end:
            break
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
# Source of unique keys into _varying_dict, one per varying() call
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
    """
    Return a function that returns a result (or raises an exception)
    from *items* at each call.
    """
    # cloudpickle would serialize the *values* of all globals
    # used by *func* below, so we can't use `global <something>`.
    # Instead look up the module by name to get the original namespace
    # and not a copy.
    slot = _ModuleSlot(__name__, "_varying_dict")
    key = next(_varying_key_gen)

    def func():
        # Advance this closure's private counter in the shared module-level
        # dict, then return (or raise) the corresponding item.
        dct = slot.get()
        i = dct[key]
        if i == len(items):
            # All items have been consumed
            raise IndexError
        else:
            x = items[i]
            dct[key] = i + 1
            if isinstance(x, Exception):
                raise x
            else:
                return x

    return func
def map_varying(itemslists):
    """
    Like *varying*, but return the full specification for a map() call
    on multiple items lists.
    """

    def apply(func, *args, **kwargs):
        return func(*args, **kwargs)

    funcs = [varying(items) for items in itemslists]
    return apply, funcs
async def geninc(x, delay=0.02):
    """Asynchronously increment *x* after *delay* seconds."""
    await asyncio.sleep(delay)
    result = x + 1
    return result
async def asyncinc(x, delay=0.02):
    """Native-coroutine twin of geninc(): increment *x* after *delay* seconds."""
    await asyncio.sleep(delay)
    result = x + 1
    return result
# Per-comm queues used by readone() to hand out messages one at a time
_readone_queues = {}


async def readone(comm):
    """
    Read one message at a time from a comm that reads lists of
    messages.

    The first call for a given comm starts a background task that drains
    message batches from the comm into a queue; a ``None`` sentinel marks
    the comm as closed, at which point CommClosedError is raised.
    """
    try:
        q = _readone_queues[comm]
    except KeyError:
        q = _readone_queues[comm] = asyncio.Queue()

        async def background_read():
            while True:
                try:
                    messages = await comm.read()
                except CommClosedError:
                    break
                for msg in messages:
                    q.put_nowait(msg)
            q.put_nowait(None)
            del _readone_queues[comm]

        # BUG FIX: a bare ``background_read()`` only creates a coroutine
        # object and never runs it (the queue would stay empty forever);
        # schedule it on the running event loop instead.
        asyncio.ensure_future(background_read())

    msg = await q.get()
    if msg is None:
        raise CommClosedError
    else:
        return msg
def run_scheduler(q, nputs, port=0, **kwargs):
    """Child-process entry point: run a Scheduler on 127.0.0.1 and post its
    address to *q* *nputs* times (once per consumer waiting for it)."""
    from distributed import Scheduler

    # On Python 2.7 and Unix, fork() is used to spawn child processes,
    # so avoid inheriting the parent's IO loop.
    with pristine_loop() as loop:

        async def _():
            scheduler = await Scheduler(
                validate=True, host="127.0.0.1", port=port, **kwargs
            )
            for i in range(nputs):
                q.put(scheduler.address)
            await scheduler.finished()

        try:
            loop.run_sync(_)
        finally:
            loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
    """Child-process entry point: read the scheduler address from
    *scheduler_q*, start a Worker, and post the worker's address to *q*."""
    from distributed import Worker

    reset_logger_locks()
    with log_errors():
        # Use a fresh IO loop so nothing is inherited from the parent process
        with pristine_loop() as loop:
            scheduler_addr = scheduler_q.get()

            async def _():
                worker = await Worker(scheduler_addr, validate=True, **kwargs)
                q.put(worker.address)
                await worker.finished()

            try:
                loop.run_sync(_)
            finally:
                loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
    """Child-process entry point: like run_worker(), but starts a Nanny
    (which supervises a worker subprocess) instead of a bare Worker."""
    with log_errors():
        with pristine_loop() as loop:
            scheduler_addr = scheduler_q.get()

            async def _():
                worker = await Nanny(scheduler_addr, validate=True, **kwargs)
                q.put(worker.address)
                await worker.finished()

            try:
                loop.run_sync(_)
            finally:
                loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
    # Context manager asserting that every rpc object created inside the
    # block is closed again by the time it exits (within a grace period).
    active_before = set(rpc.active)
    yield
    # Some streams can take a bit of time to notice their peer
    # has closed, and keep a coroutine (*) waiting for a CommClosedError
    # before calling close_rpc() after a CommClosedError.
    # This would happen especially if a non-localhost address is used,
    # as Nanny does.
    # (*) (example: gather_from_workers())

    def fail():
        pytest.fail(
            "some RPCs left active by test: %s" % (set(rpc.active) - active_before)
        )

    async def wait():
        await async_wait_for(
            lambda: len(set(rpc.active) - active_before) == 0,
            timeout=active_rpc_timeout,
            fail_func=fail,
        )

    loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
    """Yield (scheduler_info, workers_info) for a freshly started test cluster."""
    with cluster() as (scheduler, workers):
        yield scheduler, workers
@pytest.fixture
def s(cluster_fixture):
    """Scheduler info dict from the shared cluster fixture."""
    scheduler, _ = cluster_fixture
    return scheduler
@pytest.fixture
def a(cluster_fixture):
    """Info dict of the first worker from the shared cluster fixture."""
    _, workers = cluster_fixture
    return workers[0]
@pytest.fixture
def b(cluster_fixture):
    """Info dict of the second worker from the shared cluster fixture."""
    _, workers = cluster_fixture
    return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
    """A connected Client bound to the test cluster's scheduler."""
    scheduler, _ = cluster_fixture
    with Client(scheduler["address"], loop=loop) as c:
        yield c
@pytest.fixture
def client_secondary(loop, cluster_fixture):
    """A second, independent Client connected to the same test scheduler."""
    scheduler, _ = cluster_fixture
    with Client(scheduler["address"], loop=loop) as c:
        yield c
@contextmanager
def tls_cluster_context(
    worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
    """cluster() wrapper that injects a (TLS-only, by default) Security object
    into both the scheduler and the workers."""
    sec = security or tls_only_security()
    worker_kwargs = assoc(worker_kwargs or {}, "security", sec)
    scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", sec)
    with cluster(
        worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
    ) as (s, workers):
        yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
    """Yield (scheduler_info, workers_info) for a TLS-secured test cluster."""
    with tls_cluster_context(security=security) as (scheduler, workers):
        yield scheduler, workers
@pytest.fixture
def tls_client(tls_cluster, loop, security):
    """A Client connected over TLS to the tls_cluster scheduler."""
    scheduler, _ = tls_cluster
    with Client(scheduler["address"], security=security, loop=loop) as c:
        yield c
@pytest.fixture
def security():
    """A Security object requiring TLS for all communications."""
    return tls_only_security()
@contextmanager
def cluster(
    nworkers=2,
    nanny=False,
    worker_kwargs={},
    active_rpc_timeout=1,
    disconnect_timeout=3,
    scheduler_kwargs={},
):
    """Start a scheduler and *nworkers* workers, each in its own subprocess.

    Yields ``({"address": scheduler_addr}, [worker_info, ...])`` where each
    worker_info has "address" and a weakref "proc".  On exit, all processes
    are disconnected, terminated and their temp directories removed.
    """
    ws = weakref.WeakSet()
    enable_proctitle_on_children()

    with clean(timeout=active_rpc_timeout, threads=False) as loop:
        if nanny:
            _run_worker = run_nanny
        else:
            _run_worker = run_worker

        # The scheduler queue will receive the scheduler's address
        scheduler_q = mp_context.Queue()

        # Launch scheduler
        scheduler = mp_context.Process(
            name="Dask cluster test: Scheduler",
            target=run_scheduler,
            args=(scheduler_q, nworkers + 1),
            kwargs=scheduler_kwargs,
        )
        ws.add(scheduler)
        scheduler.daemon = True
        scheduler.start()

        # Launch workers
        workers = []
        for i in range(nworkers):
            q = mp_context.Queue()
            fn = "_test_worker-%s" % uuid.uuid4()
            kwargs = merge(
                {
                    "nthreads": 1,
                    "local_directory": fn,
                    "memory_limit": system.MEMORY_LIMIT,
                },
                worker_kwargs,
            )
            proc = mp_context.Process(
                name="Dask cluster test: Worker",
                target=_run_worker,
                args=(q, scheduler_q),
                kwargs=kwargs,
            )
            ws.add(proc)
            workers.append({"proc": proc, "queue": q, "dir": fn})

        for worker in workers:
            worker["proc"].start()
        try:
            # Each worker posts its address once it is connected
            for worker in workers:
                worker["address"] = worker["queue"].get(timeout=5)
        except queue.Empty:
            raise pytest.xfail.Exception("Worker failed to start in test")

        saddr = scheduler_q.get()

        start = time()
        try:
            try:
                security = scheduler_kwargs["security"]
                rpc_kwargs = {"connection_args": security.get_connection_args("client")}
            except KeyError:
                rpc_kwargs = {}

            # Wait until the scheduler sees all workers registered
            with rpc(saddr, **rpc_kwargs) as s:
                while True:
                    nthreads = loop.run_sync(s.ncores)
                    if len(nthreads) == nworkers:
                        break
                    if time() - start > 5:
                        raise Exception("Timeout on cluster creation")

            # avoid sending processes down to function
            yield {"address": saddr}, [
                {"address": w["address"], "proc": weakref.ref(w["proc"])}
                for w in workers
            ]
        finally:
            logger.debug("Closing out test cluster")

            # Politely ask workers then scheduler to shut down...
            loop.run_sync(
                lambda: disconnect_all(
                    [w["address"] for w in workers],
                    timeout=disconnect_timeout,
                    rpc_kwargs=rpc_kwargs,
                )
            )
            loop.run_sync(
                lambda: disconnect(
                    saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
                )
            )

            # ...then force-terminate the processes and close the queues
            scheduler.terminate()
            scheduler_q.close()
            scheduler_q._reader.close()
            scheduler_q._writer.close()

            for w in workers:
                w["proc"].terminate()
                w["queue"].close()
                w["queue"]._reader.close()
                w["queue"]._writer.close()

            scheduler.join(2)
            del scheduler
            for proc in [w["proc"] for w in workers]:
                proc.join(timeout=2)

            with suppress(UnboundLocalError):
                del worker, w, proc
            del workers[:]

            # Remove per-worker scratch directories
            for fn in glob("_test_worker-*"):
                with suppress(OSError):
                    shutil.rmtree(fn)

            try:
                client = default_client()
            except ValueError:
                pass
            else:
                client.close()

    # Fail loudly if any spawned process outlives the cluster teardown
    start = time()
    while any(proc.is_alive() for proc in ws):
        text = str(list(ws))
        sleep(0.2)
        assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
    """Ask the server at *addr* to terminate; give up after *timeout* seconds."""
    kwargs = rpc_kwargs or {}

    async def do_disconnect():
        with suppress(EnvironmentError, CommClosedError):
            with rpc(addr, **kwargs) as w:
                await w.terminate(close=True)

    await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
    """Concurrently ask every server in *addresses* to terminate."""
    coros = [disconnect(addr, timeout, rpc_kwargs) for addr in addresses]
    await asyncio.gather(*coros)
def gen_test(timeout=10):
    """Coroutine test
    @gen_test(timeout=5)
    async def test_foo():
        await ... # use tornado coroutines
    """

    def decorator(func):
        def test_func():
            with clean() as loop:
                coro = func if iscoroutinefunction(func) else gen.coroutine(func)
                loop.run_sync(coro, timeout=timeout)

        return test_func

    return decorator
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
    nthreads,
    scheduler_addr,
    loop,
    security=None,
    Worker=Worker,
    scheduler_kwargs={},
    worker_kwargs={},
):
    """Start a Scheduler plus one worker per entry of *nthreads*.

    Each *nthreads* entry is ``(host, nthreads[, extra_worker_kwargs])``.
    Returns ``(scheduler, workers)`` once every worker has registered; raises
    after ~5 seconds, closing everything it started.
    """
    s = await Scheduler(
        loop=loop,
        validate=True,
        security=security,
        port=0,
        host=scheduler_addr,
        **scheduler_kwargs,
    )
    workers = [
        Worker(
            s.address,
            nthreads=ncore[1],
            name=i,
            security=security,
            loop=loop,
            validate=True,
            host=ncore[0],
            # A third tuple element, if present, holds per-worker overrides
            **(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
        )
        for i, ncore in enumerate(nthreads)
    ]
    # for w in workers:
    #     w.rpc = workers[0].rpc

    await asyncio.gather(*workers)

    # Wait until all workers are registered and their stream comms are live
    start = time()
    while len(s.workers) < len(nthreads) or any(
        comm.comm is None for comm in s.stream_comms.values()
    ):
        await asyncio.sleep(0.01)
        if time() - start > 5:
            await asyncio.gather(*[w.close(timeout=1) for w in workers])
            await s.close(fast=True)
            raise Exception("Cluster creation timeout")
    return s, workers
async def end_cluster(s, workers):
    """Close all *workers* concurrently, then fully stop scheduler *s*."""
    logger.debug("Closing out test cluster")

    async def end_worker(w):
        with suppress(TimeoutError, CommClosedError, EnvironmentError):
            await w.close(report=False)

    await asyncio.gather(*[end_worker(w) for w in workers])
    await s.close()  # wait until scheduler stops completely
    s.stop()
def gen_cluster(
    nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
    ncores=None,
    scheduler="127.0.0.1",
    timeout=10,
    security=None,
    Worker=Worker,
    client=False,
    scheduler_kwargs={},
    worker_kwargs={},
    client_kwargs={},
    active_rpc_timeout=1,
    config={},
    clean_kwargs={},
    allow_unclosed=False,
):
    from distributed import Client

    # NOTE(review): because the import above precedes it, this string is a
    # plain expression, not a real docstring for gen_cluster.
    """ Coroutine test with small cluster
    @gen_cluster()
    async def test_foo(scheduler, worker1, worker2):
        await ... # use tornado coroutines
    See also:
        start
        end
    """
    if ncores is not None:
        # Backwards compatibility for the pre-rename keyword
        warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
        nthreads = ncores

    worker_kwargs = merge(
        {"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
    )

    def _(func):
        if not iscoroutinefunction(func):
            func = gen.coroutine(func)

        def test_func():
            result = None
            workers = []
            with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:

                async def coro():
                    with dask.config.set(config):
                        s = False
                        # Starting a cluster can fail transiently; retry a few times
                        for i in range(5):
                            try:
                                s, ws = await start_cluster(
                                    nthreads,
                                    scheduler,
                                    loop,
                                    security=security,
                                    Worker=Worker,
                                    scheduler_kwargs=scheduler_kwargs,
                                    worker_kwargs=worker_kwargs,
                                )
                            except Exception as e:
                                logger.error(
                                    "Failed to start gen_cluster, retrying",
                                    exc_info=True,
                                )
                                await asyncio.sleep(1)
                            else:
                                workers[:] = ws
                                args = [s] + workers
                                break
                        if s is False:
                            raise Exception("Could not start cluster")
                        if client:
                            # Prepend an asynchronous Client to the test args
                            c = await Client(
                                s.address,
                                loop=loop,
                                security=security,
                                asynchronous=True,
                                **client_kwargs,
                            )
                            args = [c] + args
                        try:
                            future = func(*args)
                            if timeout:
                                future = asyncio.wait_for(future, timeout)
                            result = await future
                            if s.validate:
                                s.validate_state()
                        finally:
                            # Tear down client (if any), then cluster and workers
                            if client and c.status not in ("closing", "closed"):
                                await c._close(fast=s.status == Status.closed)
                            await end_cluster(s, workers)
                            await asyncio.wait_for(cleanup_global_workers(), 1)

                        # The test may have created a default client of its own
                        try:
                            c = await default_client()
                        except ValueError:
                            pass
                        else:
                            await c._close(fast=True)

                        def get_unclosed():
                            return [c for c in Comm._instances if not c.closed()] + [
                                c
                                for c in _global_clients.values()
                                if c.status != "closed"
                            ]

                        try:
                            # Give lingering comms/clients up to 5s to close
                            start = time()
                            while time() < start + 5:
                                gc.collect()
                                if not get_unclosed():
                                    break
                                await asyncio.sleep(0.05)
                            else:
                                if allow_unclosed:
                                    print(f"Unclosed Comms: {get_unclosed()}")
                                else:
                                    raise RuntimeError("Unclosed Comms", get_unclosed())
                        finally:
                            Comm._instances.clear()
                            _global_clients.clear()

                        return result

                result = loop.run_sync(
                    coro, timeout=timeout * 2 if timeout else timeout
                )

            for w in workers:
                if getattr(w, "data", None):
                    try:
                        w.data.clear()
                    except EnvironmentError:
                        # zict backends can fail if their storage directory
                        # was already removed
                        pass
                    del w.data

            return result

        return test_func

    return _
def raises(func, exc=Exception):
    """Return True if calling *func*() raises *exc*, False if it returns."""
    try:
        func()
    except exc:
        return True
    return False
def terminate_process(proc):
    """Interrupt *proc* gently (SIGINT, or CTRL_BREAK on Windows), wait up to
    10s, then unconditionally kill it so no process is left behind."""
    if proc.poll() is None:
        if sys.platform.startswith("win"):
            proc.send_signal(signal.CTRL_BREAK_EVENT)
        else:
            proc.send_signal(signal.SIGINT)
        try:
            proc.wait(10)
        finally:
            # Make sure we don't leave the process lingering around
            with suppress(OSError):
                proc.kill()
@contextmanager
def popen(args, **kwargs):
    """Run a console script from the current environment's bin/Scripts dir,
    yielding the Popen object; terminate it and dump its output on failure."""
    kwargs["stdout"] = subprocess.PIPE
    kwargs["stderr"] = subprocess.PIPE
    if sys.platform.startswith("win"):
        # Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
    dump_stdout = False

    args = list(args)
    # Resolve the executable name against this interpreter's install prefix
    if sys.platform.startswith("win"):
        args[0] = os.path.join(sys.prefix, "Scripts", args[0])
    else:
        args[0] = os.path.join(
            os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
        )
    proc = subprocess.Popen(args, **kwargs)
    try:
        yield proc
    except Exception:
        # Remember to dump output so the failing test is easier to debug
        dump_stdout = True
        raise
    finally:
        try:
            terminate_process(proc)
        finally:
            # XXX Also dump stdout if return code != 0 ?
            out, err = proc.communicate()
            if dump_stdout:
                print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
                print(err.decode())
                print("\n\nPrint from stdout\n=================\n")
                print(out.decode())
def wait_for_port(address, timeout=5):
    """Block until a TCP connection to *address* (host, port) succeeds,
    raising RuntimeError after *timeout* seconds."""
    assert isinstance(address, tuple)
    deadline = time() + timeout

    while True:
        remaining = deadline - time()
        if remaining < 0:
            raise RuntimeError("Failed to connect to %s" % (address,))
        try:
            sock = socket.create_connection(address, timeout=remaining)
        except EnvironmentError:
            continue
        sock.close()
        return
def wait_for(predicate, timeout, fail_func=None, period=0.001):
    """Poll *predicate* every *period* seconds until true; after *timeout*
    seconds call *fail_func* (if given) and fail the current test."""
    end = time() + timeout
    while not predicate():
        sleep(period)
        if time() <= end:
            continue
        if fail_func is not None:
            fail_func()
        pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
    """Async variant of wait_for(): poll *predicate* until true, failing the
    current test (after calling *fail_func*, if given) once *timeout* elapses."""
    end = time() + timeout
    while not predicate():
        await asyncio.sleep(period)
        if time() <= end:
            continue
        if fail_func is not None:
            fail_func()
        pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
    """
    Return whether IPv6 is locally functional. This doesn't guarantee IPv6
    is properly configured outside of localhost.
    """
    serv = cli = None
    try:
        # Probe by actually connecting to a freshly bound IPv6 listener
        serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        serv.bind(("::", 0))
        serv.listen(5)
        cli = socket.create_connection(serv.getsockname()[:2])
    except EnvironmentError:
        return False
    else:
        return True
    finally:
        if cli is not None:
            cli.close()
        if serv is not None:
            serv.close()
# Decorator: skip the decorated test unless IPv6 works locally.
if has_ipv6():
    # IPv6 available: pass the test function through unchanged
    def requires_ipv6(test_func):
        return test_func

else:
    requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
    """
    Check that it is possible to connect to the distributed *addr*
    within the given *timeout*.
    """
    conn = await connect(addr, timeout=timeout, **kwargs)
    conn.abort()
async def assert_cannot_connect(
    addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
    """
    Check that it is impossible to connect to the distributed *addr*
    within the given *timeout*.
    """
    with pytest.raises(exception_class):
        conn = await connect(addr, timeout=timeout, **kwargs)
        conn.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
    """
    Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
    """
    checks = [
        assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
        assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
    ]
    if has_ipv6():
        checks.append(
            assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs)
        )
        checks.append(
            assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs)
        )
    await asyncio.gather(*checks)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
    """
    Check that the local *port* is reachable from all IPv4 addresses.
    """
    checks = [
        assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
        assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
    ]
    if has_ipv6():
        # ...and NOT reachable over IPv6
        checks.append(
            assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs)
        )
        checks.append(
            assert_cannot_connect(
                "%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
            )
        )
    await asyncio.gather(*checks)
async def assert_can_connect_locally_4(port, **kwargs):
    """
    Check that the local *port* is only reachable from local IPv4 addresses.
    """
    checks = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
    if get_ip() != "127.0.0.1":  # No outside IPv4 connectivity?
        checks.append(assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs))
    if has_ipv6():
        checks.append(assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs))
        checks.append(
            assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
        )
    await asyncio.gather(*checks)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
    """
    Check that the local *port* is reachable from all IPv6 addresses.
    """
    assert has_ipv6()
    checks = [
        assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
        assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
        assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
        assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
    ]
    await asyncio.gather(*checks)
async def assert_can_connect_locally_6(port, **kwargs):
    """
    Check that the local *port* is only reachable from local IPv6 addresses.
    """
    assert has_ipv6()
    checks = [
        assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
        assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
        assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
    ]
    if get_ipv6() != "::1":  # No outside IPv6 connectivity?
        checks.append(
            assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
        )
    await asyncio.gather(*checks)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
    """Capture output from the given Logger."""
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    saved_level = logger.level
    saved_handlers = logger.handlers[:]
    if propagate is not None:
        saved_propagate = logger.propagate
        logger.propagate = propagate
    stream = io.StringIO()
    logger.handlers[:] = [logging.StreamHandler(stream)]
    logger.setLevel(level)
    try:
        yield stream
    finally:
        # Restore the logger exactly as it was before capturing
        logger.handlers[:] = saved_handlers
        logger.setLevel(saved_level)
        if propagate is not None:
            logger.propagate = saved_propagate
@contextmanager
def captured_handler(handler):
    """Capture output from the given logging.StreamHandler."""
    assert isinstance(handler, logging.StreamHandler)
    saved_stream = handler.stream
    handler.stream = io.StringIO()
    try:
        yield handler.stream
    finally:
        handler.stream = saved_stream
@contextmanager
def new_config(new_config):
    """
    Temporarily change configuration dictionary.
    """
    from .config import defaults

    config = dask.config.config
    orig_config = copy.deepcopy(config)
    try:
        config.clear()
        # Start from pristine defaults, then merge in the requested overrides
        config.update(copy.deepcopy(defaults))
        dask.config.update(config, new_config)
        # Logging settings may have changed; re-apply them
        initialize_logging(config)
        yield
    finally:
        config.clear()
        config.update(orig_config)
        initialize_logging(config)
@contextmanager
def new_environment(changes):
    """Temporarily apply *changes* to os.environ, restoring it afterwards."""
    saved = os.environ.copy()
    os.environ.update(changes)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved)
@contextmanager
def new_config_file(c):
    """
    Temporarily change configuration file to match dictionary *c*.
    """
    import yaml

    previous = os.environ.get("DASK_CONFIG")
    fd, path = tempfile.mkstemp(prefix="dask-config")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(yaml.dump(c))
        os.environ["DASK_CONFIG"] = path
        try:
            yield
        finally:
            if previous:
                os.environ["DASK_CONFIG"] = previous
            else:
                del os.environ["DASK_CONFIG"]
    finally:
        os.remove(path)
# Directory holding the self-signed TLS certificates shipped with the tests
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
    """
    Get the path to one of the test TLS certificates.
    """
    full_path = os.path.join(certs_dir, filename)
    assert os.path.exists(full_path), full_path
    return full_path
def tls_config():
    """
    A functional TLS configuration with our test certs.
    """
    ca_file = get_cert("tls-ca-cert.pem")
    keycert = get_cert("tls-key-cert.pem")
    tls = {
        "ca-file": ca_file,
        "client": {"cert": keycert},
        "scheduler": {"cert": keycert},
        "worker": {"cert": keycert},
    }
    return {"distributed": {"comm": {"tls": tls}}}
def tls_only_config():
    """
    A functional TLS configuration with our test certs, disallowing
    plain TCP communications.
    """
    config = tls_config()
    config["distributed"]["comm"]["require-encryption"] = True
    return config
def tls_security():
    """
    A Security object with proper TLS configuration.
    """
    with new_config(tls_config()):
        security = Security()
    return security
def tls_only_security():
    """
    A Security object with proper TLS configuration and disallowing plain
    TCP communications.
    """
    with new_config(tls_only_config()):
        security = Security()
    assert security.require_encryption
    return security
def get_server_ssl_context(
    certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
    """SSL context for the server side of a test TLS connection, requiring
    client certificates signed by the test CA."""
    context = ssl.create_default_context(
        ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file)
    )
    context.check_hostname = False
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_cert_chain(get_cert(certfile), get_cert(keyfile))
    return context
def get_client_ssl_context(
    certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
    """SSL context for the client side of a test TLS connection, presenting a
    certificate signed by the test CA and skipping hostname checks."""
    context = ssl.create_default_context(
        ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file)
    )
    context.check_hostname = False
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_cert_chain(get_cert(certfile), get_cert(keyfile))
    return context
def bump_rlimit(limit, desired):
    """Raise the soft value of rlimit *limit* to at least *desired*, skipping
    the current test when that is not possible."""
    resource = pytest.importorskip("resource")
    try:
        soft, hard = resource.getrlimit(limit)
    except Exception as e:
        # BUG FIX: previously a getrlimit() failure referenced ``soft``
        # before assignment in the skip message, raising NameError instead
        # of skipping the test.
        pytest.skip("could not query rlimit: %s" % (e,))
    if soft < desired:
        try:
            resource.setrlimit(limit, (desired, max(hard, desired)))
        except Exception as e:
            pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
    """gen_cluster() preconfigured for TLS-only communications."""
    kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
    return gen_cluster(
        scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
    )
@contextmanager
def save_sys_modules():
    """Undo additions made to sys.modules and sys.path inside the block.

    BUG FIX: the previous version aliased (rather than copied) ``sys.modules``
    and ``sys.path``, so the post-block membership checks could never detect
    an addition and the cleanup was a no-op (and deleting from ``sys.modules``
    while iterating its keys could raise RuntimeError).
    """
    old_modules = dict(sys.modules)
    old_path = list(sys.path)
    try:
        yield
    finally:
        # Drop path entries added inside the block, preserving order
        sys.path[:] = [p for p in sys.path if p in old_path]
        # Drop modules imported inside the block (iterate over a snapshot)
        for name in list(sys.modules):
            if name not in old_modules:
                del sys.modules[name]
@contextmanager
def check_thread_leak():
    # Assert that no unexpected threads outlive the block (a few known
    # long-lived thread names are whitelisted).
    active_threads_start = set(threading._active)

    yield

    start = time()
    while True:
        bad = [
            t
            for t, v in threading._active.items()
            if t not in active_threads_start
            and "Threaded" not in v.name
            and "watch message" not in v.name
            and "TCP-Executor" not in v.name
        ]
        if not bad:
            break
        else:
            sleep(0.01)
            if time() > start + 5:
                # Leaked for >5s: dump the first offender's call stack
                from distributed import profile

                tid = bad[0]
                thread = threading._active[tid]
                call_stacks = profile.call_stack(sys._current_frames()[tid])
                assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
    # Terminate any pre-existing child processes, run the block, and (when
    # *check* is true) assert that no child process outlives it.
    for proc in mp_context.active_children():
        proc.terminate()

    yield

    if check:
        for i in range(200):
            if not set(mp_context.active_children()):
                break
            else:
                sleep(0.2)
        else:
            # for/else: ran out of retries with children still alive
            assert not mp_context.active_children()

    # Best-effort final sweep
    for proc in mp_context.active_children():
        proc.terminate()
@contextmanager
def check_instances():
    # Clear all tracked distributed object registries before the block, then
    # after it assert/ensure that clients, workers, comms, nannies and
    # clusters created inside were properly closed.
    Client._instances.clear()
    Worker._instances.clear()
    Scheduler._instances.clear()
    SpecCluster._instances.clear()
    # assert all(n.status == "closed" for n in Nanny._instances), {
    #     n: n.status for n in Nanny._instances
    # }
    Nanny._instances.clear()
    _global_clients.clear()
    Comm._instances.clear()

    yield

    # Wait (up to 10s) for all global clients to deregister themselves
    start = time()
    while set(_global_clients):
        sleep(0.1)
        assert time() < start + 10
    _global_clients.clear()

    # Schedule closure of any workers that are still around
    for w in Worker._instances:
        with suppress(RuntimeError):  # closed IOLoop
            w.loop.add_callback(w.close, report=False, executor_wait=False)
            if w.status == Status.running:
                w.loop.add_callback(w.close)
    Worker._instances.clear()

    # Give comms a short grace period to finish closing
    for i in range(5):
        if all(c.closed() for c in Comm._instances):
            break
        else:
            sleep(0.1)
    else:
        L = [c for c in Comm._instances if not c.closed()]
        Comm._instances.clear()
        print("Unclosed Comms", L)
        # raise ValueError("Unclosed Comms", L)

    assert all(
        n.status == Status.closed or n.status == Status.init for n in Nanny._instances
    ), {n: n.status for n in Nanny._instances}

    # assert not list(SpecCluster._instances) # TODO
    assert all(c.status == Status.closed for c in SpecCluster._instances), list(
        SpecCluster._instances
    )
    SpecCluster._instances.clear()

    Nanny._instances.clear()
    DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
    """Compose all leak/sanity checks around a test body and yield a loop.

    Parameters
    ----------
    threads : bool
        Check for leaked threads (disabled on Windows by default).
    instances : bool
        Check for leaked distributed object instances.
    timeout : number
        Timeout passed to the active-RPC check.
    processes : bool
        Check for leaked child processes.
    """
    # no-op stand-in used when a particular check is disabled
    @contextmanager
    def null():
        yield
    # NOTE: the nesting order matters — instance/RPC checks must run inside
    # the loop and process checks so teardown happens in the right order.
    with check_thread_leak() if threads else null():
        with pristine_loop() as loop:
            with check_process_leak(check=processes):
                with check_instances() if instances else null():
                    with check_active_rpc(loop, timeout):
                        reset_config()
                        dask.config.set({"distributed.comm.timeouts.connect": "5s"})
                        # Restore default logging levels
                        # XXX use pytest hooks/fixtures instead?
                        for name, level in logging_levels.items():
                            logging.getLogger(name).setLevel(level)
                        yield loop
    # Forget that this thread ever ran an event loop.
    with suppress(AttributeError):
        del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
    """Pytest fixture applying :func:`clean` (all checks, defaults) to a test."""
    with clean():
        yield
class TaskStateMetadataPlugin(WorkerPlugin):
    """WorkerPlugin that stamps execution times into ``TaskState.metadata``.

    Records ``start_time`` when a task moves ready -> executing and
    ``stop_time`` when it moves executing -> memory.
    """
    def setup(self, worker):
        # Keep a handle on the worker so transitions can reach its tasks.
        self.worker = worker
    def transition(self, key, start, finish, **kwargs):
        task_state = self.worker.tasks[key]
        edge = (start, finish)
        if edge == ("ready", "executing"):
            task_state.metadata["start_time"] = time()
        elif edge == ("executing", "memory"):
            task_state.metadata["stop_time"] = time()
| 27.184447 | 88 | 0.567957 |
ace82da47b41936ac95a73f67fec358081c1ecc2 | 2,793 | py | Python | smartshark/management/commands/delete_project.py | benjaminLedel/serverSHARK | 97decc03ba7bde8ad9c5f55d446d21ed5a26709c | [
"Apache-2.0"
] | null | null | null | smartshark/management/commands/delete_project.py | benjaminLedel/serverSHARK | 97decc03ba7bde8ad9c5f55d446d21ed5a26709c | [
"Apache-2.0"
] | null | null | null | smartshark/management/commands/delete_project.py | benjaminLedel/serverSHARK | 97decc03ba7bde8ad9c5f55d446d21ed5a26709c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from django.core.management.base import BaseCommand
from django.db import connections
from bson.objectid import ObjectId
from smartshark.models import Project
from smartshark.utils import projectUtils
from smartshark.utils.projectUtils import delete_file_from_gridfs_for_project
class Command(BaseCommand):
    help = 'Deletes all data of a project'
    def handle(self, *args, **options):
        """Interactively pick a project, show its data tree, and delete it.

        Deletes the project's documents from MongoDB (including GridFS
        files) and then removes the project row from the relational DB.
        """
        # List all known projects so the user can pick one by name.
        for p in Project.objects.all():
            self.stdout.write(p.name)
        try:
            l = input("Which project should be deleted? ")
            project = Project.objects.all().get(name__iexact=l)
            self.stdout.write("Calculate data tree for {}".format(project.name))
        except (Project.DoesNotExist, Project.MultipleObjectsReturned) as e:
            self.stdout.write(self.style.ERROR('Error loading project: {}'.format(e)))
            sys.exit(-1)
        schemas = projectUtils.getPlugins()
        # Analyze the schema
        deb = []
        x = projectUtils.findDependencyOfSchema('project', schemas.values(), [])
        project_schema = projectUtils.SchemaReference('project', '_id', x)
        deb.append(project_schema)
        # Count documents per collection so the user sees what will be lost.
        projectUtils.count_on_dependency_tree(project_schema, ObjectId(project.mongo_id))
        self._print_dependency_tree(deb, project)
        l = input("Continue with data deletion? (y/n) ")
        if(l == "yes" or l == "y"):
            self.stdout.write('Deleting project from the MongoDB')
            # Before everything else we need to delete the file from the gridfs
            delete_file_from_gridfs_for_project(ObjectId(project.mongo_id))
            projectUtils.delete_on_dependency_tree(project_schema, ObjectId(project.mongo_id))
            self.stdout.write(self.style.SUCCESS('Successfully deleted project from the MongoDB'))
            # NOTE(review): connection is closed before the relational delete —
            # presumably to force a fresh connection after the long Mongo run.
            connections['default'].close()
            self.stdout.write('Deleting project from the serverSHARK')
            project.delete()
            self.stdout.write(self.style.SUCCESS('Successfully deleted project from the serverSHARK'))
        else:
            self.stdout.write(self.style.ERROR('No data deleted'))
    def _print_dependency_tree(self, deb, project):
        """Print each top-level collection with its document count."""
        self.stdout.write("Project data of {}".format(project.name))
        for dependency in deb:
            self.stdout.write('{} ({})'.format(dependency.collection_name, dependency.count))
            self._print_sub_dependency(dependency.dependencys, 1)
    def _print_sub_dependency(self, deb, depth):
        """Recursively print child collections, indented by *depth*."""
        for dependency in deb:
            self.stdout.write('{} └── {} ({})'.format(' ' * (depth - 1), dependency.collection_name, dependency.count))
            self._print_sub_dependency(dependency.dependencys, depth + 1)
| 40.478261 | 120 | 0.667741 |
ace82eacb41740f0f89d0f9ff4053dd648e921ae | 5,962 | py | Python | mzgtfs/test_feed.py | andreyz/mapzen-gtfs | d445f1588ed10713eea9a1ca2878eef792121eca | [
"MIT"
] | 29 | 2015-06-08T00:49:52.000Z | 2021-09-25T21:46:53.000Z | mzgtfs/test_feed.py | andreyz/mapzen-gtfs | d445f1588ed10713eea9a1ca2878eef792121eca | [
"MIT"
] | 12 | 2015-07-28T07:12:55.000Z | 2017-05-11T14:24:12.000Z | mzgtfs/test_feed.py | andreyz/mapzen-gtfs | d445f1588ed10713eea9a1ca2878eef792121eca | [
"MIT"
] | 10 | 2015-07-28T06:57:51.000Z | 2021-01-05T05:56:27.000Z | """Feed unit tests."""
import unittest
import os
import json
import inspect
import tempfile
import csv
import zipfile
import util
import feed
import entities
def test_outfile():
    """Return a temporary file path that does not currently exist.

    The caller must unlink the file once it has been created.  As with the
    original implementation, this is not secure against races on the name.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        name = tmp.name
    # Leaving the ``with`` block deletes the file, so only the name remains.
    return name
class TestFeed(unittest.TestCase):
    """Test Feed Reader.
    TODO: Test Unicode?
    """
    # Expected field values for the Google example feed fixtures.
    agency_expect = {
        'agency_url': 'http://google.com',
        'agency_name': 'Demo Transit Authority',
        'agency_id': 'DTA',
        'agency_timezone': 'America/Los_Angeles'
    }
    route_expect = {
        'route_long_name': 'Airport - Bullfrog',
        'route_id': 'AB',
        'route_type': '3',
        'route_text_color': '',
        'agency_id': 'DTA',
        'route_color': '',
        'route_url': '',
        'route_desc': '',
        'route_short_name': '10'
    }
    stop_expect = {
        'stop_lat': '36.425288',
        'stop_lon': '-117.133162',
        'stop_id': 'FUR_CREEK_RES',
        'stop_name': 'Furnace Creek Resort (Demo)'
    }
    def test_init(self):
        f = feed.Feed(util.example_feed())
    def test_read(self):
        # Test basic read
        f = feed.Feed(util.example_feed())
        data = f.read('stops')
        # check we got 9 entities
        assert len(data) == 9
        # check cache
        assert 'stops' in f.by_id
    def test_read_path(self):
        # Test overlay: a file on disk shadows the same file inside the zip.
        f = feed.Feed(
            util.example_feed(),
            path=os.path.dirname(util.example_feed())
        )
        assert f.stop('TEST')
        with self.assertRaises(Exception):
            f.stop('FUR_CREEK_RES')
    def test_read_missing(self):
        f = feed.Feed(
            util.example_feed(),
            path=os.path.dirname(util.example_feed())
        )
        with self.assertRaises(Exception):
            f.read('missing')
    def test_write(self):
        """Write one agency and verify the CSV round-trips."""
        f = feed.Feed()
        data = [entities.Agency(**self.agency_expect)]
        outfile = test_outfile()
        f.write(outfile, data, sortkey='agency_id')
        # Check the output...
        with open(outfile) as csvfile:
            reader = csv.reader(csvfile)
            # Fixed: ``reader.next()`` is Python-2-only; the builtin
            # ``next()`` works on both Python 2.6+ and Python 3.
            headers = next(reader)
            assert len(self.agency_expect.keys()) == len(headers)
            for i in headers:
                assert i in self.agency_expect
            rows = []
            for i in reader:
                rows.append(i)
            assert len(rows) == 1
            row = rows[0]
            for k, v in zip(headers, row):
                assert self.agency_expect[k] == v
        # Delete temp file
        os.unlink(outfile)
    def test_write_exists(self):
        """Writing over an existing file must raise IOError."""
        f = feed.Feed()
        data = [entities.Agency(**self.agency_expect)]
        outfile = test_outfile()
        f.write(outfile, data, sortkey='agency_id')
        with self.assertRaises(IOError):
            f.write(outfile, data, sortkey='agency_id')
        os.unlink(outfile)
    def test_make_zip(self):
        f = feed.Feed()
        outfile = test_outfile()
        f.make_zip(
            outfile,
            path=os.path.dirname(util.example_feed()),
            clone=util.example_feed()
        )
        expect = [
            'agency.txt',
            'calendar.txt',
            'calendar_dates.txt',
            'fare_attributes.txt',
            'fare_rules.txt',
            'frequencies.txt',
            'routes.txt',
            'shapes.txt',
            'stop_times.txt',
            'trips.txt',
            'stops.txt'
        ]
        zf = zipfile.ZipFile(outfile)
        # NOTE: zip() truncates to the shorter sequence, so extra/missing
        # members would go undetected; comparing the sorted lists directly
        # would be stricter.
        for i, j in zip(sorted(zf.namelist()), sorted(expect)):
            assert i == j
        zf.close()
        os.unlink(outfile)
    def test_make_zip_exists(self):
        """Zipping onto an existing path must raise IOError."""
        f = feed.Feed()
        outfile = test_outfile()
        f.make_zip(
            outfile,
            path=os.path.dirname(util.example_feed()),
            clone=util.example_feed()
        )
        with self.assertRaises(IOError):
            f.make_zip(
                outfile,
                path=os.path.dirname(util.example_feed()),
                clone=util.example_feed()
            )
        os.unlink(outfile)
    def test_make_zip_compression(self):
        """An uncompressed archive must be larger than a compressed one."""
        f = feed.Feed()
        outfile = test_outfile()
        f.make_zip(
            outfile,
            path=os.path.dirname(util.example_feed()),
            clone=util.example_feed(),
            compress=False
        )
        outfile2 = test_outfile()
        f.make_zip(
            outfile2,
            path=os.path.dirname(util.example_feed()),
            clone=util.example_feed()
        )
        assert os.stat(outfile).st_size > os.stat(outfile2).st_size
        os.unlink(outfile)
        os.unlink(outfile2)
    def test_cache(self):
        f = feed.Feed(util.example_feed())
        # Read a first time
        data1 = f.read('stops')
        # Read a second time
        data2 = f.read('stops')
        assert len(data1) == len(data2)
        assert 'stops' in f.by_id
        assert len(data1) == len(f.by_id['stops'])
    def test_read_invalidfile(self):
        f = feed.Feed(util.example_feed())
        with self.assertRaises(KeyError):
            f.read('invalidfile')
    def test_read_padding(self):
        # The Google GTFS example feed is missing columns in
        # stop_times.txt. Check the padding mechanism works.
        f = feed.Feed(util.example_feed())
        data = f.read('stop_times')
        # Check that all 9 elements are present.
        for entity in f.read('stop_times'):
            assert len(entity) == 9
    def test_agencies(self):
        f = feed.Feed(util.example_feed())
        data = f.agencies()
        assert len(data) == 1
    def test_agency(self):
        f = feed.Feed(util.example_feed())
        data = f.agency(self.agency_expect['agency_id'])
        for k in self.agency_expect:
            assert self.agency_expect[k] == data[k]
    def test_routes(self):
        f = feed.Feed(util.example_feed())
        assert len(f.routes()) == 5
    def test_route(self):
        f = feed.Feed(util.example_feed())
        data = f.route(self.route_expect['route_id'])
        for k in self.route_expect:
            assert self.route_expect[k] == data[k]
    def test_stops(self):
        f = feed.Feed(util.example_feed())
        assert len(f.stops()) == 9
    def test_stop(self):
        f = feed.Feed(util.example_feed())
        data = f.stop(self.stop_expect['stop_id'])
        for k in self.stop_expect:
            assert self.stop_expect[k] == data[k]
ace82f347758006d33e6e00c4c12d62b24c86d4b | 9,255 | py | Python | skactiveml/stream/tests/test_stream.py | LukasLuehrs/scikit-activeml | 04d7107272ef0438070808475599131d8726f547 | [
"BSD-3-Clause"
] | null | null | null | skactiveml/stream/tests/test_stream.py | LukasLuehrs/scikit-activeml | 04d7107272ef0438070808475599131d8726f547 | [
"BSD-3-Clause"
] | null | null | null | skactiveml/stream/tests/test_stream.py | LukasLuehrs/scikit-activeml | 04d7107272ef0438070808475599131d8726f547 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import unittest
from collections import deque
from importlib import import_module
from os import path
import numpy as np
from sklearn.datasets import make_classification
from sklearn.utils import check_random_state
from skactiveml import stream
from skactiveml.base import SingleAnnotatorStreamQueryStrategy
from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.utils import call_func
class TestStream(unittest.TestCase):
    """Generic smoke tests shared by all single-annotator stream strategies.

    Collects every class exported from ``skactiveml.stream`` that subclasses
    ``SingleAnnotatorStreamQueryStrategy`` and runs each through a synthetic
    classification stream, also checking that the per-strategy test modules
    cover every constructor/query parameter.
    """
    def setUp(self):
        # Discover all concrete stream query strategies exported by the package.
        self.query_strategies = {}
        for qs_name in stream.__all__:
            qs = getattr(stream, qs_name)
            if inspect.isclass(qs) and issubclass(
                qs, SingleAnnotatorStreamQueryStrategy
            ):
                self.query_strategies[qs_name] = qs
        self.clf = ParzenWindowClassifier()
    def test_selection_strategies(self):
        """Run every discovered strategy over a synthetic data stream."""
        # Create data set for testing.
        rand = np.random.RandomState(0)
        stream_length = 100
        train_init_size = 10
        training_size = 50
        X, y = make_classification(
            n_samples=stream_length + train_init_size,
            random_state=rand.randint(2**31 - 1),
            shuffle=True,
        )
        clf = ParzenWindowClassifier(
            classes=[0, 1], random_state=rand.randint(2**31 - 1)
        )
        # First ``train_init_size`` samples seed the model; the rest stream in.
        X_init = X[:train_init_size, :]
        y_init = y[:train_init_size]
        X_stream = X[train_init_size:, :]
        y_stream = y[train_init_size:]
        # # Build dictionary of attributes.
        # query_strategy_classes = {}
        # for s_class in stream.__all__:
        # query_strategy_classes[s_class] = getattr(stream, s_class)
        # Test predictions of classifiers.
        for qs_name, qs_class in self.query_strategies.items():
            self._test_query_strategy(
                rand.randint(2**31 - 1),
                qs_class,
                clf,
                X_init,
                y_init,
                X_stream,
                y_stream,
                training_size,
                qs_name,
            )
            self._test_update_before_query(
                rand.randint(2**31 - 1),
                qs_class,
                clf,
                X_init,
                y_init,
                X_stream,
                y_stream,
                training_size,
                qs_name,
            )
    def _test_query_strategy(
        self,
        rand_seed,
        query_strategy_class,
        clf,
        X_init,
        y_init,
        X_stream,
        y_stream,
        training_size,
        qs_name,
    ):
        """Check that two identically-seeded strategies behave identically.

        Feeds the stream one sample at a time, querying both strategy
        instances, asserting their utilities/decisions agree, updating them,
        and refitting the classifier on a sliding training window.
        """
        rand = check_random_state(rand_seed)
        random_state = rand.randint(2**31 - 1)
        # Two instances with the same seed must produce the same outputs.
        query_strategy = query_strategy_class(random_state=random_state)
        query_strategy2 = query_strategy_class(random_state=random_state)
        # Sliding training window of at most ``training_size`` samples.
        X_train = deque(maxlen=training_size)
        X_train.extend(X_init)
        y_train = deque(maxlen=training_size)
        y_train.extend(y_init)
        for t, (x_t, y_t) in enumerate(zip(X_stream, y_stream)):
            # Alternate between requesting utilities and not.
            return_utilities = t % 2 == 0
            qs_output = call_func(
                query_strategy.query,
                candidates=x_t.reshape([1, -1]),
                clf=clf,
                return_utilities=return_utilities,
            )
            # Repeated queries without an update must not change the result
            # (only the last repetition is compared below).
            for i in range(3):
                qs_output2 = call_func(
                    query_strategy2.query,
                    candidates=x_t.reshape([1, -1]),
                    clf=clf,
                    return_utilities=return_utilities,
                )
            if return_utilities:
                queried_indices, utilities = qs_output
                queried_indices2, utilities2 = qs_output2
                self.assertEqual(utilities, utilities2)
            else:
                queried_indices = qs_output
                queried_indices2 = qs_output2
                # Placeholder utility used when the strategy did not report one.
                utilities = [0.5]
                utilities2 = [0.5]
            self.assertEqual(len(queried_indices), len(queried_indices2))
            budget_manager_param_dict1 = {"utilities": utilities}
            budget_manager_param_dict2 = {"utilities": utilities2}
            call_func(
                query_strategy.update,
                candidates=x_t.reshape([1, -1]),
                queried_indices=queried_indices,
                budget_manager_param_dict=budget_manager_param_dict1,
            )
            call_func(
                query_strategy2.update,
                candidates=x_t.reshape([1, -1]),
                queried_indices=queried_indices2,
                budget_manager_param_dict=budget_manager_param_dict2,
            )
            # Label is only revealed when the strategy queried the sample.
            X_train.append(x_t)
            if len(queried_indices):
                y_train.append(y_t)
            else:
                y_train.append(clf.missing_label)
            clf.fit(X_train, y_train)
    def _test_update_before_query(
        self,
        rand_seed,
        query_strategy_class,
        clf,
        X_init,
        y_init,
        X_stream,
        y_stream,
        training_size,
        qs_name,
    ):
        """Check ``update`` works on a strategy that never called ``query``.

        ``query_strategy2`` only ever receives updates (mirroring the
        decisions of ``query_strategy``) and must not fail.
        """
        rand = check_random_state(rand_seed)
        random_state = rand.randint(2**31 - 1)
        query_strategy = query_strategy_class(random_state=random_state)
        query_strategy2 = query_strategy_class(random_state=random_state)
        X_train = deque(maxlen=training_size)
        X_train.extend(X_init)
        y_train = deque(maxlen=training_size)
        y_train.extend(y_init)
        for t, (x_t, y_t) in enumerate(zip(X_stream, y_stream)):
            return_utilities = t % 2 == 0
            qs_output = call_func(
                query_strategy.query,
                candidates=x_t.reshape([1, -1]),
                clf=clf,
                return_utilities=return_utilities,
            )
            if return_utilities:
                queried_indices, utilities = qs_output
            else:
                queried_indices = qs_output
                utilities = [0.5]
            budget_manager_param_dict1 = {"utilities": utilities}
            budget_manager_param_dict2 = {"utilities": utilities}
            call_func(
                query_strategy.update,
                candidates=x_t.reshape([1, -1]),
                queried_indices=queried_indices,
                budget_manager_param_dict=budget_manager_param_dict1,
            )
            # Same update applied to the never-queried strategy instance.
            call_func(
                query_strategy2.update,
                candidates=x_t.reshape([1, -1]),
                queried_indices=queried_indices,
                budget_manager_param_dict=budget_manager_param_dict2,
            )
            X_train.append(x_t)
            if len(queried_indices):
                y_train.append(y_t)
            else:
                y_train.append(clf.missing_label)
            clf.fit(X_train, y_train)
    def test_param(self):
        """Every strategy parameter must be stored and have a dedicated test."""
        not_test = ["self", "kwargs"]
        for qs_name in self.query_strategies:
            with self.subTest(msg="Param Test", qs_name=qs_name):
                # Get initial parameters.
                qs_class = self.query_strategies[qs_name]
                init_params = inspect.signature(qs_class).parameters.keys()
                init_params = list(init_params)
                # Get query parameters.
                query_params = inspect.signature(qs_class.query).parameters
                query_params = list(query_params.keys())
                # Check initial parameters.
                # Sentinel Dummy objects verify sklearn-style storage: each
                # constructor arg must land unchanged on a same-named attribute.
                values = [Dummy() for i in range(len(init_params))]
                qs_obj = qs_class(*values)
                for param, value in zip(init_params, values):
                    self.assertTrue(
                        hasattr(qs_obj, param),
                        msg=f'"{param}" not tested for __init__()',
                    )
                    self.assertEqual(getattr(qs_obj, param), value)
                # Get class to check.
                class_filename = path.basename(inspect.getfile(qs_class))[:-3]
                mod = "skactiveml.stream.tests.test" + class_filename
                mod = import_module(mod)
                test_class_name = "Test" + qs_class.__name__
                msg = f"{qs_name} has no test called {test_class_name}."
                self.assertTrue(hasattr(mod, test_class_name), msg=msg)
                test_obj = getattr(mod, test_class_name)
                # Check init parameters.
                for param in np.setdiff1d(init_params, not_test):
                    test_func_name = "test_init_param_" + param
                    self.assertTrue(
                        hasattr(test_obj, test_func_name),
                        msg="'{}()' missing for parameter '{}' of "
                        "__init__()".format(test_func_name, param),
                    )
                # Check query parameters.
                for param in np.setdiff1d(query_params, not_test):
                    test_func_name = "test_query_param_" + param
                    msg = (
                        f"'{test_func_name}()' missing for parameter "
                        f"'{param}' of query()"
                    )
                    self.assertTrue(hasattr(test_obj, test_func_name), msg)
class Dummy:
    """Inert sentinel object.

    Instances are distinct, attribute-free values used to verify that query
    strategies store constructor arguments unchanged.  The previous no-op
    ``__init__`` was redundant and has been removed; ``Dummy()`` behaves
    identically.
    """
| 35.056818 | 78 | 0.548244 |
ace82f3edaaeb58c55045c8df297e2241576236e | 943 | py | Python | matplotlib/sample_subplot2.py | zjhdota/practice | de28003e7adf6140dfc06a1ffa3a808e514dbbc0 | [
"MIT"
] | 1 | 2018-01-10T11:08:48.000Z | 2018-01-10T11:08:48.000Z | matplotlib/sample_subplot2.py | zjhdota/practice | de28003e7adf6140dfc06a1ffa3a808e514dbbc0 | [
"MIT"
] | 1 | 2021-01-31T04:21:48.000Z | 2021-01-31T04:21:48.000Z | matplotlib/sample_subplot2.py | zjhdota/practice | de28003e7adf6140dfc06a1ffa3a808e514dbbc0 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
# method 1:
plt.figure()
ax1 = plt.subplot2grid((3, 3), (0,0), colspan=3, rowspan=1)
ax2 = plt.subplot2grid((3, 3), (1,0), colspan=2, rowspan=1)
ax3 = plt.subplot2grid((3, 3), (1,2), colspan=1, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (2,0), colspan=1, rowspan=1)
ax5 = plt.subplot2grid((3, 3), (2,1), colspan=1, rowspan=1)
ax1.plot([1,2], [1,2])
ax2.plot([1,2], [1,2])
ax3.plot([1,2], [1,2])
ax4.plot([1,2], [1,2])
ax5.plot([1,2], [1,2])
ax1.set_title('ax1_title')
# method 2:
plt.figure()
gs = gridspec.GridSpec(3, 3)
ax1 = plt.subplot(gs[0, :])
ax2 = plt.subplot(gs[1, :2])
ax3 = plt.subplot(gs[1:, 2])
ax4 = plt.subplot(gs[-1, 0])
ax5 = plt.subplot(gs[-1, -2])
# method 3:
plt.figure()
# sahrexs 共享X轴
f, ((ax11),(ax12),(ax21, ax22)) = plt.subplots(2, 2, sharex=True, sharey=True)
ax11.scatter([1,2], [1,2])
plt.tight_layout()
plt.show()
| 21.930233 | 78 | 0.627784 |
ace82f5a36b911b5a6903bf9b2e6044dee1e1adb | 4,078 | py | Python | pre_commit/xargs.py | orcutt989/pre-commit | 75043079d0cc0d3ccb23ae0f378e0b04dd002a16 | [
"MIT"
] | null | null | null | pre_commit/xargs.py | orcutt989/pre-commit | 75043079d0cc0d3ccb23ae0f378e0b04dd002a16 | [
"MIT"
] | null | null | null | pre_commit/xargs.py | orcutt989/pre-commit | 75043079d0cc0d3ccb23ae0f378e0b04dd002a16 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import concurrent.futures
import contextlib
import math
import os
import subprocess
import sys
import six
from pre_commit import parse_shebang
from pre_commit.util import cmd_output_b
from pre_commit.util import cmd_output_p
def _environ_size(_env=None):
environ = _env if _env is not None else getattr(os, 'environb', os.environ)
size = 8 * len(environ) # number of pointers in `envp`
for k, v in environ.items():
size += len(k) + len(v) + 2 # c strings in `envp`
return size
def _get_platform_max_length(): # pragma: no cover (platform specific)
if os.name == 'posix':
maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()
maximum = max(min(maximum, 2 ** 17), 2 ** 12)
return maximum
elif os.name == 'nt':
return 2 ** 15 - 2048 # UNICODE_STRING max - headroom
else:
# posix minimum
return 2 ** 12
def _command_length(*cmd):
full_cmd = ' '.join(cmd)
# win32 uses the amount of characters, more details at:
# https://github.com/pre-commit/pre-commit/pull/839
if sys.platform == 'win32':
# the python2.x apis require bytes, we encode as UTF-8
if six.PY2:
return len(full_cmd.encode('utf-8'))
else:
return len(full_cmd.encode('utf-16le')) // 2
else:
return len(full_cmd.encode(sys.getfilesystemencoding()))
class ArgumentTooLongError(RuntimeError):
pass
def partition(cmd, varargs, target_concurrency, _max_length=None):
_max_length = _max_length or _get_platform_max_length()
# Generally, we try to partition evenly into at least `target_concurrency`
# partitions, but we don't want a bunch of tiny partitions.
max_args = max(4, math.ceil(len(varargs) / target_concurrency))
cmd = tuple(cmd)
ret = []
ret_cmd = []
# Reversed so arguments are in order
varargs = list(reversed(varargs))
total_length = _command_length(*cmd) + 1
while varargs:
arg = varargs.pop()
arg_length = _command_length(arg) + 1
if (
total_length + arg_length <= _max_length and
len(ret_cmd) < max_args
):
ret_cmd.append(arg)
total_length += arg_length
elif not ret_cmd:
raise ArgumentTooLongError(arg)
else:
# We've exceeded the length, yield a command
ret.append(cmd + tuple(ret_cmd))
ret_cmd = []
total_length = _command_length(*cmd) + 1
varargs.append(arg)
ret.append(cmd + tuple(ret_cmd))
return tuple(ret)
@contextlib.contextmanager
def _thread_mapper(maxsize):
if maxsize == 1:
yield map
else:
with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:
yield ex.map
def xargs(cmd, varargs, **kwargs):
    """A simplified implementation of xargs.

    Splits ``varargs`` into command-line-sized partitions, runs
    ``cmd + partition`` for each (possibly concurrently), and returns
    ``(retcode, stdout)`` where retcode is the maximum of all partition
    return codes and stdout is the concatenated output bytes.

    color: Make a pty if on a platform that supports it
    target_concurrency: Target number of partitions to run concurrently
    """
    # kwargs.pop keeps the signature py2-compatible (no keyword-only args);
    # remaining kwargs are forwarded to the command runner.
    color = kwargs.pop('color', False)
    target_concurrency = kwargs.pop('target_concurrency', 1)
    max_length = kwargs.pop('_max_length', _get_platform_max_length())
    cmd_fn = cmd_output_p if color else cmd_output_b
    retcode = 0
    stdout = b''
    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        # Missing executable: report its synthetic (retcode, stdout) pair.
        return e.to_output()[:2]
    partitions = partition(cmd, varargs, target_concurrency, max_length)
    def run_cmd_partition(run_cmd):
        # retcode=None: don't raise on failure; stderr folded into stdout.
        return cmd_fn(
            *run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs
        )
    threads = min(len(partitions), target_concurrency)
    with _thread_mapper(threads) as thread_map:
        results = thread_map(run_cmd_partition, partitions)
        for proc_retcode, proc_out, _ in results:
            # Overall return code is the worst (highest) partition code.
            retcode = max(retcode, proc_retcode)
            stdout += proc_out
    return retcode, stdout
| 29.128571 | 79 | 0.653261 |
ace82fb041e29ff5238889c701b305987a2e4ec7 | 2,029 | py | Python | .eggs/PyScaffold-2.5.6-py3.5.egg/pyscaffold/contrib/setuptools_scm/testing/test_functions.py | awacs/bpp3 | 4168563f93bd399de1d4521fc6bab87542143dd2 | [
"MIT"
] | null | null | null | .eggs/PyScaffold-2.5.6-py3.5.egg/pyscaffold/contrib/setuptools_scm/testing/test_functions.py | awacs/bpp3 | 4168563f93bd399de1d4521fc6bab87542143dd2 | [
"MIT"
] | null | null | null | .eggs/PyScaffold-2.5.6-py3.5.egg/pyscaffold/contrib/setuptools_scm/testing/test_functions.py | awacs/bpp3 | 4168563f93bd399de1d4521fc6bab87542143dd2 | [
"MIT"
] | null | null | null | import pytest
import pkg_resources
from setuptools_scm import dump_version
from setuptools_scm.version import guess_next_version, meta, format_version
class MockTime(object):
    """Stand-in time value whose formatting always yields the literal 'time'.

    Makes version strings containing a formatted timestamp deterministic.
    """
    def __format__(self, *_spec):
        # Ignore the format spec entirely.
        return 'time'
@pytest.mark.parametrize('tag, expected', [
    ('1.1', '1.2.dev0'),
    ('1.2.dev', '1.2.dev0'),
    ('1.1a2', '1.1a3.dev0'),
])
def test_next_tag(tag, expected):
    """guess_next_version bumps the last release component to a .dev0."""
    version = pkg_resources.parse_version(tag)
    assert guess_next_version(version, 0) == expected
# Fixture versions covering the interesting (distance, dirty) combinations.
# NOTE(review): argument order assumed to be meta(tag, distance, dirty) based
# on the key names -- confirm against setuptools_scm.version.meta.
VERSIONS = {
    'exact': meta('1.1', None, False),
    'zerodistance': meta('1.1', 0, False),
    'dirty': meta('1.1', None, True),
    'distance': meta('1.1', 3, False),
    'distancedirty': meta('1.1', 3, True),
}
@pytest.mark.parametrize('version,scheme,expected', [
    ('exact', 'guess-next-dev node-and-date', '1.1'),
    ('zerodistance', 'guess-next-dev node-and-date', '1.2.dev0+nNone'),
    ('dirty', 'guess-next-dev node-and-date', '1.2.dev0+nNone.dtime'),
    ('distance', 'guess-next-dev node-and-date', '1.2.dev3+nNone'),
    ('distancedirty', 'guess-next-dev node-and-date', '1.2.dev3+nNone.dtime'),
    ('exact', 'post-release node-and-date', '1.1'),
    ('zerodistance', 'post-release node-and-date', '1.1.post0+nNone'),
    ('dirty', 'post-release node-and-date', '1.1.post0+nNone.dtime'),
    ('distance', 'post-release node-and-date', '1.1.post3+nNone'),
    ('distancedirty', 'post-release node-and-date', '1.1.post3+nNone.dtime'),
])
def test_format_version(version, monkeypatch, scheme, expected):
    """Cross-product of VERSIONS fixtures and version/local scheme pairs."""
    version = VERSIONS[version]
    # Freeze the timestamp so the '.dtime' local part is deterministic.
    monkeypatch.setattr(version, 'time', MockTime())
    # ``scheme`` encodes "<version_scheme> <local_scheme>" in one string.
    vs, ls = scheme.split()
    assert format_version(
        version,
        version_scheme=vs,
        local_scheme=ls) == expected
def test_dump_version_doesnt_bail_on_value_error(tmpdir):
    """dump_version must raise a descriptive ValueError on unknown formats.

    "VERSION" has no recognized template extension, so the error message is
    expected to start with "bad file format:".
    """
    write_to = "VERSION"
    version = VERSIONS['exact']
    with pytest.raises(ValueError) as exc_info:
        dump_version(tmpdir.strpath, version, write_to)
    assert str(exc_info.value).startswith("bad file format:")
| 34.389831 | 78 | 0.656974 |
ace8304f544225a14041e2898482b7c63ad4d392 | 1,044 | py | Python | isi_sdk_8_0_1/test/test_compatibilities_ssd_active_id_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_0_1/test/test_compatibilities_ssd_active_id_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_0_1/test/test_compatibilities_ssd_active_id_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.compatibilities_ssd_active_id_params import CompatibilitiesSsdActiveIdParams # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
# Auto-generated (swagger-codegen) unit-test stub; the actual assertions are
# still to be filled in (see FIXME below).
class TestCompatibilitiesSsdActiveIdParams(unittest.TestCase):
    """CompatibilitiesSsdActiveIdParams unit test stubs"""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testCompatibilitiesSsdActiveIdParams(self):
        """Test CompatibilitiesSsdActiveIdParams"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_0_1.models.compatibilities_ssd_active_id_params.CompatibilitiesSsdActiveIdParams() # noqa: E501
        pass
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| 25.463415 | 124 | 0.745211 |
ace830c8a11939830dc44e877c13e4de852f4e55 | 1,189 | py | Python | profiles_api/serializers.py | jcurt11n/profiles-rest-api | f11ae2183ca8f9385665730ab9c86f2fafa18bcc | [
"MIT"
] | null | null | null | profiles_api/serializers.py | jcurt11n/profiles-rest-api | f11ae2183ca8f9385665730ab9c86f2fafa18bcc | [
"MIT"
] | null | null | null | profiles_api/serializers.py | jcurt11n/profiles-rest-api | f11ae2183ca8f9385665730ab9c86f2fafa18bcc | [
"MIT"
] | null | null | null | from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing out APIView"""
    # Short free-text name; capped at 10 characters by the serializer.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """ Serializers a user profile object"""
    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # Password is accepted on input (masked in the browsable API) but
        # never echoed back in responses.
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'}
            }
        }
    def create(self, validated_data):
        """Create and return a new user"""
        # Use the manager's create_user so the password is hashed,
        # not stored in plain text.
        user = models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password']
        )
        return user
    def update(self, instance, validated_data):
        """Handle Updating user account"""
        # Pop the password so the parent update() can't write it verbatim;
        # set_password hashes it properly.
        if 'password' in validated_data:
            password = validated_data.pop('password')
            instance.set_password(password)
        return super().update(instance, validated_data)
| 28.309524 | 57 | 0.618167 |
ace831180c1ea53e09dbe9c48289215ba32031cf | 6,689 | py | Python | RegSimple/AutoDomainFinder_v2.0.py | kxu776/WebScrapingMirror | 85a7dbb61baa29ce805b1e41d7ab21ee091d0bb7 | [
"MIT"
] | null | null | null | RegSimple/AutoDomainFinder_v2.0.py | kxu776/WebScrapingMirror | 85a7dbb61baa29ce805b1e41d7ab21ee091d0bb7 | [
"MIT"
] | null | null | null | RegSimple/AutoDomainFinder_v2.0.py | kxu776/WebScrapingMirror | 85a7dbb61baa29ce805b1e41d7ab21ee091d0bb7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: ujejskik
"""
'''
---------- NOTES ----------
REGSIMPLE Scraper v2.0 - This script intends to scrape websites for PDFs, which are subsequently filtered for specific terms.
SEE: CSVreader.py for excel filtering example.
It accepts a CSV input containing a list of countries and their corresponding financial regulator/central bank.
SEE: FinReg.csv
It then iterates through every link in the input CSV, recursively scraping every site and returning a list of every PDF link found on the site (In dictionary form).
This data is written to a CSV file, containing the PDFs URL, Country and Title. (And potentially Year of publication in the future).
----------------------------
'''
from bs4 import BeautifulSoup, SoupStrainer
from urllib.request import urlopen
import urllib.request as ul
from urllib.parse import urlparse, urljoin
import time
import tkinter
from tkinter.filedialog import askopenfilename
from random import randint
from Blacklists import BLACKLIST, INVALID_TYPES
import csv
# Maximum recursion depth for the crawl (levels of links followed per site).
R_DEPTH = 3
### ``visited`` is a set for O(1) membership tests (vs O(n) for a list).
# Set of webpage URLs already crawled.
visited = set()
# List of dicts, one per PDF link found ({'Country', 'URL', 'Title', ...}).
pdfLinks = []
# URL of webpage currently being scraped (used for progress reporting on abort).
currentURL= str()
# Browser-like request headers for user-agent spoofing.
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
       'Accept-Encoding': 'none',
       'Accept-Language': 'en-US,en;q=0.8',
       'Connection': 'keep-alive'}
# Recursively crawls entire website for all PDF links
def recPDFCrawl(url, r_level):
    """Depth-first crawl of *url*, collecting PDF links into ``pdfLinks``.

    Follows only links on the same domain that pass ``linkValid``, up to
    ``R_DEPTH`` levels deep, waiting ~1-5s between requests.  Results and
    progress are tracked via the module globals ``pdfLinks`` and ``visited``.
    """
    global currentURL
    currentURL = url
    # Mark current URL as visited before expanding it.
    visited.add(url)
    parsed_uri = urlparse(url)
    # Parses domain substring
    domain = '{uri.netloc}'.format(uri=parsed_uri)
    try:
        # Generates list of all link dicts on the page
        urls = getlinks(url)
        unvisited_domains = []
        for linkDict in urls:
            # Case-insensitive extension check: the original tested only
            # '.pdf'/'.PDF' and silently missed mixed-case links like '.Pdf'.
            if '.pdf' in str(linkDict["URL"]).lower():
                pdfLinks.append(linkDict)
            # Queue unvisited internal links for expansion.
            elif ((domain in linkDict["URL"]) and linkValid(linkDict["URL"]) and not(linkDict["URL"] in visited)):
                unvisited_domains.append(linkDict["URL"])
        # Stop when the maximum depth is reached or nothing is left to visit.
        if (not unvisited_domains or (r_level == R_DEPTH)):
            return
        else:
            for link in unvisited_domains:
                if not(link in visited):
                    # Random 0.99-5.01s wait to avoid hammering the server.
                    time.sleep(randint(99, 501) / float(100))
                    recPDFCrawl(link, r_level + 1)
    except Exception as e:
        # Best-effort crawl: log the failing URL and keep going.
        print(str(url) + ": " + str(e) + str(time.strftime(" %I:%M:%S")))
        pass
    return
# Generates list of all links on a webpage - Returns linkDict
def getlinks(url):
    """Fetch *url* and return one dict per anchor tag found.

    Each dict has keys 'Country', 'URL', 'Title', 'Year', 'Document Type';
    only 'URL' and 'Title' are filled here, the rest are left blank for the
    caller to populate.
    """
    request = ul.Request(url, headers=hdr)
    page = urlopen(request)
    #Creates soup of all anchor tags (i.e. links) on webpage
    soup = BeautifulSoup(page, 'lxml', parse_only = SoupStrainer('a'))
    urls = []
    # Parses all urls into list
    for anchor in soup.findAll('a', href=True):
        linkDict = {'Country': "", 'URL': "", 'Title': "", 'Year' : "", 'Document Type' : "" }
        #Assumes text in anchor tag as title
        linkDict['Title'] = anchor.string
        link = anchor.get('href')
        #If link is relative (no scheme), prefix the page URL to form a complete URL.
        if not(link.startswith('http')):
            link = urljoin(url, link)
        linkDict['URL'] = link
        urls.append(linkDict)
    return urls
#Return boolean result of link validity check
def linkValid(link):
    """Return True when *link* is neither blacklisted nor an excluded file type."""
    # Return the boolean expression directly instead of the redundant
    # if/else True/False branches.
    return not blacklisted(link, BLACKLIST) and validType(link)
#Checks if link belongs to manually blacklisted domains.
#Takes as params: the link to be checked, and a manually constructed blacklist(List of banned domains as strings)
def blacklisted(link, blacklist):
    """Return True when *link* falls under any manually banned domain.

    *blacklist* is a list of domain substrings; membership is a plain
    substring test against the full URL.
    """
    return any(banned_domain in link for banned_domain in blacklist)
#Filters out non-html urls to prevent recursive scraping dead-end
#Takes as param: link to be checked
def validType(link):
    """Return False for URLs whose final dot-suffix is a known non-HTML type.

    Filters out non-HTML urls to prevent a recursive scraping dead-end.
    """
    suffix = '.' + link.split('.')[-1]
    return suffix not in INVALID_TYPES
#File chooser for inputFile
# ---- Main script: read (Country, URL) rows from a CSV, crawl each site ----
# File chooser for inputFile (root Tk window hidden so only the dialog shows).
tkinter.Tk().withdraw()
inputfile = askopenfilename()
# Column names for output CSV file.
fieldnames = ['Country', 'URL', 'Title', 'Year', 'Document Type']
# Opens input CSV, containing list of Countries and URLs of their central bank/regulator.
with open(inputfile) as inputfile:
    reader = csv.DictReader(inputfile)
    with open('FocusedResults7.csv', 'w', encoding='utf-8') as outputfile:
        writer = csv.DictWriter(outputfile, fieldnames, extrasaction='ignore', lineterminator='\n')
        # Writes column names to output file.
        writer.writeheader()
        print("Initialising scraping...")
        try:
            # For every row of input CSV.
            for inputDict in reader:
                print(inputDict['URL'])
                print(str(time.strftime(" %I:%M:%S")))
                # Clears URL collection variables between scraping each site
                # (visited/pdfLinks are module-level sets/lists shared with the crawler).
                visited.clear()
                pdfLinks.clear()
                # Reads country corresponding to link from csv.
                inputCountry = inputDict['Country']
                # Gathers all PDF links on site, beginning with URL read from CSV.
                recPDFCrawl(inputDict['URL'], 0)
                # Writes every PDF found to a separate row in outputfile,
                # along with its corresponding country.
                for pdfDict in pdfLinks:
                    pdfDict['Country'] = inputCountry
                    writer.writerow(pdfDict)
        except (KeyboardInterrupt, SystemExit):
            # On manual abort, flush output and report where the crawl stopped.
            outputfile.close()
            print(currentURL)
            raise
        except Exception as e:
            # Best-effort: log the error and keep whatever was written so far.
            print(str(e) + str(time.strftime(" %I:%M:%S")))
            pass
        print("Done.")
        # NOTE(review): redundant close inside the with-block -- harmless, kept as-is.
        outputfile.close()
input("Press Enter to continue...")
ace831335b84872998f7fc1dcad79d43be2e43e2 | 5,763 | py | Python | spfy/asynch/result.py | alin23/spotifycli | 7d91bc36de5d6e39ebd74c61d2975f456bbdf405 | [
"Apache-2.0",
"MIT"
] | 11 | 2017-12-31T20:43:53.000Z | 2021-08-05T21:49:10.000Z | spfy/asynch/result.py | alin23/spfy | 7d91bc36de5d6e39ebd74c61d2975f456bbdf405 | [
"Apache-2.0",
"MIT"
] | null | null | null | spfy/asynch/result.py | alin23/spfy | 7d91bc36de5d6e39ebd74c61d2975f456bbdf405 | [
"Apache-2.0",
"MIT"
] | null | null | null | import random
from urllib.parse import parse_qs, urlparse, urlunparse
import addict
from cached_property import cached_property
from .. import config
from ..constants import API
from . import limited_as_completed
LOCAL_ATTRIBUTES = {"_client", "_next_result", "_next_result_available", "_playable"}
class Playable:
    """Adapter that turns a SpotifyResult into a Spotify 'play' request payload."""

    def __init__(self, result):
        self.result = result
        self.client = self.result._client

    async def play(self, device=None, index=None):
        """PUT a play request for this result, optionally targeting *device*."""
        return await self.result._put_with_params(
            dict(device_id=device, payload=self.get_data(index)), url=API.PLAY.value
        )

    def get_data(self, index=None):
        """Build the play payload: either a list of track URIs or one context URI.

        *index* picks a specific item from the result; when None a random
        item is chosen.
        """
        data = {}
        # Track-style results play as an explicit URI list.
        if "tracks" in self.result or "audio_features" in self.result:
            data["uris"] = list(map(self.client._get_track_uri, self.result))
            return data
        item = self.result[index] if index is not None else random.choice(self.result)
        # Otherwise play a single context (playlist / artist / album / ...);
        # branch order matters: the first matching key wins.
        if "playlists" in self.result:
            data["context_uri"] = self.client._get_playlist_uri(item)
        elif "artists" in self.result:
            data["context_uri"] = self.client._get_artist_uri(item)
        elif "albums" in self.result:
            data["context_uri"] = self.client._get_album_uri(item)
        elif "items" in self.result:
            data["context_uri"] = self.client._get_uri(item.type, item)
        elif self.result.type and self.result.type in {"album", "artist", "playlist"}:
            data["context_uri"] = self.client._get_uri(self.result.type, self.result)
        elif item.type == "track":
            data["uris"] = list(map(self.client._get_track_uri, self.result))
        return data
# pylint: disable=too-few-public-methods
class SpotifyResultIterator:
    """Async iterator over a paged result: yields the first page's items,
    then streams items from the remaining pages fetched concurrently
    (bounded by config.http.concurrent_connections)."""

    def __init__(self, result, limit=None, ignore_exceptions=False):
        self.result = result
        self.limit = limit
        # Query-parameter sets for every page after the first.
        self.params_list = self.result.get_next_params_list(limit)
        # Lazily-created coroutines, one request per remaining page.
        self.requests = (
            self.result._get_with_params(params) for params in self.params_list
        )
        self.responses = limited_as_completed(
            self.requests,
            config.http.concurrent_connections,
            ignore_exceptions=ignore_exceptions,
        )

    def __aiter__(self):
        return self.iterate()

    async def iterate(self):
        # First yield the items already present in the initial page...
        for item in self.result:
            yield item
        # ...then stream items from the concurrently completed page fetches.
        # pylint: disable=not-an-iterable
        async for responses in self.responses:
            if responses is None:
                # ignore_exceptions=True yields None for failed requests.
                continue
            for response in responses:
                yield response
class SpotifyResult(addict.Dict):
    """Attribute-accessible wrapper over a Spotify API JSON response.

    Adds collection iteration, pagination helpers (next/all/iterall) and
    playback via the attached Playable. The _client/_next_result/_playable
    helper attributes are hidden from dict-style views (see LOCAL_ATTRIBUTES).
    """

    # Keys whose contents are treated as the result's item collection,
    # checked in this priority order.
    ITER_KEYS = (
        "items",
        "artists",
        "tracks",
        "albums",
        "audio_features",
        "playlists",
        "devices",
    )

    def __init__(self, *args, _client=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._client = _client
        self._next_result = None
        self._playable = Playable(self)

    def __missing__(self, name):
        # Mirror addict's behaviour: missing keys yield chainable empty Dicts.
        return addict.Dict(__parent=self, __key=name)

    def __iter__(self):
        # Iterate the first recognised collection, unwrapping paging
        # objects of the form {"items": [...]} transparently.
        for key in self.ITER_KEYS:
            if key in self:
                if "items" in self[key]:
                    return iter(self[key]["items"])
                return iter(self[key])
        return super().__iter__()

    def __getitem__(self, item):
        # Integer indexing selects the n-th element of the collection.
        # NOTE(review): it returns iter(element), not the element itself --
        # confirm callers expect an iterator here.
        if isinstance(item, int):
            for key in self.ITER_KEYS:
                if key in self:
                    if "items" in self[key]:
                        return iter(self[key]["items"][item])
                    return iter(self[key][item])
        return super().__getitem__(item)

    @classmethod
    def _hook(cls, item):
        # Convert nested mappings to plain addict.Dicts (not SpotifyResult),
        # recursing into lists/tuples while preserving their type.
        if isinstance(item, dict):
            return addict.Dict(item)
        if isinstance(item, (list, tuple)):
            return type(item)(cls._hook(elem) for elem in item)
        return item

    def items(self):
        # Hide the helper attributes from dict-style views.
        return filter(lambda i: i[0] not in LOCAL_ATTRIBUTES, super().items())

    def keys(self):
        return super().keys() - LOCAL_ATTRIBUTES

    def values(self):
        return [v for k, v in self.items()]

    async def play(self, device=None, index=None):
        """Start playback of this result (delegates to Playable)."""
        return await self._playable.play(device, index)

    @cached_property
    def base_url(self):
        # self.href stripped of params/query/fragment.
        return urlunparse([*urlparse(self.href)[:3], "", "", ""])

    async def _get_with_params(self, params, url=None):
        return await self._client._get(url or self.base_url, **params)

    async def _put_with_params(self, params, url=None):
        return await self._client._put(url or self.base_url, **params)

    def get_next_params_list(self, limit=None):
        """Return query-param dicts covering every remaining page, or []."""
        if "href" in self and "next" in self and self["next"] and self["href"]:
            max_limit = limit or 50
            url = urlparse(self["href"])
            params = {k: v[0] for k, v in parse_qs(url.query).items()}
            limit = int(params.pop("limit", 20))
            offset = int(params.pop("offset", 0))
            # One param set per page, stepping by max_limit up to self.total.
            return [
                {**params, "limit": max_limit, "offset": off}
                for off in range(offset + limit, self.total, max_limit)
            ]
        return []

    async def all(self, limit=None):
        """Fetch and return every item across all pages as a list."""
        # pylint: disable=not-an-iterable
        return [item async for item in self.iterall(limit)]

    async def next(self):
        """Return the next page's result, a cached one, or None at the end."""
        if "_next_result" in self and self._next_result:
            return self._next_result
        if "next" in self and self["next"]:
            return await self._client._get(self["next"])
        return None

    def iterall(self, limit=None, ignore_exceptions=False):
        """Return an async iterator over all items in all pages."""
        return SpotifyResultIterator(
            self, limit=limit, ignore_exceptions=ignore_exceptions
        )
| 31.664835 | 86 | 0.602117 |
ace831a8970ac8d18c8af105b6dcf3bdab8b8c61 | 8,515 | py | Python | old_game/container.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 53 | 2015-07-03T21:25:36.000Z | 2022-02-18T23:08:38.000Z | old_game/container.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 5 | 2015-07-03T21:27:12.000Z | 2016-12-08T14:40:38.000Z | old_game/container.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 14 | 2016-02-02T06:49:51.000Z | 2022-02-24T13:24:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Anne Archibald <peridot.faceted@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
class ContainerError(ValueError):
    """Error signaling something went wrong with container handling"""
    # The docstring alone is a valid class body; the redundant `pass` was removed.
class Container(object):
    """A container is an object that manages objects it contains.

    The objects in a container each have a .container attribute that
    points to the container. This attribute is managed by the container
    itself.

    This class is a base class that provides common container functionality,
    to be used to simplify implementation of list and dict containers.
    """

    def _set_container(self, item):
        # Steal the item from its previous container, if it has one.
        # NOTE(review): assumes the previous container supports .remove(item);
        # ContainerDict defines no remove() -- confirm items never migrate
        # directly out of a dict container.
        if hasattr(item, "container") and item.container not in (None, self):
            # raise ContainerError("Item %s was added to container %s but was already in container %s" % (item, self, item.container))
            item.container.remove(item)
        item.container = self

    def _unset_container(self, item):
        # Refuse to detach an item this container does not actually hold.
        if item.container is not self:
            raise ContainerError("Item %s was removed from container %s but was not in it" % (item, self))
        item.container = None

    def _set_container_multi(self, items):
        """Put items in the container in an all-or-nothing way"""
        # r accumulates the items attached so far; setting it to None marks
        # success so the finally-block performs no rollback.
        r = []
        try:
            for i in items:
                self._set_container(i)
                r.append(i)
            r = None
        finally:  # Make sure items don't get added to this if any fail
            if r is not None:
                for i in r:
                    try:
                        self._unset_container(i)
                    except ContainerError:
                        pass

    def _unset_container_multi(self, items):
        """Remove items from the container in an all-or-nothing way"""
        # Same rollback-sentinel pattern as _set_container_multi, inverted:
        # on failure, re-attach the items that were already detached.
        r = []
        try:
            for i in items:
                self._unset_container(i)
                r.append(i)
            r = None
        finally:
            if r is not None:
                for i in r:
                    try:
                        self._set_container(i)
                    except ContainerError:
                        pass
class ContainerList(list, Container):
    """A ContainerList is a list whose children know they're in it.

    Each element in the ContainerList has a .container attribute which points
    to the ContainerList itself. This container pointer is maintained automatically.
    """

    def __init__(self, items=[], owner=None):
        # NOTE(review): mutable default [] is shared across calls; safe only
        # because it is never mutated here -- confirm no caller relies otherwise.
        list.__init__(self, items)
        self._set_container_multi(items)
        self.owner = owner

    def __repr__(self):
        return "<CL %s>" % list.__repr__(self)

    def append(self, item):
        # Attach first: a ContainerError aborts before the list changes.
        self._set_container(item)
        list.append(self, item)

    def extend(self, items):
        self._set_container_multi(items)
        list.extend(self, items)

    def insert(self, i, item):
        self._set_container(item)
        list.insert(self, i, item)

    def remove(self, item):
        self._unset_container(item)
        list.remove(self, item)

    def pop(self, i=-1):
        self._unset_container(self[i])
        return list.pop(self, i)

    # These don't work because they make the elements part of more than one list, or one list more than once
    def __add__(self, other):
        raise NotImplementedError

    def __radd__(self, other):
        raise NotImplementedError

    def __imul__(self, other):
        raise NotImplementedError

    def __mul__(self, other):
        raise NotImplementedError

    def __rmul__(self, other):
        raise NotImplementedError

    # only works if other is not also a Container
    def __iadd__(self, other):
        self.extend(other)
        return self

    def __setitem__(self, key, value):
        # FIXME: check slices work okay
        # Detach the replaced element(s), attach the new one(s); on failure
        # re-attach the originals and re-raise so the list stays consistent.
        if isinstance(key, slice):
            self._unset_container_multi(self[key])
            try:
                self._set_container_multi(value)
            except ContainerError:
                self._set_container_multi(self[key])
                raise
        else:
            self._unset_container(self[key])
            try:
                self._set_container(value)
            except ContainerError:
                self._set_container(self[key])
                raise
        list.__setitem__(self, key, value)

    def __delitem__(self, key):
        # FIXME: check slices work okay
        if isinstance(key, slice):
            self._unset_container_multi(self[key])
        else:
            self._unset_container(self[key])
        list.__delitem__(self, key)

    # Needed for python2, forbidden for python3
    # (harmless on py3: __delslice__ is simply never called there)
    def __delslice__(self, i, j):
        del self[slice(i, j, None)]
class ContainerDict(dict, Container):
    """A ContainerDict is a dict whose children know they're in it.

    Each element in the ContainerDict has a .container attribute which points
    to the ContainerDict itself. This container pointer is maintained automatically.
    """

    def __init__(self, contents=None, **kwargs):
        if contents is None:
            dict.__init__(self, **kwargs)
        else:
            dict.__init__(self, contents, **kwargs)
        self._set_container_multi(list(self.values()))

    def __repr__(self):
        return "<CD %s>" % dict.__repr__(self)

    def __setitem__(self, key, value):
        # Detach the value being replaced, attach the new one; on failure
        # restore the old attachment and re-raise.
        if key in self:
            self._unset_container(self[key])
        try:
            self._set_container(value)
        except ContainerError:
            if key in self:
                self._set_container(self[key])
            raise
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        if key in self:
            self._unset_container(self[key])
        dict.__delitem__(self, key)

    def pop(self, key):
        if key in self:
            self._unset_container(self[key])
        return dict.pop(self, key)

    def popitem(self):
        key, value = dict.popitem(self)
        self._unset_container(value)
        return key, value

    def setdefault(self, key, default=None):
        # NOTE(review): when the key is absent, _set_container assigns
        # default.container -- calling this with default=None (or any object
        # without attribute assignment) would raise AttributeError; confirm
        # callers always pass a container-aware default.
        if key not in self:
            self._set_container(default)
        dict.setdefault(self, key, default)

    def update(self, other):
        # Route every insertion through __setitem__ so container pointers
        # stay consistent.
        for (k, v) in list(other.items()):
            self[k] = v
if __name__ == '__main__':
    # Smoke-test / demo of ContainerList and ContainerDict invariants:
    # each expected ContainerError is asserted via try/except/else.

    class Gear(object):
        # Minimal item type: carries only a name and a container pointer.
        def __init__(self, name, container=None):
            self.name = name
            self.container = container

        def __repr__(self):
            return "<G " + str(self.name) + ">"

    gears = [Gear(n) for n in range(10)]
    a = Gear("A")
    b = Gear("B")
    c = Gear("C")
    d = Gear("D")
    e = Gear("E")
    p = ContainerList([a, b, c])
    print(p)
    # Re-adding an item the list already owns must fail.
    try:
        p.append(a)
    except ContainerError as err:
        print(err)
    else:
        raise AssertionError
    print(p[1])
    print(p[::2])
    p[1] = d
    print(p)
    p[1] = b
    p[::2] = [d, e]
    print(p)
    del p[:]
    p2 = ContainerList([a, b, c])
    print(p2)
    p2.extend([d, e])
    print(p2)
    print(p2.pop())
    print(p2)
    p2.remove(d)
    print(p2)
    p2 += [d, e]
    print(p2)
    # Items still owned by p2 cannot join a dict container.
    try:
        d = ContainerDict(a=a, b=b, c=c)
    except ContainerError as err:
        print(err)
    else:
        raise AssertionError
    del p2[:]
    # NOTE(review): from here on, the Gear bound to `d` is shadowed by the dict.
    d = ContainerDict(a=a, b=b, c=c)
    print(d)
    print(d["a"])
    d["a"] = a
    # Assigning an item already owned by this dict under another key must fail.
    try:
        d["a"] = b
    except ContainerError as err:
        print(err)
    else:
        raise AssertionError
    del d["a"]
    d["a"] = a
    d.pop("a")
    print(d)
    d["a"] = a
    k, v = d.popitem()
    d[k] = v
    d.setdefault("e", e)
    d.setdefault("e", e)
    print(d)
    del d["e"]
    d.update(dict(e=e))
    print(d)
| 29.362069 | 133 | 0.582854 |
ace8321542e358d28c3a70ed17bcfee6672a7992 | 34,025 | py | Python | BootloaderCorePkg/Tools/ConfigEditor.py | cshur/slimbootloader | 30e24581266f98c68c8bcf9c5adf0b88b769b931 | [
"BSD-2-Clause-NetBSD",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-2-Clause-Patent"
] | 299 | 2018-09-13T23:17:25.000Z | 2022-03-19T14:25:54.000Z | BootloaderCorePkg/Tools/ConfigEditor.py | cshur/slimbootloader | 30e24581266f98c68c8bcf9c5adf0b88b769b931 | [
"BSD-2-Clause-NetBSD",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-2-Clause-Patent"
] | 494 | 2018-09-18T19:31:55.000Z | 2022-03-30T16:52:52.000Z | BootloaderCorePkg/Tools/ConfigEditor.py | cshur/slimbootloader | 30e24581266f98c68c8bcf9c5adf0b88b769b931 | [
"BSD-2-Clause-NetBSD",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-2-Clause-Patent"
] | 142 | 2018-09-13T23:43:17.000Z | 2022-03-25T03:40:13.000Z | ## @ ConfigEditor.py
#
# Copyright (c) 2018 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import marshal
from pathlib import Path
sys.dont_write_bytecode = True
import tkinter
import tkinter.ttk as ttk
import tkinter.messagebox as messagebox
import tkinter.filedialog as filedialog
from GenCfgData import CGenCfgData, bytes_to_value, bytes_to_bracket_str, value_to_bytes, array_str_to_value
class create_tool_tip(object):
    '''
    create a tooltip for a given widget
    '''
    # Class-wide guard so only one tooltip window exists at a time.
    in_progress = False

    def __init__(self, widget, text=''):
        self.top_win = None
        self.widget = widget
        self.text = text
        # Show on hover-in, hide on hover-out.
        self.widget.bind("<Enter>", self.enter)
        self.widget.bind("<Leave>", self.leave)

    def enter(self, event=None):
        if self.in_progress:
            return
        if self.widget.winfo_class() == 'Treeview':
            # Only show help when cursor is on row header.
            rowid = self.widget.identify_row(event.y)
            if rowid != '':
                return
        else:
            x, y, cx, cy = self.widget.bbox("insert")
        # Place the tip near the widget's top-left, nudged below the cursor
        # row so it does not sit under the pointer.
        cursor = self.widget.winfo_pointerxy()
        x = self.widget.winfo_rootx() + 35
        y = self.widget.winfo_rooty() + 20
        if cursor[1] > y and cursor[1] < y + 20:
            y += 20
        # creates a toplevel window
        self.top_win = tkinter.Toplevel(self.widget)
        # Leaves only the label and removes the app window
        self.top_win.wm_overrideredirect(True)
        self.top_win.wm_geometry("+%d+%d" % (x, y))
        label = tkinter.Message(self.top_win,
                                text=self.text,
                                justify='left',
                                background='bisque',
                                relief='solid',
                                borderwidth=1,
                                font=("times", "10", "normal"))
        label.pack(ipadx=1)
        self.in_progress = True

    def leave(self, event=None):
        # Destroy the tip window (if any) and clear the guard.
        if self.top_win:
            self.top_win.destroy()
        self.in_progress = False
class validating_entry(tkinter.Entry):
    """Hex-editing Entry overlaid on a custom_table cell.

    Validates keystrokes as hexadecimal, normalises the value to the cell's
    byte width, and pushes each accepted value back into the parent table.
    """

    def __init__(self, master, **kw):
        tkinter.Entry.__init__(*(self, master), **kw)
        self.parent = master
        self.old_value = ''
        self.last_value = ''
        self.variable = tkinter.StringVar()
        # Re-validate on every write to the entry's variable (each keystroke).
        self.variable.trace("w", self.callback)
        self.config(textvariable=self.variable)
        self.config({"background": "#c0c0c0"})
        self.bind("<Return>", self.move_next)
        self.bind("<Tab>", self.move_next)
        self.bind("<Escape>", self.cancel)
        # Deletion keys are swallowed: editing is overwrite-style.
        for each in ['BackSpace', 'Delete']:
            self.bind("<%s>" % each, self.ignore)
        self.display(None)

    def ignore(self, even):
        # "break" stops the event from reaching the default Entry handler.
        return "break"

    def move_next(self, event):
        # Commit and jump to the next editable cell (wraps past the last).
        if self.row < 0:
            return
        row, col = self.row, self.col
        txt, row_id, col_id = self.parent.get_next_cell(row, col)
        self.display(txt, row_id, col_id)
        return "break"

    def cancel(self, event):
        # Restore the pre-edit value and hide the editor.
        self.variable.set(self.old_value)
        self.display(None)

    def display(self, txt, row_id='', col_id=''):
        """Show the editor over cell (row_id, col_id); hide it when txt is None."""
        if txt is None:
            self.row = -1
            self.col = -1
            self.place_forget()
        else:
            # Treeview ids are 1-based; the row id is hex after its first
            # character, the column id is '#N'.
            row = int('0x' + row_id[1:], 0) - 1
            col = int(col_id[1:]) - 1
            self.row = row
            self.col = col
            self.old_value = txt
            self.last_value = txt
            x, y, width, height = self.parent.bbox(row_id, col)
            self.place(x=x, y=y, w=width)
            self.variable.set(txt)
            self.focus_set()
            self.icursor(0)

    def callback(self, *Args):
        # Push each validated keystroke into the table cell; invalid input
        # leaves last_value (and hence the entry text) unchanged.
        cur_val = self.variable.get()
        new_val = self.validate(cur_val)
        if new_val is not None and self.row >= 0:
            self.last_value = new_val
            self.parent.set_cell(self.row, self.col, new_val)
        self.variable.set(self.last_value)

    def validate(self, value):
        """Return *value* normalised to the cell's hex width, or None if not hex."""
        if len(value) > 0:
            try:
                int(value, 16)
            # NOTE(review): bare except also swallows non-ValueError errors;
            # narrowing to ValueError would be safer.
            except:
                return None
        # Normalize the cell format
        self.update()
        cell_width = self.winfo_width()
        max_len = custom_table.to_byte_length(cell_width) * 2
        cur_pos = self.index("insert")
        if cur_pos == max_len + 1:
            # Typed past the end: keep the most recent digits.
            value = value[-max_len:]
        else:
            value = value[:max_len]
        if value == '':
            value = '0'
        # Zero-pad to the full cell width, uppercase hex.
        fmt = '%%0%dX' % max_len
        return fmt % int(value, 16)
class custom_table(ttk.Treeview):
    """Hex-grid widget presenting a binary blob as an editable table.

    Column headers are 'Name:ByteLen' strings; each row holds one record of
    sum(byte lengths) bytes. Cell editing is delegated to validating_entry,
    and clicking the 'LOAD' header cell imports a binary file.
    """

    # Pixel padding per cell and per hex character, used to convert between
    # cell pixel width and byte length.
    _Padding = 20
    _Char_width = 6

    def __init__(self, parent, col_hdr, bins):
        cols = len(col_hdr)
        col_byte_len = []
        for col in range(cols):  # Columns
            col_byte_len.append(int(col_hdr[col].split(':')[1]))
        byte_len = sum(col_byte_len)
        # Rows needed to display len(bins) bytes, rounded up.
        rows = (len(bins) + byte_len - 1) // byte_len
        self.rows = rows
        self.cols = cols
        self.col_byte_len = col_byte_len
        self.col_hdr = col_hdr
        self.size = len(bins)
        self.last_dir = ''
        style = ttk.Style()
        style.configure("Custom.Treeview.Heading", font=('calibri', 10, 'bold'), foreground="blue")
        ttk.Treeview.__init__(self, parent, height=rows, columns=[''] + col_hdr, show='headings', style="Custom.Treeview", selectmode='none')
        self.bind("<Button-1>", self.click)
        self.bind("<FocusOut>", self.focus_out)
        self.entry = validating_entry(self, width=4, justify=tkinter.CENTER)
        # Column 0 doubles as a "load binary from file" button.
        self.heading(0, text='LOAD')
        self.column(0, width=60, stretch=0, anchor=tkinter.CENTER)
        for col in range(cols):  # Columns
            text = col_hdr[col].split(':')[0]
            byte_len = int(col_hdr[col].split(':')[1])
            self.heading(col + 1, text=text)
            self.column(col + 1, width=self.to_cell_width(byte_len), stretch=0, anchor=tkinter.CENTER)
        # Populate rows: first value is the record offset, rest are hex cells.
        idx = 0
        for row in range(rows):  # Rows
            text = '%04X' % (row * len(col_hdr))
            vals = ['%04X:' % (cols * row)]
            for col in range(cols):  # Columns
                if idx >= len(bins):
                    break
                byte_len = int(col_hdr[col].split(':')[1])
                value = bytes_to_value(bins[idx:idx + byte_len])
                hex = ("%%0%dX" % (byte_len * 2)) % value
                vals.append(hex)
                idx += byte_len
            self.insert('', 'end', values=tuple(vals))
            if idx >= len(bins):
                break

    @staticmethod
    def to_cell_width(byte_len):
        # Pixel width needed to display byte_len bytes as hex.
        return byte_len * 2 * custom_table._Char_width + custom_table._Padding

    @staticmethod
    def to_byte_length(cell_width):
        # Inverse of to_cell_width.
        return (cell_width - custom_table._Padding) // (2 * custom_table._Char_width)

    def focus_out(self, event):
        # Hide the cell editor when the table loses focus.
        self.entry.display(None)

    def refresh_bin(self, bins):
        """Reload *bins* into the table, cell by cell (no-op on falsy input)."""
        if not bins:
            return
        # Reload binary into widget
        bin_len = len(bins)
        for row in range(self.rows):
            iid = self.get_children()[row]
            for col in range(self.cols):
                idx = row * sum(self.col_byte_len) + sum(self.col_byte_len[:col])
                byte_len = self.col_byte_len[col]
                if idx + byte_len <= self.size:
                    byte_len = int(self.col_hdr[col].split(':')[1])
                    # Cells past the supplied buffer display zero.
                    if idx + byte_len > bin_len:
                        val = 0
                    else:
                        val = bytes_to_value(bins[idx:idx + byte_len])
                    hex_val = ("%%0%dX" % (byte_len * 2)) % val
                    self.set(iid, col + 1, hex_val)

    def get_cell(self, row, col):
        iid = self.get_children()[row]
        txt = self.item(iid, 'values')[col]
        return txt

    def get_next_cell(self, row, col):
        """Return (text, row_id, col_id) of the cell after (row, col), wrapping."""
        rows = self.get_children()
        col += 1
        if col > self.cols:
            col = 1
            row += 1
        cnt = row * sum(self.col_byte_len) + sum(self.col_byte_len[:col])
        if cnt > self.size:
            # Reached the last cell, so roll back to beginning
            row = 0
            col = 1
        txt = self.get_cell(row, col)
        row_id = rows[row]
        col_id = '#%d' % (col + 1)
        return (txt, row_id, col_id)

    def set_cell(self, row, col, val):
        iid = self.get_children()[row]
        self.set(iid, col, val)

    def load_bin(self):
        """Prompt for a .bin file; return its bytes padded/truncated to self.size, or None."""
        # Load binary from file
        path = filedialog.askopenfilename(
            initialdir=self.last_dir,
            title="Load binary file",
            filetypes=(("Binary files", "*.bin"), (
                "binary files", "*.bin")))
        if path:
            self.last_dir = os.path.dirname(path)
            fd = open(path, 'rb')
            bins = bytearray(fd.read())[:self.size]
            fd.close()
            # Pad short files with zeros up to the table size.
            bins.extend(b'\x00' * (self.size - len(bins)))
            return bins
        return None

    def click(self, event):
        """Mouse handler: LOAD header imports a file, data cells open the editor."""
        row_id = self.identify_row(event.y)
        col_id = self.identify_column(event.x)
        if row_id == '' and col_id == '#1':
            # Clicked on "LOAD" cell
            bins = self.load_bin()
            self.refresh_bin(bins)
            return
        if col_id == '#1':
            # Clicked on column 1 (Offset column)
            return
        item = self.identify('item', event.x, event.y)
        if not item or not col_id:
            # Not clicked on valid cell
            return
        # Clicked cell
        row = int('0x' + row_id[1:], 0) - 1
        col = int(col_id[1:]) - 1
        if row * self.cols + col > self.size:
            return
        vals = self.item(item, 'values')
        if col < len(vals):
            txt = self.item(item, 'values')[col]
            self.entry.display(txt, row_id, col_id)

    def get(self):
        """Serialise the table contents back into a bytearray."""
        bins = bytearray()
        row_ids = self.get_children()
        for row_id in row_ids:
            row = int('0x' + row_id[1:], 0) - 1
            for col in range(self.cols):
                idx = row * sum(self.col_byte_len) + sum(self.col_byte_len[:col])
                byte_len = self.col_byte_len[col]
                if idx + byte_len > self.size:
                    break
                hex = self.item(row_id, 'values')[col + 1]
                # Mask to the cell's byte width before packing.
                values = value_to_bytes(int(hex, 16) & ((1 << byte_len * 8) - 1), byte_len)
                bins.extend(values)
        return bins
class state:
    """Tiny mutable boolean flag shared between event handlers."""

    def __init__(self):
        # The flag starts cleared.
        self.state = False

    def set(self, value):
        """Store *value* as the current flag."""
        self.state = value

    def get(self):
        """Return the current flag value."""
        return self.state
class application(tkinter.Frame):
def __init__(self, master=None):
root = master
self.debug = True
self.page_id = ''
self.page_list = {}
self.conf_list = {}
self.cfg_data_obj = None
self.org_cfg_data_bin = None
self.in_left = state()
self.in_right = state()
# Check if current directory contains a file with a .yaml extension
# if not default self.last_dir to a Platform directory where it is easier to locate *BoardPkg\CfgData\*Def.yaml files
self.last_dir = '.'
if not any(fname.endswith('.yaml') for fname in os.listdir('.')):
platform_path = Path(os.path.realpath(__file__)).parents[2].joinpath('Platform')
if platform_path.exists():
self.last_dir = platform_path
tkinter.Frame.__init__(self, master, borderwidth=2)
self.menu_string = [
'Save Config Data to Binary', 'Load Config Data from Binary',
'Load Config Changes from Delta File',
'Save Config Changes to Delta File',
'Save Full Config Data to Delta File'
]
root.geometry("1200x800")
paned = ttk.Panedwindow(root, orient=tkinter.HORIZONTAL)
paned.pack(fill=tkinter.BOTH, expand=True, padx=(4, 4))
status = tkinter.Label(master, text="", bd=1, relief=tkinter.SUNKEN, anchor=tkinter.W)
status.pack(side=tkinter.BOTTOM, fill=tkinter.X)
frame_left = ttk.Frame(paned, height=800, relief="groove")
self.left = ttk.Treeview(frame_left, show="tree")
# Set up tree HScroller
pady = (10, 10)
self.tree_scroll = ttk.Scrollbar(frame_left,
orient="vertical",
command=self.left.yview)
self.left.configure(yscrollcommand=self.tree_scroll.set)
self.left.bind("<<TreeviewSelect>>", self.on_config_page_select_change)
self.left.bind("<Enter>", lambda e: self.in_left.set(True))
self.left.bind("<Leave>", lambda e: self.in_left.set(False))
self.left.bind("<MouseWheel>", self.on_tree_scroll)
self.left.pack(side='left',
fill=tkinter.BOTH,
expand=True,
padx=(5, 0),
pady=pady)
self.tree_scroll.pack(side='right', fill=tkinter.Y, pady=pady, padx=(0, 5))
frame_right = ttk.Frame(paned, relief="groove")
self.frame_right = frame_right
self.conf_canvas = tkinter.Canvas(frame_right, highlightthickness=0)
self.page_scroll = ttk.Scrollbar(frame_right,
orient="vertical",
command=self.conf_canvas.yview)
self.right_grid = ttk.Frame(self.conf_canvas)
self.conf_canvas.configure(yscrollcommand=self.page_scroll.set)
self.conf_canvas.pack(side='left',
fill=tkinter.BOTH,
expand=True,
pady=pady,
padx=(5, 0))
self.page_scroll.pack(side='right', fill=tkinter.Y, pady=pady, padx=(0, 5))
self.conf_canvas.create_window(0, 0, window=self.right_grid, anchor='nw')
self.conf_canvas.bind('<Enter>', lambda e: self.in_right.set(True))
self.conf_canvas.bind('<Leave>', lambda e: self.in_right.set(False))
self.conf_canvas.bind("<Configure>", self.on_canvas_configure)
self.conf_canvas.bind_all("<MouseWheel>", self.on_page_scroll)
paned.add(frame_left, weight=2)
paned.add(frame_right, weight=10)
style = ttk.Style()
style.layout("Treeview", [('Treeview.treearea', {'sticky': 'nswe'})])
menubar = tkinter.Menu(root)
file_menu = tkinter.Menu(menubar, tearoff=0)
file_menu.add_command(label="Open Config YAML file...",
command=self.load_from_yaml)
file_menu.add_command(label=self.menu_string[0],
command=self.save_to_bin,
state='disabled')
file_menu.add_command(label=self.menu_string[1],
command=self.load_from_bin,
state='disabled')
file_menu.add_command(label=self.menu_string[2],
command=self.load_from_delta,
state='disabled')
file_menu.add_command(label=self.menu_string[3],
command=self.save_to_delta,
state='disabled')
file_menu.add_command(label=self.menu_string[4],
command=self.save_full_to_delta,
state='disabled')
file_menu.add_command(label="About", command=self.about)
menubar.add_cascade(label="File", menu=file_menu)
self.file_menu = file_menu
root.config(menu=menubar)
if len(sys.argv) > 1:
path = sys.argv[1]
if not path.endswith('.yaml') and not path.endswith('.pkl'):
messagebox.showerror('LOADING ERROR', "Unsupported file '%s' !" % path)
return
else:
self.load_cfg_file (path)
if len(sys.argv) > 2:
path = sys.argv[2]
if path.endswith('.dlt'):
self.load_delta_file (path)
elif path.endswith('.bin'):
self.load_bin_file (path)
else:
messagebox.showerror('LOADING ERROR', "Unsupported file '%s' !" % path)
return
def set_object_name(self, widget, name):
self.conf_list[id(widget)] = name
def get_object_name(self, widget):
if id(widget) in self.conf_list:
return self.conf_list[id(widget)]
else:
return None
def limit_entry_size(self, variable, limit):
value = variable.get()
if len(value) > limit:
variable.set(value[:limit])
def on_canvas_configure(self, event):
self.right_grid.grid_columnconfigure(0, minsize=event.width)
def on_tree_scroll(self, event):
if not self.in_left.get() and self.in_right.get():
# This prevents scroll event from being handled by both left and
# right frame at the same time.
self.on_page_scroll (event)
return 'break'
def on_page_scroll(self, event):
if self.in_right.get():
# Only scroll when it is in active area
min, max = self.page_scroll.get()
if not ((min == 0.0) and (max == 1.0)):
self.conf_canvas.yview_scroll(-1 * int(event.delta / 120), 'units')
def update_visibility_for_widget(self, widget, args):
visible = True
item = self.get_config_data_item_from_widget(widget, True)
if item is None:
return visible
elif not item:
return visible
result = 1
if item['condition']:
result = self.evaluate_condition(item)
if result == 2:
# Gray
if not isinstance(widget, custom_table):
widget.configure(state='disabled')
elif result == 0:
# Hide
visible = False
widget.grid_remove()
else:
# Show
widget.grid()
if not isinstance(widget, custom_table):
widget.configure(state='normal')
return visible
def update_widgets_visibility_on_page(self):
self.walk_widgets_in_layout(self.right_grid,
self.update_visibility_for_widget)
def combo_select_changed(self, event):
self.update_config_data_from_widget(event.widget, None)
self.update_widgets_visibility_on_page()
def edit_num_finished(self, event):
widget = event.widget
item = self.get_config_data_item_from_widget(widget)
if not item:
return
parts = item['type'].split(',')
if len(parts) > 3:
min = parts[2].lstrip()[1:]
max = parts[3].rstrip()[:-1]
min_val = array_str_to_value(min)
max_val = array_str_to_value(max)
text = widget.get()
if ',' in text:
text = '{ %s }' % text
try:
value = array_str_to_value(text)
if value < min_val or value > max_val:
raise Exception('Invalid input!')
self.set_config_item_value(item, text)
except Exception as e:
pass
text = item['value'].strip('{').strip('}').strip()
widget.delete(0, tkinter.END)
widget.insert(0, text)
self.update_widgets_visibility_on_page()
def update_page_scroll_bar(self):
# Update scrollbar
self.frame_right.update()
self.conf_canvas.config(scrollregion=self.conf_canvas.bbox("all"))
def on_config_page_select_change(self, event):
self.update_config_data_on_page()
sel = self.left.selection()
if len(sel) > 0:
page_id = sel[0]
self.build_config_data_page(page_id)
self.update_widgets_visibility_on_page()
self.update_page_scroll_bar()
def walk_widgets_in_layout(self, parent, callback_function, args=None):
for widget in parent.winfo_children():
callback_function(widget, args)
def clear_widgets_inLayout(self, parent=None):
if parent is None:
parent = self.right_grid
for widget in parent.winfo_children():
widget.destroy()
parent.grid_forget()
self.conf_list.clear()
def build_config_page_tree(self, cfg_page, parent):
for page in cfg_page['child']:
page_id = next(iter(page))
# Put CFG items into related page list
self.page_list[page_id] = self.cfg_data_obj.get_cfg_list (page_id)
self.page_list[page_id].sort (key=lambda x: x['order'])
page_name = self.cfg_data_obj.get_page_title(page_id)
child = self.left.insert(
parent, 'end',
iid=page_id, text=page_name,
value=0)
if len(page[page_id]) > 0:
self.build_config_page_tree(page[page_id], child)
def is_config_data_loaded(self):
return True if len(self.page_list) else False
def set_current_config_page(self, page_id):
self.page_id = page_id
def get_current_config_page(self):
return self.page_id
def get_current_config_data(self):
page_id = self.get_current_config_page()
if page_id in self.page_list:
return self.page_list[page_id]
else:
return []
def build_config_data_page(self, page_id):
self.clear_widgets_inLayout()
self.set_current_config_page(page_id)
disp_list = []
for item in self.get_current_config_data():
disp_list.append(item)
row = 0
disp_list.sort(key=lambda x:x['order'])
for item in disp_list:
self.add_config_item (item, row)
row += 2
def load_config_data(self, file_name):
gen_cfg_data = CGenCfgData()
if file_name.endswith('.pkl'):
with open(file_name, "rb") as pkl_file:
gen_cfg_data.__dict__ = marshal.load(pkl_file)
gen_cfg_data.prepare_marshal (False)
elif file_name.endswith('.yaml'):
if gen_cfg_data.load_yaml(file_name) != 0:
raise Exception(gen_cfg_data.get_last_error())
else:
raise Exception('Unsupported file "%s" !' % file_name)
return gen_cfg_data
def about(self):
    """Show the About dialog with a centered, multi-line version message."""
    msg = 'Configuration Editor\n--------------------------------\nVersion 0.8\n2020'
    width = 30
    centered = '\n'.join(line.center(width, ' ') for line in msg.split('\n'))
    messagebox.showinfo('Config Editor', centered)
def update_last_dir (self, path):
    """Remember the directory of *path* as the start point for the next file dialog."""
    self.last_dir = os.path.dirname(path)
def get_open_file_name(self, ftype):
    """Show an "open file" dialog for *ftype* ('dlt', 'bin' or 'yaml').

    When a BIN file is requested while configuration data is already loaded,
    the user is first warned that everything will be reloaded.  Returns the
    chosen path (also remembered as the last directory) or None when the
    user cancels either prompt.  Raises Exception for unknown *ftype*.
    """
    if self.is_config_data_loaded():
        if ftype == 'dlt':
            question = ''
        elif ftype == 'bin':
            question = 'All configuration will be reloaded from BIN file, continue ?'
        elif ftype == 'yaml':
            question = ''
        else:
            raise Exception('Unsupported file type !')
        if question:
            reply = messagebox.askquestion('', question, icon='warning')
            if reply == 'no':
                return None
    if ftype == 'yaml':
        # A YAML config may also be loaded from its marshalled .pkl snapshot.
        file_type = 'YAML or PKL'
        file_ext = 'pkl *Def.yaml'
    else:
        file_type = ftype.upper()
        file_ext = ftype
    path = filedialog.askopenfilename(
        initialdir=self.last_dir,
        title="Load file",
        filetypes=(("%s files" % file_type, "*.%s" % file_ext), (
            "all files", "*.*")))
    if path:
        self.update_last_dir (path)
        return path
    else:
        return None
def load_from_delta(self):
    """Ask for a .dlt file and, if one is chosen, apply it on top of the defaults."""
    chosen = self.get_open_file_name('dlt')
    if chosen:
        self.load_delta_file (chosen)
def load_delta_file (self, path):
    """Reset configuration to the original binary, then apply delta file *path*.

    Shows an error dialog (and leaves the page on the reset defaults) when
    the delta cannot be applied.
    """
    # Always start from the pristine binary so deltas do not stack.
    self.reload_config_data_from_bin(self.org_cfg_data_bin)
    try:
        self.cfg_data_obj.override_default_value(path)
    except Exception as e:
        messagebox.showerror('LOADING ERROR', str(e))
        return
    self.update_last_dir (path)
    self.refresh_config_data_page()
def load_from_bin(self):
    """Ask for a .bin file and, if one is chosen, reload all values from it."""
    chosen = self.get_open_file_name('bin')
    if chosen:
        self.load_bin_file (chosen)
def load_bin_file (self, path):
    """Load raw configuration bytes from *path* and rebuild the pages from them.

    The file must be at least as large as the binary generated from the YAML
    defaults; otherwise an error dialog is shown and nothing is changed.
    """
    with open(path, 'rb') as fd:
        bin_data = bytearray(fd.read())
    if len(bin_data) < len(self.org_cfg_data_bin):
        # BUGFIX: messagebox.showerror(title, message) -- the original passed
        # the message as the only argument, so it became the window title and
        # the dialog body was empty.
        messagebox.showerror(
            'LOADING ERROR',
            'Binary file size is smaller than what YAML requires !')
        return
    try:
        self.reload_config_data_from_bin(bin_data)
    except Exception as e:
        messagebox.showerror('LOADING ERROR', str(e))
        return
def load_cfg_file(self, path):
    """Load a YAML/PKL configuration description and rebuild the whole UI.

    Clears the current widgets and tree, regenerates the reference binary
    (self.org_cfg_data_bin) and re-enables the file-menu entries.
    Returns 0 on success; load errors propagate from load_config_data().
    """
    # Save current values in widget and clear database
    self.clear_widgets_inLayout()
    self.left.delete(*self.left.get_children())
    self.cfg_data_obj = self.load_config_data(path)
    self.update_last_dir (path)
    self.org_cfg_data_bin = self.cfg_data_obj.generate_binary_array()
    self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'], '')
    # A config is now loaded, so the save/load menu entries become usable.
    for menu in self.menu_string:
        self.file_menu.entryconfig(menu, state="normal")
    return 0
def load_from_yaml(self):
    """Ask for a YAML/PKL description file and, if one is chosen, load it."""
    chosen = self.get_open_file_name('yaml')
    if chosen:
        self.load_cfg_file(chosen)
def get_save_file_name (self, extension):
    """Show a "save file" dialog with *extension* as the default suffix.

    Returns the chosen path (also remembered as the last directory) or
    None when the user cancels.
    """
    chosen = filedialog.asksaveasfilename(
        initialdir=self.last_dir,
        title="Save file",
        defaultextension=extension)
    if not chosen:
        return None
    self.last_dir = os.path.dirname(chosen)
    return chosen
def save_delta_file(self, full=False):
    """Write a delta (.dlt) file with the differences between the original
    binary and the current widget values (all values when *full* is True)."""
    target = self.get_save_file_name (".dlt")
    if not target:
        return
    self.update_config_data_on_page()
    current = self.cfg_data_obj.generate_binary_array()
    self.cfg_data_obj.generate_delta_file_from_bin (target, self.org_cfg_data_bin, current, full)
def save_to_delta(self):
    """Menu handler: save only the changed values as a delta file."""
    self.save_delta_file()
def save_full_to_delta(self):
    """Menu handler: save every value (not just changes) as a delta file."""
    self.save_delta_file(True)
def save_to_bin(self):
    """Sync widget values into the database and write the full binary blob to disk."""
    target = self.get_save_file_name (".bin")
    if not target:
        return
    self.update_config_data_on_page()
    with open(target, 'wb') as out:
        out.write(self.cfg_data_obj.generate_binary_array())
def refresh_config_data_page(self):
    """Rebuild the right pane so it reflects the current configuration values."""
    self.clear_widgets_inLayout()
    self.on_config_page_select_change(None)
def reload_config_data_from_bin(self, bin_dat):
    """Overwrite all configuration defaults with the bytes in *bin_dat*, then redraw."""
    self.cfg_data_obj.load_default_from_bin(bin_dat)
    self.refresh_config_data_page()
def set_config_item_value(self, item, value_str):
    """Store *value_str* into item['value'], normalised for the item's type.

    Table values are stored verbatim; EditText is truncated to the item's
    byte length (and re-quoted when the original value was quoted);
    everything else goes through the CGenCfgData value formatter, falling
    back to the old value when the string cannot be parsed.
    """
    itype = item['type'].split(',')[0]
    if itype == "Table":
        new_value = value_str
    elif itype == "EditText":
        # Bit length -> byte length, rounded up.
        length = (self.cfg_data_obj.get_cfg_item_length(item) + 7) // 8
        new_value = value_str[:length]
        if item['value'].startswith("'"):
            new_value = "'%s'" % new_value
    else:
        try:
            new_value = self.cfg_data_obj.reformat_value_str (value_str, self.cfg_data_obj.get_cfg_item_length(item), item['value'])
        except Exception:
            # FIX: the original bare "except:" also swallowed
            # SystemExit/KeyboardInterrupt.
            print("WARNING: Failed to format value string '%s' for '%s' !" % (value_str, item['path']))
            new_value = item['value']
    if item['value'] != new_value:
        if self.debug:
            print('Update %s from %s to %s !' % (item['cname'], item['value'], new_value))
        item['value'] = new_value
def get_config_data_item_from_widget(self, widget, label=False):
    """Map a widget back to its CFG item via the widget's registered name.

    Names beginning with 'LABEL_' belong to label widgets and are only
    resolved when *label* is True.  Returns None for unnamed widgets, for
    skipped labels, and when no configuration is loaded.
    """
    name = self.get_object_name(widget)
    if not name or not self.page_list:
        return None
    if name.startswith('LABEL_'):
        if not label:
            return None
        path = name[len('LABEL_'):]
    else:
        path = name
    return self.cfg_data_obj.get_item_by_path (path)
def update_config_data_from_widget(self, widget, args):
    """Walker callback: push the current value of *widget* back into its CFG item.

    Label widgets are ignored; any other widget whose name cannot be
    resolved is treated as a programming error.  Combo widgets map the
    selected row index back to the option's value string; Entry widgets
    store their text; table widgets store their bytes as a bracketed string.
    """
    item = self.get_config_data_item_from_widget(widget)
    if item is None:
        # Unnamed widget or no configuration loaded -- nothing to sync.
        return
    elif not item:
        # A falsy (but not None) item means the path lookup failed outright.
        if isinstance(widget, tkinter.Label):
            return
        raise Exception('Failed to find "%s" !' % self.get_object_name(widget))
    itype = item['type'].split(',')[0]
    if itype == "Combo":
        opt_list = self.cfg_data_obj.get_cfg_item_options (item)
        tmp_list = [opt[0] for opt in opt_list]
        idx = widget.current()
        self.set_config_item_value(item, tmp_list[idx])
    elif itype in ["EditNum", "EditText"]:
        self.set_config_item_value(item, widget.get())
    elif itype in ["Table"]:
        new_value = bytes_to_bracket_str(widget.get())
        self.set_config_item_value(item, new_value)
def evaluate_condition(self, item):
    """Evaluate the item's visibility condition; broken conditions count as visible (1)."""
    try:
        result = self.cfg_data_obj.evaluate_condition(item)
    except Exception:
        # FIX: narrowed the original bare "except:" so Ctrl-C still works.
        print("WARNING: Condition '%s' is invalid for '%s' !" % (item['condition'], item['path']))
        result = 1
    return result
def add_config_item(self, item, row):
    """Create the label + input widget pair for one CFG *item* at grid *row*.

    The widget kind follows item['type'] ('Combo', 'EditNum', 'EditText' or
    'Table'); unknown non-'Reserved' types only print a warning and create
    no widget.  Both widgets are registered under the item's path so they
    can later be mapped back to the item, and get a tooltip from
    item['help'].
    """
    parent = self.right_grid
    name = tkinter.Label(parent, text=item['name'], anchor="w")
    parts = item['type'].split(',')
    itype = parts[0]
    widget = None
    if itype == "Combo":
        # Build the option list and pre-select the entry whose value matches
        # the item's current value.
        opt_list = self.cfg_data_obj.get_cfg_item_options (item)
        current_value = self.cfg_data_obj.get_cfg_item_value (item, False)
        option_list = []
        current = None
        for idx, option in enumerate(opt_list):
            option_str = option[0]
            try:
                option_value = self.cfg_data_obj.get_value(option_str, len(option_str), False)
            except:
                option_value = 0
                print('WARNING: Option "%s" has invalid format for "%s" !' % (option_str, item['path']))
            if option_value == current_value:
                current = idx
            option_list.append(option[1])
        widget = ttk.Combobox(parent, value=option_list, state="readonly")
        widget.bind("<<ComboboxSelected>>", self.combo_select_changed)
        # Keep the mouse wheel from accidentally changing the selection.
        widget.unbind_class("TCombobox", "<MouseWheel>")
        if current is None:
            print('WARNING: Value "%s" is an invalid option for "%s" !' %
                  (current_value, item['path']))
        else:
            widget.current(current)
    elif itype in ["EditNum", "EditText"]:
        txt_val = tkinter.StringVar()
        widget = tkinter.Entry(parent, textvariable=txt_val)
        value = item['value'].strip("'")
        if itype in ["EditText"]:
            # Cap the entry length to the item's byte size on every keystroke.
            txt_val.trace(
                'w',
                lambda *args: self.limit_entry_size(txt_val, (self.cfg_data_obj.get_cfg_item_length(item) + 7) // 8))
        elif itype in ["EditNum"]:
            value = item['value'].strip("{").strip("}").strip()
            widget.bind("<FocusOut>", self.edit_num_finished)
        txt_val.set(value)
    elif itype in ["Table"]:
        bins = self.cfg_data_obj.get_cfg_item_value(item, True)
        col_hdr = item['option'].split(',')
        widget = custom_table(parent, col_hdr, bins)
    else:
        if itype and itype not in ["Reserved"]:
            print ("WARNING: Type '%s' is invalid for '%s' !" % (itype, item['path']))
    if widget:
        ttp = create_tool_tip(widget, item['help'])
        self.set_object_name(name, 'LABEL_' + item['path'])
        self.set_object_name(widget, item['path'])
        name.grid(row=row, column=0, padx=10, pady=5, sticky="nsew")
        widget.grid(row=row + 1, rowspan=1, column=0, padx=10, pady=5, sticky="nsew")
def update_config_data_on_page(self):
    """Flush the value of every widget on the current page back into the CFG database."""
    self.walk_widgets_in_layout(self.right_grid,
                                self.update_config_data_from_widget)
if __name__ == '__main__':
    # Launch the standalone Config Editor window.
    root = tkinter.Tk()
    app = application(master=root)
    root.title("Config Editor")
    root.mainloop()
| 36.005291 | 142 | 0.540838 |
ace8334781a076fe08048985f511eb026bfc90bb | 18,209 | py | Python | facebook_business/adobjects/adsinsights.py | oakhan3/facebook-python-business-sdk | 270942be977607aa29511d0a3799b2163184fadf | [
"CNRI-Python"
] | null | null | null | facebook_business/adobjects/adsinsights.py | oakhan3/facebook-python-business-sdk | 270942be977607aa29511d0a3799b2163184fadf | [
"CNRI-Python"
] | null | null | null | facebook_business/adobjects/adsinsights.py | oakhan3/facebook-python-business-sdk | 270942be977607aa29511d0a3799b2163184fadf | [
"CNRI-Python"
] | null | null | null | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.helpers.adsinsightsmixin import AdsInsightsMixin
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdsInsights(
    AdsInsightsMixin,
    AbstractObject,
):
    """Read-only Marketing API "insights" node (auto-generated -- do not hand-edit).

    The nested classes enumerate the legal string constants for the matching
    insights request parameters, and ``_field_types`` drives the SDK's
    response deserialisation.
    """

    def __init__(self, api=None):
        super(AdsInsights, self).__init__()
        # Marker attribute used by the SDK's runtime type checks.
        self._isAdsInsights = True
        self._api = api

    class Field(AbstractObject.Field):
        # Every insights response field, mapped to its wire name.
        account_currency = 'account_currency'
        account_id = 'account_id'
        account_name = 'account_name'
        action_values = 'action_values'
        actions = 'actions'
        ad_bid_type = 'ad_bid_type'
        ad_bid_value = 'ad_bid_value'
        ad_click_actions = 'ad_click_actions'
        ad_delivery = 'ad_delivery'
        ad_id = 'ad_id'
        ad_impression_actions = 'ad_impression_actions'
        ad_name = 'ad_name'
        adset_bid_type = 'adset_bid_type'
        adset_bid_value = 'adset_bid_value'
        adset_budget_type = 'adset_budget_type'
        adset_budget_value = 'adset_budget_value'
        adset_delivery = 'adset_delivery'
        adset_end = 'adset_end'
        adset_id = 'adset_id'
        adset_name = 'adset_name'
        adset_start = 'adset_start'
        age_targeting = 'age_targeting'
        auction_bid = 'auction_bid'
        auction_competitiveness = 'auction_competitiveness'
        auction_max_competitor_bid = 'auction_max_competitor_bid'
        buying_type = 'buying_type'
        campaign_id = 'campaign_id'
        campaign_name = 'campaign_name'
        canvas_avg_view_percent = 'canvas_avg_view_percent'
        canvas_avg_view_time = 'canvas_avg_view_time'
        clicks = 'clicks'
        conversion_rate_ranking = 'conversion_rate_ranking'
        conversion_values = 'conversion_values'
        conversions = 'conversions'
        cost_per_15_sec_video_view = 'cost_per_15_sec_video_view'
        cost_per_2_sec_continuous_video_view = 'cost_per_2_sec_continuous_video_view'
        cost_per_action_type = 'cost_per_action_type'
        cost_per_ad_click = 'cost_per_ad_click'
        cost_per_conversion = 'cost_per_conversion'
        cost_per_dda_countby_convs = 'cost_per_dda_countby_convs'
        cost_per_estimated_ad_recallers = 'cost_per_estimated_ad_recallers'
        cost_per_inline_link_click = 'cost_per_inline_link_click'
        cost_per_inline_post_engagement = 'cost_per_inline_post_engagement'
        cost_per_one_thousand_ad_impression = 'cost_per_one_thousand_ad_impression'
        cost_per_outbound_click = 'cost_per_outbound_click'
        cost_per_store_visit_action = 'cost_per_store_visit_action'
        cost_per_thruplay = 'cost_per_thruplay'
        cost_per_unique_action_type = 'cost_per_unique_action_type'
        cost_per_unique_click = 'cost_per_unique_click'
        cost_per_unique_conversion = 'cost_per_unique_conversion'
        cost_per_unique_inline_link_click = 'cost_per_unique_inline_link_click'
        cost_per_unique_outbound_click = 'cost_per_unique_outbound_click'
        cpc = 'cpc'
        cpm = 'cpm'
        cpp = 'cpp'
        created_time = 'created_time'
        ctr = 'ctr'
        date_start = 'date_start'
        date_stop = 'date_stop'
        dda_countby_convs = 'dda_countby_convs'
        engagement_rate_ranking = 'engagement_rate_ranking'
        estimated_ad_recall_rate = 'estimated_ad_recall_rate'
        estimated_ad_recall_rate_lower_bound = 'estimated_ad_recall_rate_lower_bound'
        estimated_ad_recall_rate_upper_bound = 'estimated_ad_recall_rate_upper_bound'
        estimated_ad_recallers = 'estimated_ad_recallers'
        estimated_ad_recallers_lower_bound = 'estimated_ad_recallers_lower_bound'
        estimated_ad_recallers_upper_bound = 'estimated_ad_recallers_upper_bound'
        frequency = 'frequency'
        full_view_impressions = 'full_view_impressions'
        full_view_reach = 'full_view_reach'
        gender_targeting = 'gender_targeting'
        impressions = 'impressions'
        inline_link_click_ctr = 'inline_link_click_ctr'
        inline_link_clicks = 'inline_link_clicks'
        inline_post_engagement = 'inline_post_engagement'
        instant_experience_clicks_to_open = 'instant_experience_clicks_to_open'
        instant_experience_clicks_to_start = 'instant_experience_clicks_to_start'
        instant_experience_outbound_clicks = 'instant_experience_outbound_clicks'
        labels = 'labels'
        location = 'location'
        mobile_app_purchase_roas = 'mobile_app_purchase_roas'
        objective = 'objective'
        outbound_clicks = 'outbound_clicks'
        outbound_clicks_ctr = 'outbound_clicks_ctr'
        place_page_name = 'place_page_name'
        purchase_roas = 'purchase_roas'
        quality_ranking = 'quality_ranking'
        quality_score_ectr = 'quality_score_ectr'
        quality_score_ecvr = 'quality_score_ecvr'
        quality_score_organic = 'quality_score_organic'
        reach = 'reach'
        social_spend = 'social_spend'
        spend = 'spend'
        store_visit_actions = 'store_visit_actions'
        unique_actions = 'unique_actions'
        unique_clicks = 'unique_clicks'
        unique_conversions = 'unique_conversions'
        unique_ctr = 'unique_ctr'
        unique_inline_link_click_ctr = 'unique_inline_link_click_ctr'
        unique_inline_link_clicks = 'unique_inline_link_clicks'
        unique_link_clicks_ctr = 'unique_link_clicks_ctr'
        unique_outbound_clicks = 'unique_outbound_clicks'
        unique_outbound_clicks_ctr = 'unique_outbound_clicks_ctr'
        unique_video_continuous_2_sec_watched_actions = 'unique_video_continuous_2_sec_watched_actions'
        unique_video_view_15_sec = 'unique_video_view_15_sec'
        updated_time = 'updated_time'
        video_15_sec_watched_actions = 'video_15_sec_watched_actions'
        video_30_sec_watched_actions = 'video_30_sec_watched_actions'
        video_avg_time_watched_actions = 'video_avg_time_watched_actions'
        video_continuous_2_sec_watched_actions = 'video_continuous_2_sec_watched_actions'
        video_p100_watched_actions = 'video_p100_watched_actions'
        video_p25_watched_actions = 'video_p25_watched_actions'
        video_p50_watched_actions = 'video_p50_watched_actions'
        video_p75_watched_actions = 'video_p75_watched_actions'
        video_p95_watched_actions = 'video_p95_watched_actions'
        video_play_actions = 'video_play_actions'
        video_play_curve_actions = 'video_play_curve_actions'
        video_play_retention_0_to_15s_actions = 'video_play_retention_0_to_15s_actions'
        video_play_retention_20_to_60s_actions = 'video_play_retention_20_to_60s_actions'
        video_play_retention_graph_actions = 'video_play_retention_graph_actions'
        video_thruplay_watched_actions = 'video_thruplay_watched_actions'
        video_time_watched_actions = 'video_time_watched_actions'
        website_ctr = 'website_ctr'
        website_purchase_roas = 'website_purchase_roas'
        wish_bid = 'wish_bid'

    # Legal values of the 'action_attribution_windows' request parameter.
    class ActionAttributionWindows:
        value_1d_click = '1d_click'
        value_1d_view = '1d_view'
        value_28d_click = '28d_click'
        value_28d_view = '28d_view'
        value_7d_click = '7d_click'
        value_7d_view = '7d_view'
        value_default = 'default'

    # Legal values of the 'action_breakdowns' request parameter.
    class ActionBreakdowns:
        action_canvas_component_name = 'action_canvas_component_name'
        action_carousel_card_id = 'action_carousel_card_id'
        action_carousel_card_name = 'action_carousel_card_name'
        action_destination = 'action_destination'
        action_device = 'action_device'
        action_reaction = 'action_reaction'
        action_target_id = 'action_target_id'
        action_type = 'action_type'
        action_video_sound = 'action_video_sound'
        action_video_type = 'action_video_type'

    # Legal values of the 'action_report_time' request parameter.
    class ActionReportTime:
        conversion = 'conversion'
        impression = 'impression'

    # Legal values of the 'breakdowns' request parameter.
    class Breakdowns:
        ad_format_asset = 'ad_format_asset'
        age = 'age'
        body_asset = 'body_asset'
        call_to_action_asset = 'call_to_action_asset'
        country = 'country'
        description_asset = 'description_asset'
        device_platform = 'device_platform'
        dma = 'dma'
        frequency_value = 'frequency_value'
        gender = 'gender'
        hourly_stats_aggregated_by_advertiser_time_zone = 'hourly_stats_aggregated_by_advertiser_time_zone'
        hourly_stats_aggregated_by_audience_time_zone = 'hourly_stats_aggregated_by_audience_time_zone'
        image_asset = 'image_asset'
        impression_device = 'impression_device'
        link_url_asset = 'link_url_asset'
        place_page_id = 'place_page_id'
        platform_position = 'platform_position'
        product_id = 'product_id'
        publisher_platform = 'publisher_platform'
        region = 'region'
        title_asset = 'title_asset'
        video_asset = 'video_asset'

    # Legal values of the 'date_preset' request parameter.
    class DatePreset:
        last_14d = 'last_14d'
        last_28d = 'last_28d'
        last_30d = 'last_30d'
        last_3d = 'last_3d'
        last_7d = 'last_7d'
        last_90d = 'last_90d'
        last_month = 'last_month'
        last_quarter = 'last_quarter'
        last_week_mon_sun = 'last_week_mon_sun'
        last_week_sun_sat = 'last_week_sun_sat'
        last_year = 'last_year'
        lifetime = 'lifetime'
        this_month = 'this_month'
        this_quarter = 'this_quarter'
        this_week_mon_today = 'this_week_mon_today'
        this_week_sun_today = 'this_week_sun_today'
        this_year = 'this_year'
        today = 'today'
        yesterday = 'yesterday'

    # Legal values of the 'level' request parameter.
    class Level:
        account = 'account'
        ad = 'ad'
        adset = 'adset'
        campaign = 'campaign'

    # Legal values of the 'summary_action_breakdowns' request parameter.
    class SummaryActionBreakdowns:
        action_canvas_component_name = 'action_canvas_component_name'
        action_carousel_card_id = 'action_carousel_card_id'
        action_carousel_card_name = 'action_carousel_card_name'
        action_destination = 'action_destination'
        action_device = 'action_device'
        action_reaction = 'action_reaction'
        action_target_id = 'action_target_id'
        action_type = 'action_type'
        action_video_sound = 'action_video_sound'
        action_video_type = 'action_video_type'

    # @deprecated get_endpoint function is deprecated
    @classmethod
    def get_endpoint(cls):
        return 'insights'

    # Maps each response field to the SDK type it deserialises into.
    _field_types = {
        'account_currency': 'string',
        'account_id': 'string',
        'account_name': 'string',
        'action_values': 'list<AdsActionStats>',
        'actions': 'list<AdsActionStats>',
        'ad_bid_type': 'string',
        'ad_bid_value': 'string',
        'ad_click_actions': 'list<AdsActionStats>',
        'ad_delivery': 'string',
        'ad_id': 'string',
        'ad_impression_actions': 'list<AdsActionStats>',
        'ad_name': 'string',
        'adset_bid_type': 'string',
        'adset_bid_value': 'string',
        'adset_budget_type': 'string',
        'adset_budget_value': 'string',
        'adset_delivery': 'string',
        'adset_end': 'string',
        'adset_id': 'string',
        'adset_name': 'string',
        'adset_start': 'string',
        'age_targeting': 'string',
        'auction_bid': 'string',
        'auction_competitiveness': 'string',
        'auction_max_competitor_bid': 'string',
        'buying_type': 'string',
        'campaign_id': 'string',
        'campaign_name': 'string',
        'canvas_avg_view_percent': 'string',
        'canvas_avg_view_time': 'string',
        'clicks': 'string',
        'conversion_rate_ranking': 'string',
        'conversion_values': 'list<AdsActionStats>',
        'conversions': 'list<AdsActionStats>',
        'cost_per_15_sec_video_view': 'list<AdsActionStats>',
        'cost_per_2_sec_continuous_video_view': 'list<AdsActionStats>',
        'cost_per_action_type': 'list<AdsActionStats>',
        'cost_per_ad_click': 'list<AdsActionStats>',
        'cost_per_conversion': 'list<AdsActionStats>',
        'cost_per_dda_countby_convs': 'string',
        'cost_per_estimated_ad_recallers': 'string',
        'cost_per_inline_link_click': 'string',
        'cost_per_inline_post_engagement': 'string',
        'cost_per_one_thousand_ad_impression': 'list<AdsActionStats>',
        'cost_per_outbound_click': 'list<AdsActionStats>',
        'cost_per_store_visit_action': 'list<AdsActionStats>',
        'cost_per_thruplay': 'list<AdsActionStats>',
        'cost_per_unique_action_type': 'list<AdsActionStats>',
        'cost_per_unique_click': 'string',
        'cost_per_unique_conversion': 'list<AdsActionStats>',
        'cost_per_unique_inline_link_click': 'string',
        'cost_per_unique_outbound_click': 'list<AdsActionStats>',
        'cpc': 'string',
        'cpm': 'string',
        'cpp': 'string',
        'created_time': 'string',
        'ctr': 'string',
        'date_start': 'string',
        'date_stop': 'string',
        'dda_countby_convs': 'string',
        'engagement_rate_ranking': 'string',
        'estimated_ad_recall_rate': 'string',
        'estimated_ad_recall_rate_lower_bound': 'string',
        'estimated_ad_recall_rate_upper_bound': 'string',
        'estimated_ad_recallers': 'string',
        'estimated_ad_recallers_lower_bound': 'string',
        'estimated_ad_recallers_upper_bound': 'string',
        'frequency': 'string',
        'full_view_impressions': 'string',
        'full_view_reach': 'string',
        'gender_targeting': 'string',
        'impressions': 'string',
        'inline_link_click_ctr': 'string',
        'inline_link_clicks': 'string',
        'inline_post_engagement': 'string',
        'instant_experience_clicks_to_open': 'string',
        'instant_experience_clicks_to_start': 'string',
        'instant_experience_outbound_clicks': 'string',
        'labels': 'string',
        'location': 'string',
        'mobile_app_purchase_roas': 'list<AdsActionStats>',
        'objective': 'string',
        'outbound_clicks': 'list<AdsActionStats>',
        'outbound_clicks_ctr': 'list<AdsActionStats>',
        'place_page_name': 'string',
        'purchase_roas': 'list<AdsActionStats>',
        'quality_ranking': 'string',
        'quality_score_ectr': 'string',
        'quality_score_ecvr': 'string',
        'quality_score_organic': 'string',
        'reach': 'string',
        'social_spend': 'string',
        'spend': 'string',
        'store_visit_actions': 'list<AdsActionStats>',
        'unique_actions': 'list<AdsActionStats>',
        'unique_clicks': 'string',
        'unique_conversions': 'list<AdsActionStats>',
        'unique_ctr': 'string',
        'unique_inline_link_click_ctr': 'string',
        'unique_inline_link_clicks': 'string',
        'unique_link_clicks_ctr': 'string',
        'unique_outbound_clicks': 'list<AdsActionStats>',
        'unique_outbound_clicks_ctr': 'list<AdsActionStats>',
        'unique_video_continuous_2_sec_watched_actions': 'list<AdsActionStats>',
        'unique_video_view_15_sec': 'list<AdsActionStats>',
        'updated_time': 'string',
        'video_15_sec_watched_actions': 'list<AdsActionStats>',
        'video_30_sec_watched_actions': 'list<AdsActionStats>',
        'video_avg_time_watched_actions': 'list<AdsActionStats>',
        'video_continuous_2_sec_watched_actions': 'list<AdsActionStats>',
        'video_p100_watched_actions': 'list<AdsActionStats>',
        'video_p25_watched_actions': 'list<AdsActionStats>',
        'video_p50_watched_actions': 'list<AdsActionStats>',
        'video_p75_watched_actions': 'list<AdsActionStats>',
        'video_p95_watched_actions': 'list<AdsActionStats>',
        'video_play_actions': 'list<AdsActionStats>',
        'video_play_curve_actions': 'list<Object>',
        'video_play_retention_0_to_15s_actions': 'list<Object>',
        'video_play_retention_20_to_60s_actions': 'list<Object>',
        'video_play_retention_graph_actions': 'list<Object>',
        'video_thruplay_watched_actions': 'list<AdsActionStats>',
        'video_time_watched_actions': 'list<AdsActionStats>',
        'website_ctr': 'list<AdsActionStats>',
        'website_purchase_roas': 'list<AdsActionStats>',
        'wish_bid': 'string',
    }

    @classmethod
    def _get_field_enum_info(cls):
        # Collect the enum-style nested classes' values for API validation.
        field_enum_info = {}
        field_enum_info['ActionAttributionWindows'] = AdsInsights.ActionAttributionWindows.__dict__.values()
        field_enum_info['ActionBreakdowns'] = AdsInsights.ActionBreakdowns.__dict__.values()
        field_enum_info['ActionReportTime'] = AdsInsights.ActionReportTime.__dict__.values()
        field_enum_info['Breakdowns'] = AdsInsights.Breakdowns.__dict__.values()
        field_enum_info['DatePreset'] = AdsInsights.DatePreset.__dict__.values()
        field_enum_info['Level'] = AdsInsights.Level.__dict__.values()
        field_enum_info['SummaryActionBreakdowns'] = AdsInsights.SummaryActionBreakdowns.__dict__.values()
        return field_enum_info
| 45.29602 | 108 | 0.694656 |
ace8337cff47764895598ca8c50a46ed7b47550b | 150 | py | Python | Exercicios/ex006b.py | MateusBarboza99/Python-03- | 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | [
"MIT"
] | null | null | null | Exercicios/ex006b.py | MateusBarboza99/Python-03- | 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | [
"MIT"
] | null | null | null | Exercicios/ex006b.py | MateusBarboza99/Python-03- | 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | [
"MIT"
# Read an integer and report its double and its square root.
n = int(input('Digite um número: '))
# FIX: the third placeholder ("A raiz quadrada de {}") previously received
# n * 3, which made the printed sentence wrong; it now receives n itself.
print('O dobro de {} vale {}. \nA raiz quadrada de {} é igual a {:.2f}.'.format(n, (n * 2), n, (n**(1/2))))
| 37.5 | 111 | 0.526667 |
ace833864f75558fc43627da4f7dbb0ac705d871 | 3,017 | py | Python | horton_helpers/autorest_service_apis/service20180630/models/configuration.py | v-greach/iot-sdks-e2e-fx | 2ceb178c886ced2a639dcd61bf11206a58685509 | [
"MIT"
] | 12 | 2019-02-02T00:15:13.000Z | 2022-02-08T18:20:08.000Z | horton_helpers/autorest_service_apis/service20180630/models/configuration.py | v-greach/iot-sdks-e2e-fx | 2ceb178c886ced2a639dcd61bf11206a58685509 | [
"MIT"
] | 36 | 2019-02-14T22:53:17.000Z | 2022-03-22T22:41:38.000Z | horton_helpers/autorest_service_apis/service20180630/models/configuration.py | gregga/my-iot-sdks-e2e-fx | f043aff93b415e214136110ed52c15e581892b42 | [
"MIT"
] | 12 | 2019-02-19T13:28:25.000Z | 2022-02-08T18:20:55.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Configuration(Model):
    """Configuration for IotHub devices and modules.

    :param id: Gets Identifier for the configuration
    :type id: str
    :param schema_version: Gets Schema version for the configuration
    :type schema_version: str
    :param labels: Gets or sets labels for the configuration
    :type labels: dict[str, str]
    :param content: Gets or sets Content for the configuration
    :type content: ~service20180630.models.ConfigurationContent
    :param target_condition: Gets or sets Target Condition for the
     configuration
    :type target_condition: str
    :param created_time_utc: Gets creation time for the configuration
    :type created_time_utc: datetime
    :param last_updated_time_utc: Gets last update time for the configuration
    :type last_updated_time_utc: datetime
    :param priority: Gets or sets Priority for the configuration
    :type priority: int
    :param system_metrics: System Configuration Metrics
    :type system_metrics: ~service20180630.models.ConfigurationMetrics
    :param metrics: Custom Configuration Metrics
    :type metrics: ~service20180630.models.ConfigurationMetrics
    :param etag: Gets or sets configuration's ETag
    :type etag: str
    """

    # msrest (de)serialisation map: python attribute -> wire name and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'schema_version': {'key': 'schemaVersion', 'type': 'str'},
        'labels': {'key': 'labels', 'type': '{str}'},
        'content': {'key': 'content', 'type': 'ConfigurationContent'},
        'target_condition': {'key': 'targetCondition', 'type': 'str'},
        'created_time_utc': {'key': 'createdTimeUtc', 'type': 'iso-8601'},
        'last_updated_time_utc': {'key': 'lastUpdatedTimeUtc', 'type': 'iso-8601'},
        'priority': {'key': 'priority', 'type': 'int'},
        'system_metrics': {'key': 'systemMetrics', 'type': 'ConfigurationMetrics'},
        'metrics': {'key': 'metrics', 'type': 'ConfigurationMetrics'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, schema_version=None, labels=None, content=None, target_condition=None, created_time_utc=None, last_updated_time_utc=None, priority=None, system_metrics=None, metrics=None, etag=None):
        super(Configuration, self).__init__()
        self.id = id
        self.schema_version = schema_version
        self.labels = labels
        self.content = content
        self.target_condition = target_condition
        self.created_time_utc = created_time_utc
        self.last_updated_time_utc = last_updated_time_utc
        self.priority = priority
        self.system_metrics = system_metrics
        self.metrics = metrics
        self.etag = etag
| 45.712121 | 215 | 0.652304 |
ace834f68e38d4fc00bb3a35a14807d25fedd19a | 26,229 | py | Python | carla_vehicle_annotator.py | Song-Jingyu/Carla-Roadside-Dataset-Generator | 0f14a49069b6d8d0f4df93b3b0cd7f66608147f9 | [
"MIT"
] | 4 | 2021-11-02T20:36:39.000Z | 2022-03-08T09:46:55.000Z | carla_vehicle_annotator.py | Song-Jingyu/Carla-Roadside-Dataset-Generator | 0f14a49069b6d8d0f4df93b3b0cd7f66608147f9 | [
"MIT"
] | null | null | null | carla_vehicle_annotator.py | Song-Jingyu/Carla-Roadside-Dataset-Generator | 0f14a49069b6d8d0f4df93b3b0cd7f66608147f9 | [
"MIT"
] | null | null | null | ### Functions to extract 2D vehicle Bounding Box from CARLA
### By Mukhlas Adib
### Based example from CARLA Github client_bounding_boxes.py
### 2020
### Last tested on CARLA 0.9.10.1
### All of functions in PART 1 and PART 2 are copied from client_bounding_boxes.py example
### Except functions that convert 3D bounding boxes to 2D bounding boxes
### CARLA Simulator and client_bounding_boxes.py are licensed under the terms of the MIT license
### For a copy, see <https://opensource.org/licenses/MIT>
### For more information about CARLA Simulator, visit https://carla.org/
import numpy as np
import PIL
from PIL import Image
from PIL import ImageDraw
import json
import pickle
import os
import glob
import sys
import cv2
import carla
### PART 0
### Calculate bounding boxes and apply the filter ###
#####################################################
### Use this function to get 2D bounding boxes of visible vehicles to camera using semantic LIDAR
def auto_annotate_lidar(vehicles, camera, lidar_data, max_dist = 100, min_detect = 5, show_img = None, json_path = None):
    """Annotate vehicles visible to *camera* using a semantic LIDAR point cloud.

    A vehicle counts as visible when LIDAR points within *max_dist* of the
    camera carry its actor id at least *min_detect* times.  Returns
    (filtered_out, filtered_data): a dict with keys 'vehicles', their 2D
    'bbox' and -- when *json_path* is given -- their 'class', plus the
    filtered point list itself.
    """
    filtered_data = filter_lidar(lidar_data, camera, max_dist)
    if show_img != None:
        show_lidar(filtered_data, camera, show_img)
    ### Delete this section if object_idx issue has been fixed in CARLA
    # Workaround: points whose object_idx is 0 are re-associated manually.
    filtered_data = np.array([p for p in filtered_data if p.object_idx != 0])
    filtered_data = get_points_id(filtered_data, vehicles, camera, max_dist)
    ###
    visible_id, idx_counts = np.unique([p.object_idx for p in filtered_data], return_counts=True)
    visible_vehicles = [v for v in vehicles if v.id in visible_id]
    # NOTE(review): the next line overwrites the list built just above, so the
    # "v.id in visible_id" pre-filter has no effect -- presumably the two
    # conditions were meant to be combined; confirm against upstream.
    visible_vehicles = [v for v in vehicles if idx_counts[(visible_id == v.id).nonzero()[0]] >= min_detect]
    bounding_boxes_2d = [get_2d_bb(vehicle, camera) for vehicle in visible_vehicles]
    filtered_out = {}
    filtered_out['vehicles'] = visible_vehicles
    filtered_out['bbox'] = bounding_boxes_2d
    if json_path is not None:
        filtered_out['class'] = get_vehicle_class(visible_vehicles, json_path)
    return filtered_out, filtered_data
### Use this function to get 2D bounding boxes of visible vehicle to camera
def auto_annotate(vehicles, camera, depth_img, max_dist=100, depth_margin=-1, patch_ratio=0.5, resize_ratio=0.5, json_path=None):
    """Return (kept, removed) annotation dicts for vehicles visible to *camera*.

    Vehicles are pre-filtered by view angle and *max_dist*, given 2D boxes,
    optionally classified via *json_path*, and finally split by the
    depth-image occlusion filter.
    """
    candidates = filter_angle_distance(vehicles, camera, max_dist)
    boxes_2d = [get_2d_bb(v, camera) for v in candidates]
    classes = get_vehicle_class(candidates, json_path) if json_path is not None else []
    kept, removed, _, _ = filter_occlusion_bbox(
        boxes_2d, candidates, camera, depth_img, classes,
        False, depth_margin, patch_ratio, resize_ratio)
    return kept, removed
### Same with auto_annotate(), but with debugging function for the occlusion filter
def auto_annotate_debug(vehicles, camera, depth_img, depth_show=False, max_dist=100, depth_margin=-1, patch_ratio=0.5, resize_ratio=0.5, json_path=None):
    """Same as auto_annotate() but also returns the occlusion filter's debug
    outputs (depth areas and, when *depth_show* is True, its visualisation)."""
    candidates = filter_angle_distance(vehicles, camera, max_dist)
    boxes_2d = [get_2d_bb(v, camera) for v in candidates]
    classes = get_vehicle_class(candidates, json_path) if json_path is not None else []
    kept, removed, depth_area, depth_vis = filter_occlusion_bbox(
        boxes_2d, candidates, camera, depth_img, classes,
        depth_show, depth_margin, patch_ratio, resize_ratio)
    return kept, removed, depth_area, depth_vis
#####################################################
#####################################################
### PART 1
### Use this function to get camera k matrix ########
#####################################################
### Get camera intrinsic matrix 'k'
def get_camera_intrinsic(sensor):
    """Build the 3x3 pinhole intrinsic matrix K from a camera's attributes.

    Principal point is the image centre; focal length is derived from the
    horizontal field of view (identical for x and y, i.e. square pixels).
    """
    width = int(sensor.attributes['image_size_x'])
    height = int(sensor.attributes['image_size_y'])
    fov_deg = int(float(sensor.attributes['fov']))
    focal = width / (2.0 * np.tan(fov_deg * np.pi / 360.0))
    k = np.identity(3)
    k[0, 0] = k[1, 1] = focal
    k[0, 2] = width / 2.0
    k[1, 2] = height / 2.0
    return k
#######################################################
#######################################################
### PART 2
### Use these functions to find 2D BB in the image ####
#######################################################
### Extract bounding box vertices of vehicle
def create_bb_points(vehicle):
    """Return the 8 corners of *vehicle*'s 3D bounding box as 8x4 homogeneous rows.

    Rows 0-3 are the bottom face (-z), rows 4-7 the top face (+z), each face
    ordered (+x,+y), (-x,+y), (-x,-y), (+x,-y) around the box centre.
    """
    ext = vehicle.bounding_box.extent
    signs = [(1, 1), (-1, 1), (-1, -1), (1, -1)]
    corners = np.ones((8, 4))  # column 3 stays 1 (homogeneous coordinate)
    for i, (sx, sy) in enumerate(signs):
        corners[i, :3] = [sx * ext.x, sy * ext.y, -ext.z]      # bottom face
        corners[i + 4, :3] = [sx * ext.x, sy * ext.y, ext.z]   # top face
    return corners
### Get transformation matrix from carla.Transform object
def get_matrix(transform):
rotation = transform.rotation
location = transform.location
c_y = np.cos(np.radians(rotation.yaw))
s_y = np.sin(np.radians(rotation.yaw))
c_r = np.cos(np.radians(rotation.roll))
s_r = np.sin(np.radians(rotation.roll))
c_p = np.cos(np.radians(rotation.pitch))
s_p = np.sin(np.radians(rotation.pitch))
matrix = np.matrix(np.identity(4))
matrix[0, 3] = location.x
matrix[1, 3] = location.y
matrix[2, 3] = location.z
matrix[0, 0] = c_p * c_y
matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
matrix[1, 0] = s_y * c_p
matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
matrix[2, 0] = s_p
matrix[2, 1] = -c_p * s_r
matrix[2, 2] = c_p * c_r
return matrix
### Transform coordinate from vehicle reference to world reference
def vehicle_to_world(cords, vehicle):
    """Map homogeneous points from *vehicle*'s bounding-box frame to world frame.

    cords: (N, 4) homogeneous rows relative to the bounding-box centre (as
    produced by create_bb_points()); returns a 4xN np.matrix in world frame.
    """
    # The bounding box centre may be offset from the actor origin, so the
    # box->vehicle translation is composed with the vehicle->world pose.
    bb_transform = carla.Transform(vehicle.bounding_box.location)
    bb_vehicle_matrix = get_matrix(bb_transform)
    vehicle_world_matrix = get_matrix(vehicle.get_transform())
    bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
    world_cords = np.dot(bb_world_matrix, np.transpose(cords))
    return world_cords
### Transform coordinate from world reference to sensor reference
def world_to_sensor(cords, sensor):
    """Map homogeneous world-frame coordinates into *sensor*'s local frame."""
    # Invert the sensor's world pose to obtain the world->sensor transform.
    sensor_pose = get_matrix(sensor.get_transform())
    return np.dot(np.linalg.inv(sensor_pose), cords)
### Transform coordinate from vehicle reference to sensor reference
def vehicle_to_sensor(cords, vehicle, sensor):
    """Map vehicle-local homogeneous coordinates into *sensor*'s frame."""
    return world_to_sensor(vehicle_to_world(cords, vehicle), sensor)
### Summarize bounding box creation and project the points in sensor image
def get_bounding_box(vehicle, sensor):
    """Project *vehicle*'s 3D bounding box into *sensor*'s image plane.

    Returns an 8x3 np.matrix whose rows are (u, v, depth) for each box
    vertex: pixel coordinates plus the camera-frame forward distance.
    """
    camera_k_matrix = get_camera_intrinsic(sensor)
    bb_cords = create_bb_points(vehicle)
    cords_x_y_z = vehicle_to_sensor(bb_cords, vehicle, sensor)[:3, :]
    # Reorder axes from the (x fwd, y right, z up) sensor frame to the image
    # convention expected by K: (u ~ y, v ~ -z, depth ~ x).
    cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])
    bbox = np.transpose(np.dot(camera_k_matrix, cords_y_minus_z_x))
    # Perspective divide by depth to obtain pixel coordinates.
    camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)
    return camera_bbox
### Draw 2D bounding box (4 vertices) from 3D bounding box (8 vertices) in image
### 2D bounding box is represented by two corner points
def p3d_to_p2d_bb(p3d_bb):
    """Collapse projected box vertices to an axis-aligned 2D box.

    Returns [[min_x, min_y], [max_x, max_y]] as a 2x2 ndarray.
    """
    xs = p3d_bb[:, 0]
    ys = p3d_bb[:, 1]
    return np.array([[np.amin(xs), np.amin(ys)], [np.amax(xs), np.amax(ys)]])
### Summarize 2D bounding box creation
def get_2d_bb(vehicle, sensor):
    """Project *vehicle*'s 3D box into *sensor* and return its 2D [[min],[max]] box."""
    return p3d_to_p2d_bb(get_bounding_box(vehicle, sensor))
#######################################################
#######################################################
### PART 3
### Use these functions to remove invisible vehicles ##
#######################################################
### Get numpy 2D array of vehicles' location and rotation from world reference, also locations from sensor reference
def get_list_transform(vehicles_list, sensor):
    """Return (t_list, transform_s) for every vehicle in *vehicles_list*.

    t_list: (N, 6) ndarray of [x, y, z, roll, pitch, yaw] in world frame.
    transform_s: (N, 4) homogeneous locations in *sensor*'s frame; this is an
    np.matrix (because get_matrix() returns np.matrix), which downstream
    filters rely on for their column-shaped slicing.
    """
    t_list = []
    for vehicle in vehicles_list:
        v = vehicle.get_transform()
        transform = [v.location.x , v.location.y , v.location.z , v.rotation.roll , v.rotation.pitch , v.rotation.yaw]
        t_list.append(transform)
    t_list = np.array(t_list).reshape((len(t_list),6))
    # Homogeneous world positions: [x, y, z, 1] rows.
    transform_h = np.concatenate((t_list[:,:3],np.ones((len(t_list),1))),axis=1)
    sensor_world_matrix = get_matrix(sensor.get_transform())
    world_sensor_matrix = np.linalg.inv(sensor_world_matrix)
    transform_s = np.dot(world_sensor_matrix, transform_h.T).T
    return t_list , transform_s
### Remove vehicles that are not in the FOV of the sensor
def filter_angle(vehicles_list, v_transform, v_transform_s, sensor):
    """Drop vehicles whose bearing is outside the camera's horizontal FOV.

    v_transform_s must be the np.matrix from get_list_transform(): the
    selector is sliced with [:, 0] below, which requires the (N, 1) column
    shape produced by matrix arithmetic (a 1-D ndarray would raise here).
    Returns the filtered (vehicles, world transforms, sensor transforms).
    """
    attr_dict = sensor.attributes
    VIEW_FOV = float(attr_dict['fov'])
    # Bearing of each vehicle in the sensor frame (x forward, y right).
    v_angle = np.arctan2(v_transform_s[:,1],v_transform_s[:,0]) * 180 / np.pi
    selector = np.array(np.absolute(v_angle) < (int(VIEW_FOV)/2))
    vehicles_list_f = [v for v, s in zip(vehicles_list, selector) if s]
    v_transform_f = v_transform[selector[:,0],:]
    v_transform_s_f = v_transform_s[selector[:,0],:]
    return vehicles_list_f , v_transform_f , v_transform_s_f
### Remove vehicles that have distance > max_dist from the sensor
def filter_distance(vehicles_list, v_transform, v_transform_s, sensor, max_dist=100):
    """Drop vehicles farther than *max_dist* metres from the sensor.

    Compares squared distances (no sqrt needed). Returns the filtered
    (vehicles, world transforms, sensor transforms) triplet.
    """
    s = sensor.get_transform()
    s_transform = np.array([s.location.x , s.location.y , s.location.z])
    dist2 = np.sum(np.square(v_transform[:,:3] - s_transform), axis=1)
    selector = dist2 < (max_dist**2)
    vehicles_list_f = [v for v, s in zip(vehicles_list, selector) if s]
    v_transform_f = v_transform[selector,:]
    v_transform_s_f = v_transform_s[selector,:]
    return vehicles_list_f , v_transform_f , v_transform_s_f
### Remove vehicles that are occluded from the sensor view based on one point depth measurement
### NOT USED by default because of the unstable result
def filter_occlusion_1p(vehicles_list, v_transform, v_transform_s, sensor, depth_img, depth_margin=2.0):
    """Occlusion test using a single depth-image sample per vehicle.

    Projects each vehicle centre into the image and keeps the vehicle when
    its forward distance agrees with the depth pixel within *depth_margin*
    metres. Off-screen vehicles get v_depth 0, so they pass only if closer
    than depth_margin. Returns (vehicles, world transforms, sensor
    transforms, depth_patches), where depth_patches are the 7x7 sample
    boxes used for debug display.
    """
    camera_k_matrix = get_camera_intrinsic(sensor)
    CAM_W = int(sensor.attributes['image_size_x'])
    CAM_H = int(sensor.attributes['image_size_y'])
    # Project vehicle centres: reorder axes to (u ~ y, v ~ -z, depth ~ x).
    pos_x_y_z = v_transform_s.T
    pos_y_minus_z_x = np.concatenate([pos_x_y_z[1, :], -pos_x_y_z[2, :]-0.0, pos_x_y_z[0, :]])
    img_pos = np.transpose(np.dot(camera_k_matrix, pos_y_minus_z_x))
    camera_pos = np.concatenate([img_pos[:, 0] / img_pos[:, 2], img_pos[:, 1] / img_pos[:, 2], img_pos[:, 2]], axis=1)
    u_arr = np.array(camera_pos[:,0]).flatten()
    v_arr = np.array(camera_pos[:,1]).flatten()
    dist = np.array(v_transform_s[:,0]).flatten()
    depth_patches = []
    v_depth = []
    for u, v in zip(list(u_arr),list(v_arr)):
        if u<=CAM_W and v<=CAM_H:
            v_depth.append(depth_img[int(v),int(u)])
            # 7x7 pixel box around the sample point, kept for debug drawing.
            depth_a = np.array([[int(u)-3,int(v)-3] , [int(u)+3,int(v)+3]])
            depth_patches.append(depth_a)
        else:
            v_depth.append(0)
    v_depth = np.array(v_depth)
    # Visible when the expected distance does not exceed the measured depth
    # by more than the margin (i.e. nothing solid sits in front of it).
    selector = (dist-v_depth) < depth_margin
    vehicles_list_f = [v for v, s in zip(vehicles_list, selector) if s]
    v_transform_f = v_transform[selector,:]
    v_transform_s_f = v_transform_s[selector,:]
    return vehicles_list_f , v_transform_f , v_transform_s_f, depth_patches
### Apply angle and distance filters in one function
def filter_angle_distance(vehicles_list, sensor, max_dist=100):
    """Keep only vehicles within *max_dist* of *sensor* and inside its horizontal FOV."""
    transforms_w, transforms_s = get_list_transform(vehicles_list, sensor)
    kept, transforms_w, transforms_s = filter_distance(vehicles_list, transforms_w, transforms_s, sensor, max_dist)
    kept, _, _ = filter_angle(kept, transforms_w, transforms_s, sensor)
    return kept
### Apply occlusion filter based on resized bounding box depth values
def filter_occlusion_bbox(bounding_boxes, vehicles, sensor, depth_img, v_class=None, depth_capture=False, depth_margin=-1, patch_ratio=0.5, resize_ratio=0.5):
    """Split detections into visible and occluded using the depth image.

    For each vehicle, a patch of the depth image (its 2D bbox shrunk around
    the centre by *resize_ratio*) is compared against the vehicle's forward
    distance in the sensor frame; the vehicle counts as visible when enough
    patch pixels agree with that distance within a margin.

    Parameters:
        bounding_boxes: list of 2x2 boxes [[min_u, min_v], [max_u, max_v]].
        vehicles: vehicles (or snapshots) matching bounding_boxes one-to-one.
        sensor: camera actor used for the sensor-frame transform.
        depth_img: (H, W) depth map in metres (see extract_depth()).
        v_class: optional per-vehicle class labels, split alongside the boxes.
        depth_capture: when True, emit one round of debug output, then reset.
        depth_margin: tolerance in metres; negative means "derive per vehicle
            from its footprint diagonal + 0.25". BUG FIX: previously the
            first vehicle's derived margin overwrote the parameter, so every
            later vehicle reused it instead of getting its own margin.
        patch_ratio: fraction of patch rows that must agree to keep a vehicle.
        resize_ratio: shrink factor applied to the bbox before sampling depth.

    Returns (filtered_out, removed_out, patches, depth_capture); the two
    dicts carry 'bbox'/'vehicles' (plus 'id'/'3dbbox' for kept vehicles and
    'class' when v_class is given).
    """
    selector = []
    patches = []
    patch_delta = []
    _, v_transform_s = get_list_transform(vehicles, sensor)
    for v, vs, bbox in zip(vehicles, v_transform_s, bounding_boxes):
        dist = vs[:, 0]
        # Per-vehicle tolerance: footprint diagonal plus a small constant.
        if depth_margin < 0:
            margin = (v.bounding_box.extent.x**2 + v.bounding_box.extent.y**2)**0.5 + 0.25
        else:
            margin = depth_margin
        uc = int((bbox[0, 0] + bbox[1, 0]) / 2)
        vc = int((bbox[0, 1] + bbox[1, 1]) / 2)
        wp = int((bbox[1, 0] - bbox[0, 0]) * resize_ratio / 2)
        hp = int((bbox[1, 1] - bbox[0, 1]) * resize_ratio / 2)
        u1, u2 = uc - wp, uc + wp
        v1, v2 = vc - hp, vc + hp
        depth_patch = np.array(depth_img[v1:v2, u1:u2])
        dist_delta = dist - depth_patch
        s_patch = np.array(dist_delta < margin)
        # NOTE(review): the threshold scales with the patch *row* count only,
        # not the full pixel count — confirm this is intentional.
        s = np.sum(s_patch) > s_patch.shape[0] * patch_ratio
        selector.append(s)
        patches.append(np.array([[u1, v1], [u2, v2]]))
        patch_delta.append(dist_delta)
    filtered_bboxes = []
    filtered_vehicles = []
    removed_bboxes = []
    removed_vehicles = []
    for bbox, v, s in zip(bounding_boxes, vehicles, selector):
        if s:
            filtered_bboxes.append(bbox)
            filtered_vehicles.append(v)
        else:
            removed_bboxes.append(bbox)
            removed_vehicles.append(v)
    # World-frame 3D boxes and actor ids for the kept vehicles only.
    vehicle_3d_bbox = []
    vehicle_id = []
    for vehicle in filtered_vehicles:
        vehicle_id.append(vehicle.id)
        bb_cords = create_bb_points(vehicle)
        vehicle_3d_bbox.append(vehicle_to_world(bb_cords, vehicle))
    filtered_out = {'id': vehicle_id, '3dbbox': vehicle_3d_bbox,
                    'bbox': filtered_bboxes, 'vehicles': filtered_vehicles}
    removed_out = {'bbox': removed_bboxes, 'vehicles': removed_vehicles}
    if v_class is not None:
        filtered_v_class = []
        removed_v_class = []
        for cls, s in zip(v_class, selector):
            if s:
                filtered_v_class.append(cls)
            else:
                removed_v_class.append(cls)
        filtered_out['class'] = filtered_v_class
        removed_out['class'] = removed_v_class
    if depth_capture:
        depth_debug(patches, depth_img, sensor)
        for i, matrix in enumerate(patch_delta):
            print("\nvehicle " + str(i) + ": \n" + str(matrix))
        depth_capture = False
    return filtered_out, removed_out, patches, depth_capture
### Display area in depth image where measurement values are taken
def depth_debug(depth_patches, depth_img, sensor):
    """Render the depth map with the sampled measurement patches outlined in red.

    Debug helper: log-scales the depth map into the 8-bit range, replicates
    it to three channels, draws one rectangle per patch and opens the result
    with the default image viewer.

    Fix: removed a dead bare-expression statement (`depth_img` on its own
    line) that had no effect.
    """
    CAM_W = int(sensor.attributes['image_size_x'])
    CAM_H = int(sensor.attributes['image_size_y'])
    # Log scaling compresses the large metric range into visible contrast;
    # * 255 / 4 maps log10 values (up to ~4, i.e. 10 km) onto 0-255.
    depth_img = np.log10(depth_img)
    depth_img = depth_img * 255 / 4
    depth_3ch = np.zeros((CAM_H, CAM_W, 3))
    depth_3ch[:, :, 0] = depth_3ch[:, :, 1] = depth_3ch[:, :, 2] = depth_img
    depth_3ch = np.uint8(depth_3ch)
    image = Image.fromarray(depth_3ch, 'RGB')
    img_draw = ImageDraw.Draw(image)
    for crop in depth_patches:
        top_left = (int(crop[0, 0]), int(crop[0, 1]))
        bottom_right = (int(crop[1, 0]), int(crop[1, 1]))
        img_draw.rectangle([top_left, bottom_right], outline="red")
    image.show()
### Filter out lidar points that are outside camera FOV
def filter_lidar(lidar_data, camera, max_dist):
    """Keep lidar points inside the camera frustum and within *max_dist* metres.

    Points are first remapped to the camera axis order (y, -z, x), i.e.
    (right, down, forward), before the horizontal/vertical angle tests.
    """
    CAM_W = int(camera.attributes['image_size_x'])
    CAM_H = int(camera.attributes['image_size_y'])
    CAM_HFOV = float(camera.attributes['fov'])
    # Vertical FOV derived from the horizontal FOV and aspect ratio (pinhole model).
    CAM_VFOV = np.rad2deg(2*np.arctan(np.tan(np.deg2rad(CAM_HFOV/2))*CAM_H/CAM_W))
    lidar_points = np.array([[p.point.y,-p.point.z,p.point.x] for p in lidar_data])
    dist2 = np.sum(np.square(lidar_points), axis=1).reshape((-1))
    p_angle_h = np.absolute(np.arctan2(lidar_points[:,0],lidar_points[:,2]) * 180 / np.pi).reshape((-1))
    p_angle_v = np.absolute(np.arctan2(lidar_points[:,1],lidar_points[:,2]) * 180 / np.pi).reshape((-1))
    # Keep points that pass the distance test AND both half-angle tests.
    selector = np.array(np.logical_and(dist2 < (max_dist**2), np.logical_and(p_angle_h < (CAM_HFOV/2), p_angle_v < (CAM_VFOV/2))))
    filtered_lidar = [pt for pt, s in zip(lidar_data, selector) if s]
    return filtered_lidar
### Save camera image with projected lidar points for debugging purpose
def show_lidar(lidar_data, camera, carla_img):
    """Write the camera frame with projected lidar points to out_lidar_img/.

    Points are projected with the camera intrinsics and drawn as 1-pixel
    circles; points with object_idx == 0 are drawn white, all others black
    (presumably 0 means "no actor hit" — confirm against the lidar source).
    """
    # Remap lidar points to camera axis order (right, down, forward).
    lidar_np = np.array([[p.point.y,-p.point.z,p.point.x] for p in lidar_data])
    cam_k = get_camera_intrinsic(camera)
    # Project LIDAR 3D to Camera 2D
    lidar_2d = np.transpose(np.dot(cam_k,np.transpose(lidar_np)))
    # Perspective divide by depth, truncated to integer pixel coordinates.
    lidar_2d = (lidar_2d/lidar_2d[:,2].reshape((-1,1))).astype(int)
    # Visualize the result
    c_scale = []
    for pts in lidar_data:
        if pts.object_idx == 0: c_scale.append(255)
        else: c_scale.append(0)
    carla_img.convert(carla.ColorConverter.Raw)
    # Raw buffer is BGRA; reorder channels into an RGB uint8 image.
    img_bgra = np.array(carla_img.raw_data).reshape((carla_img.height,carla_img.width,4))
    img_rgb = np.zeros((carla_img.height,carla_img.width,3))
    img_rgb[:,:,0] = img_bgra[:,:,2]
    img_rgb[:,:,1] = img_bgra[:,:,1]
    img_rgb[:,:,2] = img_bgra[:,:,0]
    img_rgb = np.uint8(img_rgb)
    for p,c in zip(lidar_2d,c_scale):
        c = int(c)
        cv2.circle(img_rgb,tuple(p[:2]),1,(c,c,c),-1)
    filename = 'out_lidar_img/%06d.jpg' % carla_img.frame
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
    cv2.imwrite(filename, img_rgb)
### Add actor ID of the vehicle hit by the lidar points
### Only used before the object_id issue of semantic lidar solved
def get_points_id(lidar_points, vehicles, camera, max_dist):
    """Assign the hitting vehicle's actor id to each lidar point's object_idx.

    Workaround used while semantic-lidar object ids were unreliable: each
    point is transformed to world space and tested against the bounding box
    of every camera-visible vehicle; the first containing box wins.
    Points are mutated in place and also returned as a list.
    """
    vehicles_f = filter_angle_distance(vehicles, camera, max_dist)
    # The camera pose is constant for the whole scan — hoisted out of the
    # loop (it was recomputed once per point before).
    sensor_world_matrix = get_matrix(camera.get_transform())
    fixed_lidar_points = []
    for p in lidar_points:
        pw = np.dot(sensor_world_matrix, [[p.point.x], [p.point.y], [p.point.z], [1]])
        pw = carla.Location(pw[0, 0], pw[1, 0], pw[2, 0])
        for v in vehicles_f:
            if v.bounding_box.contains(pw, v.get_transform()):
                p.object_idx = v.id
                break
        fixed_lidar_points.append(p)
    return fixed_lidar_points
#######################################################
#######################################################
### PART 4
### Function to return vehicle's label ################
#######################################################
def get_vehicle_class(vehicles, json_path=None):
    """Look up each vehicle's integer class label from a JSON mapping file.

    The file at *json_path* must contain a 'classification' dict mapping
    blueprint type_id -> class, and a 'reference' dict whose 'others' entry
    is the fallback class for unknown blueprints.

    Returns a list of ints, one per vehicle, in input order.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(json_path) as f:
        json_data = json.load(f)
    vehicles_data = json_data['classification']
    other_class = json_data["reference"].get('others')
    return [int(vehicles_data.get(v.type_id, other_class)) for v in vehicles]
#######################################################
#######################################################
### PART 5
### Function to save output ###########################
#######################################################
### Use this function to save the rgb image (with and without bounding box) and bounding boxes data
def save_output(carla_img, bboxes, vehicle_class=None, old_bboxes=None, old_vehicle_class=None, cc_rgb=carla.ColorConverter.Raw, path='', save_patched=False, add_data=None, out_format='pickle'):
    """Save the RGB frame plus its bounding-box annotations.

    Writes the raw image under <path>out_rgb/, the annotation dict under
    <path>out_bbox/ (JSON text when out_format == 'json', otherwise a binary
    pickle), and optionally a copy of the image with red boxes drawn under
    <path>out_rgb_bbox/.

    BUG FIX: the 'pickle' branch previously dumped JSON text into the .pkl
    file; it now writes a real binary pickle as the extension promises.
    """
    import pickle  # local import: only the default 'pickle' format needs it
    carla_img.save_to_disk(path + 'out_rgb/%06d.png' % carla_img.frame, cc_rgb)
    out_dict = {'bboxes': [bbox.tolist() for bbox in bboxes]}
    if vehicle_class is not None:
        out_dict['vehicle_class'] = vehicle_class
    if old_bboxes is not None:
        out_dict['removed_bboxes'] = [bbox.tolist() for bbox in old_bboxes]
    if old_vehicle_class is not None:
        out_dict['removed_vehicle_class'] = old_vehicle_class
    if add_data is not None:
        out_dict['others'] = add_data
    if out_format == 'json':
        filename = path + 'out_bbox/%06d.txt' % carla_img.frame
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w') as outfile:
            json.dump(out_dict, outfile, indent=4)
    else:
        filename = path + 'out_bbox/%06d.pkl' % carla_img.frame
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'wb') as outfile:
            pickle.dump(out_dict, outfile)
    if save_patched:
        carla_img.convert(cc_rgb)
        # Raw buffer is BGRA; reorder channels into an RGB uint8 image.
        img_bgra = np.array(carla_img.raw_data).reshape((carla_img.height, carla_img.width, 4))
        img_rgb = np.zeros((carla_img.height, carla_img.width, 3))
        img_rgb[:, :, 0] = img_bgra[:, :, 2]
        img_rgb[:, :, 1] = img_bgra[:, :, 1]
        img_rgb[:, :, 2] = img_bgra[:, :, 0]
        img_rgb = np.uint8(img_rgb)
        image = Image.fromarray(img_rgb, 'RGB')
        img_draw = ImageDraw.Draw(image)
        for crop in bboxes:
            corner_a = (int(crop[0, 0]), int(crop[0, 1]))
            corner_b = (int(crop[1, 0]), int(crop[1, 1]))
            img_draw.rectangle([corner_a, corner_b], outline="red")
        filename = path + 'out_rgb_bbox/%06d.png' % carla_img.frame
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        image.save(filename)
def ensure_safe_bound(val):
    """Clamp *val* into the closed interval [0, 1]."""
    if val > 1:
        return 1
    if val < 0:
        return 0
    return val
### Use this function to save bounding box result in darknet training format
def save2darknet(bboxes, vehicle_class, carla_img, data_path = '', cc_rgb = carla.ColorConverter.Raw, save_train = False, customName=''):
    """Save image + labels in darknet/YOLO format under <data_path>data/obj/.

    Each label line is "<class> <cx> <cy> <w> <h>" with coordinates
    normalized to image size. When save_train is True, (re)writes
    data/train.txt listing every .jpg in the obj directory. Pass
    bboxes/vehicle_class/carla_img as None to only refresh train.txt.
    """
    # check whether target path exists
    if customName != '':
        customName = str(customName) + '_'
    data_path = data_path + 'data/'
    if not os.path.exists(os.path.dirname(data_path)):
        os.makedirs(os.path.dirname(data_path))
        print(data_path + ' directory did not exists, new directory created')
    obj_path = data_path + 'obj/'
    if not os.path.exists(os.path.dirname(obj_path)):
        print(obj_path + ' directory did not exists, new directory created')
        os.makedirs(os.path.dirname(obj_path))
    bbr = bboxes is not None
    vcr = vehicle_class is not None
    cir = carla_img is not None
    if bbr or vcr or cir:
        # save image
        carla_img.convert(cc_rgb)
        # Raw buffer is BGRA; reorder channels into an RGB uint8 image.
        img_bgra = np.array(carla_img.raw_data).reshape((carla_img.height,carla_img.width,4))
        img_rgb = np.zeros((carla_img.height,carla_img.width,3))
        img_rgb[:,:,0] = img_bgra[:,:,2]
        img_rgb[:,:,1] = img_bgra[:,:,1]
        img_rgb[:,:,2] = img_bgra[:,:,0]
        img_rgb = np.uint8(img_rgb)
        image = Image.fromarray(img_rgb, 'RGB')
        #os.makedirs(os.path.dirname(obj_path + '/%06d.jpg' % carla_img.frame))
        image.save(obj_path + '/' + str(customName) + '%06d.jpg' % carla_img.frame)
        # save bounding box data
        # darknet format: centre x/y and width/height, all in [0, 1].
        datastr = ''
        for box, v_class in zip(bboxes, vehicle_class):
            uc = ((box[0,0] + box[1,0])/2) / carla_img.width
            vc = ((box[0,1] + box[1,1])/2) / carla_img.height
            w = (box[1,0] - box[0,0]) / carla_img.width
            h = (box[1,1] - box[0,1]) / carla_img.height
            datastr = datastr + f"{v_class} {uc} {vc} {w} {h} \n"
        with open(obj_path + '/' + str(customName) + '%06d.txt' % carla_img.frame, 'w') as filetxt:
            filetxt.write(datastr)
            filetxt.close()
    # save train.txt
    if save_train:
        # NOTE(review): `glob.glob` assumes the file does `import glob`; if it
        # uses `from glob import glob` instead this would fail — confirm.
        img_list = glob.glob(obj_path + '/*.jpg')
        if len(img_list) == 0:
            print('no image found')
        else:
            trainstr = ''
            for imgname in img_list:
                trainstr = trainstr + imgname + '\n'
            trainstr = trainstr.replace('\\','/')
            with open(data_path + '/train.txt', 'w') as filetxt:
                filetxt.write(trainstr)
                filetxt.close()
### Use this function to convert depth image (carla.Image) to a depth map in meter
def extract_depth(depth_img):
    """Convert a CARLA depth image into an (H, W) ndarray of metres.

    After ColorConverter.Depth the image is grayscale, so channel 0 of the
    4-channel buffer holds the depth value; the * 1000 / 255 rescale maps
    the 8-bit range to metres (assumes a 1 km depth range — confirm against
    the CARLA ColorConverter docs).
    """
    depth_img.convert(carla.ColorConverter.Depth)
    depth_meter = np.array(depth_img.raw_data).reshape((depth_img.height,depth_img.width,4))[:,:,0] * 1000 / 255
    return depth_meter
### Use this function to get vehicles' snapshots that can be processed by auto_annotate() function.
def snap_processing(vehiclesActor, worldSnap):
    """Return snapshots of *vehiclesActor* enriched with bounding_box/type_id.

    World snapshots carry no blueprint data, so each matching snapshot gets
    the actor's bounding box and type_id copied onto it; actors missing from
    the snapshot are skipped.
    """
    enriched = []
    for actor in vehiclesActor:
        snapshot = worldSnap.find(actor.id)
        if snapshot is None:
            continue
        snapshot.bounding_box = actor.bounding_box
        snapshot.type_id = actor.type_id
        enriched.append(snapshot)
    return enriched
#######################################################
#######################################################
| 43.78798 | 195 | 0.626482 |
ace836b3a1b02b4aa48f8ea83a52106157d138c7 | 5,824 | gyp | Python | LuaKitProject/src/Projects/ipc/ipc.gyp | andrewvmail/luakit | edbadd7824bd17b6a430d8323f255d404498c27a | [
"MIT"
] | 321 | 2018-06-17T03:52:46.000Z | 2022-03-18T02:34:52.000Z | ipc/ipc.gyp | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 19 | 2018-06-26T10:37:45.000Z | 2020-12-09T03:16:45.000Z | ipc/ipc.gyp | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 58 | 2018-06-21T10:43:03.000Z | 2022-03-29T12:42:11.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'ipc.gypi',
],
'targets': [
{
'target_name': 'ipc',
'type': '<(component)',
'variables': {
'ipc_target': 1,
},
'dependencies': [
'../base/base.gyp:base',
# TODO(viettrungluu): Needed for base/lazy_instance.h, which is suspect.
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 64-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
},
{
'target_name': 'ipc_tests',
'type': '<(gtest_target_type)',
'dependencies': [
'ipc',
'test_support_ipc',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
],
'include_dirs': [
'..'
],
'sources': [
'file_descriptor_set_posix_unittest.cc',
'ipc_channel_posix_unittest.cc',
'ipc_channel_unittest.cc',
'ipc_fuzzing_tests.cc',
'ipc_message_unittest.cc',
'ipc_message_utils_unittest.cc',
'ipc_send_fds_test.cc',
'ipc_sync_channel_unittest.cc',
'ipc_sync_message_unittest.cc',
'ipc_sync_message_unittest.h',
'ipc_test_base.cc',
'ipc_test_base.h',
'run_all_unittests.cc',
'sync_socket_unittest.cc',
'unix_domain_socket_util_unittest.cc',
],
'conditions': [
['toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
}],
['OS == "win" or OS == "ios"', {
'sources!': [
'unix_domain_socket_util_unittest.cc',
],
}],
['OS == "android" and gtest_target_type == "shared_library"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
['os_posix == 1 and OS != "mac" and OS != "android"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}]
],
},
{
'target_name': 'ipc_perftests',
'type': '<(gtest_target_type)',
# TODO(viettrungluu): Figure out which dependencies are really needed.
'dependencies': [
'ipc',
'test_support_ipc',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
'../base/base.gyp:test_support_perf',
'../testing/gtest.gyp:gtest',
],
'include_dirs': [
'..'
],
'sources': [
'ipc_perftests.cc',
'ipc_test_base.cc',
'ipc_test_base.h',
],
'conditions': [
['toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
}],
['OS == "android" and gtest_target_type == "shared_library"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
['os_posix == 1 and OS != "mac" and OS != "android"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}]
],
},
{
'target_name': 'test_support_ipc',
'type': 'static_library',
'dependencies': [
'ipc',
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'ipc_multiprocess_test.cc',
'ipc_multiprocess_test.h',
'ipc_test_sink.cc',
'ipc_test_sink.h',
],
},
],
'conditions': [
['OS=="win" and target_arch=="ia32"', {
'targets': [
{
'target_name': 'ipc_win64',
'type': '<(component)',
'variables': {
'ipc_target': 1,
},
'dependencies': [
'../base/base.gyp:base_nacl_win64',
# TODO(viettrungluu): Needed for base/lazy_instance.h, which is
# suspect.
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
],
}],
# Special target to wrap a gtest_target_type==shared_library
# ipc_tests into an android apk for execution.
# See base.gyp for TODO(jrg)s about this strategy.
['OS == "android" and gtest_target_type == "shared_library"', {
'targets': [
{
'target_name': 'ipc_tests_apk',
'type': 'none',
'dependencies': [
'ipc_tests',
],
'variables': {
'test_suite_name': 'ipc_tests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)ipc_tests<(SHARED_LIB_SUFFIX)',
},
'includes': [ '../build/apk_test.gypi' ],
}],
}],
],
}
| 29.266332 | 104 | 0.503777 |
ace837eaa9607afe1388de95f97d6f1b1025e1d2 | 20,549 | py | Python | flux_combined_high_binding/model_553.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_combined_high_binding/model_553.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_combined_high_binding/model_553.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 65000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 70000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.134259 | 798 | 0.804127 |
ace838f2c99d3ed6a889bd353f1cb73f777fac17 | 1,599 | py | Python | examples/createExtendedSourceFromTemplate.py | BjoernBiltzinger/astromodels | d94a3d3bc607def2b5e3cd145c3922e0a00a7b15 | [
"BSD-3-Clause"
] | 12 | 2019-03-07T09:45:35.000Z | 2022-02-13T14:05:07.000Z | examples/createExtendedSourceFromTemplate.py | grburgess/astromodels | a657411ca29de4a806838ba05f8a062f99fa1ab5 | [
"BSD-3-Clause"
] | 83 | 2019-01-27T18:57:51.000Z | 2022-01-28T12:57:26.000Z | examples/createExtendedSourceFromTemplate.py | grburgess/astromodels | a657411ca29de4a806838ba05f8a062f99fa1ab5 | [
"BSD-3-Clause"
] | 25 | 2019-01-10T09:02:16.000Z | 2022-03-23T03:52:36.000Z | # code to show how to create an extended source via uploading a FITs image template
# author: Andrea Albert (aalbert@slac.stanford.edu)
# date: Oct 26, 2016
from threeML import *
# the class SpatialTemplate_2D expects a FITs file that contains a header with the following info: reference pixels (e.g. 'CRPIX1'), pixels step in degrees (e.g. 'CDELT1'), RA and DEC values at reference pixel (e.g. 'CRVAL1')
# initialize shape object
shape = SpatialTemplate_2D()
# load in template file
# by default the extension number is set to zero (ihdu = 0)
shape.load_file("exampleDMtemplate.fits",ihdu=0)
# just for example let's assume a powerlaw spectrum
spectrum = Powerlaw()
source = ExtendedSource("M31",spatial_shape=shape,spectral_shape=spectrum)
# The code assumes the template is normalized to 1 sr. If it isn't be default then you should set the optional normalization (K) appropriately. The example template is already normalized to 1 sr so we'll keep K set to 1. Note K is set to 1 and fixed by default, we include the following commands as an example of how to manipulate K
shape.K = 1.
shape.K.fix = True
# The following are example commands that get called during fitting
# get the edges of the template
(min_ra,max_ra),(min_dec,max_dec) = shape.get_boundaries()
# return the values at various pixels at locations (x,y). Note the code assumes x=RA (degrees) and y=DEC(degrees). Note the code will return a value of 0 is the pixel is outside the template ROI...in this example only the 2nd pixel will have a non-zero value
val = shape.evaluate(x=[1.,10.,10.],y=[1.,40.,89.],K=1) | 53.3 | 335 | 0.752345 |
ace839e165c27c8e4e378212069eac0d51f1f380 | 249 | py | Python | nautilus/api/util/summarize_mutation_io.py | AlecAivazis/python | 70e2acef27a2f87355590be1a6ca60ce3ab4d09c | [
"MIT"
] | 9 | 2019-02-17T01:33:43.000Z | 2022-02-03T02:14:12.000Z | nautilus/api/util/summarize_mutation_io.py | AlecAivazis/python | 70e2acef27a2f87355590be1a6ca60ce3ab4d09c | [
"MIT"
] | 59 | 2016-03-14T15:55:50.000Z | 2016-07-17T15:22:56.000Z | nautilus/api/util/summarize_mutation_io.py | AlecAivazis/python | 70e2acef27a2f87355590be1a6ca60ce3ab4d09c | [
"MIT"
] | 3 | 2017-08-03T20:18:59.000Z | 2018-07-18T02:03:41.000Z | def summarize_mutation_io(name, type, required=False):
"""
This function returns the standard summary for mutations inputs
and outputs
"""
return dict(
name=name,
type=type,
required=required
) | 24.9 | 71 | 0.610442 |
ace83a15e8fb0fe3f6262bb0502365db60b3b071 | 1,025 | py | Python | test/run.py | ls-cwi/heinz | a033944da7bb6f786a1fcf1e797bc96bde80529b | [
"MIT"
] | 11 | 2015-01-30T13:08:35.000Z | 2020-06-11T12:31:13.000Z | test/run.py | ls-cwi/heinz | a033944da7bb6f786a1fcf1e797bc96bde80529b | [
"MIT"
] | 15 | 2015-04-04T19:06:02.000Z | 2021-11-21T14:35:10.000Z | test/run.py | ls-cwi/heinz | a033944da7bb6f786a1fcf1e797bc96bde80529b | [
"MIT"
] | 5 | 2016-10-12T18:51:12.000Z | 2021-12-21T10:42:19.000Z | #!/usr/bin/python
import sys
import subprocess
if len(sys.argv) != 5:
sys.stderr.write("Usage: " + sys.argv[0] + " <executable> <check_executable> <input_file> <output_file>\n")
sys.exit(1)
executable = sys.argv[1]
check_executable = sys.argv[2]
input_file = sys.argv[3]
output_file = sys.argv[4]
time_limit = 10
threads = 1
print "Running " + executable + " on " + input_file
print(executable + " " + input_file + " " + str(time_limit) + " " + str(threads) + " " + output_file)
print(executable + " " + input_file + " " + str(time_limit) + " " + str(threads) + " " + output_file)
status = subprocess.call(executable + " -stp " + input_file + " -t " + str(time_limit) + " -m " + str(threads) + " -o " + output_file, shell=True)
if status != 0:
sys.exit(status)
else:
print "Checking solution " + output_file
print(check_executable + " -stp " + input_file + " -s " + output_file)
status = subprocess.call(check_executable + " -stp " + input_file + " -s " + output_file, shell=True)
sys.exit(status)
| 37.962963 | 146 | 0.643902 |
ace83ac6cf8975c906dc7bc1dc0f8355928ec9b3 | 914 | py | Python | rally/plugins/common/verification/testr.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 263 | 2015-04-26T16:05:34.000Z | 2022-02-28T11:17:07.000Z | rally/plugins/common/verification/testr.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 19 | 2015-04-23T11:53:10.000Z | 2019-02-20T11:23:09.000Z | rally/plugins/common/verification/testr.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 287 | 2015-04-23T11:28:03.000Z | 2021-09-16T13:05:53.000Z | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.plugins.verification.testr import * # noqa: F401,F403
from rally.plugins.verification import testr as _new
# import it as last item to be sure that we use the right module
from rally.common import logging
logging.log_deprecated_module(
target=__name__, new_module=_new.__name__, release="3.0.0"
)
| 36.56 | 78 | 0.743982 |
ace83b0638507830f48e98dd291c1c60f433d80e | 3,664 | py | Python | bindings/python/ensmallen/datasets/string/candidatuswolfebacteriabacteriumrifcsplowo201full4519.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/candidatuswolfebacteriabacteriumrifcsplowo201full4519.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/candidatuswolfebacteriabacteriumrifcsplowo201full4519.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Candidatus Wolfebacteria bacterium RIFCSPLOWO2_01_FULL_45_19.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusWolfebacteriaBacteriumRifcsplowo201Full4519(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Wolfebacteria bacterium RIFCSPLOWO2_01_FULL_45_19 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Candidatus Wolfebacteria bacterium RIFCSPLOWO2_01_FULL_45_19 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusWolfebacteriaBacteriumRifcsplowo201Full4519",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 34.895238 | 223 | 0.692959 |
ace83bdba26fef30ae6ffb3efae70bf9591c07f9 | 614 | py | Python | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/tests/json_field.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/tests/json_field.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/tests/json_field.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | from django_extensions.tests.fields import FieldTestCase
from django_extensions.tests.testapp.models import JSONFieldTestModel
class JsonFieldTest(FieldTestCase):
def testCharFieldCreate(self):
j = JSONFieldTestModel.objects.create(a=6, j_field=dict(foo='bar'))
self.assertEqual(j.a, 6)
def testDefault(self):
j = JSONFieldTestModel.objects.create(a=1)
self.assertEqual(j.j_field, {})
def testEmptyList(self):
j = JSONFieldTestModel.objects.create(a=6, j_field=[])
self.assertTrue(isinstance(j.j_field, list))
self.assertEqual(j.j_field, [])
| 34.111111 | 75 | 0.708469 |
ace83c43ed805469bd3381f132ff3b4cbdcc9906 | 1,129 | py | Python | shenfun/forms/__init__.py | jaisw7/shenfun | 7482beb5b35580bc45f72704b69343cc6fc1d773 | [
"BSD-2-Clause"
] | 1 | 2021-03-06T09:29:39.000Z | 2021-03-06T09:29:39.000Z | shenfun/forms/__init__.py | jaisw7/shenfun | 7482beb5b35580bc45f72704b69343cc6fc1d773 | [
"BSD-2-Clause"
] | null | null | null | shenfun/forms/__init__.py | jaisw7/shenfun | 7482beb5b35580bc45f72704b69343cc6fc1d773 | [
"BSD-2-Clause"
] | null | null | null | #pylint: disable=missing-docstring
from .project import *
from .inner import *
from .operators import *
from .arguments import *
def extract_bc_matrices(mats):
"""Extract boundary matrices from list of ``mats``
Parameters
----------
mats : list of list of :class:`.TPMatrix`es
Returns
-------
list
list of boundary matrices.
Note
----
The ``mats`` list is modified in place since boundary matrices are
extracted.
"""
#bc_mats = []
#for a in mats:
# for b in a:
# if b.is_bc_matrix():
# bc_mats.append(b)
# a.remove(b)
#return bc_mats
from shenfun.matrixbase import SparseMatrix, TPMatrix
bc_mats = []
for a in mats:
for b in a.copy():
if isinstance(b, SparseMatrix):
if b.trialfunction[0].boundary_condition() == 'Apply':
bc_mats.append(b)
a.remove(b)
elif isinstance(b, TPMatrix):
if b.is_bc_matrix():
bc_mats.append(b)
a.remove(b)
return bc_mats
| 25.088889 | 70 | 0.540301 |
ace83c63a1ea1ff021ab82aceda3de957fa4ce97 | 1,894 | py | Python | wordsearch/api/urbandict.py | Kalvinbw/dictionary | 07d5c2a08e94d8b52ff0f6fea0c68ae53718655d | [
"Apache-2.0"
] | null | null | null | wordsearch/api/urbandict.py | Kalvinbw/dictionary | 07d5c2a08e94d8b52ff0f6fea0c68ae53718655d | [
"Apache-2.0"
] | 1 | 2021-11-12T17:52:14.000Z | 2021-11-12T17:52:14.000Z | wordsearch/api/urbandict.py | Kalvinbw/dictionary | 07d5c2a08e94d8b52ff0f6fea0c68ae53718655d | [
"Apache-2.0"
] | null | null | null | import requests
import os
import json
from dotenv import load_dotenv
load_dotenv()
class Urban():
def __init__(self, data):
self.word = data['list'][0]['word']
self.definitions = []
if len(data['list']) > 5:
self.meanings = 5
else:
self.meanings = len(data['list'])
for meaning in range(0, self.meanings):
myDict = {
'number': meaning + 1,
'definition': data['list'][meaning]['definition'].replace('[', '').replace(']', '')
}
self.definitions.append(myDict)
def urbanDict(term):
url = "https://mashape-community-urban-dictionary.p.rapidapi.com/define"
querystring = {"term":term}
headers = {
'x-rapidapi-host': os.environ.get('URBAN_DICT_HOST'), #str(os.getenv('URBAN_DICT_HOST')),
'x-rapidapi-key': os.environ.get('URBAN_DICT_KEY')# str(os.getenv('URBAN_DICT_KEY'))
}
response = requests.request("GET", url, headers=headers, params=querystring)
cleaned_response = json.loads(response.text)
if 'error' in cleaned_response:
return os.error
else:
result = Urban(cleaned_response)
return result
# example object (comes in a list)
# {11 items
# "definition":"The only [proper] [response] to something that makes absolutely [no sense]."
# "permalink":"http://wat.urbanup.com/3322419"
# "thumbs_up":3874
# "sound_urls":[...]3 items
# "author":"watwat"
# "word":"wat"
# "defid":3322419
# "current_vote":""
# "written_on":"2008-09-04T02:15:08.000Z"
# "example":"1: If all the animals on the [equator] were capable of [flattery], Halloween and Easter would fall on the same day.
# 2: wat
# 1: Wow your cock is almost as big as my dad's.
# 2: wat
# 1: I accidentially a whole [coke bottle]
# 2: You accidentially what?
# 1: A whole coke bottle
# 2: wat"
# "thumbs_down":439
# } | 30.063492 | 128 | 0.621964 |
ace83c67f81b33ae2a15950be8022edf1571410c | 14,858 | py | Python | onnxruntime/python/tools/transformers/onnx_model_bert.py | toothache/onnxruntime | 217b2c9f931b5b0f704df0c8336def47025d2148 | [
"MIT"
] | 1 | 2021-09-01T17:35:53.000Z | 2021-09-01T17:35:53.000Z | onnxruntime/python/tools/transformers/onnx_model_bert.py | toothache/onnxruntime | 217b2c9f931b5b0f704df0c8336def47025d2148 | [
"MIT"
] | 19 | 2021-08-21T08:43:10.000Z | 2022-03-18T21:52:45.000Z | onnxruntime/python/tools/transformers/onnx_model_bert.py | toothache/onnxruntime | 217b2c9f931b5b0f704df0c8336def47025d2148 | [
"MIT"
] | 2 | 2020-02-05T00:10:14.000Z | 2021-04-16T02:09:55.000Z | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from logging import getLogger
from typing import List
from onnx import ModelProto, TensorProto, helper
from onnx_model import OnnxModel
from fusion_reshape import FusionReshape
from fusion_layernorm import FusionLayerNormalization, FusionLayerNormalizationTF
from fusion_skiplayernorm import FusionSkipLayerNormalization, FusionBiasSkipLayerNormalization
from fusion_embedlayer import FusionEmbedLayerNormalization
from fusion_attention import FusionAttention, AttentionMask, AttentionMaskFormat
from fusion_gelu import FusionGelu
from fusion_fastgelu import FusionFastGelu
from fusion_biasgelu import FusionBiasGelu
from fusion_gelu_approximation import FusionGeluApproximation
from fusion_utils import FusionUtils
from fusion_options import FusionOptions
logger = getLogger(__name__)
class BertOptimizationOptions(FusionOptions):
""" This class is deprecated
"""
def __init__(self, model_type):
logger.warning(f"BertOptimizationOptions is depreciated. Please use FusionOptions instead.")
super().__init__(model_type)
class BertOnnxModel(OnnxModel):
    """OnnxModel subclass that applies BERT-specific operator fusions."""

    def __init__(self, model: ModelProto, num_heads: int = 0, hidden_size: int = 0):
        """Initialize BERT ONNX Model.

        Args:
            model (ModelProto): the ONNX model
            num_heads (int, optional): number of attention heads. Defaults to 0, and we will detect the parameter automatically.
            hidden_size (int, optional): hidden dimension. Defaults to 0, and we will detect the parameter automatically.
        """
        # Either both are "detect automatically" (0), or both are explicit and consistent.
        assert (num_heads == 0 and hidden_size == 0) or (num_heads > 0 and hidden_size % num_heads == 0)

        super().__init__(model)
        self.num_heads = num_heads
        self.hidden_size = hidden_size

        self.attention_mask = AttentionMask(self)
        self.attention_fusion = FusionAttention(self, self.hidden_size, self.num_heads, self.attention_mask)
        self.utils = FusionUtils(self)

    def fuse_attention(self):
        # Run the Attention fusion pass configured in __init__.
        self.attention_fusion.apply()

    def fuse_gelu(self):
        # Try both the Gelu and the FastGelu graph patterns.
        fusion = FusionGelu(self)
        fusion.apply()
        fusion = FusionFastGelu(self)
        fusion.apply()

    def fuse_bias_gelu(self, is_fastgelu):
        # Fold a preceding bias Add into the (Fast)Gelu node.
        fusion = FusionBiasGelu(self, is_fastgelu)
        fusion.apply()

    def gelu_approximation(self):
        # Apply the Gelu-approximation fusion pass.
        fusion = FusionGeluApproximation(self)
        fusion.apply()

    def fuse_add_bias_skip_layer_norm(self):
        # Fold a preceding bias Add into SkipLayerNormalization.
        fusion = FusionBiasSkipLayerNormalization(self)
        fusion.apply()

    def fuse_reshape(self):
        fusion = FusionReshape(self)
        fusion.apply()

    def fuse_embed_layer(self):
        fusion = FusionEmbedLayerNormalization(self)
        fusion.apply()

    def fuse_layer_norm(self):
        # Cover both the standard pattern and the TF-exported variant.
        fusion = FusionLayerNormalization(self)
        fusion.apply()

        fusion = FusionLayerNormalizationTF(self)
        fusion.apply()

    def fuse_skip_layer_norm(self):
        fusion = FusionSkipLayerNormalization(self)
        fusion.apply()

    def get_graph_inputs_from_node_type(self, op_type: str, input_indices: List[int], casted: bool):
        """
        Get graph inputs that feed into node type (like EmbedLayerNormalization or Attention).
        Returns a list of the graph input names based on the filter whether it is casted or not.
        """
        graph_inputs = []

        output_name_to_node = self.output_name_to_node()
        nodes = self.get_nodes_by_op_type(op_type)
        for node in nodes:
            # Only the indices that exist on this node (optional inputs may be absent).
            bert_inputs = [node.input[i] for i in input_indices if i < len(node.input)]
            for bert_input in bert_inputs:
                if self.find_graph_input(bert_input):
                    # Direct graph input (no Cast in between) -> "not casted".
                    if not casted:
                        graph_inputs.append(bert_input)
                elif bert_input in output_name_to_node:
                    parent = output_name_to_node[bert_input]
                    # Graph input reaching the node through a Cast -> "casted".
                    if parent.op_type == 'Cast' and self.find_graph_input(parent.input[0]) is not None:
                        if casted:
                            graph_inputs.append(parent.input[0])
        return graph_inputs

    def get_graph_inputs_from_fused_nodes(self, casted: bool):
        # Collect graph inputs feeding EmbedLayerNormalization inputs 0, 1, 7
        # and Attention input 3 after fusion.
        inputs = self.get_graph_inputs_from_node_type('EmbedLayerNormalization', [0, 1, 7], casted)
        inputs += self.get_graph_inputs_from_node_type('Attention', [3], casted)
        return inputs

    def change_input_to_int32(self):
        """Redeclare graph inputs that were consumed via a Cast as native int32 inputs."""
        original_opset_version = self.model.opset_import[0].version
        graph = self.graph()

        new_graph_inputs = []
        casted_bert_graph_inputs = self.get_graph_inputs_from_fused_nodes(casted=True)
        for input in graph.input:
            if input.name in casted_bert_graph_inputs:
                # Remove the Cast-to-int32 node and change the input dtype itself.
                self.utils.remove_cast_int32(input.name)
                int32_input = helper.make_tensor_value_info(input.name, TensorProto.INT32,
                                                            self.tensor_shape_to_list(input.type.tensor_type))
                new_graph_inputs.append(int32_input)
            else:
                new_graph_inputs.append(input)

        # Rebuild the graph (and model) with the replaced input declarations.
        graph_def = helper.make_graph(graph.node,
                                      'int32 inputs',
                                      new_graph_inputs,
                                      graph.output,
                                      initializer=graph.initializer,
                                      value_info=graph.value_info)

        self.model = helper.make_model(graph_def, producer_name='onnxruntime-tools')

        # restore opset version (make_model resets it)
        self.model.opset_import[0].version = original_opset_version

    def use_dynamic_axes(self, dynamic_batch_dim='batch_size', dynamic_seq_len='max_seq_len'):
        """
        Update input and output shape to use dynamic axes.
        """
        bert_graph_inputs = self.get_graph_inputs_from_fused_nodes(
            casted=True) + self.get_graph_inputs_from_fused_nodes(casted=False)

        dynamic_batch_inputs = {}
        for input in self.model.graph.input:
            if input.name in bert_graph_inputs:
                # Dim 0 becomes the symbolic batch dimension; dim 1 (if requested)
                # becomes the symbolic sequence-length dimension.
                dim_proto = input.type.tensor_type.shape.dim[0]
                dim_proto.dim_param = dynamic_batch_dim
                if dynamic_seq_len is not None:
                    dim_proto = input.type.tensor_type.shape.dim[1]
                    dim_proto.dim_param = dynamic_seq_len

        for output in self.model.graph.output:
            dim_proto = output.type.tensor_type.shape.dim[0]
            dim_proto.dim_param = dynamic_batch_dim

    def preprocess(self):
        # Graph simplifications that must run before the fusion passes.
        self.adjust_reshape_and_expand()
        return

    def adjust_reshape_and_expand(self):
        nodes_to_remove = []
        for node in self.nodes():
            if node.op_type == 'Reshape':
                # Clean up unneccessary reshape nodes.
                # Find reshape nodes with no actually data in "shape" attribute and remove.
                reshape_shape = self.get_constant_value(node.input[1])
                if reshape_shape is not None and reshape_shape.size == 0:
                    nodes_to_remove.extend([node])
                    self.replace_input_of_all_nodes(node.output[0], node.input[0])
                    continue

                # Find path "Slice" -> "Reshape" -> "Expand" -> "Expand" -> current "Reshape", simplify the graph by
                # changing current reshape's input to output of slice.
                reshape_path = self.match_parent_path(node, ['Expand', 'Expand', 'Reshape', 'Slice'], [0, 0, 0, 0],
                                                      self.output_name_to_node())
                if reshape_path is not None:
                    expand_node = reshape_path[-3]
                    expand_shape_value = self.get_constant_value(expand_node.input[1])

                    reshape_before_expand = reshape_path[-2]
                    shape_value = self.get_constant_value(reshape_before_expand.input[1])

                    slice_node = reshape_path[-1]
                    # Only bypass when the Expand provably does not change the data
                    # (its second dim equals the single reshape dim).
                    if expand_shape_value is not None and shape_value is not None and len(
                            expand_shape_value) == 2 and len(
                                shape_value) == 1 and expand_shape_value[1] == shape_value[0]:
                        node.input[0] = slice_node.output[0]

        if nodes_to_remove:
            self.remove_nodes(nodes_to_remove)
            logger.info(f"Removed Reshape and Expand count: {len(nodes_to_remove)}")

    def clean_graph(self):
        output_name_to_node = self.output_name_to_node()
        nodes_to_remove = []
        for node in self.nodes():
            # Before:
            #  input_ids --> Shape --> Gather(indices=0) --> Unsqueeze ------+
            #          |                                                     |
            #          |                                                     v
            #          +----> Shape --> Gather(indices=1) --> Unsqueeze --> Concat --> ConstantOfShape -->Cast --> EmbedLayerNormaliation/ReduceSum
            # After:
            #  input_ids --> Shape --> ConstantOfShape -->Cast --> EmbedLayerNormaliation/ReduceSum
            # TODO: merge ConstantOfShape -->Cast to ConstantOfShape (need update the data type of value)
            op_input_id = {"EmbedLayerNormalization": 1, "ReduceSum": 0, "Attention": 3}
            if node.op_type in op_input_id:
                i = op_input_id[node.op_type]
                parent_nodes = self.match_parent_path(
                    node, ['Cast', 'ConstantOfShape', 'Concat', 'Unsqueeze', 'Gather', 'Shape'], [i, 0, 0, 0, 0, 0],
                    output_name_to_node)
                if parent_nodes is not None:
                    cast, constantOfShape, concat, unsqueeze, gather, shape = parent_nodes
                    if shape.input[0] == self.graph().input[0].name:
                        # Feed ConstantOfShape straight from Shape, skipping the
                        # Gather/Unsqueeze/Concat chain; refresh the lookup map.
                        constantOfShape.input[0] = shape.output[0]
                        output_name_to_node = self.output_name_to_node()

            if node.op_type == 'Attention':
                # Before:
                #   input_ids --> Shape -->ConstantOfShape -->Cast --> ReduceSum --> Attention
                # After:
                #   remove this path, and remove the optional mask_index input of Attention node.
                parent_nodes = self.match_parent_path(node, ['ReduceSum', 'Cast', 'ConstantOfShape', 'Shape'],
                                                      [3, 0, 0, 0], output_name_to_node)
                if parent_nodes is not None:
                    if parent_nodes[-1].input[0] == self.graph().input[0].name:
                        # Rebuild the Attention node without its last (mask_index) input.
                        attention_node = helper.make_node('Attention',
                                                          inputs=node.input[0:len(node.input) - 1],
                                                          outputs=node.output,
                                                          name=node.name + "_remove_mask")
                        attention_node.domain = "com.microsoft"
                        attention_node.attribute.extend([helper.make_attribute("num_heads", self.num_heads)])
                        self.add_node(attention_node, self.get_graph_by_node(attention_node).name)
                        nodes_to_remove.append(node)
        self.remove_nodes(nodes_to_remove)

    def postprocess(self):
        # Final cleanup after all fusion passes.
        self.clean_graph()
        self.prune_graph()

    def optimize(self, options: FusionOptions = None, add_dynamic_axes=False):
        """Run the full BERT fusion pipeline; options=None enables every pass."""
        if (options is None) or options.enable_layer_norm:
            self.fuse_layer_norm()

        if (options is None) or options.enable_gelu:
            self.fuse_gelu()

        self.preprocess()

        self.fuse_reshape()

        if (options is None) or options.enable_skip_layer_norm:
            self.fuse_skip_layer_norm()

        if (options is None) or options.enable_attention:
            if options is not None:
                self.attention_mask.set_mask_format(options.attention_mask_format)
            self.fuse_attention()

        if (options is None) or options.enable_embed_layer_norm:
            self.fuse_embed_layer()

        # Remove reshape nodes that having same shape of input and output based on symbolic shape inference.
        FusionUtils.remove_useless_reshape_nodes(self)

        self.postprocess()

        # Bias fusion is done after postprocess to avoid extra Reshape between bias and Gelu/FastGelu/SkipLayerNormalization
        if (options is None) or options.enable_bias_gelu:
            # Fuse Gelu and Add Bias before it.
            self.fuse_bias_gelu(is_fastgelu=True)
            self.fuse_bias_gelu(is_fastgelu=False)

        if (options is None) or options.enable_bias_skip_layer_norm:
            # Fuse SkipLayerNormalization and Add Bias before it.
            self.fuse_add_bias_skip_layer_norm()

        # Approximation is opt-in only (never enabled by default).
        if (options is not None and options.enable_gelu_approximation):
            self.gelu_approximation()

        self.remove_unused_constant()

        # Use symbolic batch dimension in input and output.
        if add_dynamic_axes:
            self.use_dynamic_axes()

        logger.info(f"opset verion: {self.model.opset_import[0].version}")

    def get_fused_operator_statistics(self):
        """
        Returns node count of fused operators.
        """
        op_count = {}
        ops = [
            'EmbedLayerNormalization', 'Attention', 'Gelu', 'FastGelu', 'BiasGelu', 'LayerNormalization',
            'SkipLayerNormalization'
        ]
        for op in ops:
            nodes = self.get_nodes_by_op_type(op)
            op_count[op] = len(nodes)
        logger.info(f"Optimized operators:{op_count}")
        return op_count

    def is_fully_optimized(self):
        """
        Returns True when the model is fully optimized.
        """
        op_count = self.get_fused_operator_statistics()
        embed = op_count['EmbedLayerNormalization']
        attention = op_count['Attention']
        gelu = op_count['Gelu'] + op_count['BiasGelu'] + op_count['FastGelu']
        layer_norm = op_count['LayerNormalization'] + op_count['SkipLayerNormalization']
        # Heuristic: embedding fused, attention fused, one Gelu per attention
        # block, and at least two layer norms per attention block.
        is_perfect = (embed > 0) and (attention > 0) and (attention == gelu) and (layer_norm >= 2 * attention)

        if layer_norm == 0:
            logger.debug("Layer Normalization not fused")

        if gelu == 0:
            logger.debug("Gelu/FastGelu not fused")

        if embed == 0:
            logger.debug("Embed Layer not fused")

        if attention == 0:
            logger.warning("Attention not fused")

        return is_perfect
| 43.95858 | 148 | 0.600888 |
ace83caf40394ae4e517d29b67c0df989288044a | 2,776 | py | Python | fintech/newsletter/migrations/0001_initial.py | fpark7/cs3240-s17-team31 | 5095e8ef29d5fa14917a4c542e021025d38b6123 | [
"MIT"
] | null | null | null | fintech/newsletter/migrations/0001_initial.py | fpark7/cs3240-s17-team31 | 5095e8ef29d5fa14917a4c542e021025d38b6123 | [
"MIT"
] | null | null | null | fintech/newsletter/migrations/0001_initial.py | fpark7/cs3240-s17-team31 | 5095e8ef29d5fa14917a4c542e021025d38b6123 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-03 15:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the newsletter app: File, Report and SiteUser models.
    initial = True

    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Uploaded report file; 'encrypted' is a Y/N flag.
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='reports/')),
                ('encrypted', models.CharField(choices=[('Y', 'Yes'), ('N', 'No')], max_length=1)),
            ],
        ),
        # A company report; 'content' links to the uploaded File objects.
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.CharField(max_length=50)),
                ('ceo_name', models.CharField(max_length=30)),
                ('group', models.CharField(blank=True, max_length=30)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('is_private', models.CharField(choices=[('Y', 'Yes'), ('N', 'No')], max_length=1)),
                ('company_name', models.CharField(max_length=45)),
                ('company_Phone', models.CharField(max_length=11)),
                ('company_location', models.CharField(max_length=45)),
                ('company_country', models.CharField(choices=[('US', 'United States'), ('CA', 'Canada'), ('GB', 'Great Britain'), ('MX', 'Mexico')], max_length=2)),
                ('sector', models.CharField(max_length=45)),
                ('company_email', models.EmailField(max_length=45)),
                ('industry', models.CharField(max_length=45)),
                ('time', models.DateTimeField(auto_now_add=True)),
                ('projects', models.TextField(default='project', max_length=300)),
                ('content', models.ManyToManyField(default='none', to='newsletter.File')),
            ],
        ),
        # One-to-one profile extending the Django auth user with a user type.
        migrations.CreateModel(
            name='SiteUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('usertype', models.CharField(choices=[('c', 'Company User'), ('i', 'Investor User')], max_length=1)),
                ('groups', models.ManyToManyField(to='auth.Group')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
ace83dea8ea2a8a0df0ef63a624687b74733c8d2 | 2,894 | py | Python | src/timeseers/fourier_seasonality.py | maxthemillion/timeseers | 3dafb96a08fda6025127ef2c67d0536735d3b48e | [
"MIT"
] | 233 | 2020-05-13T14:57:56.000Z | 2022-03-28T20:12:58.000Z | src/timeseers/fourier_seasonality.py | maxthemillion/timeseers | 3dafb96a08fda6025127ef2c67d0536735d3b48e | [
"MIT"
] | 25 | 2020-04-12T11:51:43.000Z | 2022-02-15T20:28:35.000Z | src/timeseers/fourier_seasonality.py | maxthemillion/timeseers | 3dafb96a08fda6025127ef2c67d0536735d3b48e | [
"MIT"
] | 30 | 2020-05-20T12:14:07.000Z | 2022-03-28T01:00:53.000Z | import numpy as np
import pandas as pd
import pymc3 as pm
from timeseers.timeseries_model import TimeSeriesModel
from timeseers.utils import add_subplot, get_group_definition
class FourierSeasonality(TimeSeriesModel):
def __init__(
self,
name: str = None,
n: int = 10,
period: pd.Timedelta = pd.Timedelta(days=365.25),
shrinkage_strength=100,
pool_cols=None,
pool_type='complete'
):
self.n = n
self.period = period
self.shrinkage_strength = shrinkage_strength
self.pool_cols = pool_cols
self.pool_type = pool_type
self.name = name or f"FourierSeasonality(period={self.period})"
super().__init__()
@staticmethod
def _X_t(t, p=365.25, n=10):
x = 2 * np.pi * (np.arange(n) + 1) * t[:, None] / p
return np.concatenate((np.cos(x), np.sin(x)), axis=1)
def definition(self, model, X, scale_factor):
t = X["t"].values
group, n_groups, self.groups_ = get_group_definition(X, self.pool_cols, self.pool_type)
self.p_ = self.period / scale_factor['t']
n_params = self.n * 2
with model:
if self.pool_type == 'partial':
mu_beta = pm.Normal(self._param_name("mu_beta"), mu=0, sigma=1, shape=n_params)
sigma_beta = pm.HalfNormal(self._param_name("sigma_beta"), 0.1, shape=n_params)
offset_beta = pm.Normal(
self._param_name("offset_beta"),
0,
1 / self.shrinkage_strength,
shape=(n_groups, n_params)
)
beta = pm.Deterministic(self._param_name("beta"), mu_beta + offset_beta * sigma_beta)
else:
beta = pm.Normal(self._param_name("beta"), 0, 1, shape=(n_groups, n_params))
seasonality = pm.math.sum(self._X_t(t, self.p_, self.n) * beta[group], axis=1)
return seasonality
def _predict(self, trace, t, pool_group=0):
return self._X_t(t, self.p_, self.n) @ trace[self._param_name("beta")][:, pool_group].T
def plot(self, trace, scaled_t, y_scaler):
ax = add_subplot()
ax.set_title(str(self))
seasonality_return = np.empty((len(scaled_t), len(self.groups_)))
for group_code, group_name in self.groups_.items():
scaled_s = self._predict(trace, scaled_t, group_code)
s = y_scaler.inv_transform(scaled_s)
ax.plot(list(range(self.period.days)), s.mean(axis=1)[:self.period.days], label=group_name)
seasonality_return[:, group_code] = scaled_s.mean(axis=1)
return seasonality_return
def __repr__(self):
return f"FourierSeasonality(n={self.n}, " \
f"period={self.period}," \
f"pool_cols={self.pool_cols}, " \
f"pool_type={self.pool_type}"
| 36.632911 | 103 | 0.596061 |
ace84090b9532916217f7852d484937def4131f2 | 2,541 | py | Python | filestore/odm_templates.py | licode/filestore | aba593f60942ce917defa922048953eddb8d25a4 | [
"BSD-3-Clause"
] | null | null | null | filestore/odm_templates.py | licode/filestore | aba593f60942ce917defa922048953eddb8d25a4 | [
"BSD-3-Clause"
] | null | null | null | filestore/odm_templates.py | licode/filestore | aba593f60942ce917defa922048953eddb8d25a4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from mongoengine import (Document, FloatField, DateTimeField, StringField,
DictField, ReferenceField, IntField, BooleanField,
DENY)
import time
ALIAS = 'fs'
class Resource(Document):
"""
Parameters
----------
spec : str
spec used to determine what handler to use to open this
resource.
resource_path : str
Url to the physical location of the resource
resource_kwargs : dict
name/value pairs of additional kwargs to be
passed to the handler to open this resource.
"""
spec = StringField(required=True, unique=False)
resource_path = StringField(required=True, unique=False)
resource_kwargs = DictField(required=False)
meta = {'indexes': ['-_id'], 'db_alias': ALIAS}
class ResourceAttributes(Document):
    """
    Auxiliary metadata attached to a single Resource: data shape/dtype,
    access-tracking bookkeeping, and free-form custom attributes.

    Parameters
    ----------
    resource : Resource
        The Resource these attributes describe. Deleting a Resource that
        is still referenced here is denied.
    """
    resource = ReferenceField(Resource, reverse_delete_rule=DENY,
                              required=True,
                              db_field='resource_id')
    shape = StringField(unique=False, required=True)
    dtype = StringField(unique=False, required=True)
    total_bytes = IntField(min_value=0, required=False, default=0)
    hashed_data = StringField(required=False)
    # Fix: pass the callable, not its result. ``default=time.time()`` was
    # evaluated once at import time, so every document shared the same stale
    # timestamp; mongoengine invokes a callable default per document.
    last_access = FloatField(required=False, default=time.time)
    datetime_last_access = DateTimeField(required=False)
    in_use = BooleanField(required=False, default=False)
    custom_attributes = DictField(required=False)

    meta = {'indexes': ['-_id', '-shape', '-dtype'], 'db_alias': ALIAS}
class Datum(Document):
    """
    Document to represent a single datum in a resource.

    There is a many-to-one mapping between Datum and Resource

    Parameters
    ----------
    resource : Resource or Resource.id
        Resource object
    datum_id : str
        Unique identifier for this datum. This is the value stored in
        metadatastore and is the value passed to `retrieve` to get
        the data back out.
    datum_kwargs : dict
        dict with any kwargs needed to retrieve this specific datum from the
        resource.
    """
    # Deleting a Resource that still has Datum documents is denied.
    resource = ReferenceField(Resource,
                              reverse_delete_rule=DENY,
                              required=True)
    datum_id = StringField(required=True, unique=True)
    datum_kwargs = DictField(required=False)

    meta = {'indexes': ['-_id', '-datum_id'], 'db_alias': ALIAS}
ace841608ee0e6f01d10be9ea5341b37b92c3bb7 | 125 | py | Python | ps4rp/__version__.py | kingcreek/ps4-remote-play | b7dec18ef4ad3252816ba45084cc393cc655509b | [
"Apache-2.0"
] | 10 | 2019-07-01T07:52:02.000Z | 2021-12-09T10:00:40.000Z | ps4rp/__version__.py | industriousonesoft/ps4-remote-play | 08670e786ef3071849e9413eea3d7924835ea3c7 | [
"Apache-2.0"
] | null | null | null | ps4rp/__version__.py | industriousonesoft/ps4-remote-play | 08670e786ef3071849e9413eea3d7924835ea3c7 | [
"Apache-2.0"
] | 4 | 2019-07-04T05:57:03.000Z | 2021-06-22T05:05:17.000Z | # Copyright (c) 2018, Pierre Bourdon <delroth@gmail.com>
# SPDX-License-Identifier: Apache-2.0
__version__ = '0.1.0-alpha1'
| 25 | 56 | 0.728 |
ace84269047c27bbbd17bbb4199ff014f76154a0 | 4,348 | py | Python | grr/client/client_actions/plist_test.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/plist_test.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/plist_test.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
"""Tests for grr.client.client_actions.plist."""
import os
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import flags
from grr.lib import plist as plist_lib
from grr.lib import test_lib
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import plist as rdf_plist
# This variable holds the same contents as the ondisk test plist
test_plist_dict = {
    "date": 978307200000000,
    "nested1": {
        "nested11": {
            "data113": "\xde\xad\xbe\xef",
            "key111": "value111",
            "key112": "value112"
        }
    },
    "numbers": [1, "2", "3"]
}

# Expected contents of the Safari History test plists (binary and XML).
safari_plist_dict = {
    "WebHistoryDates": [
        {"": "http://www.google.com",
         "title": "Google",
         "lastVisited": "374606652.9",
         "visitCount": 2},
        {"": "http://www.apple.com",
         "title": "Apple",
         "lastVisited": "374606652.9",
         "visitCount": 1},
    ],
    "WebHistoryFileVersion": 1,
}
class PlistTest(test_lib.EmptyActionTest):
    """Tests for the PlistQuery client action and the plist filter language."""

    def testParseFilter(self):
        # (query, expected-to-parse) pairs; multi-word keys must be quoted.
        queries = [
            ('bla is "red"', True),
            ('bla.bla is "red"', True),
            ('bla."bla bliek" is "red"', True),
            ('bla.bla bliek is "red"', False),
        ]
        for query, result in queries:
            if result:
                plist_lib.PlistFilterParser(query).Parse()
            else:
                filter_parser = plist_lib.PlistFilterParser(query)
                self.assertRaises(Exception, filter_parser.Parse)

    def testMatches(self):
        # A compiled filter matches against a plain dict.
        query = '"nested1"."nested11"."key112" contains "value112"'
        parser = plist_lib.PlistFilterParser(query).Parse()
        matcher = parser.Compile(plist_lib.PlistFilterImplementation)
        self.assertEqual(matcher.Matches(test_plist_dict), True)

    def testActionFullRetrievalOfAPlist(self):
        # Empty query/context returns the whole plist content.
        results = self._RunQuery(query="", context="")
        if not results:
            raise Exception("no results were found...")
        self.assertDictEqual(results[0][0].ToDict(), test_plist_dict)

    def testActionSingleValueRetrieval(self):
        # A context selects a single key from the plist.
        results = self._RunQuery(query="", context="date")
        if not results:
            raise Exception("no results were found...")
        self.assertEqual(results[0][0], 978307200000000)

    def testActionFilteredValueRetrieval(self):
        # Numbers does NOT contain a 2, but a "2", this should return nothing
        results = self._RunQuery(query="numbers contains 2", context="")
        self.assertListEqual(list(list(results)[0]), [])
        # This one should return the full dict
        results = self._RunQuery(query="numbers contains '2'", context="")
        self.assertEqual(results[0][0], test_plist_dict)

        # SAFARI PLIST
        results = self._RunQuery(plist="History.plist",
                                 query='title contains "oogle"',
                                 context="WebHistoryDates")
        self.assertEqual(results[0][0], safari_plist_dict["WebHistoryDates"][0])

        # And now SAFARI XML
        results = self._RunQuery(plist="History.xml.plist",
                                 query='title contains "oogle"',
                                 context="WebHistoryDates")
        self.assertEqual(results[0][0], safari_plist_dict["WebHistoryDates"][0])

    def testActionNonexistantFile(self):
        self.assertRaises(IOError,
                          self._RunQuery,
                          query="",
                          context="",
                          plist="nonexistantfile")

    def testActionInvalidFile(self):
        # NOTE(review): "History" presumably exists in test data but is not a
        # parseable plist — confirm against the test-data directory.
        self.assertRaises(Exception,
                          self._RunQuery,
                          query="",
                          context="",
                          plist="History")

    def _RunQuery(self, plist="test.plist", query="", context=""):
        # Helper: run the PlistQuery client action against a test-data plist.
        path = os.path.join(self.base_path, plist)
        pathspec = rdf_paths.PathSpec(path=path,
                                      pathtype=rdf_paths.PathSpec.PathType.OS)
        plistrequest = rdf_plist.PlistRequest()
        plistrequest.query = query
        plistrequest.context = context
        plistrequest.pathspec = pathspec
        return self.RunAction("PlistQuery", plistrequest)
def main(argv):
    # Standard GRR test entry point.
    test_lib.main(argv)


if __name__ == "__main__":
    flags.StartMain(main)
| 31.507246 | 76 | 0.612925 |
ace842ec733ecb0a9d0cafd32ed62e3045f38161 | 2,093 | py | Python | aiida/cmdline/commands/cmd_rehash.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/cmdline/commands/cmd_rehash.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/cmdline/commands/cmd_rehash.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`verdi rehash` command."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import click
from aiida.cmdline.commands.cmd_verdi import verdi
from aiida.cmdline.params import arguments
from aiida.cmdline.params.types.plugin import PluginParamType
from aiida.cmdline.utils import decorators, echo
@verdi.command('rehash')
@arguments.NODES()
@click.option(
    '-e',
    '--entry-point',
    # load=True: click hands the command the loaded class, not the entry-point
    # string, so it can be used directly with isinstance below.
    type=PluginParamType(group=('node', 'calculations', 'data'), load=True),
    default='node',
    help='Only include nodes that are class or sub class of the class identified by this entry point.')
@decorators.with_dbenv()
def rehash(nodes, entry_point):
    """Recompute the hash for nodes in the database

    The set of nodes that will be rehashed can be filtered by their identifier and/or based on their class.
    """
    from aiida.orm.querybuilder import QueryBuilder

    if nodes:
        # Explicit node list given: keep only instances of the requested class.
        to_hash = [(node,) for node in nodes if isinstance(node, entry_point)]
    else:
        # No identifiers given: query all nodes of the requested class.
        builder = QueryBuilder()
        builder.append(entry_point, tag='node')
        to_hash = builder.all()

    if not to_hash:
        echo.echo_critical('no matching nodes found')

    count = 0
    for i, (node,) in enumerate(to_hash):
        if i % 100 == 0:
            # Minimal progress indicator: one dot per 100 nodes.
            echo.echo('.', nl=False)
        node.rehash()
        count += 1

    echo.echo('')
    echo.echo_success('{} nodes re-hashed'.format(count))
| 34.883333 | 107 | 0.59054 |
ace843211327c2b51217cf9bad3f711fee450a39 | 15,031 | py | Python | fesutils/schemautils/_schemautils.py | tinybees/fesutils | 13b25ee156a11020140d98c6b81510c3efca264d | [
"MIT"
] | 1 | 2020-09-11T08:08:37.000Z | 2020-09-11T08:08:37.000Z | fesutils/schemautils/_schemautils.py | tinybees/fesutils | 13b25ee156a11020140d98c6b81510c3efca264d | [
"MIT"
] | null | null | null | fesutils/schemautils/_schemautils.py | tinybees/fesutils | 13b25ee156a11020140d98c6b81510c3efca264d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 19-1-21 下午6:47
schema校验,需要安装flask或者sanic
"""
import copy
from collections import MutableMapping, MutableSequence
from functools import wraps
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
import aelog
from marshmallow import EXCLUDE, Schema, ValidationError, fields
from .._err_msg import schema_msg
from .._strutils import under2camel
from ..err import FuncArgsError, HttpError
__all__ = ("sanic_schema_validate", "flask_schema_validate", "verify_schema", "schema2swagger", "gen_schema")
def _verify_message(src_message: Dict, message: Union[List, Dict]) -> Dict:
"""
对用户提供的message进行校验
Args:
src_message: 默认提供的消息内容
message: 指定的消息内容
Returns:
"""
src_message = dict(src_message)
message = message if isinstance(message, MutableSequence) else [message]
required_field = {"msg_code", "msg_zh", "msg_en"}
for msg in message:
if isinstance(msg, MutableMapping):
if set(msg.keys()).intersection(required_field) == required_field and msg["msg_code"] in src_message:
src_message[msg["msg_code"]].update(msg)
return src_message
def verify_schema(schema_cls, json_data: Union[List[Dict], Dict],
                  required: Union[Tuple, List] = tuple(),
                  excluded: Union[Tuple, List] = tuple(),
                  is_extends: bool = True,
                  message: Dict = None) -> Union[List[Dict], Dict]:
    """
    Validate that a posted JSON payload matches the given marshmallow schema.

    Intended for explicit, in-handler validation (not the decorator flavour).

    Args:
        schema_cls: the marshmallow Schema class to validate against
        json_data: the request JSON (a dict, or a list of dicts)
        required: field names to force-mark as required for this call
        excluded: field names to drop from the validated result
        is_extends: whether to keep the schema's own ``required`` flags for
            fields not listed in ``required`` (default: keep them)
        message: optional message mapping overriding the default error texts
    Returns:
        The deserialized, validated data.
    Raises:
        HttpError: 400 on validation failure or any unexpected error.
    """
    schema_obj = schema_cls(unknown=EXCLUDE)
    if required:
        for key, val in schema_obj.fields.items():
            if key in required:  # During deserialization, force these fields to be loaded
                setattr(schema_obj.fields[key], "dump_only", False)
                schema_obj.load_fields[key] = schema_obj.fields[key]
            elif not is_extends:
                setattr(schema_obj.fields[key], "required", False)
    try:
        valid_data = schema_obj.load(json_data, unknown=EXCLUDE)
        # Drop fields that must not survive the load (e.g. non-modifiable columns)
        if excluded and isinstance(valid_data, dict):
            for val in excluded:
                valid_data.pop(val, None)
    except ValidationError as err:
        message = schema_msg if message is None else message
        aelog.exception('Request body validation error, please check! error={}'.format(err.messages))
        raise HttpError(400, message=message[201]["msg_zh"], error=err.messages)
    except Exception as err:
        message = schema_msg if message is None else message
        aelog.exception("Request body validation unknow error, please check!. error={}".format(str(err)))
        raise HttpError(400, message=message[202]["msg_zh"], error=str(err))
    else:
        return valid_data
def _schema_validated(schema_cls: Type[Schema], required: Union[Tuple, List] = tuple(),
                      is_extends: bool = True, excluded: Union[Tuple, List] = tuple(),
                      is_async: bool = True, message: Dict = None) -> Callable:
    """
    Decorator factory: validate the request JSON body against a schema.

    Args:
        schema_cls: the marshmallow Schema class to validate against
        required: field names to force-mark as required
        excluded: field names to drop from the validated result
        is_extends: whether to keep the schema's own ``required`` flags for
            fields not listed in ``required`` (default: keep them)
        message: optional message mapping overriding the default error texts
        is_async: True -> Sanic (async) framework, False -> Flask
    Returns:
        The decorator.
    """
    # Framework imports are deferred so that only the framework actually in
    # use needs to be installed.
    if is_async is True:
        try:
            from sanic.request import Request
        except ImportError as e:
            raise ImportError(f"please pip install Sanic >= 19.9 {e}")
    else:
        try:
            from flask import g, request
        except ImportError as e:
            raise ImportError(f"please pip install Flask {e}")

    if not issubclass(schema_cls, Schema):
        raise FuncArgsError(message="schema_obj type error!")
    if not isinstance(required, (tuple, list)):
        raise FuncArgsError(message="required type error!")
    if not isinstance(excluded, (tuple, list)):
        raise FuncArgsError(message="excluded type error!")
    # The merged message table is cached on the function object itself, so
    # that when multiple validation decorators are used, changing it once
    # changes it for all of them.
    if not getattr(_schema_validated, "message", None):
        setattr(_schema_validated, "message", _verify_message(schema_msg, message or {}))
    schema_message = getattr(_schema_validated, "message", None)

    def _validated(func):
        """
        Validate that the POSTed JSON format and types are correct.
        """

        @wraps(func)
        async def _async_wrapper(*args, **kwargs):
            """
            Validate that the POSTed JSON format and types are correct (Sanic).
            """
            # The Request may be the first or the second positional argument
            # (class-based views receive ``self`` first).
            request_ = args[0] if isinstance(args[0], Request) else args[1]
            request_.ctx.json = verify_schema(schema_cls, request_.json, required, excluded, is_extends, schema_message)
            return await func(*args, **kwargs)

        @wraps(func)
        def _wrapper(*args, **kwargs):
            """
            Validate that the POSTed JSON format and types are correct (Flask).
            """
            g.json = verify_schema(schema_cls, request.json, required, excluded, is_extends, schema_message)
            return func(*args, **kwargs)

        return _async_wrapper if is_async is True else _wrapper

    return _validated
def sanic_schema_validate(schema_cls: Type[Schema], required: Union[Tuple, List] = tuple(),
                          is_extends: bool = True, excluded: Union[Tuple, List] = tuple(),
                          message: Dict = None) -> Callable:
    """Decorator validating a Sanic request's JSON body against ``schema_cls``.

    Thin Sanic-flavoured wrapper around :func:`_schema_validated`.

    Args:
        schema_cls: the marshmallow Schema class to validate against
        required: field names to force-mark as required
        excluded: field names to drop from the validated result
        is_extends: whether to keep the schema's own ``required`` flags for
            fields not listed in ``required``
        message: optional message mapping overriding the default error texts
    Returns:
        The decorator.
    """
    return _schema_validated(schema_cls, required=required, is_extends=is_extends,
                             excluded=excluded, is_async=True, message=message)
def flask_schema_validate(schema_cls: Type[Schema], required: Union[Tuple, List] = tuple(),
                          is_extends: bool = True, excluded: Union[Tuple, List] = tuple(),
                          message: Dict = None) -> Callable:
    """Decorator validating a Flask request's JSON body against ``schema_cls``.

    Thin Flask-flavoured wrapper around :func:`_schema_validated`.

    Args:
        schema_cls: the marshmallow Schema class to validate against
        required: field names to force-mark as required
        excluded: field names to drop from the validated result
        is_extends: whether to keep the schema's own ``required`` flags for
            fields not listed in ``required``
        message: optional message mapping overriding the default error texts
    Returns:
        The decorator.
    """
    return _schema_validated(schema_cls, required=required, is_extends=is_extends,
                             excluded=excluded, is_async=False, message=message)
def schema2swagger(schema_cls: Schema, excluded: Union[Tuple, List] = tuple(),
require_only: Union[Tuple, List] = tuple()):
"""
转换schema为swagger的dict,这样就减少书写swagger文档的麻烦
Args:
schema_cls: schema class
excluded: 排除那些字段不需要展示
require_only: 仅需要展示的字段
Returns:
返回swagger json body doc.JsonBody
"""
try:
from sanic_openapi import doc
except ImportError as e:
raise ImportError(f"please pip install sanic-openapi {e}")
schema_swagger_map = {
fields.Email.__name__: doc.String,
fields.String.__name__: doc.String,
fields.Integer.__name__: doc.Integer,
fields.Url.__name__: doc.String,
fields.Boolean.__name__: doc.Boolean,
fields.Float.__name__: doc.Float,
fields.DateTime.__name__: doc.DateTime,
fields.UUID.__name__: doc.String,
fields.Number.__name__: doc.Float,
fields.Raw.__name__: doc.String # 暂时没有合适的去表示这个映射
}
if not isinstance(require_only, (tuple, list)):
raise FuncArgsError(message="require_only type error!")
if not isinstance(excluded, (tuple, list)):
raise FuncArgsError(message="excluded type error!")
def iter_schema(sub_schema_cls, iter_once=False):
"""
递归处理每个迭代子集
Args:
sub_schema_cls: schema class
iter_once: 控制是否迭代一次,对于包含自身的嵌套来说是有用的
Returns:
返回 dictionary
"""
swagger_dict = {}
if not issubclass(sub_schema_cls, Schema):
raise FuncArgsError("schema_cls must be sub clss of Schema.")
for key, obj in getattr(sub_schema_cls, "_declared_fields", {}).items():
if require_only and key not in require_only: # require_only 和 excluded互斥
continue
elif key in ("created_time", "updated_time", *excluded): # 过滤掉时间字段
continue
if isinstance(obj, fields.Field):
obj_name = obj.__class__.__name__
obj_required = getattr(obj, "required", None)
verbose_name = getattr(obj, "metadata").get("verbose_name", "")
if obj_name in schema_swagger_map: # 处理普通schema
swagger_dict[key] = schema_swagger_map[obj_name](verbose_name, required=obj_required)
elif obj_name == "List": # 处理列表
swagger_dict[key] = handle_list(obj, obj_required, verbose_name)
elif obj_name == "Dict":
swagger_dict[key] = handle_dict(obj, obj_required, verbose_name)
elif obj_name == "Nested": # 递归处理嵌套schema
if getattr(obj, "nested") == "self": # 处理schema包含自身的情况, 包含自身的情况只递归处理一次
if not iter_once:
swagger_dict[key] = iter_schema(sub_schema_cls, True)
else:
sub_schema_cls = getattr(obj, "nested")
swagger_dict[key] = iter_schema(sub_schema_cls)
return swagger_dict
    def handle_list(obj, obj_required, verbose_name):
        """
        Convert a marshmallow ``List`` field into a swagger ``doc.List``.

        Args:
            obj: the ``List`` field instance being converted.
            obj_required: ``required`` flag of the outer field.
            verbose_name: display name of the outer field.
        Returns:
            A ``doc.List`` describing the element type.
        """
        sub_obj = getattr(obj, "container", None)
        sub_obj_name = sub_obj.__class__.__name__
        sub_verbose_name = getattr(sub_obj, "metadata").get("verbose_name", "")
        sub_obj_required = getattr(sub_obj, "required", None)
        if sub_obj_name == "Nested":  # list of nested schemas
            sub_schema_cls = getattr(sub_obj, "nested")  # self-nested lists have not occurred yet, so they are not handled
            return doc.List(iter_schema(sub_schema_cls), required=obj_required, description=verbose_name)
        elif sub_obj_name == "Dict":
            value_obj = handle_dict(sub_obj, sub_obj_required, sub_verbose_name)
            return doc.List(value_obj, required=obj_required, description=verbose_name)
        else:
            return doc.List(schema_swagger_map[sub_obj_name](sub_verbose_name, required=sub_obj_required),
                            required=obj_required, description=verbose_name)
def handle_dict(obj, obj_required, verbose_name):
"""
递归处理每个迭代子集
Args:
obj: 最外层循环的object对象
obj_required: 最外层对象require参数值
verbose_name: 最外层的名称
Returns:
"""
key_obj, value_obj = getattr(obj, "key_container", None), getattr(obj, "value_container", None)
key_name, value_name = key_obj.__class__.__name__, value_obj.__class__.__name__
key_required, value_required = getattr(key_obj, "required", None), getattr(key_obj, "required", None)
# 个别情况下会存在MySQL存储的json,而这时候不需要对json内部的结构进行预估
if key_required and value_required:
key_verbose_name = getattr(key_obj, "metadata").get("verbose_name", "")
value_verbose_name = getattr(value_obj, "metadata").get("verbose_name", "")
key_obj = schema_swagger_map[key_name](key_verbose_name, required=key_required)
if value_name == "Nested":
dict_schema_cls = getattr(value_obj, "nested")
value_obj = iter_schema(dict_schema_cls)
elif value_name == "Dict":
value_obj = handle_dict(value_obj, value_required, value_verbose_name)
elif value_name == "List":
value_obj = handle_list(value_obj, value_required, value_verbose_name)
else:
value_obj = schema_swagger_map[value_name](value_verbose_name, required=value_required)
return doc.Dictionary({"key": key_obj, "value": value_obj}, description=verbose_name, required=obj_required)
else:
return doc.Dictionary(description=verbose_name, required=obj_required)
result = iter_schema(schema_cls)
return doc.JsonBody(result)
def gen_schema(schema_cls: Type[Schema], class_suffix: str = None, table_suffix: str = None,
               table_name: str = None, field_mapping: Dict[str, str] = None,
               schema_fields: Optional[Sequence] = None):
    """
    Derive a new Schema class from an existing one.

    Mainly used for sharded tables: the generated schema mirrors
    ``schema_cls`` but carries a different class name and table name.
    ``field_mapping`` handles fields whose column names differ between
    databases, and ``schema_fields`` restricts which declared fields are
    copied (requested fields that do not exist on ``schema_cls`` are ignored).

    Args:
        schema_cls: schema class to derive from.
        class_suffix: suffix appended to the generated class name.
        table_suffix: suffix appended to the generated table name.
        table_name: explicit table name; defaults to ``schema_cls``'s
            ``__tablename__`` (or its class name minus the "Schema" suffix).
        field_mapping: field-name aliases; mapped fields get their
            ``attribute`` set to the alias (unmapped fields get ``None``).
        schema_fields: subset of fields to keep; when omitted, all declared
            fields are copied.
    Returns:
        The newly generated (per-class cached) schema class.
    """
    if not issubclass(schema_cls, Schema):
        raise ValueError("schema_cls must be Schema type.")
    if table_name is None:
        class_base = schema_cls.__name__
        # BUGFIX: str.rstrip("Schema") strips any trailing run of the letters
        # S/c/h/e/m/a (e.g. "MessageSchema" -> "Messag"); remove the exact
        # "Schema" suffix instead.
        default_name = class_base[:-len("Schema")] if class_base.endswith("Schema") else class_base
        table_name = f"{getattr(schema_cls, '__tablename__', default_name)}"
    if class_suffix:
        class_name = f"{under2camel(table_name)}{class_suffix.capitalize()}Schema"
    else:
        class_name = f"{under2camel(table_name)}Schema"
    if table_suffix:
        table_name = f"{table_name}_{table_suffix}"
    # Lazily attach a per-class cache so repeated calls reuse generated classes.
    if getattr(schema_cls, "_cache_class", None) is None:
        setattr(schema_cls, "_cache_class", {})
    schema_cls_ = getattr(schema_cls, "_cache_class").get(class_name, None)
    if schema_cls_ is None:
        attr_fields = {}
        field_mapping = {} if not isinstance(field_mapping, MutableMapping) else field_mapping
        # NOTE(review): a tuple passed as schema_fields fails the
        # MutableSequence check and is silently discarded even though the
        # docstring accepts any Sequence -- confirm intended behaviour.
        schema_fields = tuple() if not isinstance(
            schema_fields, MutableSequence) else (*schema_fields, *field_mapping.keys())
        for attr_name, attr_field in getattr(schema_cls, "_declared_fields", {}).items():
            if schema_fields and attr_name not in schema_fields:
                continue
            # Copy so the original class's field instances are not mutated.
            attr_field = copy.copy(attr_field)
            setattr(attr_field, "attribute", field_mapping.get(attr_name))
            attr_fields[attr_name] = attr_field
        schema_cls_ = type(class_name, (Schema,), {
            "__doc__": schema_cls.__doc__,
            "__tablename__": table_name,
            "__module__": schema_cls.__module__,
            **attr_fields})
        getattr(schema_cls, "_cache_class")[class_name] = schema_cls_
    return schema_cls_
| 39.76455 | 120 | 0.641674 |
ace84421383d3f7806fd0beb31ad58dd1f5a5e0f | 2,210 | py | Python | NOV2019/LongestSequencewithTwoUniqueNumbers.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | NOV2019/LongestSequencewithTwoUniqueNumbers.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | NOV2019/LongestSequencewithTwoUniqueNumbers.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | #Skill : array, dynamic programming
#Given a sequence of numbers, find the longest sequence that contains only 2 unique numbers.
#Example:
#Input: [1, 3, 5, 3, 1, 3, 1, 5]
#Output: 4
#The longest sequence that contains just 2 unique numbers is [3, 1, 3, 1]
class Solution:
    """Longest contiguous run containing at most two unique numbers.

    The previous "dynamic" implementation restarted the scan after every
    third distinct value and, after a restart, over-counted the window
    length by one (e.g. it returned 5 for [1, 2, 3, 2, 2] where the correct
    answer is 4).  It also crashed on an empty sequence and was O(n^2) in
    the worst case despite claiming O(N).  This version uses the standard
    two-pointer sliding window with per-value counts: true O(n).
    """

    def findSequenceHelper(self, seq):
        """Return the length of the longest window of ``seq`` with <= 2 unique values.

        Returns 0 for an empty sequence.
        """
        length, _start = self.__findSequenceSlidingWindow(seq)
        return length

    # O(n) two-pointer sliding window.
    def __findSequenceSlidingWindow(self, seq):
        counts = {}            # value -> occurrences inside the current window
        best_len = 0
        best_start = 0
        left = 0
        for right, value in enumerate(seq):
            counts[value] = counts.get(value, 0) + 1
            # Shrink from the left until at most 2 distinct values remain.
            while len(counts) > 2:
                left_value = seq[left]
                counts[left_value] -= 1
                if counts[left_value] == 0:
                    del counts[left_value]
                left += 1
            window = right - left + 1
            if window > best_len:
                best_len = window
                best_start = left
        return best_len, best_start
def findSequence(seq):
    """Module-level convenience wrapper around Solution."""
    return Solution().findSequenceHelper(seq)
# Example from the problem statement (expected output: 4):
#print (findSequence([1, 3, 5, 3, 1, 3, 1, 5]) )
# 4
# Ad-hoc smoke check printed at import/run time.
print (findSequence([1, 3, 0, 1, 1, 3, 1, 5]) )
ace84496db0cbc4212e05ffd9527c994bfa486b9 | 1,994 | py | Python | 2.py | niharikasingh/aoc2018 | 21d430d393321e6066eca22d7c6b49e5eb42d756 | [
"MIT"
] | null | null | null | 2.py | niharikasingh/aoc2018 | 21d430d393321e6066eca22d7c6b49e5eb42d756 | [
"MIT"
] | null | null | null | 2.py | niharikasingh/aoc2018 | 21d430d393321e6066eca22d7c6b49e5eb42d756 | [
"MIT"
] | null | null | null | # from collections import Counter
#
# count2 = 0
# count3 = 0
# with open('2input1.txt', 'r') as ifile:
# for line in ifile:
# temp_values = Counter(list(line.strip())).values()
# if (2 in temp_values):
# count2 += 1
# if (3 in temp_values):
# count3 += 1
# print(count2*count3)
#
# from collections import OrderedDict
# from pprint import pprint
# mydict = {}
# with open('2input2.txt', 'r') as ifile:
# for line in ifile:
# line = line.strip()
# sline = ''.join(sorted(line))
# if (sline in mydict):
# mydict[sline].append(line)
# else:
# mydict[sline] = [line]
#
# omydict = OrderedDict(sorted(mydict.items(), key=lambda t: t[0]))
# pprint(dict(omydict.items()))
# keys_omydict = list(omydict.keys())
# for i in range(len(keys_omydict) - 1):
# first_key_array = omydict[keys_omydict[i]]
# second_key_array = omydict[keys_omydict[i + 1]]
# for first_key in first_key_array:
# for second_key in second_key_array:
# differences = 0
# for i1 in range(len(first_key)):
# if first_key[i1] != second_key[i1]:
# differences += 1
# if (differences > 1):
# break
# if (differences == 1):
# print(first_key, second_key)
# break
# AoC 2018 day 2 part 2: print the first pair of box IDs that differ in
# exactly one character position.
with open('2input1.txt', 'r') as input_file:
    box_ids = [row.strip() for row in input_file]

def _report_near_match(ids):
    """Print the first id pair differing in exactly one position, then stop."""
    for index, first in enumerate(ids):
        for second in ids[index:]:
            mismatches = 0
            for position in range(len(first)):
                if first[position] != second[position]:
                    mismatches += 1
                    if mismatches > 1:
                        break
            if mismatches == 1:
                print(first)
                print(second)
                return

_report_near_match(box_ids)
ace84673c985d3b25a3f390d37ab1b890b250efe | 2,301 | py | Python | mattspy/url_walk.py | beckermr/mattspy | e3752dab96479a9439f369000c6ea05c8b717113 | [
"BSD-3-Clause"
] | null | null | null | mattspy/url_walk.py | beckermr/mattspy | e3752dab96479a9439f369000c6ea05c8b717113 | [
"BSD-3-Clause"
] | 4 | 2022-02-08T15:50:52.000Z | 2022-02-11T21:08:31.000Z | mattspy/url_walk.py | beckermr/mattspy | e3752dab96479a9439f369000c6ea05c8b717113 | [
"BSD-3-Clause"
] | null | null | null | import os
import urllib2
class url_walk(object):
    """
    url_walk is just os.walk, but works for Apache file/directory lists online

    Walks an Apache auto-index page breadth-first, yielding (root, dirs, files)
    triples exactly like os.walk.  Optional HTTP basic-auth credentials are
    installed process-wide via urllib2 (this module is Python 2 only:
    note the urllib2 and xrange usage).

    Example:
        for root,drs,fls in url_walk(webaddrs):
            # do something
            # os.path.join(root,fls) is the full web address to the files in fls
    """
    def __init__(self,base,user=None,password=None):
        # base: URL of the top-level Apache directory listing.
        # user/password: optional HTTP basic-auth credentials; note the
        # opener is installed globally, not per-instance.
        self.base = base
        if user is not None and password is not None:
            password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            top_level_url = base
            password_mgr.add_password(None, top_level_url, user, password)
            handler = urllib2.HTTPBasicAuthHandler(password_mgr)
            opener = urllib2.build_opener(handler)
            urllib2.install_opener(opener)
        self.queue = [base]
    def __iter__(self):
        # Breadth-first traversal: yield the current listing, then enqueue
        # each sub-directory for a later fetch.
        while len(self.queue) > 0:
            root,dirs,fls = self._breakup_url(self.queue.pop(0))
            yield root,dirs,fls
            for dr in dirs:
                self.queue.append(os.path.join(root,dr))
    def _breakup_url(self,url):
        # Fetch one auto-index page and scrape it into (root, dirs, files).
        # Parsing is string-based and tailored to Apache's HTML layout:
        # entries are <a ...> anchors; directories carry an alt="[DIR]" icon.
        rep = urllib2.urlopen(url)
        html = rep.read()
        if url[-1] == '/':
            root = url[:-1]
        else:
            root = url
        dirs = []
        files = []
        for link in html.split('</a>')[:-1]:
            if 'alt="[DIR]"' not in link:
                is_file = True
            else:
                is_file = False
            items = link.split('<a ')[-1]
            items = items.split('>')
            # Split the anchor tag on quotes to recover attribute/value pairs.
            tag = items[0].split('"')
            if len(tag)%2 != 0:
                tag = tag[:-1]
            props = {}
            for i in xrange(0,len(tag),2):
                props[str(tag[i][:-1].split())[2:-2]] = tag[i+1]
            nm = items[1]
            # Skip Apache's table-header and parent-directory pseudo entries.
            if nm not in ['Name','Last modified','Size','Description','Parent Directory']:
                if 'href' in props:
                    lapp = props['href']
                    if lapp[-1] == '/':
                        lapp = lapp[:-1]
                    if is_file:
                        files.append(lapp)
                    else:
                        dirs.append(lapp)
        return root,dirs,files
ace847cceb40f9fa132436d28ac17e439fad857f | 3,627 | py | Python | scraper.py | YashIyengar/Selenium-youtube-scraper-prac | f0fa9774ead73803532c603bd6e3e058a98db025 | [
"MIT"
] | null | null | null | scraper.py | YashIyengar/Selenium-youtube-scraper-prac | f0fa9774ead73803532c603bd6e3e058a98db025 | [
"MIT"
] | null | null | null | scraper.py | YashIyengar/Selenium-youtube-scraper-prac | f0fa9774ead73803532c603bd6e3e058a98db025 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import pandas as pd
import smtplib
import os
import json
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# NOTE(review): name has a typo ("TRENDNG"); kept as-is because other code references it.
YOUTUBE_TRENDNG_URL = 'https://www.youtube.com/feed/trending'
def get_driver():
    """Build a headless Chrome WebDriver suitable for containers/CI."""
    options = Options()
    for flag in ('--no-sandbox', '--disable-dev-shm-usage', '--headless'):
        options.add_argument(flag)
    return webdriver.Chrome(options=options)
def get_videos(driver):
    """Open the YouTube trending page and return its video renderer elements."""
    driver.get(YOUTUBE_TRENDNG_URL)
    return driver.find_elements(By.TAG_NAME, 'ytd-video-renderer')
def parse_video(video):
    """Extract title, url, thumbnail, channel and description from one video tile."""
    title_element = video.find_element(By.ID, 'video-title')
    video_title = title_element.text
    video_url = title_element.get_attribute('href')
    thumbnail = video.find_element(By.TAG_NAME, 'img').get_attribute('src')
    channel = video.find_element(By.CLASS_NAME, 'ytd-channel-name').text
    summary = video.find_element(By.ID, 'description-text').text
    return {
        "title": video_title,
        "url": video_url,
        "thumbnail_url": thumbnail,
        'channel': channel,
        'description': summary
    }
def dict_to_googlesheets(api_key, spread_sheet, data):
    """Insert *data* (list of dicts) as rows into the first worksheet of *spread_sheet*.

    *api_key* is the path to a service-account JSON keyfile.
    """
    oauth_scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(api_key, oauth_scope)
    client = gspread.authorize(credentials)
    worksheet = client.open(spread_sheet).sheet1
    rows = pd.DataFrame(data).values.tolist()
    return worksheet.insert_rows(rows)
def send_email(body):
    """Email *body* (rendered via str()) to the hard-coded recipient over Gmail SSL.

    Reads the sender password from the GMAIL_PASSWORD environment variable.
    Failures are best-effort: any exception is printed, not raised.
    """
    try:
        server_ssl = smtplib.SMTP_SSL('smtp.gmail.com', 465)
        server_ssl.ehlo()
        SENDER_EMAIL = "yash.iyengar.project@gmail.com"
        RECEIVER_EMAIL = "iyengar2589@gmail.com"
        SENDER_PASSWORD = os.environ['GMAIL_PASSWORD']
        subject = 'YouTube Trending Videos'
        # BUGFIX: the old indented triple-quoted f-string put leading spaces on
        # every header line ("        From: ..."), producing a malformed
        # RFC 5322 message.  Headers must start at column 0 and be separated
        # from the body by a blank line.
        email_text = (
            f"From: {SENDER_EMAIL}\r\n"
            f"To: {RECEIVER_EMAIL}\r\n"
            f"Subject: {subject}\r\n"
            f"\r\n"
            f"{body}\r\n"
        )
        server_ssl.login(SENDER_EMAIL, SENDER_PASSWORD)
        server_ssl.sendmail(SENDER_EMAIL, RECEIVER_EMAIL, email_text)
        server_ssl.close()
    except Exception as e:
        # Deliberately best-effort: the scrape pipeline should not die on a
        # notification failure.
        print("Something went wrong...", e)
if __name__ == "__main__":
    # Pipeline: scrape trending tiles -> parse top 10 -> push to Sheets -> notify.
    print("Creating Driver...")
    browser = get_driver()
    print('Fetching trending videos...')
    tiles = get_videos(browser)
    print(f'Found {len(tiles)} videos')
    print('Parsing the top 10 videos ')
    parsed_videos = [parse_video(tile) for tile in tiles[:10]]
    print('Saving the data to Google Spreadsheets')
    dict_to_googlesheets('scrapping-practice-11696e2cdf5c.json', 'Youtube Trending Data', parsed_videos)
    print("Send the results over email")
    body = {
        "link to data": "https://docs.google.com/spreadsheets/d/16psRhOF8onOjVPHi8-AALGUnrhppGjH9USDl5hrHdFs/edit#gid=0"}
    send_email(body)
    print('Finished.')
| 27.477273 | 121 | 0.700579 |
ace848b63fc0e741de840d5243ffa515abea49f6 | 4,620 | py | Python | Lista-implementacao-1/Python/2_Bisseccao/bisseccao.py | henrique-tavares/IFB-Calculo-Numerico | 2c1a9de3b3c3ff7d9ed82771fe12bccfa7a05aab | [
"MIT"
] | null | null | null | Lista-implementacao-1/Python/2_Bisseccao/bisseccao.py | henrique-tavares/IFB-Calculo-Numerico | 2c1a9de3b3c3ff7d9ed82771fe12bccfa7a05aab | [
"MIT"
] | null | null | null | Lista-implementacao-1/Python/2_Bisseccao/bisseccao.py | henrique-tavares/IFB-Calculo-Numerico | 2c1a9de3b3c3ff7d9ed82771fe12bccfa7a05aab | [
"MIT"
] | null | null | null | import numpy as np
from prettytable import PrettyTable
import matplotlib.pyplot as mpl
# --------------------------------ATENÇÃO--------------------------------#
# Estou usando uma biblioteca chamada: "Prettytable", para deixar a #
# tabela com uma aparência bonita. #
# #
# Para instalar no windows é só digitar no prompt de comando (admin): #
# "pip3 install PTable". #
# Para instalar no linux é só digitar no terminal: #
# "sudo pip3 install PTable". #
# #
# -----------------------------------------------------------------------#
# Table 2: raw values of f(x) and each Taylor polynomial at the sample points.
tabela2 = PrettyTable()
tabela2.title = "Valores dos Polinnmios de Taylor"
tabela2.field_names = ["i", "x", "p0(x)", "p1(x)", "p2(x)", "p3(x)", "p4(x)", "f(x)"]
# Table 3: difference between each Taylor polynomial and the original function.
tabela3 = PrettyTable()
tabela3.title = "Diferenca entre os Polinomios de Taylor e a funcao original"
tabela3.field_names = ["i", "x", "p0(x)-f(x)", "p1(x)-f(x)", "p2(x)-f(x)", "p3(x)-f(x)", "p4(x)-f(x)"]
# Table 4: per-iteration log of the bisection search (shared; cleared after each run).
tabela4 = PrettyTable()
tabela4.title = "Aproximacao do primeiro ponto distante do Polinomio de Taylor em relacao a funcao original, usando o metodo da bisseccao"
tabela4.field_names = ["i", "x0", "xk", "xb", "h(xb)", "pi(xb)", "f(xb)"]
def f(x):
    """Quartic test function: f(x) = -0.1x^4 - 0.15x^3 - 0.5x^2 - 0.25x + 1.2."""
    terms = (-0.1 * x ** 4, -0.15 * x ** 3, -0.5 * x ** 2, -0.25 * x, 1.2)
    return sum(terms)
def p(i, x, at, fat):
    """Evaluate the degree-i Taylor polynomial: sum of at[j]/fat[j] * x^j for j <= i."""
    return sum(x ** j * at[j] / fat[j] for j in range(i + 1))
def h(x, y):
    """Signed gap between an approximation y and f(x), shifted by the 0.001 tolerance."""
    gap = y - f(x) - 0.001
    return gap
def bisseccao(x0, xk, i, xv, pxv, fxv):
    """
    Bisection search on [x0, xk] for the point where the degree-i Taylor
    polynomial drifts from f by the 0.001 tolerance (i.e. h(x, p_i(x)) ~ 0).

    Every midpoint (with the matching polynomial and function values) is
    appended to xv / pxv / fxv so the caller can plot the iterates.  Uses
    the module globals ``at``/``fat`` (Taylor coefficients) and ``tabela4``
    (iteration log).  Removed the unused xb0/xbk locals of the original.
    """
    for j in range(1000):  # hard iteration cap; normally converges far sooner
        xm = (x0 + xk) / 2
        pm = p(i, xm, at, fat)  # evaluate once per iteration (was recomputed 4x)
        fm = f(xm)
        xv.append(xm)
        pxv.append(pm)
        fxv.append(fm)
        tabela4.add_row([j + 1, x0, xk, xm, h(xm, pm), pm, fm])
        if abs(h(xm, pm)) < 0.0001:
            # Converged: print and reset the shared iteration table.
            print(tabela4)
            tabela4.clear_rows()
            break
        # NOTE(review): if the cap is hit without converging, the table is
        # neither printed nor cleared -- confirm that is acceptable.
        # Keep the sub-interval whose endpoints bracket the sign change.
        if (np.sign(h(x0, p(i, x0, at, fat))) * np.sign(h(xm, pm))) < 0:
            xk = xm
        else:
            x0 = xm
def grafico(fx, fy, px, py, xb, pxb, fxb):
    """Plot a Taylor polynomial against f plus the bisection iterates on both curves."""
    mpl.figure()
    curve_specs = [
        ((px, py), dict(color="m", label="pi(x)")),
        ((fx, fy), dict(color="k", label="f(x)", linestyle="dashed")),
        ((xb, pxb, "ro"), dict(label="x -> pi(x)")),
        ((xb, fxb, "bo"), dict(label="x -> f(x)")),
    ]
    for plot_args, plot_kwargs in curve_specs:
        mpl.plot(*plot_args, **plot_kwargs)
    mpl.xlabel("x")
    mpl.ylabel("f(x)/pi(x)")
    mpl.legend()
    mpl.grid(True)
# Sample points, Taylor coefficients (at) and their factorials (fat) for p0..p4.
x = [0, 0.4, 0.8, 1.2, 1.6, 2, 2.4, 2.8, 3.2, 3.6, 4]
at = [1.2, -0.25, -1, -0.9, -2.4]
fat = [1, 1, 2, 6, 24]
# Accumulators for the tables (fx = f samples, p0..p4 = polynomial samples).
# NOTE(review): p1..p4 are pre-seeded with 1.2 while p0 is not, and the loop
# below may break early; the pj lists can therefore end up misaligned with x.
# Confirm the table indexing below is intended.
fx = []
p0 = []
p1 = [1.2]
p2 = [1.2]
p3 = [1.2]
p4 = [1.2]
res_t = 0
res_f = 0
x0 = 0
xk = 0
xm = 0
xb0 = 0
xbk = 0
# Dense grid (i_0) plus function/polynomial samples used for plotting.
i_0 = []
f_x = []
p_0 = []
p_1 = []
p_2 = []
p_3 = []
p_4 = []
# Bisection iterates and matching p/f values, one triple of lists per degree.
xb0 = []
xb1 = []
xb2 = []
xb3 = []
fxb0 = []
fxb1 = []
fxb2 = []
fxb3 = []
pxb0 = []
pxb1 = []
pxb2 = []
pxb3 = []
for i in range(11):  # Compute f(x) and the Taylor polynomials, saving them in vectors
    fx.append(f(x[i]))
    for j in range(5):
        res_t = p(j, x[i], at, fat)
        if j == 0:
            p0.append(res_t)
        elif j == 1:
            p1.append(res_t)
        elif j == 2:
            p2.append(res_t)
        elif j == 3:
            p3.append(res_t)
        elif j == 4:
            p4.append(res_t)
        if abs(res_t - f(x[i])) < 0.001:
            break
    res_t = 0
for i in range(11):  # Add the values computed above to the tables
    tabela2.add_row([i, x[i], p0[i], p1[i], p2[i], p3[i], p4[i], fx[i]])
    tabela3.add_row([i, x[i], p0[i] - fx[i], p1[i] - fx[i], p2[i] - fx[i], p3[i] - fx[i], p4[i] - fx[i]])
for j in range(
    0, 41, 4
):  # Sample f(x) and the Taylor polynomials on a dense grid for plotting
    i = j / 100
    i_0.append(i)
    f_x.append(f(i))
    p_0.append(p(0, i, at, fat))
    p_1.append(p(1, i, at, fat))
    p_2.append(p(2, i, at, fat))
    p_3.append(p(3, i, at, fat))
    p_4.append(p(4, i, at, fat))
print(tabela2)
print("")
print(tabela3)
print("")
# For each polynomial degree, locate the first drift point via bisection and
# plot the polynomial against f together with the bisection iterates.
bisseccao(x[0], x[1], 0, xb0, pxb0, fxb0)
grafico(i_0, f_x, i_0, p_0, xb0, pxb0, fxb0)
bisseccao(x[0], x[1], 1, xb1, pxb1, fxb1)
grafico(i_0, f_x, i_0, p_1, xb1, pxb1, fxb1)
bisseccao(x[0], x[1], 2, xb2, pxb2, fxb2)
grafico(i_0, f_x, i_0, p_2, xb2, pxb2, fxb2)
bisseccao(x[0], x[1], 3, xb3, pxb3, fxb3)
grafico(i_0, f_x, i_0, p_3, xb3, pxb3, fxb3)
mpl.show()
ace84906bc3fa9bb0c5f0b5f823c6ecb4d7af0c6 | 5,165 | py | Python | tests/test_tagpack_schema.py | iknaio/graphsense-tagpack-tool | d0b84618d90808f442ad4fb3d6ccef8909ead524 | [
"MIT"
] | null | null | null | tests/test_tagpack_schema.py | iknaio/graphsense-tagpack-tool | d0b84618d90808f442ad4fb3d6ccef8909ead524 | [
"MIT"
] | 2 | 2022-02-24T11:24:03.000Z | 2022-02-24T11:24:06.000Z | tests/test_tagpack_schema.py | iknaio/graphsense-tagpack-tool | d0b84618d90808f442ad4fb3d6ccef8909ead524 | [
"MIT"
] | null | null | null | from datetime import date
import pytest
from tagpack.tagpack_schema import TagPackSchema, ValidationError
from tagpack.taxonomy import Taxonomy
@pytest.fixture
def schema():
    """Provide a fresh TagPackSchema for each test (removed unused monkeypatch dep)."""
    return TagPackSchema()
@pytest.fixture
def taxonomies():
    """Two minimal taxonomies (entity, abuse) with one concept each."""
    entity_tax = Taxonomy('entity', 'http://example.com/entity')
    entity_tax.add_concept('exchange', 'Exchange', 'Some description')
    abuse_tax = Taxonomy('abuse', 'http://example.com/abuse')
    abuse_tax.add_concept('bad_coding', 'Bad coding', 'Really bad')
    return {'entity': entity_tax, 'abuse': abuse_tax}
def test_init(schema):
    # The fixture must yield a TagPackSchema backed by the bundled YAML definition.
    assert isinstance(schema, TagPackSchema)
    assert schema.definition == 'tagpack_schema.yaml'
def test_header_fields(schema):
    # Header section must expose title/creator/tags with correct type/mandatory flags.
    assert isinstance(schema.header_fields, dict)
    assert 'tags' in schema.header_fields
    assert 'title' in schema.header_fields
    assert 'type' in schema.header_fields['title']
    assert 'text' in schema.header_fields['title']['type']
    assert 'mandatory' in schema.header_fields['title']
    assert schema.header_fields['title']['mandatory'] is True
    assert schema.header_fields['creator']['mandatory'] is True
    assert schema.header_fields['tags']['mandatory'] is True
def test_mandatory_header_fields(schema):
    # Only the mandatory subset of header fields must be reported here.
    assert isinstance(schema.mandatory_header_fields, dict)
    assert 'title' in schema.mandatory_header_fields
    assert 'tags' in schema.mandatory_header_fields
    assert 'creator' in schema.mandatory_header_fields
    assert 'notmandatory' not in schema.mandatory_header_fields
def test_tag_fields(schema):
    # Tag section must describe 'label' (with type/mandatory) and 'address'.
    assert isinstance(schema.tag_fields, dict)
    assert 'label' in schema.tag_fields
    assert 'type' in schema.tag_fields['label']
    assert 'mandatory' in schema.tag_fields['label']
    assert 'address' in schema.tag_fields
def test_mandatory_tag_fields(schema):
    # address/label/source/currency are required; created/lastmod are optional.
    assert isinstance(schema.mandatory_tag_fields, dict)
    assert 'address' in schema.mandatory_tag_fields
    assert 'label' in schema.mandatory_tag_fields
    assert 'source' in schema.mandatory_tag_fields
    assert 'currency' in schema.mandatory_tag_fields
    assert 'created' not in schema.mandatory_tag_fields
    assert 'lastmod' not in schema.mandatory_tag_fields
def test_all_tag_fields(schema):
    # The full tag-field map includes both mandatory and optional fields.
    assert isinstance(schema.tag_fields, dict)
    assert 'address' in schema.tag_fields
    assert 'label' in schema.tag_fields
def test_all_fields(schema):
    # all_fields merges header and tag fields into one dict.
    assert isinstance(schema.all_fields, dict)
    assert all(field in schema.all_fields
               for field in ['title', 'label', 'address'])
def test_field_type(schema):
    # Every schema field reports its declared type: text, datetime, boolean or list.
    assert schema.field_type('title') == 'text'
    assert schema.field_type('creator') == 'text'
    assert schema.field_type('owner') == 'text'
    assert schema.field_type('description') == 'text'
    assert schema.field_type('address') == 'text'
    assert schema.field_type('label') == 'text'
    assert schema.field_type('source') == 'text'
    assert schema.field_type('currency') == 'text'
    assert schema.field_type('context') == 'text'
    assert schema.field_type('confidence') == 'text'
    assert schema.field_type('category') == 'text'
    assert schema.field_type('abuse') == 'text'
    assert schema.field_type('created') == 'datetime'
    assert schema.field_type('lastmod') == 'datetime'
    assert schema.field_type('is_cluster_definer') == 'boolean'
    assert schema.field_type('is_public') == 'boolean'
    assert schema.field_type('tags') == 'list'
def test_field_taxonomy(schema):
    # 'category' is bound to the 'entity' taxonomy in the schema definition.
    assert schema.field_taxonomy('category') == 'entity'
def test_field_no_taxonomy(schema):
    # Fields without a taxonomy binding must return None rather than raise.
    assert schema.field_taxonomy('title') is None
def test_check_type(schema):
    # check_type accepts values matching the declared field type and raises
    # ValidationError (with a descriptive message) on mismatches.
    assert schema.check_type('title', 'some test string')
    with(pytest.raises(ValidationError)) as e:
        assert schema.check_type('title', 5)
    assert "Field title must be of type text" in str(e.value)
    assert schema.check_type('lastmod', date.fromisoformat('2021-04-21'))
    with(pytest.raises(ValidationError)) as e:
        assert schema.check_type('lastmod', 5)
    assert "Field lastmod must be of type datetime" in str(e.value)
    assert schema.check_type('address', "string")
    with(pytest.raises(ValidationError)) as e:
        assert schema.check_type('address', 0x2342)
    assert "Field address must be of type text" in str(e.value)
    assert schema.check_type('tags', [{'a': 1}, {'b': 2}])
    with(pytest.raises(ValidationError)) as e:
        assert schema.check_type('tags', '56abc')
    assert "Field tags must be of type list" in str(e.value)
def test_check_taxonomies(schema, taxonomies):
    # Known concepts pass; unknown concepts and unknown taxonomies each raise
    # a ValidationError with a specific message.
    assert schema.check_taxonomies('category', 'exchange', taxonomies)
    with(pytest.raises(ValidationError)) as e:
        assert schema.check_taxonomies('category', 'test', taxonomies)
    assert "Undefined concept test in field category" in str(e.value)
    schema.schema['tag']['dummy'] = {'taxonomy': 'test'}
    with(pytest.raises(ValidationError)) as e:
        assert schema.check_taxonomies('dummy', 'test', taxonomies)
    assert "Unknown taxonomy test" in str(e.value)
| 35.376712 | 73 | 0.721007 |
ace8499dfbe943b17bde1a21bf776a8d50a018fc | 5,872 | py | Python | Application_Files/launch.py | quillyBeans/VisualConvexHull | 4f34668e51b368dc7e52ceabde9a4a341be9bd32 | [
"MIT"
] | null | null | null | Application_Files/launch.py | quillyBeans/VisualConvexHull | 4f34668e51b368dc7e52ceabde9a4a341be9bd32 | [
"MIT"
] | null | null | null | Application_Files/launch.py | quillyBeans/VisualConvexHull | 4f34668e51b368dc7e52ceabde9a4a341be9bd32 | [
"MIT"
] | 2 | 2015-01-06T14:10:01.000Z | 2021-01-27T12:12:56.000Z | import pygame
from pygame.locals import *
from convexHull import convexHull
class Button:
    """A simple clickable pygame button rendered with a 3D-style bevel border."""

    def __init__(self, button_message, coordinates):
        self.caption = " " + button_message  # leading space pads the label
        self.btn_width = 90
        self.btn_height = 30
        left, top = coordinates[0], coordinates[1]
        self.rect = pygame.Rect(left, top, self.btn_width, self.btn_height)
        self.surface = pygame.Surface(self.rect.size)
        self.bg_color = pygame.Color(b'lightgray')
        self.fg_color = pygame.Color(b'black')
        pygame.font.init()
        self.font = pygame.font.Font('freesansbold.ttf', 14)
        self._update()

    def pressed(self, mouse):
        """Return True when *mouse* (x, y) lies strictly inside the button rect."""
        mouse_x, mouse_y = mouse[0], mouse[1]
        left, top = self.rect.topleft
        right, bottom = self.rect.bottomright
        return left < mouse_x < right and top < mouse_y < bottom

    def draw(self, display_surface):
        """Blit the pre-rendered button surface onto *display_surface*."""
        display_surface.blit(self.surface, self.rect)

    def _update(self):
        """Render the caption and beveled border onto the cached button surface."""
        width = self.rect.width
        height = self.rect.height
        self.surface.fill(self.bg_color)
        caption_surf = self.font.render(self.caption, True, self.fg_color, self.bg_color)
        caption_rect = caption_surf.get_rect()
        caption_rect.inflate_ip(-10, -17)  # nudge the caption into a pleasing spot
        self.surface.blit(caption_surf, caption_rect)
        # Outer black outline, then light (top/left) and dark (bottom/right)
        # lines to fake a raised 3D bevel.
        pygame.draw.rect(self.surface, pygame.Color(b'black'), pygame.Rect((0, 0, width, height)), 1)
        pygame.draw.line(self.surface, pygame.Color(b'white'), (1, 1), (width - 2, 1))
        pygame.draw.line(self.surface, pygame.Color(b'white'), (1, 1), (1, height - 2))
        pygame.draw.line(self.surface, pygame.Color(b'darkgray'), (1, height - 1), (width - 1, height - 1))
        pygame.draw.line(self.surface, pygame.Color(b'darkgray'), (width - 1, 1), (width - 1, height - 1))
        pygame.draw.line(self.surface, pygame.Color(b'gray'), (2, height - 2), (width - 2, height - 2))
        pygame.draw.line(self.surface, pygame.Color(b'gray'), (width - 2, 2), (width - 2, height - 2))
class App:
    """Convex-hull demo: click points, press 'Get Hull' to draw their convex hull."""
    def __init__(self):
        # Button layout constants (buttons laid out right-to-left from
        # button_right_most, spaced by button_spacing).
        self.button_width = 20
        self.button_spacing = 100
        self.button_offset = 0
        self.button_right_most = 840
        self.msg_display_top_left = 20, 25
        self.running = True
        self.display_surf = None
        self.size = self.weight, self.height = 960, 720
        # Colors and font are created in on_init (after pygame.init()).
        self.bg_color = None
        self.point_color = None
        self.hull_color = None
        self.mouse_x, self.mouse_y = 0, 0
        self.font_obj = None
        # points: user-clicked coordinates; ch_points: computed hull vertices.
        self.points = []
        self.ch_points = []
        self.msg = "Click points then 'Get Hull'"
        # self.btn_interactive = Button("Interactive", (self.button_right_most - self.button_offset, self.button_width))
        # self.button_offset = self.button_spacing
        self.btn_reset = Button("Reset", (self.button_right_most - self.button_offset, self.button_width))
        self.button_offset += self.button_spacing
        self.btn_get_convex = Button("Get Hull", (self.button_right_most - self.button_offset, self.button_width))
    def on_init(self):
        """Initialise pygame, fonts, colors and the display surface."""
        pygame.init()
        self.font_obj = pygame.font.Font('freesansbold.ttf', 24)
        self.bg_color = pygame.Color(0, 0, 0)
        self.point_color = pygame.Color(255, 255, 255)
        self.hull_color = pygame.Color(b'red')
        self.display_surf = pygame.display.set_mode(self.size)
        self.running = True
    def on_event(self, event):
        """Handle quit and mouse-button-up events (any of the 3 buttons)."""
        if event.type == QUIT:
            self.running = False
        elif event.type == MOUSEBUTTONUP and event.button in (1, 2, 3):
            # if event is button, make hull or reset screen, else add to point list
            if self.btn_get_convex.pressed(event.pos):
                self.msg = "Convex Hull"
                del self.ch_points[:]
                self.ch_points = convexHull(self.points)
            elif self.btn_reset.pressed(event.pos):
                del self.points[:]
                del self.ch_points[:]
                self.msg = "Click points then 'Get Hull'"
            else:
                self.points.append(event.pos)
                self.msg = "x , y : " + str(event.pos)
    def on_loop(self):
        """Redraw the whole frame: buttons, points, hull and status message."""
        self.display_surf.fill(self.bg_color)
        self.btn_get_convex.draw(self.display_surf)
        self.btn_reset.draw(self.display_surf)
        # self.btn_interactive.draw(self.display_surf)
        # draws out the regular coordinate dots if populated
        for coord in self.points:
            pygame.draw.circle(self.display_surf, self.point_color, coord, 3, 0)
        # draws out the convex hull coordinate dots if populated
        for coord in self.ch_points:
            pygame.draw.circle(self.display_surf, self.hull_color, coord, 3, 0)
        # draws the edges to show the convex hull if populated
        if len(self.ch_points) > 0:
            pygame.draw.lines(self.display_surf, self.hull_color, True, self.ch_points, 1)
        # message display window
        msg_surface_obj = self.font_obj.render(self.msg, False, self.point_color)
        msg_rect_obj = msg_surface_obj.get_rect()
        msg_rect_obj.topleft = (self.msg_display_top_left[0], self.msg_display_top_left[1])
        self.display_surf.blit(msg_surface_obj, msg_rect_obj)
    def on_render(self):
        """Flip the frame drawn by on_loop onto the screen."""
        pygame.display.update()
    def on_cleanup(self):
        """Shut pygame down."""
        pygame.quit()
    def execute(self):
        """Main loop: init, then event/draw/render until the window closes."""
        self.on_init()
        while self.running:
            for event in pygame.event.get():
                self.on_event(event)
            self.on_loop()
            self.on_render()
        self.on_cleanup()
if __name__ == "__main__":
    # Run the convex-hull demo application.
    app = App()
    app.execute()
| 40.777778 | 120 | 0.61921 |
ace84a25caf0ca3c969e38156ee5a4360ab56352 | 8,082 | py | Python | main.py | Darkoneskk/Kiny-Painel | 8e86c047a2c9e767c64ff982ee7e5fbcc07cce3b | [
"MIT"
] | 1 | 2021-01-26T03:21:22.000Z | 2021-01-26T03:21:22.000Z | main.py | Darkoneskk/Kiny-Painel | 8e86c047a2c9e767c64ff982ee7e5fbcc07cce3b | [
"MIT"
] | null | null | null | main.py | Darkoneskk/Kiny-Painel | 8e86c047a2c9e767c64ff982ee7e5fbcc07cce3b | [
"MIT"
] | null | null | null | import os
import sys
import base64, json, re
import time
import requests
import api
from time import sleep as timeout
from requests import get
R='\033[1;31m'; B='\033[1;34m'; C='\033[1;37m'; Y='\033[1;33m'; G='\033[1;32m'; RT='\033[;0m'
os.system('git pull && clear')
a='aHR0cDovL3d3dy5qdXZlbnR1ZGV3ZWIubXRlLmdvdi5ici9wbnBlcGVzcXVpc2FzLmFzcA=='
a=a.encode('ascii')
a=base64.b64decode(a)
a=a.decode('ascii')
def restart():
python = sys.executable
os.execl(python, python, *sys.argv)
def menu():
os.system("pkg install figlet")
os.system("clear")
print("Coded By: \033[1;36m KINY \033[m in 14/12/2020")
print()
os.system("figlet KINY")
print()
print("\033[32m{1} BUSCADOR DE CEP\033[m")
print("\033[32m{2} GEO LOCALIZADOR DE IP\033[m")
print("\033[32m{3} KINY-SITE-INFOGA\033[m")
print("\033[32m{4} CONSULTA DE CNPJ\033[m")
print("\033[32m{5} CONSULTA BANCARIA\033[m")
print("\033[32m{6} CONSULTA CPF\033[m")
print()
print("\033[32m{99} Update && Upgrade\033[m")
print("\033[32m{00} EXIT\033[m")
op = input("\033[32m===>\033[m ").strip()
if op == '6' or op == '06':
def tipos():
os.system("clear")
print("\033[32m{1}CONSULTAR CPF\033[m")
print("\033[32m{1}GERAR CPF\033[m")
tool=input(f'{C}[{G}+{C}] Selecione a forma de operação ({G}1 {C}ou {G}2{C}): ')
if tool=='1':
cpf=input(f'{C}[{G}*{C}] Informe o CPF a ser consultado (sem pontos ou traços): {B}')
consulta(cpf)
elif tool=='2':
gerarcpf()
else:
print(f'{C}[{R}-{C}] Seleção inválida.')
time.sleep(1)
tipos()
def gerarcpf():
print(f'{C}[{G}*{C}] Gerando CPF...')
time.sleep(1)
cpf=requests.request('GET','http://geradorapp.com/api/v1/cpf/generate?token=f01e0024a26baef3cc53a2ac208dd141').json()
cpf2=cpf['data']['number_formatted']
cpf=cpf['data']['number']
print(f'{C}[{Y}i{C}] O CPF gerado foi: {B}'+cpf2)
time.sleep(1)
print(f'{C}[{G}*{C}] Consultando CPF gerado...')
consulta(cpf)
def consulta(cpf):
api.consulta(cpf)
tipos()
if op == '5' or op == '05':
def bank():
global requests
os.system("clear")
os.system("figlet KINY")
print("DIGITE O CODIGO BANCARIO")
bank_input = input("\033[32m=====> \033[m")
requests = requests.get('https://brasilapi.com.br/api/banks/v1/{}'.format(bank_input))
bank_data = requests.json()
if 'message' not in bank_data:
os.system('clear')
os.system("figlet KINY")
print("Código bancário: {}".format(bank_data['code']))
print("Nome: {}".format(bank_data['name']))
print("Nome completo: {}".format(bank_data['fullName']))
print("ISPB: {}".format(bank_data['ispb']))
else:
os.system("clear")
print('{}: Código bancário inválido.'.format(bank_input))
print("\nDESEJA CONSULTAR UM NOVO CODIGO BANCARIO? \n{1}Sim\n{2}Nao\n")
kc = input("===> ")
if kc == '01' or kc == '1':
bank()
else:
menu()
bank()
if op == '1' or op == '01':
def main():
os.system("clear")
print("\033[32m######\033[m")
print("\033[32m#KINY#\033[m")
print("\033[32m######\033[m")
cep_input = input("DIGITE O CEP: ")
if len(cep_input) != 8:
print("\033[1;31mQUANTIDADE DE DIGITOS INVALIDA\033[m")
main()
request = get('https://viacep.com.br/ws/{}/json/'.format(cep_input))
adress_data = request.json()
if 'erro' not in adress_data:
os.system("clear")
print("\033[1;31m^CEP ENCONTRADO^\033[m")
print()
print('Cep: {}'.format(adress_data['cep']))
print('Logradouro: {}'.format(adress_data['logradouro']))
print('Complemento: {}'.format(adress_data['complemento']))
print('Bairro: {}'.format(adress_data['bairro']))
print('Cidade: {}'.format(adress_data["localidade"]))
print('Estado: {}'.format(adress_data['uf']))
else:
print('{}: CEP INVALIDO.'.format(cep_input))
print("DESEJA REALIZAR UMA NOVA CONSULTA? \n1. Yes\n2. No\n")
option = input('===> ')
if option == '1':
main()
else:
menu()
main()
if op == '00' or op == '0':
os.system("clear")
print("\033[32m Arrivederci\033[m")
exit()
if op == '99' or op == '99':
os.system("clear")
os.system("pkg update && pkg update")
menu()
if op == '3' or op == '03':
os.system("clear")
os.system("pkg install nmap")
os.system("pkg install whois")
os.system("pkg install python")
os.system("clear")
print("\033[1;36m KINY \033[m")
print()
j = input("1 for HTTPS, 2 for HTTP:")
print()
print("^^^^^^^^^^^^^^^^*")
print()
print("ex: site.com")
print()
k = input("Domain: ")
print()
print("^^^^^^^^^^^^^^^^^*")
print()
if j == '1':
print("URL: ""https://www." + k)
os.system("nmap " + k)
os.system("whois " + k)
print()
if j == '2':
print("URL: ""http://www." + k)
os.system("nmap " + k)
os.system("whois " + k)
menu()
if op == '2' or op == '02':
def ip():
os.system("clear")
print("\033[32m######\033[m")
print("\033[32m#KINY#\033[m")
print("\033[32m######\033[m")
ip_input = input("\033[32m=====> \033[m")
requests = get('http://ip-api.com/json/{}'.format(ip_input))
adress_data = requests.json()
if 'fail' not in adress_data:
print('IP: {}'.format(adress_data['query']))
print('Status: {}'.format(adress_data['status']))
print('Pais: {}'.format(adress_data['country']))
print('Regiao: {}'.format(adress_data['regionName']))
print('Cidade: {}'.format(adress_data['city']))
print('ZIP: {}'.format(adress_data['zip']))
print('Latitude: {}'.format(adress_data['lat']))
print('Longitude: {}'.format(adress_data['lon']))
print('Fuso-Horarro: {}'.format(adress_data['timezone']))
print('Internet-Info: {}'.format(adress_data['as']))
print('ISP: {}'.format(adress_data['isp']))
print('ORG: {}'.format(adress_data['org']))
else:
print('{}: IP INVALIDO.'.format(ip_input))
print("\nDESEJA LOCALIZAR UM NOVO IP? \n{1}Sim\n{2}Nao\n")
vi = input('===> ')
if vi == '1' or vi == '01':
ip()
else:
menu()
ip()
if op == '4' or op == '04':
api.cnpj()
def password():
    """Prompt for credentials; open the menu on success, retry forever on
    failure.

    NOTE(review): the password is hard-coded in plaintext and the username
    is read but never checked — consider moving both to configuration.
    """
    user = input("USERNAME: ")
    expected = 'VirtualInsanity'
    print("\n ")
    attempt = input("PASSWORD: ").strip()
    if attempt == expected:
        menu()
    else:
        os.system("clear")
        print("\033[1;31mERROR: Wrong Password....Yare Yare\033[m")
        timeout(1)
        password()


# Entry point: gate the whole panel behind the password prompt.
password()
ace84b0e686b26aac208f422536d6493f4e4953e | 3,868 | py | Python | pypureclient/pure1/Pure1_1_0/models/policy_rule.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/pure1/Pure1_1_0/models/policy_rule.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/pure1/Pure1_1_0/models/policy_rule.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
Pure1 Public REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.pure1.Pure1_1_0 import models
class PolicyRule(object):
    """Swagger-generated model describing one snapshot policy rule.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'at': 'int',
        'every': 'int',
        'keep_for': 'int',
        'time_zone': 'str'
    }

    attribute_map = {
        'at': 'at',
        'every': 'every',
        'keep_for': 'keep_for',
        'time_zone': 'time_zone'
    }

    required_args = {
    }

    def __init__(
        self,
        at=None,  # type: int
        every=None,  # type: int
        keep_for=None,  # type: int
        time_zone=None,  # type: str
    ):
        """
        Keyword args:
            at (int): Time of day to take the snapshot, in milliseconds since 00:00 in the specified `time_zone`. Only valid if `every` is set as whole days.
            every (int): How often to take snapshots, in milliseconds.
            keep_for (int): How long to keep snapshots, in milliseconds.
            time_zone (str): The time zone in which the `at` rule is applied.
        """
        # Route every provided value through __setattr__ so keys get validated.
        for name, value in (('at', at), ('every', every),
                            ('keep_for', keep_for), ('time_zone', time_zone)):
            if value is not None:
                setattr(self, name, value)

    def __setattr__(self, key, value):
        # Only attributes declared in the swagger model may be set.
        if key in self.attribute_map:
            self.__dict__[key] = value
        else:
            raise KeyError("Invalid key `{}` for `PolicyRule`".format(key))

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Unset fields are class-level Property sentinels; surface them as
        # missing attributes rather than leaking the sentinel object.
        if isinstance(value, Property):
            raise AttributeError
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _coerce(value):
            # Recursively convert nested models/lists/dicts to plain dicts.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {}
        for attr in six.iterkeys(self.swagger_types):
            if hasattr(self, attr):
                result[attr] = _coerce(getattr(self, attr))
        if issubclass(PolicyRule, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, PolicyRule) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.753846 | 157 | 0.541365 |
ace84b0ff37e66d8d8f2248d19b17b8f39cd6f1f | 3,717 | py | Python | ctpn/lib/utils/setup.py | SeventhBlue/chinese_ocr | 589231a1c95f60dbf97ad6b6759b227998f50343 | [
"Apache-2.0"
] | null | null | null | ctpn/lib/utils/setup.py | SeventhBlue/chinese_ocr | 589231a1c95f60dbf97ad6b6759b227998f50343 | [
"Apache-2.0"
] | null | null | null | ctpn/lib/utils/setup.py | SeventhBlue/chinese_ocr | 589231a1c95f60dbf97ad6b6759b227998f50343 | [
"Apache-2.0"
] | null | null | null | from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    """Locate *name* in the directories listed in a PATH-style string.

    Args:
        name: File name to look for (e.g. 'nvcc').
        path: os.pathsep-separated list of directories to search, in order.

    Returns:
        Absolute path of the first match, or None when not found.
    """
    # Renamed the loop variable: the original shadowed the builtin `dir`.
    for directory in path.split(os.pathsep):
        candidate = pjoin(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA toolkit on this machine.

    Honours $CUDA_HOME when set; otherwise searches $PATH (plus
    /usr/local/cuda/bin) for the nvcc binary.

    Returns:
        Dict with keys 'home', 'nvcc', 'include' and 'lib64' mapping to
        absolute paths.

    Raises:
        EnvironmentError: if nvcc cannot be found or an expected CUDA
            sub-path does not exist.
    """
    # first check if the CUDA_HOME env variable is in use
    if 'CUDA_HOME' in os.environ:
        home = os.environ['CUDA_HOME']
        # Bug fix: the nvcc binary lives at <home>/bin/nvcc; the original
        # pointed at the bin directory itself.
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDA_HOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # Validate every resolved path before handing the config back.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig
# Resolve the CUDA toolkit paths once at import time; the gpu_nms extension
# below depends on them.
CUDA = locate_cuda()
# numpy.get_include() is the modern spelling; very old numpy releases only
# exposed get_numpy_include(), hence the AttributeError fallback.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """Monkey-patch a distutils compiler so .cu sources go through nvcc.

    `self` is the distutils compiler object being customized; its _compile
    method is replaced with a wrapper that switches compiler and flag set
    per source-file extension.
    """
    self.src_extensions.append('.cu')
    default_compiler_so = self.compiler_so
    # NOTE(review): `super` here shadows the builtin; it simply saves the
    # original _compile so the wrapper can delegate to it.
    super = self._compile

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # NOTE(review): debug print left in — consider removing or logging.
        print(extra_postargs)
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    self._compile = _compile


# run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext that installs the nvcc-aware compiler before building."""
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Three extensions: two pure-Cython modules (bbox, cython_nms) and one mixed
# Cython/CUDA module (gpu_nms) whose .cu source is compiled by nvcc.
ext_modules = [
    Extension(
        "utils.bbox",
        ["bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension(
        "utils.cython_nms",
        ["cython_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension('utils.gpu_nms',
        ['nms_kernel.cu', 'gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        runtime_library_dirs=[CUDA['lib64']],
        # The dict form is only understood by the customized compiler above;
        # nvcc gets its own flag set for the .cu source file.
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_35',
                                     '--ptxas-options=-v',
                                     '-c',
                                     '--compiler-options',
                                     "'-fPIC'"]},
        include_dirs = [numpy_include, CUDA['include']]
    ),
]

setup(
    ext_modules=ext_modules,
    # custom_build_ext injects nvcc support into the compile step.
    cmdclass={'build_ext': custom_build_ext},
)
| 34.416667 | 90 | 0.602368 |
ace84b2b5a131717d45bba46c5eeba84299523ac | 4,383 | py | Python | cnn_quantization/tf_cnn_benchmarks/ssd_constants.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | cnn_quantization/tf_cnn_benchmarks/ssd_constants.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | cnn_quantization/tf_cnn_benchmarks/ssd_constants.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Central location for all constants related to MLPerf SSD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# ==============================================================================
# == Model =====================================================================
# ==============================================================================
IMAGE_SIZE = 300

# TODO(taylorrobie): MLPerf uses 80, but COCO documents 90. (RetinaNet uses 90)
# Update(taylorrobie): Labels > 81 show up in the pipeline. This will need to
# be resolved.
NUM_CLASSES = 81  # Including "no class". Not all COCO classes are used.

# Note: Zero is special. (Background class) CLASS_INV_MAP[0] must be zero.
CLASS_INV_MAP = (
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
    22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
    44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
    64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87,
    88, 89, 90)
# Inverse mapping: COCO category id -> contiguous class index (-1 if unused).
_MAP = {j: i for i, j in enumerate(CLASS_INV_MAP)}
CLASS_MAP = tuple(_MAP.get(i, -1) for i in range(max(CLASS_INV_MAP) + 1))

NUM_SSD_BOXES = 8732

RESNET_DEPTH = 34

"""SSD specific"""
MIN_LEVEL = 3
MAX_LEVEL = 8

FEATURE_SIZES = (38, 19, 10, 5, 3, 1)
STEPS = (8, 16, 32, 64, 100, 300)

# https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
SCALES = (21, 45, 99, 153, 207, 261, 315)
ASPECT_RATIOS = ((2,), (2, 3), (2, 3), (2, 3), (2,), (2,))
NUM_DEFAULTS = (4, 6, 6, 6, 4, 4)
NUM_DEFAULTS_BY_LEVEL = {3: 4, 4: 6, 5: 6, 6: 6, 7: 4, 8: 4}
# Box-coder variances: xy offsets and wh log-scales are divided by these.
SCALE_XY = 0.1
SCALE_HW = 0.2
BOX_CODER_SCALES = (1 / SCALE_XY, 1 / SCALE_XY, 1 / SCALE_HW, 1 / SCALE_HW)
MATCH_THRESHOLD = 0.5

# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683
NORMALIZATION_MEAN = (0.485, 0.456, 0.406)
NORMALIZATION_STD = (0.229, 0.224, 0.225)

# SSD Cropping
NUM_CROP_PASSES = 50
CROP_MIN_IOU_CHOICES = (0, 0.1, 0.3, 0.5, 0.7, 0.9)
P_NO_CROP_PER_PASS = 1 / (len(CROP_MIN_IOU_CHOICES) + 1)

# Hard example mining
NEGS_PER_POSITIVE = 3

# Batch normalization
BATCH_NORM_DECAY = 0.997
BATCH_NORM_EPSILON = 1e-4


# ==============================================================================
# == Optimizer =================================================================
# ==============================================================================
LEARNING_RATE_SCHEDULE = (
    (0, 1e-3),
    (160000, 1e-4),
    (200000, 1e-5),
)
MOMENTUM = 0.9
WEIGHT_DECAY = 5e-4


# ==============================================================================
# == Keys ======================================================================
# ==============================================================================
BOXES = "boxes"
CLASSES = "classes"
NUM_MATCHED_BOXES = "num_matched_boxes"
IMAGE = "image"
SOURCE_ID = "source_id"
RAW_SHAPE = "raw_shape"
PRED_BOXES = "pred_boxes"
PRED_SCORES = "pred_scores"


# ==============================================================================
# == Evaluation ================================================================
# ==============================================================================

# Note: This is based on a batch size of 32
# https://github.com/mlperf/reference/blob/master/single_stage_detector/ssd/train.py#L21-L37
CHECKPOINT_FREQUENCY = 20000
MAX_NUM_EVAL_BOXES = 200
OVERLAP_CRITERIA = 0.5  # Used for nonmax supression
MIN_SCORE = 0.05  # Minimum score to be considered during evaluation.
DUMMY_SCORE = -1e5  # If no boxes are matched.

ANNOTATION_FILE = "annotations/instances_val2017.json"
COCO_NUM_TRAIN_IMAGES = 118287
COCO_NUM_VAL_IMAGES = 4952
| 36.525 | 94 | 0.546429 |
ace84b67f07abe43a70ce568603433f511c6f717 | 16,810 | py | Python | instantdiscourse/test/test_server.py | simon-weber/instant-discourse | b10f7db2470b5ea54b4bca374daa6bada1a4bfbf | [
"MIT"
] | 3 | 2015-04-03T00:19:05.000Z | 2016-02-22T20:55:22.000Z | instantdiscourse/test/test_server.py | simon-weber/instant-discourse | b10f7db2470b5ea54b4bca374daa6bada1a4bfbf | [
"MIT"
] | null | null | null | instantdiscourse/test/test_server.py | simon-weber/instant-discourse | b10f7db2470b5ea54b4bca374daa6bada1a4bfbf | [
"MIT"
] | null | null | null | from collections import namedtuple
import json
import threading
from concurrent import futures
import fakeredis
from nose.plugins.attrib import attr
import mock
import psutil
import redis
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.concurrent import Future
from tornado.testing import AsyncHTTPTestCase, gen_test
import tornado.websocket
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from instantdiscourse.app import ChatNode
from instantdiscourse.server import get_app as get_id_app
from instantdiscourse.server import (
ChatHandler, custom_message_type, HashEntry
)
class SequentialClientIDMixin(object):
    """Make ChatNode return sequential ids.

    Must be mixed into a TestCase.
    """
    # Class-level counter shared by all instances; reset in setUp.
    client_id = 0

    def setUp(self):
        cls = SequentialClientIDMixin  # for brevity
        super(cls, self).setUp()

        cls.client_id = 0
        # Patch ChatNode._get_cid for the duration of the test; the patch is
        # stored on the class and torn down via addCleanup.
        cls.patch = mock.patch.object(ChatNode, '_get_cid', side_effect=self._next_client_id)
        cls.patch.__enter__()
        self.addCleanup(cls.patch.__exit__)

    def _next_client_id(self, *args, **kwargs):
        # Returns '0', '1', '2', ... across successive calls within a test.
        cls = SequentialClientIDMixin
        cid = cls.client_id
        cls.client_id += 1
        return str(cid)
class TornadoWsClientTestCase(AsyncHTTPTestCase):
    """HTTP test case with helpers for opening/closing websocket clients."""
    # adapted from:
    # https://github.com/tornadoweb/tornado/blob/26cb9b3fa67ef3282414a86743ee2e16c81913c3/tornado/test/websocket_test.py#L90

    def setUp(self):
        self.future_gen = FutureGen()
        self.futures = {}  # ws -> Future
        # Tornado calls get_app in their setUp,
        # and our get_app needs self.future_gen.
        super(TornadoWsClientTestCase, self).setUp()

    @gen.coroutine
    def connect_client(self, protocol, path):
        # Opens a client connection and records the server-side Future the
        # app generated for it, so close() can wait on it later.
        client = yield self._client_connect_impl(
            '%s://localhost:%d%s' % (protocol, self.get_http_port(), path),
        )
        self.futures[client] = self.future_gen.generated[-1]
        raise gen.Return(client)

    @gen.coroutine
    def _client_connect_impl(self, url):
        # Overridden by the Selenium subclass to connect via a browser.
        client = yield tornado.websocket.websocket_connect(url)
        raise gen.Return(client)

    @gen.coroutine
    def close(self, client):
        """Close a client and wait for the server side.

        If we don't wait here, there are sometimes leak warnings in the
        tests.

        If client.close is safe to call more than once, this is as well.
        """
        client.close()
        future = self.futures[client]
        if not future.done():
            yield future

    @gen.coroutine
    def close_all_clients(self):
        for client in self.futures:
            yield self.close(client)

    def wait_until(self, condition, interval=.1, future=None, **kwargs):
        """Return a Future that completes when condition() returns a truthy value."""
        # Could potentially backoff.
        if future is None:
            future = Future()
        result = condition(**kwargs)
        if result:
            future.set_result(result)
        else:
            # Re-poll on the ioloop after `interval`, reusing the same future.
            self.io_loop.call_later(
                interval, self.wait_until, condition,
                interval, future, **kwargs)
        return future
class FutureGen(object):
    """Infinite iterator that hands out a fresh Future on every step.

    Every Future produced is also appended to ``self.generated`` so tests
    can look up the most recently created one.
    """

    def __init__(self):
        self.generated = []

    def __iter__(self):
        return self

    def next(self):
        # The original wrapped this body in a pointless ``while True`` loop
        # that always returned on the first pass; the loop is removed.
        f = Future()
        self.generated.append(f)
        return f

    # Python 3 iterator protocol compatibility (the original was py2-only).
    __next__ = next


# (client id, websocket connection) pair describing one connected test client.
Client = namedtuple('Client', 'cid ws')
class ServerTornadoWsClientTestCase(SequentialClientIDMixin, TornadoWsClientTestCase):
    """Base case that boots the instant-discourse app against (fake) redis."""
    # App sizing for the bloom filter; small on purpose for tests.
    capacity = 4
    error_rate = .08

    def _get_redis(self):
        # Overridden by the Selenium subclass to use a real redis instance.
        return fakeredis.FakeStrictRedis()

    def get_app(self):
        redis = self._get_redis()
        ChatNode.zero_redis_filter(redis, self.capacity, self.error_rate, force=True)
        return get_id_app(
            self.get_http_port(),
            redis, self.capacity, self.error_rate, 1,
            True, '../indextemplate',
            {'tornado_port': self.get_http_port(), 'tornado_host': 'localhost'},
            self.future_gen,
        )

    def setUp(self):
        super(ServerTornadoWsClientTestCase, self).setUp()
        # Start each test from a clean redis state.
        self._app._id_node.redis.flushdb()
        self._app._id_node._overwrite_redis_filter()

    def tearDown(self):
        # We need access to the ioloop while closing; do it before Tornado's tearDown.
        self.close_all_clients(callback=self.stop)
        self.wait()
        super(ServerTornadoWsClientTestCase, self).tearDown()

    @gen.coroutine
    def _connect_new_client(self):
        # Fetch a peerjs id over HTTP, open the websocket with it, and wait
        # for the server's OPEN handshake message.
        client = AsyncHTTPClient(self.io_loop)
        res = yield client.fetch(self.get_url('/peerjs/id'))
        cid = res.body
        ws = yield self.connect_client('ws', '/peerjs?id=%s' % cid)
        res = yield ws.read_message()
        self.assertEqual(res, json.dumps({'type': 'OPEN'}))
        raise gen.Return(Client(cid, ws))
@attr('unit')
class ServerTornadoClientTests(ServerTornadoWsClientTestCase):
    """Unit-level websocket tests driven entirely by Tornado clients."""

    @gen_test
    def test_connect(self):
        client = yield self._connect_new_client()
        # close early to test close_all_clients' multiple-call safety.
        yield self.close(client.ws)

    @gen_test
    def test_get_partner(self):
        # Two clients request a partner; the first should be matched to the
        # second.
        clients = []
        for _ in xrange(2):
            c = yield self._connect_new_client()
            c.ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'get-partner',
            }))
            clients.append(c)

        res = yield clients[0].ws.read_message()
        self.assertEqual(json.loads(res)['match_cid'], clients[1].cid)

    @gen_test
    def test_successful_hash_match(self):
        clients = []
        for _ in xrange(2):
            c = yield self._connect_new_client()
            c.ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'get-partner',
            }))
            clients.append(c)

        res = yield clients[0].ws.read_message()
        self.assertEqual(json.loads(res)['match_cid'], clients[1].cid)

        # Matched partners share a single hash_counter instance.
        hash_counter = ChatHandler.cid_handlers[clients[0].cid].hash_counter
        self.assertIs(
            hash_counter,
            ChatHandler.cid_handlers[clients[1].cid].hash_counter
        )

        clients[0].ws.write_message(json.dumps({
            'type': custom_message_type,
            'subtype': 'me-hash',
            'hash': 'the hash',
        }))
        hash = HashEntry(clients[0].cid, clients[0].cid, 'the hash')
        # Wait for the hash to be matched.
        yield self.wait_until(lambda: hash_counter[hash] == 1)
        self.assertEqual(hash_counter[hash], 1)

        clients[1].ws.write_message(json.dumps({
            'type': custom_message_type,
            'subtype': 'them-hash',
            'hash': 'the hash',
        }))
        # Wait for the hash to be recorded.
        yield self.wait_until(lambda: hash not in hash_counter)
        self.assertEqual(hash_counter[hash], 0)

    @gen_test
    def test_penalty_me_first(self):
        clients = []
        for _ in xrange(2):
            c = yield self._connect_new_client()
            c.ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'get-partner',
            }))
            clients.append(c)

        res = yield clients[0].ws.read_message()
        self.assertEqual(json.loads(res)['match_cid'], clients[1].cid)

        # Two mismatched me/them hash pairs trigger a penalty for client 0.
        for _ in xrange(2):
            clients[0].ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'me-hash',
                'hash': 'penalty hash',
            }))
            clients[1].ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'them-hash',
                'hash': 'penalty hash',
            }))

        my_penalty = yield clients[0].ws.read_message()
        their_penalty = yield clients[1].ws.read_message()
        self.assertEqual(
            json.loads(my_penalty),
            {'type': custom_message_type,
             'subtype': 'you-penalty',
             'num_clients': 2,
             'duration_secs': 2})
        self.assertEqual(
            json.loads(their_penalty),
            {'type': custom_message_type,
             'subtype': 'them-penalty',
             'num_clients': 2,
             'duration_secs': 2})

    @gen_test
    def test_penalty_them_first(self):
        clients = []
        for _ in xrange(2):
            c = yield self._connect_new_client()
            c.ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'get-partner',
            }))
            clients.append(c)

        res = yield clients[0].ws.read_message()
        self.assertEqual(json.loads(res)['match_cid'], clients[1].cid)

        for _ in xrange(2):
            clients[1].ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'them-hash',
                'hash': 'penalty hash',
            }))
            # Something weird with ordering happens here.
            # If we don't spin the ioloop, client 0's me-hash
            # will be processed first.
            yield gen.moment
            clients[0].ws.write_message(json.dumps({
                'type': custom_message_type,
                'subtype': 'me-hash',
                'hash': 'penalty hash',
            }))

        my_penalty = yield clients[0].ws.read_message()
        their_penalty = yield clients[1].ws.read_message()
        self.assertEqual(
            json.loads(my_penalty),
            {'type': custom_message_type,
             'subtype': 'you-penalty',
             'num_clients': 2,
             'duration_secs': 2})
        self.assertEqual(
            json.loads(their_penalty),
            {'type': custom_message_type,
             'subtype': 'them-penalty',
             'num_clients': 2,
             'duration_secs': 2})
class ServerSeleniumTestCase(ServerTornadoWsClientTestCase):
    """Variant that drives clients through real Chrome via Selenium."""

    def _get_redis(self):
        # db 0 used by test server, currently
        return redis.StrictRedis(host='localhost', port=6379, db=1)

    def _get_driver(self):
        """Return a new WebDriver.

        Cleanup is handled automatically.
        """
        # Enables getting chrome log:
        # driver.get_log('browser') -> ['log line']
        d = DesiredCapabilities.CHROME
        d['loggingPrefs'] = {'browser': 'ALL'}

        driver = webdriver.Chrome('/opt/selenium/chromedriver-2.13',
                                  desired_capabilities=d)
        self.addCleanup(driver.quit)
        return driver

    @gen.coroutine
    def _client_connect_impl(self, url):
        # Selenium is blocking, so the page load runs on the executor thread.
        driver = self._get_driver()
        yield self.executor.submit(driver.get, url)
        raise gen.Return(driver)

    def setUp(self):
        super(ServerSeleniumTestCase, self).setUp()
        self.assertEqual(threading.active_count(), 1)
        self.assertEqual(len(ChatHandler.cid_handlers), 0)

        self.executor = futures.ThreadPoolExecutor(max_workers=1)
        self.addCleanup(self.executor.shutdown, wait=True)

    def tearDown(self):
        super(ServerSeleniumTestCase, self).tearDown()

        # All clients should have been closed, which should remove the handlers.
        self.assertEqual(len(ChatHandler.cid_handlers), 0)

        # By default, cleanups are run after tearDown finishes.
        # driver.quit should run before our check for chrome leaking, though,
        # so we run cleanups now instead.
        # There's no harm in calling it more than once.
        self.doCleanups()

        # Fail loudly (and kill the process) if any Chrome leaked past quit.
        for proc in psutil.process_iter():
            try:
                if 'chrome' in proc.name():
                    msg = "chrome resource leak detected! killing %s" % proc
                    print msg
                    proc.kill()
                    raise AssertionError(msg)
            except psutil.NoSuchProcess:
                pass
@attr('end-to-end')
class ServerSeleniumTests(ServerSeleniumTestCase):
    """End-to-end browser tests exercising the full client UI."""

    @gen_test
    def test_get_clients_initially(self):
        num_clients = 2
        for i in xrange(num_clients):
            driver = yield self.connect_client('http', '/')

            def clients_filled(driver=driver):
                # We only continue on truthy values, so we'll wait until
                # we don't see the empty string.
                return driver.find_element_by_id('num-clients').text
            client_text = yield self.wait_until(clients_filled)
            self.assertEqual(int(client_text), i + 1)

    @gen_test
    def test_single_client_gets_cid(self):
        driver = yield self.connect_client('http', '/')
        client_id = driver.find_element_by_id('pid').text
        self.assertEqual(len(ChatHandler.cid_handlers), 1)
        self.assertEqual(ChatHandler.cid_handlers[client_id].cid, client_id)

    @gen_test
    def test_n_clients_get_cids(self):
        num_clients = 2
        client_ids = {}
        for _ in xrange(num_clients):
            driver = yield self.connect_client('http', '/')
            client_ids[driver] = driver.find_element_by_id('pid').text
        self.assertEqual(len(ChatHandler.cid_handlers), num_clients)
        for client_id in client_ids.values():
            self.assertEqual(ChatHandler.cid_handlers[client_id].cid, client_id)

    @staticmethod
    def _peer_connection_established(recv_driver, requester_cid):
        # Truthy when requester's connect element shows up in recv's DOM.
        try:
            return recv_driver.find_element_by_id('connect-' + requester_cid)
        except NoSuchElementException:
            return None

    # why do these take so long? The timeouts happen on _wait_for_peer
    @gen_test(timeout=10)
    def test_two_client_chat(self):
        clients = []  # (driver, cid)

        # Connect + request-partner from one client, then the other.
        for _ in xrange(2):
            driver = yield self.connect_client('http', '/')
            driver.find_element_by_id('get-partner').click()
            clients.append((driver, driver.find_element_by_id('pid').text))

        yield self.wait_until(
            self._peer_connection_established,
            recv_driver=clients[0][0], requester_cid=clients[1][1])

        # Send a message from the first to the second.
        chatbox = clients[0][0].find_element_by_id('text')
        chatbox.send_keys("message from client 0", Keys.RETURN)

        recv_message_from = WebDriverWait(clients[1][0], 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, "from-peer"))
        )
        self.assertEqual(recv_message_from.text, 'Peer: message from client 0')
        self.assertTrue('You: message from client 0' in clients[0][0].page_source)

    @gen_test(timeout=15)
    def test_multi_client_chat(self):
        clients = []  # (driver, cid)

        # Connect + request-partner from all three.
        # The first two will get connected to eachother and the third will queue.
        for _ in xrange(3):
            driver = yield self.connect_client('http', '/')
            driver.find_element_by_id('get-partner').click()
            clients.append((driver, driver.find_element_by_id('pid').text))

        yield self.wait_until(
            self._peer_connection_established,
            recv_driver=clients[0][0], requester_cid=clients[1][1])

        # Disconnect from client 0, which disconnects 0 and 1.
        clients[0][0].find_element_by_id('close').click()
        for driver in (clients[0][0], clients[1][0]):
            with self.assertRaises(NoSuchElementException):
                driver.find_element_by_class_name('peer')

        # request-partner from 0, which should connect to 2.
        clients[0][0].find_element_by_id('get-partner').click()
        yield self.wait_until(
            self._peer_connection_established,
            recv_driver=clients[2][0], requester_cid=clients[0][1])

        # Send a message from 2 to 0.
        chatbox = clients[2][0].find_element_by_id('text')
        chatbox.send_keys("message text", Keys.RETURN)

        # This times out sometimes. Maybe I need to drain the websocket
        # messages too?
        recv_message_from = WebDriverWait(clients[0][0], 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, "from-peer"))
        )
        self.assertEqual(recv_message_from.text, 'Peer: message text')
        self.assertTrue('You: message text' in clients[2][0].page_source)
ace84b99062d2f1799adfa9fbc41ed12dc082158 | 961 | py | Python | libbeat/tests/system/test_cmd_version.py | caigy/beats | 9cf957b12f93956d2fa49428bdb01a7943989d20 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-17T17:31:41.000Z | 2022-01-17T17:31:41.000Z | libbeat/tests/system/test_cmd_version.py | caigy/beats | 9cf957b12f93956d2fa49428bdb01a7943989d20 | [
"ECL-2.0",
"Apache-2.0"
] | 26 | 2021-11-04T11:17:36.000Z | 2022-02-16T11:55:30.000Z | libbeat/tests/system/test_cmd_version.py | caigy/beats | 9cf957b12f93956d2fa49428bdb01a7943989d20 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from base import BaseTest
import logging
import os
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
class TestCommandVersion(BaseTest):
    """
    Test beat subcommands
    """

    def setUp(self):
        # NOTE(review): super(BaseTest, ...) deliberately skips BaseTest.setUp
        # and calls its parent's instead — confirm this is intentional.
        super(BaseTest, self).setUp()

        self.elasticsearch_url = self.get_elasticsearch_url()
        print("Using elasticsearch: {}".format(self.elasticsearch_url))
        self.es = self.get_elasticsearch_instance(url=self.elasticsearch_url, user='beats')
        # Quiet noisy third-party loggers during the test run.
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("elasticsearch").setLevel(logging.ERROR)

    def test_version(self):
        """
        Test version command
        """
        exit_code = self.run_beat(
            extra_args=["version"], logging_args=["-v", "-d", "*"])

        assert exit_code == 0
        assert self.log_contains("mockbeat")
        assert self.log_contains("version")
        assert self.log_contains("9.9.9")
| 27.457143 | 91 | 0.651405 |
ace84c24b70ad73ed761c7e5e3fdc235c35496a5 | 5,820 | py | Python | genmod/f_gan/f_divergence.py | shuiruge/generative_models | a1765a5ff9aeee8c0325f0c5f40b3537bb82accf | [
"MIT"
] | 2 | 2018-11-23T06:46:59.000Z | 2020-09-20T14:42:56.000Z | genmod/f_gan/f_divergence.py | shuiruge/generative_models | a1765a5ff9aeee8c0325f0c5f40b3537bb82accf | [
"MIT"
] | null | null | null | genmod/f_gan/f_divergence.py | shuiruge/generative_models | a1765a5ff9aeee8c0325f0c5f40b3537bb82accf | [
"MIT"
] | null | null | null | """
Description
-----------
Implements the abstract base class of f-divergence.
"""
import abc
import tensorflow as tf
from tfutils.monte_carlo_integral import monte_carlo_integrate
class BaseFDivergence(abc.ABC):
    r"""Abstract base class for f-divergence.

    Definition:
        ```math
        Denotes:
            $f^{\*}$: Convex function on $\mathbb{R}$, with domain
                $\textrm{dom}_{f^{\*}}$;
            $g_f$: Function $\mathbb{R} \mapsto \textrm{dom}_{f^{\*}}$, called
                "output activation function";
            $P$: The empirical distribution of the data;
            $Q$: The distribution fitting to $P$, by minimizing the f-divergence

        F-divergence $D_f \left( P \| Q \right)$ is estimated as
        \begin{equation}
            D_f := \mathbb{E}_{x \sim P} \left[ g_f(D(x)) \right] -
                   \mathbb{E}_{x \sim Q} \left[ f^{\*}( g_f(D(x)) ) \right],
        \end{equation}
        where the first term is called "discriminator part" and the second
        term (including its minus sign) the "generator part".
        ```

    Notations:
        A: The event-shape of the ambient.

    References:
        1. [Nowozin, et al. (2016)](https://arxiv.org/abs/1606.00709).

    Args:
        n_samples: Positive integer. Shall be greater than 30 as a thumb-rule
            for central limit theorem, employed by the Monte-Carlo integral
            (i.e. the E_{x~Q}[...]).
        name: String.
    """

    def __init__(self, n_samples=32, name='f_divergence'):
        self.n_samples = n_samples
        self.name = name

        # For peering: `__call__` stores the two Monte-Carlo parts here so
        # callers can inspect them separately after evaluation.
        self._peep = {}

    @abc.abstractmethod
    def output_activation(self, x):
        r"""The output activation function

        ```math
        g_f: \mathbb{R} \mapsto \textrm{dom}_{f^{\*}}
        ```

        Args:
            x: Tensor with shape `[None]`.

        Returns:
            Tensor with the same shape as `x`.
        """
        pass

    @abc.abstractmethod
    def f_star(self, x):
        r"""The Legendre-Fenchel conjugate of some given function `f`.

        Definition:
            ```math
            For $\forall f$ given, its Legendre-Fenchel conjugate is defined as
            \begin{equation}
                f^{\*}(k) := \sup_{k} \left\{ k x - f(x) \right\}.
            \end{equation}
            ```

        Args:
            x: Tensor with shape `[None]`.

        Returns:
            Tensor with the same shape as `x`.
        """
        pass

    def discriminate_part(self, data, discriminator, reuse):
        """Returns the `E_{x~P} [ g_f(D(x)) ]`.

        Args:
            data: Tensor with shape `[None] + A`.
            discriminator: Callable with the signature:
                Args:
                    ambient: Tensor with shape `[None] + A`.
                    reuse: Boolean.
                Returns:
                    Tensor with the same batch-shape as the `ambient`.
            reuse: Boolean.

        Returns:
            A scalar `MonteCarloIntegral` instance.
        """
        with tf.name_scope('discriminator_part'):
            # [B]
            integrands = self.output_activation(
                discriminator(data, reuse))
            # Average over the batch axis to approximate E_{x~P}[...].
            return monte_carlo_integrate(integrands, axes=[0])

    def generate_part(self, fake_data, discriminator, reuse):
        """Returns the `E_{x~Q} [ -f*( g_f(D(x)) ) ]`.

        Args:
            fake_data: Tensor with shape `[None] + A`.
            discriminator: Callable with the signature:
                Args:
                    ambient: Tensor with shape `[None] + A`.
                    reuse: Boolean.
                Returns:
                    Tensor with the same batch-shape as the `ambient`.
            reuse: Boolean.

        Returns:
            A scalar `MonteCarloIntegral` instance.
        """
        with tf.name_scope('generator_part'):
            # [self.n_samples]
            # Note the leading minus: the f* term enters the bound negatively.
            integrands = - self.f_star(
                self.output_activation(
                    discriminator(fake_data, reuse)))
            return monte_carlo_integrate(integrands, axes=[0])

    def __call__(self,
                 data,
                 discriminator,
                 generator,
                 reuse=tf.AUTO_REUSE):
        """
        Args:
            data: Tensor with shape `[None] + A`.
            discriminator: Callable with the signature:
                Args:
                    ambient: Tensor with shape `[None] + A`.
                    reuse: Boolean.
                Returns:
                    Tensor with the same batch-shape as the `ambient`.
            generator: Callable with signature:
                Args:
                    n_samples: Positive integer.
                    reuse: Boolean.
                Returns:
                    Tensor with shape `[n_samples] + A`.
            reuse: Boolean.

        Returns:
            A scalar `MonteCarloIntegral` instance.

        Raises:
            EventShapeError.
        """
        with tf.name_scope(self.name):
            # [self.n_samples] + E
            fake_data = generator(self.n_samples, reuse)
            # Real and generated samples must share the event-shape before
            # being fed to the same discriminator.
            self.check_same_event_shape(data, fake_data)

            discr_part = self.discriminate_part(data, discriminator, reuse)
            gen_part = self.generate_part(fake_data, discriminator, reuse)

            # Store as extra information
            self._peep['discriminate_part'] = discr_part
            self._peep['generate_part'] = gen_part

            return discr_part + gen_part

    def check_same_event_shape(self, data, ambient_samples):
        """
        Args:
            data: Tensor.
            ambient_samples: Tensor.

        Raises:
            EventShapeError: If `data` and `ambient_samples` do not share the same
                event-shape.
        """
        if get_event_shape(data) != get_event_shape(ambient_samples):
            raise EventShapeError('Data "{0}" and ambient-samples "{1}" should '
                                  'share the same event-shape.'
                                  .format(data, ambient_samples))
def get_event_shape(x):
    """Extract the event-shape of a batched tensor.

    Args:
        x: Tensor of the shape `[B] + E` where `B` is the batch_size, and `E` the
            event-shape.

    Returns:
        The event-shape `E`, as a list of positive integers.
    """
    # Drop the leading batch dimension; keep everything after it.
    _batch_size, *event_shape = shape_list(x)
    return event_shape
def shape_list(tensor):
    """Returns the static shape of `tensor` as a list.

    Args:
        tensor: Tensor.

    Returns:
        List of positive integers.
    """
    static_shape = tensor.get_shape()
    return static_shape.as_list()
class EventShapeError(Exception):
    """Raised when two tensors do not share the same event-shape."""
| 25.752212 | 77 | 0.604983 |
ace84c645355d60b42d6c98cc78e954c306656b1 | 1,226 | py | Python | scripts/performance/perf_load/perf_req_gen_schema.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 627 | 2017-07-06T12:38:08.000Z | 2022-03-30T13:18:43.000Z | scripts/performance/perf_load/perf_req_gen_schema.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 580 | 2017-06-29T17:59:57.000Z | 2022-03-29T21:37:52.000Z | scripts/performance/perf_load/perf_req_gen_schema.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 704 | 2017-06-29T17:45:34.000Z | 2022-03-30T07:08:58.000Z | import json
import libnacl
from indy import ledger, anoncreds
from perf_load.perf_utils import rawToFriendly, get_txnid_field
from perf_load.perf_req_gen import RequestGenerator
class RGSchema(RequestGenerator):
    """Request generator producing SCHEMA write requests for the ledger."""

    # Transaction types handled by this generator (SCHEMA / GET_SCHEMA).
    _req_types = ["101", "107"]

    async def _gen_req(self, submit_did, req_data):
        # Build the schema payload first, then wrap it in a ledger request.
        attr_names = json.dumps(["name", "age", "sex", "height"])
        _, schema_json = await anoncreds.issuer_create_schema(
            submit_did, req_data, "1.0", attr_names)
        return await ledger.build_schema_request(submit_did, schema_json)

    def _from_file_str_data(self, file_str):
        # Reduce the stored request JSON to its transaction id.
        return get_txnid_field(super()._from_file_str_data(file_str))
class RGGetSchema(RGSchema):
    """Request generator producing GET_SCHEMA reads for random schema ids."""

    def _rand_data(self):
        # Schema id format: <did>:<marker>:<name>:<version>, where '02' is
        # the schema marker.
        target_did = rawToFriendly(libnacl.randombytes(16))
        parts = [target_did, '02', super()._rand_data(), '1.0']
        return ':'.join(parts)

    async def _gen_req(self, submit_did, req_data):
        return await ledger.build_get_schema_request(submit_did, req_data)
| 33.135135 | 114 | 0.666395 |
ace84cc0ffb243b1049ab788dbc9374977cee56e | 3,029 | py | Python | mvt/cores/bbox/bbox_samplers/base_sampler.py | JackMing1986/MultipleVsiualTasks | 7f2bd81c41bcd41af34f6953101038201a4f7d37 | [
"MIT"
] | 2 | 2021-08-29T16:54:42.000Z | 2021-09-27T12:22:10.000Z | mvt/cores/bbox/bbox_samplers/base_sampler.py | visriv/multi-visual-tasks | 7f2bd81c41bcd41af34f6953101038201a4f7d37 | [
"MIT"
] | null | null | null | mvt/cores/bbox/bbox_samplers/base_sampler.py | visriv/multi-visual-tasks | 7f2bd81c41bcd41af34f6953101038201a4f7d37 | [
"MIT"
] | 1 | 2022-01-16T20:17:50.000Z | 2022-01-16T20:17:50.000Z | from abc import ABCMeta, abstractmethod
import torch
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
    """Base class of samplers.

    Subclasses supply the selection strategies via `_sample_pos` and
    `_sample_neg`; `sample` orchestrates the two and packages the result.

    Args:
        num (int): Total number of boxes to sample.
        pos_fraction (float): Desired fraction of positives among the samples.
        neg_pos_ub (int | float): Upper bound on the negative/positive ratio;
            a negative value (default -1) disables the bound.
        add_gt_as_proposals (bool): Whether to prepend ground-truth boxes to
            the candidate set before sampling.
    """

    def __init__(
        self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs
    ):
        self.num = num
        self.pos_fraction = pos_fraction
        self.neg_pos_ub = neg_pos_ub
        self.add_gt_as_proposals = add_gt_as_proposals
        # By default this object acts as its own positive and negative sampler;
        # subclasses may replace either.
        self.pos_sampler = self
        self.neg_sampler = self

    @abstractmethod
    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample positive samples."""
        pass

    @abstractmethod
    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative samples."""
        pass

    def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs):
        """Sample positive and negative bboxes.

        This is a simple implementation of bbox sampling given candidates,
        assigning results and ground truth bboxes.

        Args:
            assign_result (:obj:`AssignResult`): Bbox assigning results.
            bboxes (Tensor): Boxes to be sampled from.
            gt_bboxes (Tensor): Ground truth bboxes.
            gt_labels (Tensor, optional): Class labels of ground truth bboxes.

        Returns:
            :obj:`SamplingResult`: Sampling result.
        """
        # Promote a single box to a batch of one, keep only (x1, y1, x2, y2).
        if len(bboxes.shape) < 2:
            bboxes = bboxes[None, :]

        bboxes = bboxes[:, :4]

        # gt_flags marks which candidates are ground-truth boxes (1) rather
        # than proposals (0).
        gt_flags = bboxes.new_zeros((bboxes.shape[0],), dtype=torch.uint8)
        if self.add_gt_as_proposals and len(gt_bboxes) > 0:
            if gt_labels is None:
                raise ValueError(
                    "gt_labels must be given when add_gt_as_proposals is True"
                )
            # Prepend ground truths so their indices come first; keep
            # assign_result in sync.
            bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
            assign_result.add_gt_(gt_labels)
            gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
            gt_flags = torch.cat([gt_ones, gt_flags])

        num_expected_pos = int(self.num * self.pos_fraction)
        pos_inds = self.pos_sampler._sample_pos(
            assign_result, num_expected_pos, bboxes=bboxes, **kwargs
        )
        # We found that sampled indices have duplicated items occasionally.
        # (may be a bug of PyTorch)
        pos_inds = pos_inds.unique()
        num_sampled_pos = pos_inds.numel()
        # Fill the remainder of the budget with negatives, optionally capped
        # at neg_pos_ub * max(1, #positives).
        num_expected_neg = self.num - num_sampled_pos
        if self.neg_pos_ub >= 0:
            _pos = max(1, num_sampled_pos)
            neg_upper_bound = int(self.neg_pos_ub * _pos)
            if num_expected_neg > neg_upper_bound:
                num_expected_neg = neg_upper_bound
        neg_inds = self.neg_sampler._sample_neg(
            assign_result, num_expected_neg, bboxes=bboxes, **kwargs
        )
        neg_inds = neg_inds.unique()

        sampling_result = SamplingResult(
            pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags
        )
        return sampling_result
| 36.059524 | 82 | 0.62892 |
ace84d105bf5458b9f64d0df33dcaa06bff1ae74 | 179 | py | Python | src/graph_transpiler/webdnn/backend/webgpu/kernels/sqrt.py | urantialife/webdnn | dedc5da424288594cdfa605a015ddc7a3afcf2b7 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | src/graph_transpiler/webdnn/backend/webgpu/kernels/sqrt.py | urantialife/webdnn | dedc5da424288594cdfa605a015ddc7a3afcf2b7 | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/backend/webgpu/kernels/sqrt.py | urantialife/webdnn | dedc5da424288594cdfa605a015ddc7a3afcf2b7 | [
"MIT"
] | null | null | null | from webdnn.backend.webgpu.kernels.elementwise import register_elementwise_kernel
from webdnn.graph.operators.sqrt import Sqrt
# Register the Sqrt operator with the WebGPU backend, computing y = sqrt(x0)
# elementwise.
register_elementwise_kernel(Sqrt, "y = sqrt(x0);")
| 35.8 | 81 | 0.837989 |
ace84d2ad6cb2fc6ad6dce862c1770189b1859a4 | 3,413 | py | Python | example.py | gabrielasuchopar/nasbench | cb2d12cf2905220c4f59fc1811e567742e5fed47 | [
"Apache-2.0"
] | null | null | null | example.py | gabrielasuchopar/nasbench | cb2d12cf2905220c4f59fc1811e567742e5fed47 | [
"Apache-2.0"
] | null | null | null | example.py | gabrielasuchopar/nasbench | cb2d12cf2905220c4f59fc1811e567742e5fed47 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runnable example, as shown in the README.md."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from nasbench import api
# Replace this string with the path to the downloaded nasbench.tfrecord before
# executing.
NASBENCH_TFRECORD = './nasbench_full.tfrecord'

# Operation labels used at the vertices of a NASBench cell specification.
INPUT = 'input'
OUTPUT = 'output'
CONV1X1 = 'conv1x1-bn-relu'
CONV3X3 = 'conv3x3-bn-relu'
MAXPOOL3X3 = 'maxpool3x3'
def main(argv):
  """Demonstrate querying and iterating the NASBench dataset."""
  del argv  # Unused

  # Load the data from file (this will take some time)
  nasbench = api.NASBench(NASBENCH_TFRECORD)

  # Create an Inception-like module (5x5 convolution replaced with two 3x3
  # convolutions).
  model_spec = api.ModelSpec(
      # Adjacency matrix of the module
      matrix=[[0, 1, 1, 1, 0, 1, 0],    # input layer
              [0, 0, 0, 0, 0, 0, 1],    # 1x1 conv
              [0, 0, 0, 0, 0, 0, 1],    # 3x3 conv
              [0, 0, 0, 0, 1, 0, 0],    # 5x5 conv (replaced by two 3x3's)
              [0, 0, 0, 0, 0, 0, 1],    # 5x5 conv (replaced by two 3x3's)
              [0, 0, 0, 0, 0, 0, 1],    # 3x3 max-pool
              [0, 0, 0, 0, 0, 0, 0]],   # output layer
      # Operations at the vertices of the module, matches order of matrix
      ops=[INPUT, CONV1X1, CONV3X3, CONV3X3, CONV3X3, MAXPOOL3X3, OUTPUT])

  # Query this model from dataset, returns a dictionary containing the metrics
  # associated with this model.
  print('Querying an Inception-like model.')
  data = nasbench.query(model_spec)
  print(data)
  # Querying increments the budget counters.
  print(nasbench.get_budget_counters())  # prints (total time, total epochs)

  # Get all metrics (all epoch lengths, all repeats) associated with this
  # model_spec. This should be used for dataset analysis and NOT for
  # benchmarking algorithms (does not increment budget counters).
  print('\nGetting all metrics for the same Inception-like model.')
  fixed_metrics, computed_metrics = nasbench.get_metrics_from_spec(model_spec)
  print(fixed_metrics)
  for epochs in nasbench.valid_epochs:
    for repeat_index in range(len(computed_metrics[epochs])):
      data_point = computed_metrics[epochs][repeat_index]
      print('Epochs trained %d, repeat number: %d' % (epochs, repeat_index + 1))
      print(data_point)

  # Iterate through unique models in the dataset. Models are unqiuely identified
  # by a hash.
  print('\nIterating over unique models in the dataset.')
  for unique_hash in nasbench.hash_iterator():
    fixed_metrics, computed_metrics = nasbench.get_metrics_from_hash(
        unique_hash)
    print(fixed_metrics)

    # For demo purposes, break here instead of iterating through whole set.
    break
# If you are passing command line flags to modify the default config values, you
# must use app.run(main)
# Entry point: absl parses command-line flags before dispatching to main.
if __name__ == '__main__':
  app.run(main)
| 37.505495 | 80 | 0.702608 |
ace84d8dc414d098834f55ba437b7e62a316de73 | 2,015 | py | Python | apps/useradmin/src/useradmin/urls.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | apps/useradmin/src/useradmin/urls.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | apps/useradmin/src/useradmin/urls.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
from desktop.lib.django_util import get_username_re_rule, get_groupname_re_rule
# Regex fragments matching valid user and group names inside URL patterns.
username_re = get_username_re_rule()
groupname_re = get_groupname_re_rule()

# Useradmin HTML views: user, group, permission and LDAP management pages.
urlpatterns = patterns('useradmin.views',
  url(r'^$', 'list_users'),
  url(r'^users/?$', 'list_users'),
  url(r'^groups/?$', 'list_groups'),
  url(r'^permissions/?$', 'list_permissions'),
  url(r'^configurations/?$', 'list_configurations'),
  url(r'^users/edit/(?P<username>%s)$' % (username_re,), 'edit_user'),
  url(r'^view_user/(?P<username>%s)$' % (username_re,), 'view_user'),
  url(r'^users/add_ldap_users$', 'add_ldap_users'),
  url(r'^users/add_ldap_groups$', 'add_ldap_groups'),
  url(r'^users/sync_ldap_users_groups$', 'sync_ldap_users_groups'),
  url(r'^groups/edit/(?P<name>%s)$' % (groupname_re,), 'edit_group'),
  url(r'^permissions/edit/(?P<app>.+?)/(?P<priv>.+?)/?$', 'edit_permission'),
  url(r'^users/new$', 'edit_user', name="useradmin.new"),
  url(r'^groups/new$', 'edit_group', name="useradmin.new_group"),
  url(r'^users/delete', 'delete_user'),
  url(r'^groups/delete$', 'delete_group'),
)

# JSON API endpoints served by useradmin.api.
urlpatterns += patterns('useradmin.api',
  url(r'^api/get_users/?', 'get_users', name='api_get_users'),
)
| 42.87234 | 79 | 0.716129 |
ace84db9265327a9623def5cc227befbce93d6e3 | 3,642 | py | Python | all/memory/advantage.py | michalgregor/autonomous-learning-library | 24ed3efd4a96c468eb3313438dc4d937b37c9d60 | [
"MIT"
] | null | null | null | all/memory/advantage.py | michalgregor/autonomous-learning-library | 24ed3efd4a96c468eb3313438dc4d937b37c9d60 | [
"MIT"
] | null | null | null | all/memory/advantage.py | michalgregor/autonomous-learning-library | 24ed3efd4a96c468eb3313438dc4d937b37c9d60 | [
"MIT"
] | null | null | null | import torch
from all.core import State
class NStepAdvantageBuffer:
    """Rollout buffer that accumulates `n_steps` batches of transitions from
    `n_envs` parallel environments and computes n-step advantage estimates.

    Args:
        v: State-value approximation (provides ``eval``/``target`` heads).
        features: Feature network applied to states before ``v``.
        n_steps (int): Rollout length per advantage computation.
        n_envs (int): Number of parallel environments per stored batch.
        discount_factor (float): Per-step discount ``gamma``.
    """

    def __init__(self, v, features, n_steps, n_envs, discount_factor=1):
        self.v = v
        self.features = features
        self.n_steps = n_steps
        self.n_envs = n_envs
        self.gamma = discount_factor
        # Time-major buffers: one entry per stored step, each holding a batch
        # of n_envs states/actions/rewards.
        self._states = []
        self._actions = []
        self._rewards = []

    def __len__(self):
        # Total number of stored transitions across all environments.
        return len(self._states) * self.n_envs

    def store(self, states, actions, rewards):
        """Append one batch of per-environment transitions.

        Raises:
            Exception: If the buffer already holds ``n_steps`` batches.
        """
        if states is None:
            return
        if not self._states:
            self._states = [states]
            self._actions = [actions]
            self._rewards = [rewards]
        elif len(self._states) < self.n_steps:
            self._states.append(states)
            self._actions.append(actions)
            self._rewards.append(rewards)
        else:
            raise Exception("Buffer length exceeded: " + str(self.n_steps))

    def advantages(self, states):
        """Close the rollout with the bootstrap `states` and return the
        flattened ``(states, actions, advantages)`` tuple; clears the buffer.

        Raises:
            Exception: If fewer than ``n_steps * n_envs`` transitions stored.
        """
        if len(self) < self.n_steps * self.n_envs:
            raise Exception("Not enough states received!")
        self._states.append(states)
        rewards, lengths = self._compute_returns()
        states, actions, next_states = self._summarize_transitions()
        advantages = self._compute_advantages(states, rewards, next_states, lengths)
        self._clear_buffers()

        return (
            states,
            actions,
            advantages
        )

    def _compute_returns(self):
        # Accumulate discounted returns backwards in time; the next state's
        # mask zeroes the running return/length at episode boundaries.
        sample_returns = torch.zeros(
            (self.n_steps, self.n_envs),
            device=self._rewards[0].device
        )
        sample_lengths = torch.zeros(
            (self.n_steps, self.n_envs),
            device=self._rewards[0].device
        )
        current_returns = self._rewards[0] * 0
        current_lengths = current_returns.clone()
        for i in range(self.n_steps):
            t = self.n_steps - 1 - i
            mask = self._states[t + 1].mask.float()
            current_returns = (
                self._rewards[t] + self.gamma * current_returns * mask
            )
            # Lengths track how many steps each return actually spans, used
            # as the discount exponent for the bootstrap value.
            current_lengths = (
                1 + current_lengths * mask
            )
            sample_returns[t] = current_returns
            sample_lengths[t] = current_lengths

        return sample_returns, sample_lengths

    def _summarize_transitions(self):
        # Flatten the (time, env) buffers into flat lists indexed by
        # t * n_envs + e, pairing every state with its bootstrap next_state.
        sample_n = self.n_envs * self.n_steps
        sample_states = [None] * sample_n
        sample_actions = [None] * sample_n
        sample_next_states = [None] * sample_n

        for e in range(self.n_envs):
            next_state = self._states[self.n_steps][e]
            # Walk backwards in time: once a state with mask == 0 is found,
            # earlier transitions in this env use it as their bootstrap state.
            for i in range(self.n_steps):
                t = self.n_steps - 1 - i
                idx = t * self.n_envs + e
                state = self._states[t][e]
                action = self._actions[t][e]

                sample_states[idx] = state
                sample_actions[idx] = action
                sample_next_states[idx] = next_state

                if not state.mask:
                    next_state = state

        return (
            State.from_list(sample_states),
            torch.stack(sample_actions),
            State.from_list(sample_next_states)
        )

    def _compute_advantages(self, states, rewards, next_states, lengths):
        # A(s) = r + gamma^len * V_target(s') - V(s), flattened over the
        # (time, env) sample axis.
        return (
            rewards.view(-1)
            + (self.gamma ** lengths.view(-1))
            * self.v.target(self.features.target(next_states)).view(-1)
            - self.v.eval(self.features.eval(states)).view(-1)
        )

    def _clear_buffers(self):
        self._states = []
        self._actions = []
        self._rewards = []
| 32.517857 | 84 | 0.560406 |
ace84effbd2d7541c78233790e4f5b92aecef539 | 3,231 | py | Python | indico/util/rules_test.py | yamiacat/indico | 754c02cd7cd25bf1eab0ca5f497eb24b135dd51c | [
"MIT"
] | null | null | null | indico/util/rules_test.py | yamiacat/indico | 754c02cd7cd25bf1eab0ca5f497eb24b135dd51c | [
"MIT"
] | null | null | null | indico/util/rules_test.py | yamiacat/indico | 754c02cd7cd25bf1eab0ca5f497eb24b135dd51c | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import OrderedDict
import pytest
from indico.core import signals
from indico.util.rules import Condition, check_rule, get_conditions, get_missing_conditions
class TestCondition(Condition):
    """Shared base for the fake conditions below; never the "none" value."""

    @classmethod
    def is_none(cls, **kwargs):
        # By default no value counts as "none"; FooCondition overrides this.
        return False
class ActionCondition(TestCondition):
    """Required condition matching the performed action ('add' or 'del')."""

    name = 'action'
    description = "The action performed"
    required = True

    @classmethod
    def get_available_values(cls, **kwargs):
        choices = [('add', 'added'), ('del', 'deleted')]
        return OrderedDict(choices)

    @classmethod
    def check(cls, values, action, **kwargs):
        return action in values
class FooCondition(TestCondition):
    """Optional condition on the integer ``foo`` value (42 counts as none)."""

    name = 'foo'
    description = "The foo value"
    required = False

    @classmethod
    def get_available_values(cls, **kwargs):
        pairs = [(1, '1'), (2, '2'), (3, '3')]
        return OrderedDict(pairs)

    @classmethod
    def check(cls, values, foo, **kwargs):
        return foo in values

    @classmethod
    def is_none(cls, foo, **kwargs):
        # 42 is the designated "none" sentinel for foo in these tests.
        return foo == 42
class BarCondition(TestCondition):
    """Optional condition on the string ``bar`` value."""

    name = 'bar'
    description = "The bar value"
    required = False

    @classmethod
    def get_available_values(cls, **kwargs):
        # Order does not matter here, hence the plain dict.
        return {letter: letter for letter in 'abc'}

    @classmethod
    def check(cls, values, bar, **kwargs):
        return bar in values
def _get_test_conditions(sender, **kwargs):
    """Signal handler yielding the fake condition classes for the tests."""
    yield from (ActionCondition, FooCondition, BarCondition)
@pytest.fixture(autouse=True)
def _register_test_rules():
    # Register the fake conditions under the 'test' context for every test in
    # this module; the signal disconnects automatically on fixture teardown.
    with signals.get_conditions.connected_to(_get_test_conditions, sender='test'):
        yield
def test_get_rule():
    # All three registered conditions are exposed, keyed by their `name`.
    assert get_conditions('test') == {'action': ActionCondition, 'foo': FooCondition, 'bar': BarCondition}
def test_get_missing_rules():
    # Only 'action' is required; it is reported when absent or set to None.
    assert get_missing_conditions('test', {'action': ['test']}) == set()
    assert get_missing_conditions('test', {'action': None}) == {'action'}
    assert get_missing_conditions('test', {}) == {'action'}
@pytest.mark.parametrize(('rule', 'kwargs', 'expected'), (
    # required rule missing
    ({}, {'action': 'add', 'foo': 1, 'bar': 'a'}, False),
    # match "any" value
    ({'action': ['add']}, {'action': 'add', 'foo': 1}, True),
    # match "none" value
    ({'action': ['add'], 'foo': []}, {'action': 'add', 'foo': 42}, True),
    ({'action': ['add'], 'foo': []}, {'action': 'add', 'foo': 1}, False),
    # no match
    ({'action': ['del']}, {'action': 'add', 'foo': 1, 'bar': 'a'}, False),
    # invalid value
    ({'action': ['add'], 'foo': [4]}, {'action': 'add', 'foo': 4, 'bar': 'a'}, False),
    # invalid + valid value
    ({'action': ['add'], 'foo': [2]}, {'action': 'add', 'foo': 3, 'bar': 'a'}, False),
    # valid value
    ({'action': ['add'], 'foo': [3]}, {'action': 'add', 'foo': 3, 'bar': 'a'}, True),
    # valid values
    ({'action': ['add'], 'foo': [2, 3]}, {'action': 'add', 'foo': 3, 'bar': 'a'}, True),
))
def test_check_rules(rule, kwargs, expected):
    # `rule` maps condition names to accepted values; `kwargs` is the event
    # being evaluated against the registered 'test' conditions.
    assert check_rule('test', rule, **kwargs) == expected
| 29.108108 | 106 | 0.607861 |
ace8502a06e907c33f797f525e65bb0e3cc85f48 | 718 | py | Python | ionosenterprise/__init__.py | ionos-cloud/ionos-enterprise-sdk-python | 6b601990098ab36289a251406fb093489b647f1d | [
"Apache-2.0"
] | 6 | 2015-04-16T11:43:27.000Z | 2019-04-10T10:47:57.000Z | ionosenterprise/__init__.py | ionos-cloud/ionos-enterprise-sdk-python | 6b601990098ab36289a251406fb093489b647f1d | [
"Apache-2.0"
] | 40 | 2015-05-26T15:24:14.000Z | 2018-12-05T20:17:28.000Z | ionosenterprise/__init__.py | ionos-cloud/ionos-enterprise-sdk-python | 6b601990098ab36289a251406fb093489b647f1d | [
"Apache-2.0"
] | 11 | 2015-04-27T16:40:36.000Z | 2018-12-21T09:35:32.000Z | # Copyright 2015-2019 IONOS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ionos Enterprise API Client Library for Python"""
__version__ = '5.5.1'

# Default IONOS CloudAPI endpoint and API version targeted by this client.
API_HOST = 'https://api.ionos.com/cloudapi/v5'
API_VERSION = '5.0'
| 35.9 | 74 | 0.750696 |
ace85091f7494f0cb067376dfd60dc12da3ec8d8 | 82 | py | Python | testdata/hello.py | rickstaa/black-action | 752e8a23953b8af9316f42ddae11417830238fbc | [
"MIT"
] | null | null | null | testdata/hello.py | rickstaa/black-action | 752e8a23953b8af9316f42ddae11417830238fbc | [
"MIT"
] | null | null | null | testdata/hello.py | rickstaa/black-action | 752e8a23953b8af9316f42ddae11417830238fbc | [
"MIT"
] | null | null | null | # This file doesn't need formatting, so Black should skip it
# Print a greeting to stdout.
print("hello world")
| 27.333333 | 60 | 0.756098 |
ace8517438ff7e4ef893817ec0b78d6e65217ac6 | 58,320 | py | Python | sarkas/processes.py | pwessels-uhh/sarkas | 78fb9f8106ed6b15fb67c22afea09593fed01730 | [
"MIT"
] | null | null | null | sarkas/processes.py | pwessels-uhh/sarkas | 78fb9f8106ed6b15fb67c22afea09593fed01730 | [
"MIT"
] | 1 | 2022-02-18T00:32:25.000Z | 2022-02-18T00:32:25.000Z | sarkas/processes.py | pwessels-uhh/sarkas | 78fb9f8106ed6b15fb67c22afea09593fed01730 | [
"MIT"
] | null | null | null | """
Module handling stages of an MD run: PreProcessing, Simulation, PostProcessing.
"""
import numpy as np
import copy as py_copy
from numba import njit
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import os
# Sarkas modules
from sarkas.utilities.io import InputOutput
from sarkas.utilities.timing import SarkasTimer
from sarkas.potentials.core import Potential
from sarkas.time_evolution.integrators import Integrator
from sarkas.time_evolution.thermostats import Thermostat
from sarkas.core import Particles, Parameters, Species
import sarkas.tools.observables as sk_obs
class Process:
"""Stage of a Molecular Dynamics simulation. This is the Parent class for PreProcess, Simulation, and PostProcess.
Parameters
----------
input_file : str
Path to the YAML input file.
Attributes
----------
potential : sarkas.potential.base.Potential
Class handling the interaction between particles.
integrator: sarkas.time_evolution.integrators.Integrator
Class handling the integrator.
thermostat: sarkas.time_evolution.thermostats.Thermostat
Class handling the equilibration thermostat.
particles: sarkas.core.Particles
Class handling particles properties.
parameters: sarkas.core.Parameters
Class handling simulation's parameters.
species: list
List of :meth:`sarkas.core.Species` classes.
input_file: str
Path to YAML input file.
timer: sarkas.utilities.timing.SarkasTimer
Class handling the timing of processes.
io: sarkas.utilities.io.InputOutput
Class handling the IO in Sarkas.
"""
def __init__(self, input_file: str = None):
self.potential = Potential()
self.integrator = Integrator()
self.thermostat = Thermostat()
self.parameters = Parameters()
self.particles = Particles()
self.species = []
self.input_file = input_file if input_file else None
self.timer = SarkasTimer()
self.io = InputOutput(process=self.__name__)
def common_parser(self, filename: str = None) -> None:
"""
Parse simulation parameters from YAML file.
Parameters
----------
filename: str
Input YAML file
"""
if filename:
self.input_file = filename
dics = self.io.from_yaml(self.input_file)
for lkey in dics:
if lkey == "Particles":
for species in dics["Particles"]:
spec = Species(species["Species"])
self.species.append(spec)
if lkey == "Potential":
self.potential.from_dict(dics[lkey])
if lkey == "Thermostat":
self.thermostat.from_dict(dics[lkey])
if lkey == "Integrator":
self.integrator.from_dict(dics[lkey])
if lkey == "Parameters":
self.parameters.from_dict(dics[lkey])
self.observables_list = []
# This is not needed in the case of process = simulation
for observable in dics['Observables']:
for key, sub_dict in observable.items():
if key == 'RadialDistributionFunction':
self.observables_list.append('rdf')
self.rdf = sk_obs.RadialDistributionFunction()
if sub_dict:
self.rdf.from_dict(sub_dict)
if key == 'HermiteCoefficients':
self.hc = sk_obs.HermiteCoefficients()
self.hc.from_dict(sub_dict)
if key == 'Thermodynamics':
self.therm = sk_obs.Thermodynamics()
self.therm.from_dict(sub_dict)
self.observables_list.append('therm')
if key == 'DynamicStructureFactor':
self.observables_list.append('dsf')
self.dsf = sk_obs.DynamicStructureFactor()
if sub_dict:
self.dsf.from_dict(sub_dict)
if key == 'CurrentCorrelationFunction':
self.observables_list.append('ccf')
self.ccf = sk_obs.CurrentCorrelationFunction()
if sub_dict:
self.ccf.from_dict(sub_dict)
if key == 'StaticStructureFactor':
self.observables_list.append('ssf')
self.ssf = sk_obs.StaticStructureFactor()
if sub_dict:
self.ssf.from_dict(sub_dict)
if key == 'VelocityAutoCorrelationFunction':
self.observables_list.append('vacf')
self.vacf = sk_obs.VelocityAutoCorrelationFunction()
if sub_dict:
self.vacf.from_dict(sub_dict)
if key == 'VelocityDistribution':
self.observables_list.append('vd')
self.vm = sk_obs.VelocityDistribution()
if sub_dict:
self.vm.from_dict(sub_dict)
if key == 'ElectricCurrent':
self.observables_list.append('ec')
self.ec = sk_obs.ElectricCurrent()
if sub_dict:
self.ec.from_dict(sub_dict)
if key == 'DiffusionFlux':
self.observables_list.append('diff_flux')
self.diff_flux = sk_obs.DiffusionFlux()
if sub_dict:
self.diff_flux.from_dict(sub_dict)
if 'TransportCoefficients' in dics.keys():
self.transport_dict = dics["TransportCoefficients"].copy()
    def initialization(self):
        """Initialize all classes.

        Order matters: IO directories are created first, then parameters
        absorb the IO attributes (needed for post-processing) and general
        info, then potential, thermostat, integrator and particles are set
        up. Finally checkpoints are prepared and a summary plus timing
        information is printed.
        """
        # initialize the directories and filenames
        self.io.setup()

        # Copy relevant subclasses attributes into parameters class.
        # This is needed for post-processing.

        # Update parameters' dictionary with filenames and directories
        self.parameters.from_dict(self.io.__dict__)

        # save some general info
        self.parameters.potential_type = self.potential.type
        self.parameters.cutoff_radius = self.potential.rc
        self.parameters.integrator = self.integrator.type
        self.parameters.thermostat = self.thermostat.type

        # Copy some integrator parameters if not already defined
        if not hasattr(self.parameters, 'dt'):
            self.parameters.dt = self.integrator.dt
        if not hasattr(self.parameters, 'equilibration_steps'):
            self.parameters.equilibration_steps = self.integrator.equilibration_steps
        if not hasattr(self.parameters, 'eq_dump_step'):
            self.parameters.eq_dump_step = self.integrator.eq_dump_step
        if not hasattr(self.parameters, 'production_steps'):
            self.parameters.production_steps = self.integrator.production_steps
        if not hasattr(self.parameters, 'prod_dump_step'):
            self.parameters.prod_dump_step = self.integrator.prod_dump_step

        # Check for magnetization phase
        if self.integrator.electrostatic_equilibration:
            self.parameters.electrostatic_equilibration = True
            if not hasattr(self.parameters, 'mag_dump_step'):
                self.parameters.mag_dump_step = self.integrator.mag_dump_step
            if not hasattr(self.parameters, 'magnetization_steps'):
                self.parameters.magnetization_steps = self.integrator.magnetization_steps

        self.parameters.setup(self.species)

        # Time each setup stage for the summary below.
        t0 = self.timer.current()
        self.potential.setup(self.parameters)
        time_pot = self.timer.current()

        self.thermostat.setup(self.parameters)
        self.integrator.setup(self.parameters, self.thermostat, self.potential)
        self.particles.setup(self.parameters, self.species)
        time_ptcls = self.timer.current()

        # For restart and backups.
        self.io.setup_checkpoint(self.parameters, self.species)
        self.io.save_pickle(self)

        # Print Process summary to file and screen
        self.io.simulation_summary(self)
        time_end = self.timer.current()

        # Print timing
        self.io.time_stamp("Potential Initialization", self.timer.time_division(time_end - t0))
        self.io.time_stamp("Particles Initialization", self.timer.time_division(time_ptcls - time_pot))
        self.io.time_stamp("Total Simulation Initialization", self.timer.time_division(time_end - t0))
def setup(self, read_yaml=False, other_inputs=None):
"""Setup simulations' parameters and io subclasses.
Parameters
----------
read_yaml: bool
Flag for reading YAML input file. Default = False.
other_inputs: dict (optional)
Dictionary with additional simulations options.
"""
if read_yaml:
self.common_parser()
if other_inputs:
assert isinstance(other_inputs, dict), "Wrong input type. other_inputs should be a nested dictionary"
for class_name, class_attr in other_inputs.items():
if class_name not in ['Particles', 'Obervables']:
self.__dict__[class_name.lower()].__dict__.update(class_attr)
else:
for sp, species in enumerate(other_inputs["Particles"]):
spec = Species(species["Species"])
self.species[sp].__dict__.update(spec.__dict__)
if class_name == 'Observables':
for observable in class_attr:
for key, sub_dict in observable.items():
if key == 'RadialDistributionFunction':
self.rdf = sk_obs.RadialDistributionFunction()
self.rdf.from_dict(sub_dict)
if key == 'HermiteCoefficients':
self.hc = sk_obs.HermiteCoefficients()
self.hc.from_dict(sub_dict)
if key == 'Thermodynamics':
self.therm = sk_obs.Thermodynamics()
self.therm.from_dict(sub_dict)
if key == 'DynamicStructureFactor':
self.dsf = sk_obs.DynamicStructureFactor()
if sub_dict:
self.dsf.from_dict(sub_dict)
if key == 'CurrentCorrelationFunction':
self.ccf = sk_obs.CurrentCorrelationFunction()
if sub_dict:
self.ccf.from_dict(sub_dict)
if key == 'StaticStructureFactor':
self.ssf = sk_obs.StaticStructureFactor()
if sub_dict:
self.ssf.from_dict(sub_dict)
if key == 'VelocityAutoCorrelationFunction':
self.vacf = sk_obs.VelocityAutoCorrelationFunction()
if sub_dict:
self.vacf.from_dict(sub_dict)
if key == 'VelocityMoments':
self.vm = sk_obs.VelocityMoments()
if sub_dict:
self.vm.from_dict(sub_dict)
if key == 'ElectricCurrent':
self.ec = sk_obs.ElectricCurrent()
if sub_dict:
self.ec.from_dict(sub_dict)
if self.__name__ == 'postprocessing':
# Create the file paths without creating directories and redefining io attributes
self.io.create_file_paths()
# Read previouly stored files
self.io.read_pickle(self)
# Print parameters to log file
self.io.simulation_summary(self)
# Initialize the observable classes
# for obs in self.observables_list:
# if obs in self.__dict__.keys():
# self.__dict__[obs].setup(self.parameters)
else:
self.initialization()
if self.parameters.plot_style:
plt.style.use(self.parameters.plot_style)
class PostProcess(Process):
    """
    Class handling the post-processing stage of a simulation.

    Parameters
    ----------
    input_file : str
        Path to the YAML input file.
    """

    def __init__(self, input_file: str = None):
        # Stage tag; Process.setup() checks it to pick the post-processing path.
        self.__name__ = 'postprocessing'
        super().__init__(input_file)

    def setup_from_simulation(self, simulation):
        """
        Setup postprocess' subclasses by (shallow) copying them from simulation object.

        Parameters
        ----------
        simulation: sarkas.core.processes.Simulation
            Simulation object
        """
        # Shallow-copy every sub-component so this object can be tweaked
        # without mutating the originating simulation.
        for attr_name in ('parameters', 'integrator', 'potential', 'species', 'thermostat', 'io'):
            setattr(self, attr_name, py_copy.copy(getattr(simulation, attr_name)))

    def run(self):
        """Calculate all the observables from the YAML input file."""
        for obs_name in self.observables_list:
            if obs_name not in self.__dict__:
                continue
            observable = self.__dict__[obs_name]
            observable.setup(self.parameters)
            if obs_name == 'therm':
                self.therm.temp_energy_plot(self)
            else:
                self.io.postprocess_info(self, write_to_file=True, observable=obs_name)
            observable.compute()

        if hasattr(self, 'transport_dict'):
            from sarkas.tools.transport import TransportCoefficient as TC
            # Map the lower-cased YAML keys onto their calculators.
            calculators = {
                'diffusion': TC.diffusion,
                'interdiffusion': TC.interdiffusion,
                'viscosity': TC.viscosity,
                'electricalconductivity': TC.electrical_conductivity,
            }
            for coeff in self.transport_dict:
                for key, coeff_kwargs in coeff.items():
                    calc = calculators.get(key.lower())
                    if calc is not None:
                        calc(self.parameters, **coeff_kwargs)
class PreProcess(Process):
    """
    Wrapper class handling the estimation of time and best parameters of a simulation.

    Parameters
    ----------
    input_file : str
        Path to the YAML input file.

    Attributes
    ----------
    loops: int
        Number of timesteps to run for time and size estimates. Default = 10

    estimate: bool
        Run an estimate for the best PPPM parameters in the simulation. Default=False.

    pm_meshes: numpy.ndarray
        Array of mesh sizes used in the PPPM parameters estimation.

    pp_cells: numpy.ndarray
        Array of simulations box cells used in the PPPM parameters estimation.

    kappa: float
        Screening parameter. Calculated from :meth:`sarkas.potentials.core.Potential.matrix`.
    """

    def __init__(self, input_file: str = None):
        self.__name__ = 'preprocessing'
        self.loops = 10
        self.estimate = False
        # Mesh sizes to scan: 12 power-of-two-spaced points in [2**3, 2**7].
        self.pm_meshes = np.logspace(3, 7, 12, base=2, dtype=int)
        # Number of PP cells per box side to scan.
        self.pp_cells = np.arange(3, 16, dtype=int)
        self.kappa = None
        super().__init__(input_file)

    def green_function_timer(self):
        """Time the PPPM potential setup (Green's function computation)."""
        self.timer.start()
        self.potential.pppm_setup(self.parameters)
        return self.timer.stop()

    def run(self,
            loops: int = None,
            timing: bool = True,
            timing_study: bool = False,
            pppm_estimate: bool = False,
            postprocessing: bool = False,
            remove: bool = False):
        """
        Estimate the time of the simulation and best parameters if wanted.

        Parameters
        ----------
        loops : int
            Number of loops over which to average the acceleration calculation.
            Note that the number of timestep over which to averages is three times this value.
            Example: loops = 5, acceleration is averaged over 5 loops, while full time step over 15 loops.

        timing : bool
            Flag for estimating simulation times. Default =True.

        timing_study : bool
            Flag for estimating time for simulation parameters.

        pppm_estimate : bool
            Flag for showing the force error plots in case of pppm algorithm.

        postprocessing : bool
            Flag for calculating Post processing parameters.

        remove : bool
            Flag for removing energy files and dumps created during times estimation. Default = False.
        """
        plt.close('all')
        self.pppm_plots_dir = os.path.join(self.io.preprocessing_dir, 'PPPM_Plots')
        if not os.path.exists(self.pppm_plots_dir):
            os.mkdir(self.pppm_plots_dir)
        # Set the screening parameter
        self.kappa = self.potential.matrix[1, 0, 0] if self.potential.type == "Yukawa" else 0.0
        if loops:
            # +1: the first loop includes numba compilation time and is
            # excluded from the averages (see time_acceleration).
            self.loops = loops + 1
        if timing:
            self.io.preprocess_timing("header", [0, 0, 0, 0, 0, 0], 0)
            if self.potential.pppm_on:
                green_time = self.timer.time_division(self.green_function_timer())
                self.io.preprocess_timing("GF", green_time, 0)
            self.time_acceleration()
            self.time_integrator_loop()
            # Estimate size of dump folders.
            # Grab one file from the equilibration dump directory and get its size.
            eq_dump_size = os.stat(os.path.join(self.io.eq_dump_dir, os.listdir(self.io.eq_dump_dir)[0])).st_size
            eq_dump_fldr_size = eq_dump_size * (self.integrator.equilibration_steps / self.integrator.eq_dump_step)
            # Grab one file from the production dump directory and get its size.
            # BUGFIX: previously sampled the equilibration dump directory.
            prod_dump_size = os.stat(
                os.path.join(self.io.prod_dump_dir, os.listdir(self.io.prod_dump_dir)[0])).st_size
            prod_dump_fldr_size = prod_dump_size * (self.integrator.production_steps / self.integrator.prod_dump_step)
            # Prepare arguments to pass for print out
            sizes = np.array([[eq_dump_size, eq_dump_fldr_size],
                              [prod_dump_size, prod_dump_fldr_size]])
            # Check for magnetization phase
            if self.integrator.electrostatic_equilibration:
                dump = self.integrator.mag_dump_step
                mag_dump_size = os.stat(os.path.join(self.io.mag_dump_dir, 'checkpoint_' + str(dump) + '.npz')).st_size
                mag_dump_fldr_size = mag_dump_size * (
                        self.integrator.magnetization_steps / self.integrator.mag_dump_step)
                sizes = np.array([[eq_dump_size, eq_dump_fldr_size],
                                  [prod_dump_size, prod_dump_fldr_size],
                                  [mag_dump_size, mag_dump_fldr_size]])
            self.io.preprocess_sizing(sizes)

            if remove:
                # Delete the energy files created during the estimation runs
                os.remove(self.io.eq_energy_filename)
                os.remove(self.io.prod_energy_filename)
                # Delete dumps created during the estimation runs
                for npz in os.listdir(self.io.eq_dump_dir):
                    os.remove(os.path.join(self.io.eq_dump_dir, npz))
                for npz in os.listdir(self.io.prod_dump_dir):
                    os.remove(os.path.join(self.io.prod_dump_dir, npz))
                if self.integrator.electrostatic_equilibration:
                    os.remove(self.io.mag_energy_filename)
                    # Remove dumps
                    for npz in os.listdir(self.io.mag_dump_dir):
                        os.remove(os.path.join(self.io.mag_dump_dir, npz))

        if pppm_estimate:
            if timing_study:
                # Save the user-chosen PPPM parameters so the scan can restore them.
                self.input_rc = self.potential.rc
                self.input_mesh = np.copy(self.potential.pppm_mesh)
                self.input_alpha = self.potential.pppm_alpha_ewald
                self.timing_study = timing_study

                self.make_timing_plots()
                # Reset the original values.
                self.potential.rc = self.input_rc
                self.potential.pppm_mesh = np.copy(self.input_mesh)
                self.potential.pppm_alpha_ewald = self.input_alpha
                self.potential.setup(self.parameters)

            self.pppm_approximation()
            print('\nFigures can be found in {}'.format(self.pppm_plots_dir))

        if postprocessing:
            # POST- PROCESSING
            self.io.postprocess_info(self, write_to_file=True, observable='header')
            if hasattr(self, 'rdf'):
                self.rdf.setup(self.parameters)
                self.io.postprocess_info(self, write_to_file=True, observable='rdf')
            if hasattr(self, 'ssf'):
                self.ssf.setup(self.parameters)
                self.io.postprocess_info(self, write_to_file=True, observable='ssf')
            if hasattr(self, 'dsf'):
                self.dsf.setup(self.parameters)
                self.io.postprocess_info(self, write_to_file=True, observable='dsf')
            if hasattr(self, 'ccf'):
                self.ccf.setup(self.parameters)
                self.io.postprocess_info(self, write_to_file=True, observable='ccf')
            if hasattr(self, 'vm'):
                # BUGFIX: previously called self.ccf.setup here (copy-paste error).
                self.vm.setup(self.parameters)
                self.io.postprocess_info(self, write_to_file=True, observable='vm')

    def make_timing_plots(self):
        """Estimate the best number of mesh points and cutoff radius."""
        from scipy.optimize import curve_fit

        print('\n\n{:=^70} \n'.format(' Timing Study '))
        # The cutoff cannot exceed half the smallest box length; shrink the scan if needed.
        max_cells = int(0.5 * self.parameters.box_lengths.min() / self.parameters.a_ws)
        if max_cells != self.pp_cells[-1]:
            self.pp_cells = np.arange(3, max_cells, dtype=int)
        pm_times = np.zeros(len(self.pm_meshes))
        pm_errs = np.zeros(len(self.pm_meshes))
        pp_times = np.zeros((len(self.pm_meshes), len(self.pp_cells)))
        pp_errs = np.zeros((len(self.pm_meshes), len(self.pp_cells)))
        pm_xlabels = []
        pp_xlabels = []
        self.force_error_map = np.zeros((len(self.pm_meshes), len(self.pp_cells)))
        # Average the PM time
        for i, m in enumerate(self.pm_meshes):
            self.potential.pppm_mesh = m * np.ones(3, dtype=int)
            self.potential.pppm_alpha_ewald = 0.3 * m / self.parameters.box_lengths.min()
            green_time = self.green_function_timer()
            pm_errs[i] = self.parameters.pppm_pm_err
            print('\n\nMesh = {} x {} x {} : '.format(*self.potential.pppm_mesh))
            print('alpha = {:.4f} / a_ws = {:.4e} '.format(self.potential.pppm_alpha_ewald * self.parameters.a_ws,
                                                           self.potential.pppm_alpha_ewald))
            print('PM Err = {:.6e}'.format(self.parameters.pppm_pm_err))
            self.io.preprocess_timing("GF", self.timer.time_division(green_time), 0)
            pm_xlabels.append("{}x{}x{}".format(*self.potential.pppm_mesh))
            # Calculate the PM acceleration timing 3x and average
            for it in range(3):
                self.timer.start()
                self.potential.update_pm(self.particles)
                pm_times[i] += self.timer.stop() / 3.0
            # For each number of PP cells, calculate the PP acceleration timing 3x and average
            for j, c in enumerate(self.pp_cells):
                self.potential.rc = self.parameters.box_lengths.min() / c
                kappa_over_alpha = - 0.25 * (self.kappa / self.potential.pppm_alpha_ewald) ** 2
                alpha_times_rcut = - (self.potential.pppm_alpha_ewald * self.potential.rc) ** 2
                # Update the Force error (PP part)
                self.potential.pppm_pp_err = 2.0 * np.exp(kappa_over_alpha + alpha_times_rcut) / np.sqrt(
                    self.potential.rc)
                self.potential.pppm_pp_err *= np.sqrt(self.parameters.total_num_ptcls) * self.parameters.a_ws ** 2 \
                    / np.sqrt(self.parameters.box_volume)
                pp_errs[i, j] = self.potential.pppm_pp_err
                self.force_error_map[i, j] = np.sqrt(self.potential.pppm_pp_err ** 2
                                                     + self.parameters.pppm_pm_err ** 2)
                if j == 0:
                    pp_xlabels.append("{:.2f}".format(self.potential.rc / self.parameters.a_ws))
                for it in range(3):
                    self.timer.start()
                    self.potential.update_linked_list(self.particles)
                    pp_times[i, j] += self.timer.stop() / 3.0
        # Convert the timer readings (ns) to seconds.
        pp_times *= 1e-9
        pm_times *= 1e-9
        # Fit the PM times to the FFT-dominated scaling a + 5 b M^3 log2(M^3).
        pm_popt, _ = curve_fit(
            lambda x, a, b: a + 5 * b * x ** 3 * np.log2(x ** 3),
            self.pm_meshes,
            pm_times)
        fit_str = r'Fit = $a_2 + 5 a_3 M^3 \log_2(M^3)$ [s]' + '\n' + r'$a_2 = ${:.4e}, $a_3 = ${:.4e} '.format(
            *pm_popt)
        print('\nPM Time ' + fit_str)
        # Fit the PP Times to a + b / N_c^3 (cost per cell volume).
        pp_popt, _ = curve_fit(
            lambda x, a, b: a + b / x ** 3,
            self.pp_cells,
            np.mean(pp_times, axis=0),
            p0=[np.mean(pp_times, axis=0)[0], self.parameters.total_num_ptcls],
            bounds=(0, [np.mean(pp_times, axis=0)[0], 1e9])
        )
        fit_pp_str = r'Fit = $a_0 + a_1 / N_c^3$ [s]' + '\n' + '$a_0 = ${:.4e}, $a_1 = ${:.4e}'.format(*pp_popt)
        print('\nPP Time ' + fit_pp_str)
        # Start the plot
        fig, (ax_pp, ax_pm) = plt.subplots(1, 2, sharey=True, figsize=(12, 7))
        ax_pm.plot(self.pm_meshes, pm_times, 'o', label='Measured')
        ax_pm.plot(
            self.pm_meshes,
            pm_popt[0] + 5 * pm_popt[1] * self.pm_meshes ** 3 * np.log2(self.pm_meshes ** 3),
            ls='--', label='Fit')
        ax_pm.set(title='PM calculation time and estimate', yscale='log', xlabel='Mesh size')
        ax_pm.set_xscale('log', base=2)
        ax_pm.legend(ncol=2)
        ax_pm.annotate(
            text=fit_str,
            xy=(self.pm_meshes[-1], pm_times[-1]),
            xytext=(self.pm_meshes[0], pm_times[-1]),
            bbox=dict(boxstyle="round4", fc="white", ec="k", lw=2)
        )
        # Scatter Plot the PP Times
        self.tot_time_map = np.zeros(pp_times.shape)
        for j, mesh_points in enumerate(self.pm_meshes):
            self.tot_time_map[j, :] = pm_times[j] + pp_times[j, :]
            ax_pp.plot(self.pp_cells, pp_times[j], 'o', label=r'@ Mesh {}$^3$'.format(mesh_points))
        # Plot the Fit PP times
        ax_pp.plot(self.pp_cells, pp_popt[0] + pp_popt[1] / self.pp_cells ** 3, ls='--', label='Fit')
        ax_pp.legend(ncol=2)
        ax_pp.annotate(
            text=fit_pp_str,
            xy=(self.pp_cells[0], pp_times[0, 0]),
            xytext=(self.pp_cells[0], pp_times[-1, -1]),
            bbox=dict(boxstyle="round4", fc="white", ec="k", lw=2)
        )
        ax_pp.set(title='PP calculation time and estimate', yscale='log', ylabel='CPU Times [s]',
                  xlabel=r'$N_c $ = Cells')
        fig.tight_layout()
        fig.savefig(os.path.join(self.pppm_plots_dir, 'Times_' + self.io.job_id + '.png'))
        self.make_force_v_timing_plot()

    def make_lagrangian_plot(self):
        """Contour plot of the 2D Lagrangian over the (mesh, cells) scan.

        NOTE(review): this relies on ``self.lagrangian``, ``self.best_mesh`` and
        ``self.best_cells``, which are not set anywhere in the active code path
        (the code that computed them was removed as dead/commented-out) —
        confirm before calling.
        """
        c_mesh, m_mesh = np.meshgrid(self.pp_cells, self.pm_meshes)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        CS = ax.contourf(m_mesh,
                         c_mesh,
                         self.lagrangian,
                         norm=LogNorm(vmin=self.lagrangian.min(), vmax=self.lagrangian.max()))
        CS2 = ax.contour(CS, colors='w')
        ax.clabel(CS2, fmt='%1.0e', colors='w')
        fig.colorbar(CS)
        ax.scatter(self.best_mesh, self.best_cells, s=200, c='k')
        ax.set_xlabel('Mesh size')
        ax.set_ylabel(r'Cells = $L/r_c$')
        ax.set_title('2D Lagrangian')
        fig.savefig(os.path.join(self.io.preprocessing_dir, '2D_Lagrangian.png'))

    def make_force_v_timing_plot(self):
        """Side-by-side contour maps of the force error and the total CPU time
        over the (mesh, cells) scan produced by :meth:`make_timing_plots`.

        Requires ``self.input_rc``/``self.input_mesh`` (set by ``run`` when
        ``timing_study=True``) to mark the user-chosen parameters.
        """
        fig_path = self.pppm_plots_dir
        c_mesh, m_mesh = np.meshgrid(self.pp_cells, self.pm_meshes)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 9))
        # Guard against a zero minimum, which LogNorm cannot handle.
        if self.force_error_map.min() == 0.0:
            minv = 1e-120
        else:
            minv = self.force_error_map.min()
        maxt = self.force_error_map.max()
        nlvl = 12
        lvls = np.logspace(np.log10(minv), np.log10(maxt), nlvl)
        luxmap = mpl.cm.get_cmap('viridis', nlvl)
        luxnorm = mpl.colors.LogNorm(vmin=minv, vmax=maxt)
        CS = ax1.contourf(m_mesh,
                          c_mesh,
                          self.force_error_map,
                          levels=lvls,
                          cmap=luxmap,
                          norm=luxnorm)
        clb = fig.colorbar(mpl.cm.ScalarMappable(norm=luxnorm, cmap=luxmap), ax=ax1)
        clb.set_label(r'Force Error [$Q^2/ a_{\rm ws}^2$]', rotation=270, va='bottom')
        CS2 = ax1.contour(CS, colors='w')
        ax1.clabel(CS2, fmt='%1.0e', colors='w')
        input_Nc = int(self.parameters.box_lengths[0] / self.input_rc)
        # Mark the user's chosen (mesh, cells) point.
        ax1.scatter(self.input_mesh[0], input_Nc, s=200, c='k')
        ax1.set_xlabel('Mesh size')
        ax1.set_ylabel(r'N_c = Cells')
        ax1.set_title('Force Error Map')
        # Timing Plot
        maxt = self.tot_time_map.max()
        mint = self.tot_time_map.min()
        lvls = np.logspace(np.log10(mint), np.log10(maxt), nlvl)
        luxmap = mpl.cm.get_cmap('viridis', nlvl)
        # BUGFIX: the timing norm previously reused `minv` (the force-error
        # minimum) as vmin, so the timing colorbar did not match the data.
        luxnorm = mpl.colors.LogNorm(vmin=mint, vmax=maxt)
        # BUGFIX: pass the norm to contourf so the filled contours agree with
        # the colorbar (ax1 already did this).
        CS = ax2.contourf(m_mesh,
                          c_mesh,
                          self.tot_time_map,
                          levels=lvls, cmap=luxmap, norm=luxnorm)
        CS2 = ax2.contour(CS, colors='w', levels=lvls)
        ax2.clabel(CS2, fmt='%.2e', colors='w')
        clb = fig.colorbar(mpl.cm.ScalarMappable(norm=luxnorm, cmap=luxmap),
                           ax=ax2)
        clb.set_label('CPU Time [s]', rotation=270, va='bottom')
        ax2.scatter(self.input_mesh[0], input_Nc, s=200, c='k')
        ax2.set_xlabel('Mesh size')
        ax2.set_title('Timing Map')
        fig.savefig(os.path.join(fig_path, 'ForceErrorMap_v_Timing_' + self.io.job_id + '.png'))

    def time_acceleration(self):
        """Average the PP (and, if enabled, PM) acceleration time over ``self.loops`` calls."""
        self.pp_acc_time = np.zeros(self.loops)
        for i in range(self.loops):
            self.timer.start()
            self.potential.update_linked_list(self.particles)
            self.pp_acc_time[i] = self.timer.stop()
        # Calculate the mean excluding the first value because that time includes numba compilation time
        pp_mean_time = self.timer.time_division(np.mean(self.pp_acc_time[1:]))
        self.io.preprocess_timing("PP", pp_mean_time, self.loops)
        # PM acceleration
        if self.potential.pppm_on:
            self.pm_acc_time = np.zeros(self.loops)
            for i in range(self.loops):
                self.timer.start()
                self.potential.update_pm(self.particles)
                self.pm_acc_time[i] = self.timer.stop()
            pm_mean_time = self.timer.time_division(np.mean(self.pm_acc_time[1:]))
            self.io.preprocess_timing("PM", pm_mean_time, self.loops)

    def time_integrator_loop(self):
        """Run several loops of the equilibration and production phase to estimate the total time of the simulation."""
        # NOTE(review): this reads parameters.electrostatic_equilibration here
        # but integrator.electrostatic_equilibration below — presumably they
        # mirror each other; confirm.
        if self.parameters.electrostatic_equilibration:
            # Save the original number of timesteps
            steps = np.array([self.integrator.equilibration_steps,
                              self.integrator.production_steps,
                              self.integrator.magnetization_steps])
            self.integrator.magnetization_steps = self.loops
        else:
            # Save the original number of timesteps
            steps = np.array([self.integrator.equilibration_steps,
                              self.integrator.production_steps])
        # Update the equilibration and production timesteps for estimation
        self.integrator.production_steps = self.loops
        self.integrator.equilibration_steps = self.loops
        if self.io.verbose:
            print('\nRunning {} equilibration and production steps to estimate simulation times\n'.format(self.loops))
        # Run few equilibration steps to estimate the equilibration time
        self.timer.start()
        self.integrator.equilibrate(0, self.particles, self.io)
        self.eq_mean_time = self.timer.stop() / self.loops
        # Print the average equilibration & production times
        self.io.preprocess_timing("Equilibration", self.timer.time_division(self.eq_mean_time), self.loops)
        if self.integrator.electrostatic_equilibration:
            self.timer.start()
            self.integrator.magnetize(0, self.particles, self.io)
            self.mag_mean_time = self.timer.stop() / self.loops
            # Print the average magnetization time
            self.io.preprocess_timing("Magnetization", self.timer.time_division(self.mag_mean_time), self.loops)
        # Run few production steps to estimate the production time
        self.timer.start()
        self.integrator.produce(0, self.particles, self.io)
        self.prod_mean_time = self.timer.stop() / self.loops
        self.io.preprocess_timing("Production", self.timer.time_division(self.prod_mean_time), self.loops)
        # Restore the original number of timesteps and print an estimate of run times
        self.integrator.equilibration_steps = steps[0]
        self.integrator.production_steps = steps[1]
        # Print the estimate for the full run
        eq_prediction = self.eq_mean_time * steps[0]
        self.io.time_stamp('Equilibration', self.timer.time_division(eq_prediction))
        if self.integrator.electrostatic_equilibration:
            self.integrator.magnetization_steps = steps[2]
            mag_prediction = self.mag_mean_time * steps[2]
            self.io.time_stamp('Magnetization', self.timer.time_division(mag_prediction))
            eq_prediction += mag_prediction
        prod_prediction = self.prod_mean_time * steps[1]
        self.io.time_stamp('Production', self.timer.time_division(prod_prediction))
        tot_time = eq_prediction + prod_prediction
        self.io.time_stamp('Total Run', self.timer.time_division(tot_time))

    def pppm_approximation(self):
        """Calculate the Force error for a PPPM simulation using analytical approximations.
        Plot the force error in the parameter space."""
        # Calculate Force error from analytic approximation given in Dharuman et al. J Chem Phys 2017
        total_force_error, pp_force_error, pm_force_error, rcuts, alphas = self.analytical_approx_pppm()
        chosen_alpha = self.potential.pppm_alpha_ewald * self.parameters.a_ws
        chosen_rcut = self.potential.rc / self.parameters.a_ws
        # Color Map
        self.make_color_map(rcuts, alphas, chosen_alpha, chosen_rcut, total_force_error)
        # Line Plot
        self.make_line_plot(rcuts, alphas, chosen_alpha, chosen_rcut, total_force_error)

    def make_line_plot(self, rcuts, alphas, chosen_alpha, chosen_rcut, total_force_error):
        """
        Plot selected values of the total force error approximation.

        Parameters
        ----------
        rcuts: numpy.ndarray
            Cut off distances.

        alphas: numpy.ndarray
            Ewald parameters.

        chosen_alpha: float
            Chosen Ewald parameter.

        chosen_rcut: float
            Chosen cut off radius.

        total_force_error: numpy.ndarray
            Force error matrix.
        """
        fig_path = self.pppm_plots_dir
        fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(12, 7))
        # Left panel: force error vs cutoff at a few fixed alphas.
        ax[0].plot(rcuts, total_force_error[30, :], ls=(0, (5, 10)), label=r'$\alpha a_{ws} = ' + '{:2.2f}$'.format(alphas[30]))
        ax[0].plot(rcuts, total_force_error[40, :], ls='dashed', label=r'$\alpha a_{ws} = ' + '{:2.2f}$'.format(alphas[40]))
        ax[0].plot(rcuts, total_force_error[50, :], ls='solid', label=r'$\alpha a_{ws} = ' + '{:2.2f}$'.format(alphas[50]))
        ax[0].plot(rcuts, total_force_error[60, :], ls='dashdot', label=r'$\alpha a_{ws} = ' + '{:2.2f}$'.format(alphas[60]))
        ax[0].plot(rcuts, total_force_error[70, :], ls=(0, (3, 10, 1, 10)), label=r'$\alpha a_{ws} = ' + '{:2.2f}$'.format(alphas[70]))
        ax[0].set_ylabel(r'$\Delta F^{approx}_{tot}$')
        ax[0].set_xlabel(r'$r_c/a_{ws}$')
        ax[0].set_yscale('log')
        ax[0].axvline(chosen_rcut, ls='--', c='k')
        ax[0].axhline(self.parameters.force_error, ls='--', c='k')
        if rcuts[-1] * self.parameters.a_ws > 0.5 * self.parameters.box_lengths.min():
            ax[0].axvline(0.5 * self.parameters.box_lengths.min() / self.parameters.a_ws, c='r', label=r'$L/2$')
        ax[0].grid(True, alpha=0.3)
        ax[0].legend(loc='best')
        # Right panel: force error vs alpha at a few fixed cutoffs.
        ax[1].plot(alphas, total_force_error[:, 30], ls=(0, (5, 10)), label=r'$r_c = {:2.2f}'.format(rcuts[30]) + ' a_{ws}$')
        ax[1].plot(alphas, total_force_error[:, 40], ls='dashed', label=r'$r_c = {:2.2f}'.format(rcuts[40]) + ' a_{ws}$')
        ax[1].plot(alphas, total_force_error[:, 50], ls='solid', label=r'$r_c = {:2.2f}'.format(rcuts[50]) + ' a_{ws}$')
        ax[1].plot(alphas, total_force_error[:, 60], ls='dashdot', label=r'$r_c = {:2.2f}'.format(rcuts[60]) + ' a_{ws}$')
        ax[1].plot(alphas, total_force_error[:, 70], ls=(0, (3, 10, 1, 10)), label=r'$r_c = {:2.2f}'.format(rcuts[70]) + ' a_{ws}$')
        ax[1].set_xlabel(r'$\alpha \; a_{ws}$')
        ax[1].set_yscale('log')
        ax[1].axhline(self.parameters.force_error, ls='--', c='k')
        ax[1].axvline(chosen_alpha, ls='--', c='k')
        ax[1].grid(True, alpha=0.3)
        ax[1].legend(loc='best')
        fig.suptitle(
            r'Parameters $N = {}, \quad M = {}, \quad p = {}, \quad \kappa = {:.2f}$'.format(
                self.parameters.total_num_ptcls,
                self.potential.pppm_mesh[0],
                self.potential.pppm_cao,
                self.kappa * self.parameters.a_ws))
        fig.savefig(os.path.join(fig_path, 'LinePlot_ForceError_' + self.io.job_id + '.png'))

    def make_color_map(self, rcuts, alphas, chosen_alpha, chosen_rcut, total_force_error):
        """
        Plot a color map of the total force error approximation.

        Parameters
        ----------
        rcuts: numpy.ndarray
            Cut off distances.

        alphas: numpy.ndarray
            Ewald parameters.

        chosen_alpha: float
            Chosen Ewald parameter.

        chosen_rcut: float
            Chosen cut off radius.

        total_force_error: numpy.ndarray
            Force error matrix.
        """
        fig_path = self.pppm_plots_dir
        r_mesh, a_mesh = np.meshgrid(rcuts, alphas)
        fig, ax = plt.subplots(1, 1, figsize=(10, 7))
        # LogNorm cannot handle zeros: clamp them to a tiny positive floor.
        if total_force_error.min() == 0.0:
            minv = 1e-120
        else:
            minv = total_force_error.min()
        total_force_error[total_force_error == 0.0] = minv
        CS = ax.contourf(a_mesh, r_mesh, total_force_error, norm=LogNorm(vmin=minv, vmax=total_force_error.max()))
        CS2 = ax.contour(CS, colors='w')
        ax.clabel(CS2, fmt='%1.0e', colors='w')
        # Mark the user's chosen (alpha, rc) point.
        ax.scatter(chosen_alpha, chosen_rcut, s=200, c='k')
        if rcuts[-1] * self.parameters.a_ws > 0.5 * self.parameters.box_lengths.min():
            ax.axhline(0.5 * self.parameters.box_lengths.min() / self.parameters.a_ws, c='r', label=r'$L/2$')
        ax.set_xlabel(r'$\alpha \;a_{ws}$')
        ax.set_ylabel(r'$r_c/a_{ws}$')
        ax.set_title(r'Parameters $N = {}, \quad M = {}, \quad p = {}, \quad \kappa = {:.2f}$'.format(
            self.parameters.total_num_ptcls,
            self.potential.pppm_mesh[0],
            self.potential.pppm_cao,
            self.kappa * self.parameters.a_ws))
        clb = fig.colorbar(CS)
        clb.set_label(r'$\Delta F^{approx}_{tot}(r_c,\alpha)$', va='bottom', rotation=270)
        fig.tight_layout()
        fig.savefig(os.path.join(fig_path, 'ClrMap_ForceError_' + self.io.job_id + '.png'))

    def analytical_approx_pp(self):
        """Calculate PP force error over a range of cutoffs around the chosen one."""
        r_min = self.potential.rc * 0.5
        r_max = self.potential.rc * 1.5
        rcuts = np.linspace(r_min, r_max, 101) / self.parameters.a_ws
        # Calculate the analytic PP error and the total force error
        pp_force_error = np.sqrt(2.0 * np.pi * self.kappa) * np.exp(- rcuts * self.kappa)
        pp_force_error *= np.sqrt(self.parameters.total_num_ptcls *
                                  self.parameters.a_ws ** 3 / self.parameters.box_volume)
        return pp_force_error, rcuts

    def analytical_approx_pppm(self):
        """Calculate the total force error as given in Dharuman et al. J Chem Phys 146 024112 (2017)."""
        p = self.potential.pppm_cao
        L = self.parameters.box_lengths[0] / self.parameters.a_ws
        h = L / self.potential.pppm_mesh[0]
        # Scan a window of +/- 50% around the chosen alpha and rc.
        a_min = self.potential.pppm_alpha_ewald * 0.5
        a_max = self.potential.pppm_alpha_ewald * 1.5
        r_min = self.potential.rc * 0.5
        r_max = self.potential.rc * 1.5
        alphas = self.parameters.a_ws * np.linspace(a_min, a_max, 101)
        rcuts = np.linspace(r_min, r_max, 101) / self.parameters.a_ws
        pm_force_error = np.zeros(len(alphas))
        pp_force_error = np.zeros((len(alphas), len(rcuts)))
        total_force_error = np.zeros((len(alphas), len(rcuts)))
        # Coefficient from Deserno and Holm J Chem Phys 109 7694 (1998)
        if p == 1:
            Cmp = np.array([2 / 3])
        elif p == 2:
            Cmp = np.array([2 / 45, 8 / 189])
        elif p == 3:
            Cmp = np.array([4 / 495, 2 / 225, 8 / 1485])
        elif p == 4:
            Cmp = np.array([2 / 4725, 16 / 10395, 5528 / 3869775, 32 / 42525])
        elif p == 5:
            Cmp = np.array([4 / 93555, 2764 / 11609325, 8 / 25515, 7234 / 32531625, 350936 / 3206852775])
        elif p == 6:
            Cmp = np.array([2764 / 638512875, 16 / 467775, 7234 / 119282625, 1403744 / 25196700375,
                            1396888 / 40521009375, 2485856 / 152506344375])
        elif p == 7:
            Cmp = np.array([8 / 18243225, 7234 / 1550674125, 701872 / 65511420975, 2793776 / 225759909375,
                            1242928 / 132172165125, 1890912728 / 352985880121875, 21053792 / 8533724574375])
        kappa = self.kappa * self.parameters.a_ws
        for ia, alpha in enumerate(alphas):
            somma = 0.0
            for m in np.arange(p):
                expp = 2 * (m + p)
                somma += Cmp[m] * (2 / (1 + expp)) * betamp(m, p, alpha, kappa) * (h / 2.) ** expp
            # eq.(36) in Dharuman J Chem Phys 146 024112 (2017)
            pm_force_error[ia] = np.sqrt(3.0 * somma) / (2.0 * np.pi)
        # eq.(35)
        pm_force_error *= np.sqrt(self.parameters.total_num_ptcls *
                                  self.parameters.a_ws ** 3 / self.parameters.box_volume)
        # Calculate the analytic PP error and the total force error
        if self.potential.type == "QSP":
            # QSP: PP error depends on rc only (screened by kappa).
            for (ir, rc) in enumerate(rcuts):
                pp_force_error[:, ir] = np.sqrt(2.0 * np.pi * kappa) * np.exp(- rc * kappa)
                pp_force_error[:, ir] *= np.sqrt(self.parameters.total_num_ptcls
                                                 * self.parameters.a_ws ** 3 / self.parameters.box_volume)
                for (ia, alfa) in enumerate(alphas):
                    # eq.(42) from Dharuman J Chem Phys 146 024112 (2017)
                    total_force_error[ia, ir] = np.sqrt(pm_force_error[ia] ** 2 + pp_force_error[ia, ir] ** 2)
        else:
            for (ir, rc) in enumerate(rcuts):
                for (ia, alfa) in enumerate(alphas):
                    # eq.(30) from Dharuman J Chem Phys 146 024112 (2017)
                    pp_force_error[ia, ir] = 2.0 * np.exp(-(0.5 * kappa / alfa) ** 2
                                                          - alfa ** 2 * rc ** 2) / np.sqrt(rc)
                    pp_force_error[ia, ir] *= np.sqrt(self.parameters.total_num_ptcls *
                                                      self.parameters.a_ws ** 3 / self.parameters.box_volume)
                    # eq.(42) from Dharuman J Chem Phys 146 024112 (2017)
                    total_force_error[ia, ir] = np.sqrt(pm_force_error[ia] ** 2 + pp_force_error[ia, ir] ** 2)
        return total_force_error, pp_force_error, pm_force_error, rcuts, alphas

    @staticmethod
    def make_fit_plot(pp_xdata, pm_xdata, pp_times, pm_times, pp_opt, pm_opt, pp_xlabels, pm_xlabels, fig_path):
        """
        Make a dual plot of the fitted functions.

        Note: ``pp_opt`` and ``pm_opt`` are currently unused (the fit overlays
        are disabled) but are kept in the signature for interface compatibility.
        """
        fig, ax = plt.subplots(1, 2, figsize=(12, 7))
        ax[0].plot(pm_xdata, pm_times.mean(axis=-1), 'o', label='Measured times')
        ax[1].plot(pp_xdata, pp_times.mean(axis=-1), 'o', label='Measured times')
        ax[0].set_xscale('log')
        ax[0].set_yscale('log')
        ax[1].set_xscale('log')
        ax[1].set_yscale('log')
        ax[0].legend()
        ax[1].legend()
        ax[0].set_xticks(pm_xdata)
        ax[0].set_xticklabels(pm_xlabels)
        # Rotate the tick labels and set their alignment.
        plt.setp(ax[0].get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
        ax[1].set_xticks(pp_xdata[0:-1:3])
        ax[1].set_xticklabels(pp_xlabels)
        # Rotate the tick labels and set their alignment.
        plt.setp(ax[1].get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
        ax[0].set_title("PM calculation")
        ax[1].set_title("PP calculation")
        ax[0].set_xlabel('Mesh sizes')
        ax[1].set_xlabel(r'$r_c / a_{ws}$')
        fig.tight_layout()
        fig.savefig(os.path.join(fig_path, 'Timing_Fit.png'))
class Simulation(Process):
    """
    Sarkas simulation wrapper. This class manages the entire simulation and its small moving parts.

    Parameters
    ----------
    input_file : str
        Path to the YAML input file.
    """

    def __init__(self, input_file: str = None):
        # Name used by the Process base class to identify this run type.
        self.__name__ = 'simulation'
        super().__init__(input_file)

    def equilibrate(self) -> None:
        """
        Run the time integrator with the thermostat to evolve the system to its
        thermodynamic equilibrium state, followed by an optional magnetization phase.
        """
        if self.parameters.verbose:
            print("\n------------- Equilibration -------------")
        # Resume from the stored step when this is an equilibration restart.
        if self.parameters.load_method in ["equilibration_restart", "eq_restart"]:
            it_start = self.parameters.restart_step
        else:
            it_start = 0
        # NOTE(review): the equilibration dump always uses step 0 while the
        # magnetization dump below uses it_start -- confirm this asymmetry is intended.
        self.io.dump('equilibration', self.particles, 0)
        # Start timer, equilibrate, and print run time.
        self.timer.start()
        self.integrator.equilibrate(it_start, self.particles, self.io)
        time_eq = self.timer.stop()
        self.io.time_stamp("Equilibration", self.timer.time_division(time_eq))
        # Optional magnetization phase after the electrostatic equilibration.
        if self.integrator.electrostatic_equilibration:
            if self.parameters.verbose:
                print('\n------------- Magnetization -------------')
            if self.parameters.load_method in ["magnetization_restart", "mag_restart"]:
                it_start = self.parameters.restart_step
            else:
                it_start = 0
            self.io.dump('magnetization', self.particles, it_start)
            # Start timer, magnetize, and print run time.
            self.timer.start()
            self.integrator.magnetize(it_start, self.particles, self.io)
            time_eq = self.timer.stop()
            self.io.time_stamp("Magnetization", self.timer.time_division(time_eq))

    def evolve(self) -> None:
        """
        Run the time integrator to evolve the system for the duration of the production phase.
        """
        # Check for simulation restart.
        if self.parameters.load_method in ["prod_restart", "production_restart"]:
            it_start = self.parameters.restart_step
        else:
            it_start = 0
        # Reset the periodic-boundary-crossing counter before production.
        self.particles.pbc_cntr.fill(0)
        self.io.dump('production', self.particles, 0)
        if self.parameters.verbose:
            print("\n------------- Production -------------")
        # Enable the measurement flag so the rdf is accumulated during production.
        self.potential.measure = True
        # Start timer, produce data, and print run time.
        self.timer.start()
        self.integrator.produce(it_start, self.particles, self.io)
        time_end = self.timer.stop()
        self.io.time_stamp("Production", self.timer.time_division(time_end))

    def run(self) -> None:
        """Run the simulation: equilibration (skipped on production restarts) then production."""
        time0 = self.timer.current()
        # A production restart resumes directly in the production phase.
        if self.parameters.load_method not in ['prod_restart', 'production_restart']:
            self.equilibrate()
        self.evolve()
        time_tot = self.timer.current()
        self.io.time_stamp("Total", self.timer.time_division(time_tot - time0))
@njit
def Gk(x, alpha, kappa):
    """
    Green's function of the Coulomb/Yukawa potential, Gaussian-damped by the
    splitting parameter ``alpha``.
    """
    # Gaussian damping factor of the Ewald split.
    damping = np.exp(-(x ** 2 + kappa ** 2) / (2 * alpha) ** 2)
    # Screened Coulomb kernel 4*pi / (kappa^2 + x^2) times the damping.
    return 4.0 * np.pi * damping / (kappa ** 2 + x ** 2)
@njit
def betamp(m, p, alpha, kappa):
    r"""
    Calculate :math:`\beta(m)` of eq.(37) in Dharuman et al. J Chem Phys 146 024112 (2017).

    The integral is evaluated with the trapezoidal rule on a fixed grid.
    (Raw docstring so that ``\beta`` is not parsed as an invalid escape sequence.)

    Parameters
    ----------
    m, p : int
        Expansion index and charge assignment order.
    alpha : float
        Ewald splitting parameter.
    kappa : float
        Inverse screening length.

    Returns
    -------
    float
        Value of the :math:`\beta(m, p)` integral.
    """
    xa = np.linspace(0.0001, 500, 5000)
    return np.trapz(Gk(xa, alpha, kappa) * Gk(xa, alpha, kappa) * xa ** (2 * (m + p + 2)), x=xa)
@njit
def analytical_approx_pppm_single(kappa, rc, p, h, alpha):
    """
    Calculate the total force error for a given value of ``rc`` and ``alpha``. See similar function above.

    Parameters
    ----------
    kappa : float
        Inverse screening length.
    rc : float
        Cutoff radius of the PP part.
    p : int
        Charge assignment order. NOTE(review): only p = 1..7 is supported; any
        other value leaves ``Cmp`` undefined -- confirm callers never pass other orders.
    h : float
        Mesh spacing.
    alpha : float
        Ewald splitting parameter.

    Returns
    -------
    Tot_DeltaF : float
        Total force error, eq.(42) in Dharuman et al. J Chem Phys 146 024112 (2017).
    pp_force_error : float
        PP force error, eq.(30).
    pm_force_error : float
        PM force error, eq.(36).
    """
    # Coefficient from Deserno and Holm J Chem Phys 109 7694 (1998)
    # (one table of C_m^(p) coefficients per charge assignment order p).
    if p == 1:
        Cmp = np.array([2 / 3])
    elif p == 2:
        Cmp = np.array([2 / 45, 8 / 189])
    elif p == 3:
        Cmp = np.array([4 / 495, 2 / 225, 8 / 1485])
    elif p == 4:
        Cmp = np.array([2 / 4725, 16 / 10395, 5528 / 3869775, 32 / 42525])
    elif p == 5:
        Cmp = np.array([4 / 93555, 2764 / 11609325, 8 / 25515, 7234 / 32531625, 350936 / 3206852775])
    elif p == 6:
        Cmp = np.array([2764 / 638512875, 16 / 467775, 7234 / 119282625, 1403744 / 25196700375,
                        1396888 / 40521009375, 2485856 / 152506344375])
    elif p == 7:
        Cmp = np.array([8 / 18243225, 7234 / 1550674125, 701872 / 65511420975, 2793776 / 225759909375,
                        1242928 / 132172165125, 1890912728 / 352985880121875, 21053792 / 8533724574375])
    # Sum over the expansion orders m = 0..p-1.
    somma = 0.0
    for m in np.arange(p):
        expp = 2 * (m + p)
        somma += Cmp[m] * (2 / (1 + expp)) * betamp(m, p, alpha, kappa) * (h / 2.) ** expp
    # eq.(36) in Dharuman J Chem Phys 146 024112 (2017)
    pm_force_error = np.sqrt(3.0 * somma) / (2.0 * np.pi)
    # eq.(30) from Dharuman J Chem Phys 146 024112 (2017)
    pp_force_error = 2.0 * np.exp(-(0.5 * kappa / alpha) ** 2 - alpha ** 2 * rc ** 2) / np.sqrt(rc)
    # eq.(42) from Dharuman J Chem Phys 146 024112 (2017)
    Tot_DeltaF = np.sqrt(pm_force_error ** 2 + pp_force_error ** 2)
    return Tot_DeltaF, pp_force_error, pm_force_error
def pm_time_model(x, a, b):
    """
    PM timing fit model: f(x) = a*x + b*x*log(x).

    (The previous docstring wrongly described a quadratic with an intercept and
    a third parameter ``c``; the function actually implements an n*log(n)-type model.)

    Parameters
    ----------
    x : float or numpy.ndarray
        Values at which to evaluate the model (must be positive for the log term).
    a : float
        Coefficient of the linear term.
    b : float
        Coefficient of the x*log(x) term.

    Returns
    -------
    float or numpy.ndarray
        a * x + b * x * log(x), elementwise for array input.
    """
    return a * x + b * x * np.log(x)
| 42.756598 | 136 | 0.584071 |
ace8524bdaee09448d181d75899d66ec69f4fa92 | 1,337 | py | Python | examples/basics/gloo/display_shape.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 3 | 2019-02-28T16:05:33.000Z | 2020-05-03T21:29:03.000Z | examples/basics/gloo/display_shape.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | null | null | null | examples/basics/gloo/display_shape.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 1 | 2019-04-03T12:49:18.000Z | 2019-04-03T12:49:18.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 2
"""
Simple example demonstrating showing a quad.
gloo objects that this example demonstrates: Program.
"""
from vispy import gloo
from vispy import app
import numpy as np
# Create vertices
vPosition = np.array([[-0.8, -0.8, 0.0], [+0.7, -0.7, 0.0],
[-0.7, +0.7, 0.0], [+0.8, +0.8, 0.0, ]], np.float32)
VERT_SHADER = """ // simple vertex shader
attribute vec3 a_position;
void main (void) {
gl_Position = vec4(a_position, 1.0);
}
"""
FRAG_SHADER = """ // simple fragment shader
uniform vec4 u_color;
void main()
{
gl_FragColor = u_color;
}
"""
class Canvas(app.Canvas):
    """Minimal vispy canvas that draws a single colored quad with gloo."""

    def __init__(self):
        app.Canvas.__init__(self, keys='interactive')
        # Create program
        self._program = gloo.Program(VERT_SHADER, FRAG_SHADER)
        # Set uniform and attribute
        self._program['u_color'] = 0.2, 1.0, 0.4, 1
        self._program['a_position'] = gloo.VertexBuffer(vPosition)
        gloo.set_clear_color('white')
        self.show()

    def on_resize(self, event):
        # Keep the GL viewport in sync with the physical window size.
        width, height = event.physical_size
        gloo.set_viewport(0, 0, width, height)

    def on_draw(self, event):
        # Clear and draw the quad: 4 vertices as a triangle strip (2 triangles).
        gloo.clear()
        self._program.draw('triangle_strip')
if __name__ == '__main__':
    # Run the example as a script: create the canvas and start the event loop.
    c = Canvas()
    app.run()
| 21.222222 | 74 | 0.618549 |
ace852bd0ccd62b5dc63e6b47fcdcc1983ab1525 | 87 | py | Python | unread/apps.py | boxed/forum | abb3699d310bf3a404f031a3cb0e4bdbf403da5a | [
"BSD-3-Clause"
] | 2 | 2019-06-28T16:30:44.000Z | 2020-12-28T01:46:52.000Z | unread/apps.py | boxed/forum | abb3699d310bf3a404f031a3cb0e4bdbf403da5a | [
"BSD-3-Clause"
] | 14 | 2019-02-26T17:25:54.000Z | 2019-04-03T18:11:24.000Z | unread/apps.py | boxed/forum | abb3699d310bf3a404f031a3cb0e4bdbf403da5a | [
"BSD-3-Clause"
] | 1 | 2019-06-14T14:21:47.000Z | 2019-06-14T14:21:47.000Z | from django.apps import AppConfig
class UnreadConfig(AppConfig):
    """Django application configuration for the ``unread`` app."""

    # Dotted module path identifying the application.
    name = 'unread'
| 14.5 | 33 | 0.747126 |
ace853d55969814b4a31ac3f7de38d21b058dcce | 66 | py | Python | django_toolkit/oauth2/__init__.py | rafaelgotts/django-toolkit | 10b68cbb326bdbc8c2d9efda5edbfc7768476a72 | [
"MIT"
] | 14 | 2016-07-25T19:29:05.000Z | 2021-12-10T19:12:37.000Z | django_toolkit/oauth2/__init__.py | rafaelgotts/django-toolkit | 10b68cbb326bdbc8c2d9efda5edbfc7768476a72 | [
"MIT"
] | 37 | 2016-07-22T12:28:02.000Z | 2021-03-19T21:52:39.000Z | django_toolkit/oauth2/__init__.py | rafaelgotts/django-toolkit | 10b68cbb326bdbc8c2d9efda5edbfc7768476a72 | [
"MIT"
] | 8 | 2016-10-05T13:02:32.000Z | 2020-08-02T12:59:08.000Z | default_app_config = 'django_toolkit.oauth2.apps.OAuth2AppConfig'
| 33 | 65 | 0.863636 |
ace854f6be4fe0573168d207284053143dad02f3 | 34,348 | py | Python | beam_telescope_analysis/tools/kalman.py | YannickDieter/beam_telescope_analysis | 0c678ad991a9ef42178b2eeaf58059d387362f2a | [
"MIT"
] | null | null | null | beam_telescope_analysis/tools/kalman.py | YannickDieter/beam_telescope_analysis | 0c678ad991a9ef42178b2eeaf58059d387362f2a | [
"MIT"
] | null | null | null | beam_telescope_analysis/tools/kalman.py | YannickDieter/beam_telescope_analysis | 0c678ad991a9ef42178b2eeaf58059d387362f2a | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from numpy import linalg
from numba import njit
from beam_telescope_analysis.tools import geometry_utils
@njit
def _filter_predict(transition_matrix, transition_covariance,
                    transition_offset, current_filtered_state,
                    current_filtered_state_covariance):
    """Extrapolate the filtered state to the next plane (Kalman prediction step).

    Works on a whole track chunk at once.

    Parameters
    ----------
    transition_matrix : [chunk_size, n_dim_state, n_dim_state] array
        State transition matrix F from time t to t+1.
    transition_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Process noise covariance Q for the transition from t to t+1.
    transition_offset : [chunk_size, n_dim_state] array
        Additive offset b of the transition.
    current_filtered_state : [chunk_size, n_dim_state] array
        Filtered state x at time t.
    current_filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Covariance C of the filtered state at time t.

    Returns
    -------
    predicted_state : [chunk_size, n_dim_state] array
        Predicted state F x + b at time t+1.
    predicted_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Predicted covariance F C F^T + Q at time t+1.
    """
    # x_pred = F x + b
    state_extrapolated = _vec_mul(transition_matrix, current_filtered_state)
    predicted_state = state_extrapolated + transition_offset
    # C_pred = F C F^T + Q
    covariance_propagated = _mat_mul(current_filtered_state_covariance,
                                     _mat_trans(transition_matrix))
    predicted_state_covariance = _mat_mul(transition_matrix, covariance_propagated) + transition_covariance
    return predicted_state, predicted_state_covariance
def _filter_correct(observation_matrix, observation_covariance,
                    observation_offset, predicted_state,
                    predicted_state_covariance, observation):
    r"""Correct a predicted state with the measurement (Kalman update step).

    Works on a whole track chunk at once. Tracks whose observation is NaN
    (missing hit on this plane) keep the prediction unchanged.

    Parameters
    ----------
    observation_matrix : [chunk_size, n_dim_obs, n_dim_obs] array
        Measurement matrix H for time t.
    observation_covariance : [chunk_size, n_dim_obs, n_dim_obs] array
        Measurement noise covariance R at time t.
    observation_offset : [chunk_size, n_dim_obs] array
        Additive offset d of the measurement.
    predicted_state : [chunk_size, n_dim_state] array
        Predicted state at time t.
    predicted_state_covariance : [n_dim_state, n_dim_state] array
        Covariance of the predicted state at time t.
    observation : [chunk_size, n_dim_obs] array
        Measurement at time t; NaN entries mark missing hits.

    Returns
    -------
    kalman_gain : [chunk_size, n_dim_state, n_dim_obs] array
        Kalman gain K at time t (zero where no hit is available).
    filtered_state : [chunk_size, n_dim_state] array
        Filtered state at time t.
    filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Covariance of the filtered state at time t.
    """
    observation_matrix_T = _mat_trans(observation_matrix)
    # Expected measurement H x + d and its covariance H C H^T + R.
    expected_observation = _vec_mul(observation_matrix, predicted_state) + observation_offset
    expected_observation_covariance = _mat_mul(
        observation_matrix,
        _mat_mul(predicted_state_covariance, observation_matrix_T)) + observation_covariance
    # Kalman gain K = C H^T (H C H^T + R)^-1 (pseudo-inverse for numerical safety).
    kalman_gain = _mat_mul(
        predicted_state_covariance,
        _mat_mul(observation_matrix_T, _mat_inverse(expected_observation_covariance)))
    # Filtered estimate x + K (z - Hx - d) and covariance C - K H C.
    residual = observation - expected_observation
    filtered_state = predicted_state + _vec_mul(kalman_gain, residual)
    filtered_state_covariance = predicted_state_covariance - _mat_mul(
        kalman_gain, _mat_mul(observation_matrix, predicted_state_covariance))
    # Tracks without a hit on this plane (NaN observation): fall back to the prediction.
    missing_hit = np.isnan(observation[:, 0])
    kalman_gain[missing_hit, :, :] = 0.0
    filtered_state[missing_hit, :] = predicted_state[missing_hit, :]
    filtered_state_covariance[missing_hit, :, :] = predicted_state_covariance[missing_hit, :, :]
    return kalman_gain, filtered_state, filtered_state_covariance
def _filter(dut_planes, z_sorted_dut_indices, thetas, observations, select_fit_duts,
            transition_matrices, observation_matrices, transition_covariances,
            observation_covariances, transition_offsets, observation_offsets,
            initial_state, initial_state_covariance):
    """Apply the Kalman Filter. First a prediction of the state is done, then a filtering is
    done which includes the observations.

    Parameters
    ----------
    dut_planes : list
        List of DUT parameters (material_budget, translation_x, translation_y, translation_z, rotation_alpha, rotation_beta, rotation_gamma).
    z_sorted_dut_indices : list
        List of DUT indices in the order reflecting their z position.
    thetas : list
        List of scattering angle root mean squares (RMS).
    observations : [chunk_size, n_timesteps, n_dim_obs] array
        observations (measurements) from times [0...n_timesteps-1]. If any of observations is masked,
        then observations[:, t] will be treated as a missing observation
        and will not be included in the filtering step.
    select_fit_duts : iterable
        List of DUTs which should be included in Kalman Filter. DUTs which are not in list
        were treated as missing measurements and will not be included in the Filtering step.
    transition_matrices : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
        matrices to transport states from t to t+1.
    observation_matrices : [chunk_size, n_timesteps, n_dim_obs, n_dim_state] array-like
        observation matrices.
    transition_covariances : [chunk_size, n_timesteps-1, n_dim_state,n_dim_state] array-like
        covariance matrices of transition matrices.
    observation_covariances : [chunk_size, n_timesteps, n_dim_obs, n_dim_obs] array-like
        covariance matrices of observation matrices.
    transition_offsets : [chunk_size, n_timesteps-1, n_dim_state] array-like
        offsets of transition matrices.
    observation_offsets : [chunk_size, n_timesteps, n_dim_obs] array-like
        offsets of observations.
    initial_state : [chunk_size, n_dim_state] array-like
        initial value of state.
    initial_state_covariance : [chunk_size, n_dim_state, n_dim_state] array-like
        initial value for observation covariance matrices.

    Returns
    -------
    predicted_states : [chunk_size, n_timesteps, n_dim_state] array
        predicted states of times [0...t].
    predicted_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
        covariance matrices of predicted states of times [0...t].
    kalman_gains : [chunk_size, n_timesteps, n_dim_state] array
        Kalman gain matrices of times [0...t].
    filtered_states : [chunk_size, n_timesteps, n_dim_state] array
        filtered states of times [0...t].
    filtered_state_covariances : [chunk_size, n_timesteps, n_dim_state] array
        covariance matrices of filtered states of times [0...t].
    transition_matrices_update : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
        updated transition matrices in case of rotated planes.
    """
    chunk_size, n_timesteps, n_dim_obs = observations.shape
    n_dim_state = transition_covariances.shape[2]
    # Result buffers, one slot per plane (time step) and track.
    predicted_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
    predicted_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
    kalman_gains = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_obs))
    filtered_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
    filtered_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
    # array where new transition matrices are stored, needed to pass it to kalman smoother
    transition_matrices_update = np.zeros_like(transition_covariances)
    # Iterate over the planes in increasing z order.
    for i, dut_index in enumerate(z_sorted_dut_indices):
        dut = dut_planes[dut_index]
        if i == 0:  # first DUT: seed the prediction with the initial state estimate
            predicted_states[:, dut_index] = initial_state
            predicted_state_covariances[:, dut_index] = initial_state_covariance
        else:
            # slopes (directional vectors) of the filtered estimates
            slopes_filtered_state = np.column_stack((
                filtered_states[:, z_sorted_dut_indices[i - 1], 3],
                filtered_states[:, z_sorted_dut_indices[i - 1], 4],
                filtered_states[:, z_sorted_dut_indices[i - 1], 5]))
            # offsets (support vectors) of the filtered states
            offsets_filtered_state = np.column_stack((
                filtered_states[:, z_sorted_dut_indices[i - 1], 0],
                filtered_states[:, z_sorted_dut_indices[i - 1], 1],
                filtered_states[:, z_sorted_dut_indices[i - 1], 2]))
            # offsets of filtered state with actual plane (plane on which the filtered estimate should be predicted)
            offsets_filtered_state_actual_plane = geometry_utils.get_line_intersections_with_dut(
                line_origins=offsets_filtered_state,
                line_directions=slopes_filtered_state,
                translation_x=dut.translation_x,
                translation_y=dut.translation_y,
                translation_z=dut.translation_z,
                rotation_alpha=dut.rotation_alpha,
                rotation_beta=dut.rotation_beta,
                rotation_gamma=dut.rotation_gamma)
            # Path length along z from the previous plane to the current one; tracks
            # must propagate in +z, otherwise the plane ordering is inconsistent.
            z_diff = offsets_filtered_state_actual_plane[:, 2] - offsets_filtered_state[:, 2]
            if np.any(z_diff < 0.0):
                raise ValueError("Z differences give values smaller zero.")
            # update transition matrix according to the DUT rotation
            # (per coordinate: use the actual path length where the slope is non-zero,
            # fall back to the plain z distance where the slope is exactly zero)
            transition_matrices[np.nonzero(slopes_filtered_state[:, 0])[0], z_sorted_dut_indices[i - 1], 0, 3] = (offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 0])[0], 0] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 0])[0], 0]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 0])[0], 0]
            transition_matrices[np.nonzero(slopes_filtered_state[:, 0] == 0)[0], z_sorted_dut_indices[i - 1], 0, 3] = z_diff[np.nonzero(slopes_filtered_state[:, 0] == 0)[0]]
            transition_matrices[np.nonzero(slopes_filtered_state[:, 1])[0], z_sorted_dut_indices[i - 1], 1, 4] = (offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 1])[0], 1] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 1])[0], 1]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 1])[0], 1]
            transition_matrices[np.nonzero(slopes_filtered_state[:, 1] == 0)[0], z_sorted_dut_indices[i - 1], 1, 4] = z_diff[np.nonzero(slopes_filtered_state[:, 1] == 0)[0]]
            transition_matrices[np.nonzero(slopes_filtered_state[:, 2])[0], z_sorted_dut_indices[i - 1], 2, 5] = (offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 2])[0], 2] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 2])[0], 2]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 2])[0], 2]
            transition_matrices[np.nonzero(slopes_filtered_state[:, 2] == 0)[0], z_sorted_dut_indices[i - 1], 2, 5] = z_diff[np.nonzero(slopes_filtered_state[:, 2] == 0)[0]]
            # update transition covariance matrix according to the DUT rotation
            # NOTE(review): the diagonal terms scale with the squared path length and theta^2
            # while the cross terms scale linearly in the path length; the index pairs
            # ((3, 0) vs (0, 3) and (4, 1) vs (1, 4)) mix x/y/z slope components -- confirm
            # against the intended multiple-scattering covariance before modifying.
            transition_covariances[np.nonzero(slopes_filtered_state[:, 0])[0], z_sorted_dut_indices[i - 1], 0, 0] = np.square((offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 0])[0], 0] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 0])[0], 0]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 0])[0], 0]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 0] == 0)[0], z_sorted_dut_indices[i - 1], 0, 0] = np.square(z_diff[np.nonzero(slopes_filtered_state[:, 0] == 0)[0]]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 1])[0], z_sorted_dut_indices[i - 1], 3, 0] = ((offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 1])[0], 1] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 1])[0], 1]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 1])[0], 1]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 1] == 0)[0], z_sorted_dut_indices[i - 1], 3, 0] = z_diff[np.nonzero(slopes_filtered_state[:, 1] == 0)[0]] * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 2])[0], z_sorted_dut_indices[i - 1], 1, 1] = np.square((offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 2])[0], 2] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 2])[0], 2]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 2])[0], 2]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 2] == 0)[0], z_sorted_dut_indices[i - 1], 1, 1] = np.square(z_diff[np.nonzero(slopes_filtered_state[:, 2] == 0)[0]]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 0])[0], z_sorted_dut_indices[i - 1], 4, 1] = ((offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 0])[0], 0] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 0])[0], 0]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 0])[0], 0]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 0] == 0)[0], z_sorted_dut_indices[i - 1], 4, 1] = z_diff[np.nonzero(slopes_filtered_state[:, 0] == 0)[0]] * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 1])[0], z_sorted_dut_indices[i - 1], 0, 3] = ((offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 1])[0], 1] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 1])[0], 1]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 1])[0], 1]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 1] == 0)[0], z_sorted_dut_indices[i - 1], 0, 3] = z_diff[np.nonzero(slopes_filtered_state[:, 1] == 0)[0]] * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 2])[0], z_sorted_dut_indices[i - 1], 1, 4] = ((offsets_filtered_state_actual_plane[np.nonzero(slopes_filtered_state[:, 2])[0], 2] - offsets_filtered_state[np.nonzero(slopes_filtered_state[:, 2])[0], 2]) / slopes_filtered_state[np.nonzero(slopes_filtered_state[:, 2])[0], 2]) * np.square(thetas[z_sorted_dut_indices[i - 1]])
            transition_covariances[np.nonzero(slopes_filtered_state[:, 2] == 0)[0], z_sorted_dut_indices[i - 1], 1, 4] = z_diff[np.nonzero(slopes_filtered_state[:, 2] == 0)[0]] * np.square(thetas[z_sorted_dut_indices[i - 1]])
            # store updated transition matrices
            transition_matrices_update[:, z_sorted_dut_indices[i - 1]] = transition_matrices[:, z_sorted_dut_indices[i - 1]]
            # calculate prediction from filter
            predicted_states[:, dut_index], predicted_state_covariances[:, dut_index] = _filter_predict(
                transition_matrix=transition_matrices[:, z_sorted_dut_indices[i - 1]],  # next plane in -z direction
                transition_covariance=transition_covariances[:, z_sorted_dut_indices[i - 1]],  # next plane in -z direction
                transition_offset=transition_offsets[:, z_sorted_dut_indices[i - 1]],  # next plane in -z direction
                current_filtered_state=filtered_states[:, z_sorted_dut_indices[i - 1]],  # next plane in -z direction
                current_filtered_state_covariance=filtered_state_covariances[:, z_sorted_dut_indices[i - 1]])  # next plane in -z direction
        # TODO:
        # Check for offsets_filtered_state_actual_plane == predicted_states
        if dut_index in select_fit_duts:
            # DUT is a fit dut:
            # set filter to prediction where no hit is available,
            # otherwise calculate filtered state.
            kalman_gains[:, dut_index], filtered_states[:, dut_index], filtered_state_covariances[:, dut_index] = _filter_correct(
                observation_matrix=observation_matrices[:, dut_index],
                observation_covariance=observation_covariances[:, dut_index],
                observation_offset=observation_offsets[:, dut_index],
                predicted_state=predicted_states[:, dut_index],
                predicted_state_covariance=predicted_state_covariances[:, dut_index],
                observation=observations[:, dut_index])
        else:
            # DUT is not a fit dut:
            # set filter to prediction.
            kalman_gains[:, dut_index] = np.zeros((chunk_size, n_dim_state, n_dim_obs), dtype=np.float64)
            filtered_states[:, dut_index] = predicted_states[:, dut_index]
            filtered_state_covariances[:, dut_index] = predicted_state_covariances[:, dut_index]
        # Set the offset to the track intersection with the tilted plane
        intersections = geometry_utils.get_line_intersections_with_dut(
            line_origins=filtered_states[:, dut_index, 0:3],
            line_directions=filtered_states[:, dut_index, 3:6],
            translation_x=dut.translation_x,
            translation_y=dut.translation_y,
            translation_z=dut.translation_z,
            rotation_alpha=dut.rotation_alpha,
            rotation_beta=dut.rotation_beta,
            rotation_gamma=dut.rotation_gamma)
        # set x/y/z
        filtered_states[:, dut_index, 0:3] = intersections
    return predicted_states, predicted_state_covariances, kalman_gains, filtered_states, filtered_state_covariances, transition_matrices_update
@njit
def _smooth_update(transition_matrix, filtered_state,
                   filtered_state_covariance, predicted_state,
                   predicted_state_covariance, next_smoothed_state,
                   next_smoothed_state_covariance):
    """Rauch-Tung-Striebel smoothing step for one plane, applied to a whole
    track chunk at once.

    Parameters
    ----------
    transition_matrix : [chunk_size, n_dim_state, n_dim_state] array
        Transition matrix F from time t to t+1.
    filtered_state : [chunk_size, n_dim_state] array
        Filtered state at time t.
    filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Covariance of the filtered state at time t.
    predicted_state : [chunk_size, n_dim_state] array
        Predicted state at time t+1.
    predicted_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Covariance of the predicted state at time t+1.
    next_smoothed_state : [chunk_size, n_dim_state] array
        Smoothed state at time t+1.
    next_smoothed_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Covariance of the smoothed state at time t+1.

    Returns
    -------
    smoothed_state : [chunk_size, n_dim_state] array
        Smoothed state at time t.
    smoothed_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        Covariance of the smoothed state at time t.
    kalman_smoothing_gain : [chunk_size, n_dim_state, n_dim_state] array
        Smoother gain matrix at time t.
    """
    # Smoother gain A = C_filt F^T (C_pred)^-1 (pseudo-inverse for numerical safety).
    predicted_covariance_inv = _mat_inverse(predicted_state_covariance)
    kalman_smoothing_gain = _mat_mul(
        filtered_state_covariance,
        _mat_mul(_mat_trans(transition_matrix), predicted_covariance_inv))
    # x_smooth = x_filt + A (x_smooth_next - x_pred)
    state_residual = next_smoothed_state - predicted_state
    smoothed_state = filtered_state + _vec_mul(kalman_smoothing_gain, state_residual)
    # C_smooth = C_filt + A (C_smooth_next - C_pred) A^T
    covariance_residual = next_smoothed_state_covariance - predicted_state_covariance
    smoothed_state_covariance = filtered_state_covariance + _mat_mul(
        kalman_smoothing_gain,
        _mat_mul(covariance_residual, _mat_trans(kalman_smoothing_gain)))
    return smoothed_state, smoothed_state_covariance, kalman_smoothing_gain
def _smooth(dut_planes, z_sorted_dut_indices, transition_matrices, filtered_states,
            filtered_state_covariances, predicted_states,
            predicted_state_covariances):
    """Apply the Kalman Smoother to filtered states. Estimate the smoothed states.
    Smoothing is done on whole track chunk with size chunk_size.

    Parameters
    ----------
    dut_planes : list
        List of DUT parameters (material_budget, translation_x, translation_y, translation_z, rotation_alpha, rotation_beta, rotation_gamma).
    z_sorted_dut_indices : list
        List of DUT indices in the order reflecting their z position.
    transition_matrices : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
        matrices to transport states from t to t+1 of times [0...t-1].
    filtered_states : [chunk_size, n_timesteps, n_dim_state] array
        filtered states of times [0...t].
    filtered_state_covariances : [chunk_size, n_timesteps, n_dim_state] array
        covariance matrices of filtered states of times [0...t].
    predicted_states : [chunk_size, n_timesteps, n_dim_state] array
        predicted states of times [0...t].
    predicted_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
        covariance matrices of predicted states of times [0...t].

    Returns
    -------
    smoothed_states : [chunk_size, n_timesteps, n_dim_state]
        smoothed states for times [0...n_timesteps-1].
    smoothed_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
        covariance matrices of smoothed states for times [0...n_timesteps-1].
    kalman_smoothing_gains : [chunk_size, n_timesteps-1, n_dim_state] array
        smoothed kalman gain matrices fot times [0...n_timesteps-2].
    """
    chunk_size, n_timesteps, n_dim_state = filtered_states.shape
    smoothed_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
    smoothed_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
    kalman_smoothing_gains = np.zeros((chunk_size, n_timesteps - 1, n_dim_state, n_dim_state))
    # Seed the backward pass: the smoothed estimate of the last plane is its filtered estimate.
    # NOTE(review): index -1 assumes the last array slot corresponds to the most downstream
    # plane -- confirm this holds when DUT indices are not already z-ordered.
    smoothed_states[:, -1] = filtered_states[:, -1]
    smoothed_state_covariances[:, -1] = filtered_state_covariances[:, -1]
    # reverse order for smoother (walk the planes from downstream to upstream,
    # skipping the most downstream one which was seeded above)
    for i, dut_index in enumerate(z_sorted_dut_indices[:-1][::-1]):
        dut = dut_planes[dut_index]
        transition_matrix = transition_matrices[:, dut_index]
        # z_sorted_dut_indices[::-1][i] is the next plane in +z direction, whose
        # prediction and smoothed estimate feed the RTS update of this plane.
        smoothed_states[:, dut_index], smoothed_state_covariances[:, dut_index], kalman_smoothing_gains[:, dut_index] = _smooth_update(
            transition_matrix,
            filtered_states[:, dut_index],
            filtered_state_covariances[:, dut_index],
            predicted_states[:, z_sorted_dut_indices[::-1][i]],  # next plane in +z direction
            predicted_state_covariances[:, z_sorted_dut_indices[::-1][i]],  # next plane +z direction
            smoothed_states[:, z_sorted_dut_indices[::-1][i]],  # next plane +z direction
            smoothed_state_covariances[:, z_sorted_dut_indices[::-1][i]])  # next plane +z direction
        # Set the offset to the track intersection with the tilted plane
        intersections_smooth = geometry_utils.get_line_intersections_with_dut(
            line_origins=smoothed_states[:, dut_index, 0:3],
            line_directions=smoothed_states[:, dut_index, 3:6],
            translation_x=dut.translation_x,
            translation_y=dut.translation_y,
            translation_z=dut.translation_z,
            rotation_alpha=dut.rotation_alpha,
            rotation_beta=dut.rotation_beta,
            rotation_gamma=dut.rotation_gamma)
        smoothed_states[:, dut_index, 0:3] = intersections_smooth
    return smoothed_states, smoothed_state_covariances, kalman_smoothing_gains
@njit
def _mat_mul(X, Y):
    '''Batched matrix product: for every batch entry b this computes X[b] @ Y[b].
    Written with explicit loops over the last two axes so numba can compile it.
    '''
    n_batch = X.shape[0]
    n_rows = X.shape[1]
    n_inner = Y.shape[1]
    n_cols = Y.shape[2]
    result = np.zeros((n_batch, n_rows, n_cols))
    for b in range(n_batch):
        for r in range(n_rows):
            for c in range(n_cols):
                # Accumulate the dot product of row r of X[b] and column c of Y[b].
                acc = 0.0
                for m in range(n_inner):
                    acc += X[b, r, m] * Y[b, m, c]
                result[b, r, c] = acc
    return result
@njit
def _vec_mul(X, Y):
    '''Batched matrix-vector product: for every batch entry b this computes
    X[b] @ Y[b] with explicit loops over the last two axes (numba friendly).
    '''
    n_batch = X.shape[0]
    n_rows = X.shape[1]
    n_cols = X.shape[2]
    result = np.zeros((n_batch, n_rows))
    for b in range(n_batch):
        for r in range(n_rows):
            # Dot product of row r of X[b] with the vector Y[b].
            acc = 0.0
            for c in range(n_cols):
                acc += X[b, r, c] * Y[b, c]
            result[b, r] = acc
    return result
@njit
def _mat_trans(X):
    '''Batched transpose: swaps the last two axes of a 3-D array, one batch
    entry at a time, with explicit loops (numba friendly).
    '''
    n_batch = X.shape[0]
    n_rows = X.shape[1]
    n_cols = X.shape[2]
    result = np.zeros((n_batch, n_cols, n_rows))
    for b in range(n_batch):
        for r in range(n_rows):
            for c in range(n_cols):
                result[b, c, r] = X[b, r, c]
    return result
@njit
def _mat_inverse(X):
    '''Batched inverse of a 3-D array on the last two axes.

    Note: this actually uses `linalg.pinv`, i.e. the Moore-Penrose
    pseudo-inverse, so singular matrices do not raise.
    '''
    result = np.zeros(X.shape)
    for b in range(X.shape[0]):
        result[b] = linalg.pinv(X[b])
    return result
class KalmanFilter(object):
    """Kalman filter/smoother that operates on whole chunks of tracks at once."""
    def smooth(self, dut_planes, z_sorted_dut_indices, thetas, observations, select_fit_duts,
               transition_offsets, observation_matrices, observation_offsets, observation_covariances,
               initial_state, initial_state_covariance):
        """Apply the Kalman Smoother to the observations. In the first step a filtering is done,
        afterwards a smoothing is done. Calculation is done on whole track chunk with size chunk_size.
        Parameters
        ----------
        dut_planes : list
            List of DUT parameters (material_budget, translation_x, translation_y, translation_z, rotation_alpha, rotation_beta, rotation_gamma).
        z_sorted_dut_indices : list
            List of DUT indices in the order reflecting their z position.
        thetas : list
            List of scattering angle root mean squares (RMS).
        observations : [chunk_size, n_timesteps, n_dim_obs] array
            observations (measurements) from times [0...n_timesteps-1]. If any of observations is masked,
            then observations[:, t] will be treated as a missing observation
            and will not be included in the filtering step.
        select_fit_duts : iterable
            List of DUTs which should be included in Kalman Filter. DUTs which are not in list
            were treated as missing measurements and will not be included in the Filtering step.
        transition_offsets : [chunk_size, n_timesteps-1, n_dim_state] array-like
            offsets of transition matrices.
        observation_matrices : [chunk_size, n_timesteps, n_dim_obs, n_dim_state] array-like
            observation matrices.
        observation_offsets : [chunk_size, n_timesteps, n_dim_obs] array-like
            offsets of observations.
        observation_covariances : [chunk_size, n_timesteps, n_dim_obs, n_dim_obs] array-like
            covariance matrices of observation matrices.
        initial_state : [chunk_size, n_dim_state] array-like
            initial value of state.
        initial_state_covariance : [chunk_size, n_dim_state, n_dim_state] array-like
            initial value for observation covariance matrices.
        Returns
        -------
        smoothed_states : [chunk_size, n_timesteps, n_dim_state]
            smoothed states for times [0...n_timesteps-1].
        smoothed_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
            covariance matrices of smoothed states for times [0...n_timesteps-1].
        """
        # express transition matrices
        # transition matrices are filled already here.
        # If alignment is used, transition matrices are updated (in Kalman Filter) before each prediction step in order to take
        # rotations of planes into account.
        n_duts = len(dut_planes)
        chunk_size = observations.shape[0]
        n_dim_state = transition_offsets.shape[2]
        # express transition matrix
        transition_matrices = np.full((chunk_size, n_duts, n_dim_state, n_dim_state), fill_value=np.nan, dtype=np.float64)
        # express transition covariance matrix
        transition_covariances = np.full((chunk_size, n_duts, n_dim_state, n_dim_state), fill_value=np.nan, dtype=np.float64)
        # Fill the columns of the straight-line transition model.  The np.nan
        # entries are placeholders that are computed during filtering (see the
        # comment about alignment above).
        transition_matrices[:, :, :, 0] = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        transition_matrices[:, :, :, 1] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
        transition_matrices[:, :, :, 2] = np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        transition_matrices[:, :, :, 3] = np.array([[np.nan] * n_duts,
                                                    [0] * n_duts,
                                                    [0] * n_duts,
                                                    [1] * n_duts,
                                                    [0] * n_duts,
                                                    [0] * n_duts]).T
        transition_matrices[:, :, :, 4] = np.array([[0] * n_duts,
                                                    [np.nan] * n_duts,
                                                    [0] * n_duts,
                                                    [0] * n_duts,
                                                    [1] * n_duts,
                                                    [0] * n_duts]).T
        transition_matrices[:, :, :, 5] = np.array([[0] * n_duts,
                                                    [0] * n_duts,
                                                    [np.nan] * n_duts,
                                                    [0] * n_duts,
                                                    [0] * n_duts,
                                                    [1] * n_duts]).T
        # express transition covariance matrices, according to http://web-docs.gsi.de/~ikisel/reco/Methods/CovarianceMatrices-NIMA329-1993.pdf
        transition_covariances[:, :, :, 0] = np.array([[np.nan] * n_duts,
                                                       [0] * n_duts,
                                                       [0] * n_duts,
                                                       [np.nan] * n_duts,
                                                       [0] * n_duts,
                                                       [0] * n_duts]).T
        transition_covariances[:, :, :, 1] = np.array([[0] * n_duts,
                                                       [np.nan] * n_duts,
                                                       [0] * n_duts,
                                                       [0] * n_duts,
                                                       [np.nan] * n_duts,
                                                       [0] * n_duts]).T
        transition_covariances[:, :, :, 2] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        # The squared scattering-angle RMS per DUT enters the slope terms of
        # the process noise.
        transition_covariances[:, :, :, 3] = np.array([[np.nan] * n_duts,
                                                       [0] * n_duts,
                                                       [0] * n_duts,
                                                       np.square(thetas),
                                                       [0] * n_duts,
                                                       [0] * n_duts]).T
        transition_covariances[:, :, :, 4] = np.array([[0] * n_duts,
                                                       [np.nan] * n_duts,
                                                       [0] * n_duts,
                                                       [0] * n_duts,
                                                       np.square(thetas),
                                                       [0] * n_duts]).T
        transition_covariances[:, :, :, 5] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        # Forward pass: filter the observations plane by plane.  _filter also
        # returns the (possibly updated) transition matrices used below.
        predicted_states, predicted_state_covariances, _, filtered_states, filtered_state_covariances, transition_matrices = _filter(
            dut_planes=dut_planes,
            z_sorted_dut_indices=z_sorted_dut_indices,
            thetas=thetas,
            select_fit_duts=select_fit_duts,
            observations=observations,
            transition_matrices=transition_matrices,
            observation_matrices=observation_matrices,
            transition_covariances=transition_covariances,
            observation_covariances=observation_covariances,
            transition_offsets=transition_offsets,
            observation_offsets=observation_offsets,
            initial_state=initial_state,
            initial_state_covariance=initial_state_covariance)
        # Backward pass: smooth the filtered states.  The smoothing gains are
        # not needed by callers and are discarded.
        smoothed_states, smoothed_state_covariances, smoothed_kalman_gains = _smooth(
            dut_planes=dut_planes,
            z_sorted_dut_indices=z_sorted_dut_indices,
            transition_matrices=transition_matrices,
            filtered_states=filtered_states,
            filtered_state_covariances=filtered_state_covariances,
            predicted_states=predicted_states,
            predicted_state_covariances=predicted_state_covariances)
        return smoothed_states, smoothed_state_covariances
| 60.259649 | 403 | 0.646122 |
ace8578b3163922eedb309e085e8db48a1aa9a80 | 1,546 | py | Python | h0rton/tdlmc_utils/tdlmc_metrics.py | jiwoncpark/h0rton | 2541885d70d090fdb777339cfb77a3a9f3e7996d | [
"MIT"
] | 4 | 2020-12-02T02:18:08.000Z | 2021-11-25T21:56:33.000Z | h0rton/tdlmc_utils/tdlmc_metrics.py | jiwoncpark/h0rton | 2541885d70d090fdb777339cfb77a3a9f3e7996d | [
"MIT"
] | 25 | 2019-10-17T08:18:38.000Z | 2020-12-26T09:38:05.000Z | h0rton/tdlmc_utils/tdlmc_metrics.py | jiwoncpark/h0rton | 2541885d70d090fdb777339cfb77a3a9f3e7996d | [
"MIT"
] | 1 | 2020-12-03T02:14:12.000Z | 2020-12-03T02:14:12.000Z | import numpy as np
__all__ = ['get_goodness', 'get_precision', 'get_accuracy', 'format_submission']
def format_submission(summary):
    """Format the summary into submission form for getting the TDLMC metrics cornerplot.

    Parameters
    ----------
    summary : object
        the inference summary to be formatted (schema TBD)

    Raises
    ------
    NotImplementedError
        always -- the formatting step has not been implemented yet.  The
        previous silent ``pass`` returned None, which let callers continue
        with a missing submission; failing loudly is safer for a stub.
    """
    raise NotImplementedError("format_submission is not implemented yet.")
def get_goodness(h0_means, h0_errors, true_h0):
    """Goodness of fit (chi square): mean squared, error-normalised deviation
    of the central H0 estimates from the truth.

    Parameters
    ----------
    h0_means : np.array
        central estimate of H0 for each lens
    h0_errors : np.array
        errors corresponding to the `h0_means`
    true_h0 : np.array or float
        the true H0

    Returns
    -------
    float
        the goodness of fit metric
    """
    # "Pull" of each lens: deviation from truth in units of its quoted error.
    pulls = (h0_means - true_h0)/h0_errors
    chi_sq = np.mean(pulls**2.0)
    return chi_sq
def get_precision(h0_errors, true_h0):
    """Precision metric: average quoted error relative to the true H0,
    i.e. how well-constrained were the estimates on average?

    Parameters
    ----------
    h0_errors : np.array
        errors corresponding to the `h0_means`
    true_h0 : np.array or float
        the true H0

    Returns
    -------
    float
        the precision metric
    """
    relative_errors = h0_errors/true_h0
    return np.mean(relative_errors)
def get_accuracy(h0_means, true_h0):
    """Get the accuracy, i.e. how close were the central estimates to the truth?
    Parameters
    ----------
    h0_means : np.array
        central estimate of H0 for each lens
    true_h0 : np.array or float
        the true H0
    Returns
    -------
    float
        the accuracy metric
    """
    # NOTE(review): the local is named `precision` (copy-paste from
    # get_precision above) but it holds the accuracy metric, i.e. the mean
    # fractional bias of the estimates; consider renaming for clarity.
    precision = np.mean((h0_means - true_h0)/true_h0)
    return precision | 22.735294 | 87 | 0.626779 |
ace857c1d8110df3dd70e721a94141cb67ba4865 | 1,104 | py | Python | SnookR/api/urls.py | eSmelser/SnookR | 39b841e9a3e9e6b5d94904e0ccf37faef0ec712d | [
"MIT"
] | null | null | null | SnookR/api/urls.py | eSmelser/SnookR | 39b841e9a3e9e6b5d94904e0ccf37faef0ec712d | [
"MIT"
] | 12 | 2017-07-12T02:41:32.000Z | 2017-12-30T23:08:00.000Z | SnookR/api/urls.py | eSmelser/SnookR | 39b841e9a3e9e6b5d94904e0ccf37faef0ec712d | [
"MIT"
] | 1 | 2017-07-17T14:56:16.000Z | 2017-07-17T14:56:16.000Z | from django.conf.urls import url
from api import views
# URL routes for the API app; each route delegates to a class-based view in
# api.views.  Order matters: Django resolves against the first matching regex.
urlpatterns = [
    url(r'^auth/user/$', views.UserView.as_view(), name='user'),
    url(r'^users/$', views.UserListView.as_view(), name='user_list'),
    url(r'^team/$', views.TeamView.as_view(), name='team'),
    url(r'^invites/$', views.TeamInviteListView.as_view(), name='invite_list'),
    url(r'^invites/(?P<pk>[0-9]+)/$', views.TeamInviteUpdateView.as_view(), name='invite'),
    url(r'^unregistered-players/$', views.NonUserPlayerListCreateView.as_view(), name='unregistered_players'),
    url(r'^sessions/$', views.SessionListView.as_view(), name='sessions'),
    url(r'^session-events/$', views.SessionEventListView.as_view(), name='session_events'),
    url(r'^subs/$', views.SubListView.as_view(), name='sub_list'),
    url(r'^search-user/$', views.SearchUserView.as_view(), name='search_user'),
    url(r'^session-event-invites/$', views.SessionEventInviteListView.as_view(), name='session_event_invite_list'),
    url(r'^session-event-invites/(?P<pk>[0-9]+)/$', views.SessionEventInviteView.as_view(), name='session_event_invite'),
]
| 61.333333 | 121 | 0.697464 |
ace8595ebb7baf71778943f766c7b0c1bb07bfc1 | 1,590 | py | Python | 6310545566_Phawit_ex6/ex6_files/try2.py | HelloYeew/helloyeew-computer-programming-i | cf8315bdc4b8e20ba410e0745778595805f51afb | [
"MIT"
] | null | null | null | 6310545566_Phawit_ex6/ex6_files/try2.py | HelloYeew/helloyeew-computer-programming-i | cf8315bdc4b8e20ba410e0745778595805f51afb | [
"MIT"
] | null | null | null | 6310545566_Phawit_ex6/ex6_files/try2.py | HelloYeew/helloyeew-computer-programming-i | cf8315bdc4b8e20ba410e0745778595805f51afb | [
"MIT"
] | 1 | 2020-12-07T10:05:44.000Z | 2020-12-07T10:05:44.000Z | import csv
# Load Cities.csv with csv.DictReader: every row becomes a dict appended to
# the cities_data list.
cities_data = []
with open('Cities.csv', 'r') as f:
    cities_data.extend(csv.DictReader(f))
# Load Countries.csv with csv.DictReader: every row becomes a dict appended to
# the countries_data list.
countries_data = []
with open('Countries.csv', 'r') as f:
    countries_data.extend(csv.DictReader(f))
# Load Titanic.csv the same way into titanic_data.
titanic_data = []
with open('Titanic.csv') as f:
    titanic_data.extend(csv.DictReader(f))
def twin_list(titanic_data):
    """Return pairs of passengers who are likely twin children.

    Two passengers are paired when they share a last name, have the same
    age, are both under 18, and have different first names.  Each pair is
    reported once, as a tuple ("last first", "last first"), in input order.

    NOTE(review): the original docstring also mentioned "same place of
    embarkment", but the code never checked it; that behaviour is kept.

    The previous implementation scanned all ordered pairs and de-duplicated
    by first name only, so two unrelated children sharing a first name could
    suppress legitimate pairs.  Iterating unordered index pairs (j > i)
    removes the need for the `already_checked` list entirely.

    Parameters
    ----------
    titanic_data : list of dict
        Passenger records with at least the keys "first", "last" and "age".
        "age" is a string; records with an empty age are skipped.

    Returns
    -------
    list of tuple
        One ("last first", "last first") tuple per candidate twin pair.
    """
    twins = []
    n = len(titanic_data)
    for i in range(n):
        person = titanic_data[i]
        if person["age"] == "":
            continue
        age = float(person["age"])
        if age >= 18:
            continue
        for j in range(i + 1, n):
            other = titanic_data[j]
            if other["age"] == "" or other["first"] == person["first"]:
                continue
            if other["last"] != person["last"]:
                continue
            # Same (numeric) age and implicitly also under 18.
            if float(other["age"]) == age:
                twins.append((f"{person['last']} {person['first']}",
                              f"{other['last']} {other['first']}"))
    return twins
# Report the candidate twin pairs found among the Titanic passengers.
print(twin_list(titanic_data)) | 44.166667 | 283 | 0.649686 |
ace85a03b87f9a6ac7458937043edb47ca4520ad | 9,501 | py | Python | jesse/indicators/pattern_recognition.py | discohead/jesse | 5f025cc72adb33132b75a516f74f96b52ca12af3 | [
"MIT"
] | null | null | null | jesse/indicators/pattern_recognition.py | discohead/jesse | 5f025cc72adb33132b75a516f74f96b52ca12af3 | [
"MIT"
] | null | null | null | jesse/indicators/pattern_recognition.py | discohead/jesse | 5f025cc72adb33132b75a516f74f96b52ca12af3 | [
"MIT"
] | 1 | 2021-03-09T19:51:14.000Z | 2021-03-09T19:51:14.000Z | from typing import Union
import numpy as np
import talib
from jesse.helpers import get_config
def pattern_recognition(candles: np.ndarray, pattern_type: str, penetration: int = 0,
                        sequential: bool = False) -> Union[int, np.ndarray]:
    """
    Pattern Recognition

    Dispatches to the matching TA-Lib candlestick pattern function via
    ``getattr`` instead of a 61-branch if/elif chain.  The candle columns are
    passed to TA-Lib in its (open, high, low, close) order as
    ``candles[:, 1]``, ``candles[:, 3]``, ``candles[:, 4]``, ``candles[:, 2]``,
    exactly as before.

    :param candles: np.ndarray
    :param pattern_type: str - name of a supported TA-Lib ``CDL*`` function
    :param penetration: int - default = 0; only used by the patterns listed
        in ``penetration_patterns`` below
    :param sequential: bool - default=False
    :return: int | np.ndarray
    :raises ValueError: if ``pattern_type`` is not a supported pattern name
    """
    supported_patterns = frozenset((
        "CDL2CROWS", "CDL3BLACKCROWS", "CDL3INSIDE", "CDL3LINESTRIKE",
        "CDL3OUTSIDE", "CDL3STARSINSOUTH", "CDL3WHITESOLDIERS",
        "CDLABANDONEDBABY", "CDLADVANCEBLOCK", "CDLBELTHOLD", "CDLBREAKAWAY",
        "CDLCLOSINGMARUBOZU", "CDLCONCEALBABYSWALL", "CDLCOUNTERATTACK",
        "CDLDARKCLOUDCOVER", "CDLDOJI", "CDLDOJISTAR", "CDLDRAGONFLYDOJI",
        "CDLENGULFING", "CDLEVENINGDOJISTAR", "CDLEVENINGSTAR",
        "CDLGAPSIDESIDEWHITE", "CDLGRAVESTONEDOJI", "CDLHAMMER",
        "CDLHANGINGMAN", "CDLHARAMI", "CDLHARAMICROSS", "CDLHIGHWAVE",
        "CDLHIKKAKE", "CDLHIKKAKEMOD", "CDLHOMINGPIGEON",
        "CDLIDENTICAL3CROWS", "CDLINNECK", "CDLINVERTEDHAMMER", "CDLKICKING",
        "CDLKICKINGBYLENGTH", "CDLLADDERBOTTOM", "CDLLONGLEGGEDDOJI",
        "CDLLONGLINE", "CDLMARUBOZU", "CDLMATCHINGLOW", "CDLMATHOLD",
        "CDLMORNINGDOJISTAR", "CDLMORNINGSTAR", "CDLONNECK", "CDLPIERCING",
        "CDLRICKSHAWMAN", "CDLRISEFALL3METHODS", "CDLSEPARATINGLINES",
        "CDLSHOOTINGSTAR", "CDLSHORTLINE", "CDLSPINNINGTOP",
        "CDLSTALLEDPATTERN", "CDLSTICKSANDWICH", "CDLTAKURI", "CDLTASUKIGAP",
        "CDLTHRUSTING", "CDLTRISTAR", "CDLUNIQUE3RIVER", "CDLUPSIDEGAP2CROWS",
        "CDLXSIDEGAP3METHODS",
    ))
    # Patterns whose TA-Lib signature takes the extra `penetration` argument.
    penetration_patterns = frozenset((
        "CDLABANDONEDBABY", "CDLDARKCLOUDCOVER", "CDLEVENINGDOJISTAR",
        "CDLEVENINGSTAR", "CDLMATHOLD", "CDLMORNINGDOJISTAR",
        "CDLMORNINGSTAR",
    ))
    if pattern_type not in supported_patterns:
        raise ValueError('pattern type string not recognised')
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    # For a single-value result only the warmup window is needed.
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]
    pattern_func = getattr(talib, pattern_type)
    kwargs = {"penetration": penetration} if pattern_type in penetration_patterns else {}
    res = pattern_func(candles[:, 1], candles[:, 3], candles[:, 4], candles[:, 2], **kwargs)
    # TA-Lib returns -100/0/100; normalise to -1/0/1 as before.
    return res / 100 if sequential else res[-1] / 100
| 61.296774 | 119 | 0.609831 |
ace85a0c21d24551f9da622dd089c7ba54cece2b | 8,537 | bzl | Python | container/new_pull.bzl | xiaohegong/rules_docker | b25df18cb31cebaeec42359d5945a276b0ee775b | [
"Apache-2.0"
] | 1 | 2019-06-14T09:54:13.000Z | 2019-06-14T09:54:13.000Z | container/new_pull.bzl | xiaohegong/rules_docker | b25df18cb31cebaeec42359d5945a276b0ee775b | [
"Apache-2.0"
] | null | null | null | container/new_pull.bzl | xiaohegong/rules_docker | b25df18cb31cebaeec42359d5945a276b0ee775b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of container_pull based on google/containerregistry using google/go-containerregistry.
This wraps the rulesdocker.go.cmd.puller.puller executable in a
Bazel rule for downloading base images without a Docker client to
construct new images.
"""
# Attributes shared by the container_pull repository rules defined below.
# Private `_puller_*` labels point at the prebuilt go-containerregistry
# puller binaries fetched for each host OS.
_container_pull_attrs = {
    "architecture": attr.string(
        default = "amd64",
        doc = "(optional) Which CPU architecture to pull if this image " +
              "refers to a multi-platform manifest list, default 'amd64'.",
    ),
    "cpu_variant": attr.string(
        doc = "Which CPU variant to pull if this image refers to a " +
              "multi-platform manifest list.",
    ),
    "digest": attr.string(
        doc = "(optional) The digest of the image to pull.",
    ),
    "docker_client_config": attr.string(
        doc = "A custom directory for the docker client config.json. " +
              "If DOCKER_CONFIG is not specified, the value of the " +
              "DOCKER_CONFIG environment variable will be used. DOCKER_CONFIG" +
              " is not defined, the home directory will be used.",
        mandatory = False,
    ),
    "format": attr.string(
        default = "oci",
        values = [
            "oci",
            "docker",
        ],
        doc = "(optional) The format of the image to be pulled, default to 'OCI' (OCI Layout Format) " +
              "or option for 'Docker' (tarball compatible with `docker load` command).",
    ),
    "os": attr.string(
        default = "linux",
        doc = "(optional) Which os to pull if this image refers to a " +
              "multi-platform manifest list, default 'linux'.",
    ),
    "os_features": attr.string_list(
        doc = "(optional) Specifies os features when pulling a multi-platform " +
              "manifest list.",
    ),
    "os_version": attr.string(
        doc = "(optional) Which os version to pull if this image refers to a " +
              "multi-platform manifest list.",
    ),
    "platform_features": attr.string_list(
        doc = "(optional) Specifies platform features when pulling a " +
              "multi-platform manifest list.",
    ),
    "registry": attr.string(
        mandatory = True,
        doc = "The registry from which we are pulling.",
    ),
    "repository": attr.string(
        mandatory = True,
        doc = "The name of the image.",
    ),
    "tag": attr.string(
        default = "latest",
        doc = "(optional) The tag of the image, default to 'latest' " +
              "if this and 'digest' remain unspecified.",
    ),
    "_puller_darwin": attr.label(
        executable = True,
        default = Label("@go_puller_darwin//file:downloaded"),
        cfg = "host",
    ),
    "_puller_linux": attr.label(
        executable = True,
        default = Label("@go_puller_linux//file:downloaded"),
        cfg = "host",
    ),
}
def _impl(repository_ctx):
    """Core implementation of container_pull."""
    # Add an empty top-level BUILD file.
    repository_ctx.file("BUILD", "")
    if repository_ctx.attr.format == "docker":
        repository_ctx.file("image/BUILD", """package(default_visibility = ["//visibility:public"])
exports_files(["image.tar"])""")
    # Create symlinks to the appropriate config.json and layers in the pulled OCI image in the image-oci directory.
    # Generates container_import rule which is able to comprehend OCI layout via the symlinks.
    else:
        repository_ctx.file("image/BUILD", """package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_docker//container:import.bzl", "container_import")
container_import(
    name = "image",
    config = "config.json",
    layers = glob(["*.tar.gz"]),
)
exports_files(["image.digest", "digest"])
""")
    # Currently exports all files pulled by the binary and will not be depended on by other rules_docker rules.
    repository_ctx.file("image-oci/BUILD", """package(default_visibility = ["//visibility:public"])
exports_files(glob(["**"]))
filegroup(
    name = "image-oci",
    srcs = glob(["**"]),
)""")
    # Pick the puller binary for the host OS: linux by default, darwin on macOS.
    puller = repository_ctx.attr._puller_linux
    if repository_ctx.os.name.lower().startswith("mac os"):
        puller = repository_ctx.attr._puller_darwin
    # Build the puller command line from the rule attributes.
    args = [
        repository_ctx.path(puller),
        "-directory",
        repository_ctx.path(""),
        "-format",
        repository_ctx.attr.format,
        "-os",
        repository_ctx.attr.os,
        "-os-version",
        repository_ctx.attr.os_version,
        "-os-features",
        " ".join(repository_ctx.attr.os_features),
        "-architecture",
        repository_ctx.attr.architecture,
        "-variant",
        repository_ctx.attr.cpu_variant,
        "-features",
        " ".join(repository_ctx.attr.platform_features),
    ]
    # Use the custom docker client config directory if specified.
    if repository_ctx.attr.docker_client_config != "":
        args += ["-client-config-dir", "{}".format(repository_ctx.attr.docker_client_config)]
    # Optional on-disk cache; expand a leading "~" from $HOME ourselves since
    # the puller does not do shell-style tilde expansion.
    cache_dir = repository_ctx.os.environ.get("DOCKER_REPO_CACHE")
    if cache_dir:
        if cache_dir.startswith("~/") and "HOME" in repository_ctx.os.environ:
            cache_dir = cache_dir.replace("~", repository_ctx.os.environ["HOME"], 1)
        args += [
            "-cache",
            cache_dir,
        ]
    # If a digest is specified, then pull by digest. Otherwise, pull by tag.
    if repository_ctx.attr.digest:
        args += [
            "-name",
            "{registry}/{repository}@{digest}".format(
                registry = repository_ctx.attr.registry,
                repository = repository_ctx.attr.repository,
                digest = repository_ctx.attr.digest,
            ),
        ]
    else:
        args += [
            "-name",
            "{registry}/{repository}:{tag}".format(
                registry = repository_ctx.attr.registry,
                repository = repository_ctx.attr.repository,
                tag = repository_ctx.attr.tag,
            ),
        ]
    # PULLER_TIMEOUT (seconds) bounds how long repository_ctx.execute may run.
    kwargs = {}
    if "PULLER_TIMEOUT" in repository_ctx.os.environ:
        kwargs["timeout"] = int(repository_ctx.os.environ.get("PULLER_TIMEOUT"))
    result = repository_ctx.execute(args, **kwargs)
    if result.return_code:
        fail("Pull command failed: %s (%s)" % (result.stderr, " ".join([str(a) for a in args])))
    # Echo the resolved attributes back (plus the pulled digest below) so
    # Bazel can reproduce this repository deterministically.
    updated_attrs = {
        k: getattr(repository_ctx.attr, k)
        for k in _container_pull_attrs.keys()
    }
    updated_attrs["name"] = repository_ctx.name
    digest_result = repository_ctx.execute(["cat", repository_ctx.path("image/digest")])
    if digest_result.return_code:
        fail("Failed to read digest: %s" % digest_result.stderr)
    updated_attrs["digest"] = digest_result.stdout
    if repository_ctx.attr.digest and repository_ctx.attr.digest != updated_attrs["digest"]:
        fail(("SHA256 of the image specified does not match SHA256 of the pulled image. " +
              "Expected {}, but pulled image with {}. " +
              "It is possible that you have a pin to a manifest list " +
              "which points to another image, if so, " +
              "change the pin to point at the actual Docker image").format(
            repository_ctx.attr.digest,
            updated_attrs["digest"],
        ))
    # Add image.digest for compatibility with container_digest, which generates
    # foo.digest for an image named foo.
    repository_ctx.symlink(repository_ctx.path("image/digest"), repository_ctx.path("image/image.digest"))
    return updated_attrs
# Expose the (attrs, implementation) pair as a struct for reuse by other
# macros/rules.
pull = struct(
    attrs = _container_pull_attrs,
    implementation = _impl,
)
# Pulls a container image.
# This rule pulls a container image into our intermediate format (OCI Image Layout).
# Listing the environment variables in `environ` makes Bazel re-run the rule
# when any of them changes.
new_container_pull = repository_rule(
    attrs = _container_pull_attrs,
    implementation = _impl,
    environ = [
        "DOCKER_REPO_CACHE",
        "HOME",
        "PULLER_TIMEOUT",
    ],
)
| 36.021097 | 119 | 0.62727 |
ace85ae586886e190bcca6d5b5f5133193496368 | 2,841 | py | Python | python/ki_vs_km.py | eladnoor/small-molecule-regulation | 83127f20859093a06ee493128d672ac7428cec83 | [
"MIT"
] | 3 | 2018-03-29T12:14:05.000Z | 2021-03-22T09:04:22.000Z | python/ki_vs_km.py | eladnoor/small-molecule-regulation | 83127f20859093a06ee493128d672ac7428cec83 | [
"MIT"
] | 9 | 2016-05-30T16:43:21.000Z | 2017-03-17T13:15:02.000Z | python/ki_vs_km.py | eladnoor/small-molecule-regulation | 83127f20859093a06ee493128d672ac7428cec83 | [
"MIT"
] | 1 | 2021-03-22T09:04:26.000Z | 2021-03-22T09:04:26.000Z | # Compare Ki and KM directly
# Load the cached Ki (regulation) and KM (Michaelis constant) tables and
# compare them per metabolite.
import numpy as np, pandas as pd, pdb, matplotlib.pyplot as plt, seaborn as sns, scipy.stats as st
sns.set_style('ticks')
from statsmodels.sandbox.stats.multicomp import multipletests as padjust
# Interactive plotting: close stale figures and switch on interactive mode so
# the script does not block on each figure.
plt.close('all')
plt.ion()
ki = pd.read_csv('../cache/regulation.csv',
                 header=0, index_col=0, encoding='Latin-1')
km = pd.read_csv('../cache/km.csv',
                 header=0, index_col=0, encoding='Latin-1')
# Drop negative values
km = km[km['KM_Value'] > 0]
ki = ki[ki['KI_Value'] > 0]
# Drop duplicate values
km = km.drop_duplicates(subset = ['KM_Value', 'EC_number', 'bigg.metabolite'])
ki = ki.drop_duplicates(subset = ['KI_Value', 'EC_number', 'bigg.metabolite'])
# Drop mutants
# (keep rows whose Commentary is missing, or mentions neither "mutant" nor
# "mutation")
ki = ki[(pd.isnull(ki['Commentary'])) |
        ((ki['Commentary'].str.find('mutant') == -1) &
         (ki['Commentary'].str.find('mutation') == -1))]
km = km[(pd.isnull(km['Commentary'])) |
        ((km['Commentary'].str.find('mutant') == -1) &
         (km['Commentary'].str.find('mutation') == -1))]
# Per-metabolite summary: mean Ki/KM plus the number of distinct enzymes
# (EC numbers) contributing measurements.
res = pd.DataFrame()
res['KI_Values'] = ki.groupby('bigg.metabolite')['KI_Value'].mean()
res['KI_Number'] = ki.groupby('bigg.metabolite')['EC_number'].nunique()
res['KM_Values'] = km.groupby('bigg.metabolite')['KM_Value'].mean()
res['KM_Number'] = km.groupby('bigg.metabolite')['EC_number'].nunique()
#res['Metabolite'] = km.groupby('bigg.metabolite')['name'].first()
# Drop rows where we don't have data for both
res = res.dropna()
# Keep only metabolites with at least 2 measurements of each
res = res[res['KI_Number'] > 1]
res = res[res['KM_Number'] > 1]
res['PValue'] = np.nan
# for each metabolite, if there is sufficient data, test
# NOTE(review): scipy.stats.mannwhitneyu's default `alternative` changed
# across scipy versions -- confirm the intended sidedness of this test.
for ii in res.index:
    kid = ki[ki['bigg.metabolite'] == ii]['KI_Value']
    kmd = km[km['bigg.metabolite'] == ii]['KM_Value']
    s,p = st.mannwhitneyu( kid,kmd )  # the statistic s is unused
    res.at[ii,'PValue'] = p
# Multiple-testing correction (Benjamini-Hochberg FDR) of the p-values.
res['QValue'] = padjust(res['PValue'],method = 'fdr_bh')[1]
res = res.sort_values('PValue')
# Scatter of mean Ki vs mean KM on log-log axes, padded by a factor 2 on both
# ends of the range.
maxval = 2*np.max( [res['KI_Values'].max(),res['KM_Values'].max()] )
minval = 0.5*np.min( [res['KI_Values'].min(),res['KM_Values'].min()] )
f,ax = plt.subplots(figsize = (8,8))
plt.loglog(res['KI_Values'],res['KM_Values'],'o')
plt.axis([minval,maxval,minval,maxval])
# Change index for readability
#res['KEGGID'] = res.index
#res.index = res['Metabolite']
# Calculate log ratio of values
res['Ratio'] = np.log2( res['KI_Values'] / res['KM_Values'] )
# Label only the metabolites that are significant at FDR < 10%.
for ii in res.index:
    if res.at[ii,'QValue'] < 0.1:
        plt.text(res.at[ii,'KI_Values'],res.at[ii,'KM_Values'],ii)
plt.xlabel('Mean KI')
plt.ylabel('Mean KM')
diag_line, = ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
# The first plot is misleading, make a volcano plot
res['-Log10Q'] = -np.log10(res['QValue'])
plt.figure('Volcano')
plt.plot(res['Ratio'],res['-Log10Q'],'o')
| 31.921348 | 98 | 0.641323 |
ace85b5979ed786a20c6a68bc90653f3fe0a9a14 | 530 | py | Python | idioms_examples/magic_methods/magic_methods.py | jjmerchante/Pythonic-webserver | b57fe35975b60d7fe8d04d63094068e54d9e3ee8 | [
"Apache-2.0"
] | 3 | 2017-11-06T14:00:30.000Z | 2018-04-07T08:02:40.000Z | idioms_examples/magic_methods/magic_methods.py | jjmerchante/Pythonic-webserver | b57fe35975b60d7fe8d04d63094068e54d9e3ee8 | [
"Apache-2.0"
] | null | null | null | idioms_examples/magic_methods/magic_methods.py | jjmerchante/Pythonic-webserver | b57fe35975b60d7fe8d04d63094068e54d9e3ee8 | [
"Apache-2.0"
] | null | null | null | class Rectangle():
def __init__(self, height, width):
self.height = height
self.width = width
def __eq__(self, rect):
return (self.height * self.width) == (rect.height * rect.width)
def __lt__(self, rect):
return (self.height * self.width) < (rect.height * rect.width)
def __gt__(self, rect):
return (self.height * self.width) > (rect.height * rect.width)
# Quick demo: comparisons are driven by area (18 vs. 15 here).
r1 = Rectangle(3, 6)
r2 = Rectangle(3, 5)
print(r1 > r2)   # True
print(r1 < r2)   # False
print(r1 == r2)  # False
| 25.238095 | 71 | 0.603774 |
ace85bdf081e3b8ac3935697a194d618d061cd35 | 81,011 | py | Python | test/quantization/fx/test_numeric_suite_fx.py | sanchitintel/pytorch | 416f59308023b5d98f6ea4ecdd0bcd3829edb7a7 | [
"Intel"
] | 1 | 2021-03-13T02:53:40.000Z | 2021-03-13T02:53:40.000Z | test/quantization/fx/test_numeric_suite_fx.py | sanchitintel/pytorch | 416f59308023b5d98f6ea4ecdd0bcd3829edb7a7 | [
"Intel"
] | 1 | 2021-02-23T21:52:37.000Z | 2021-02-23T21:52:37.000Z | test/quantization/fx/test_numeric_suite_fx.py | sanchitintel/pytorch | 416f59308023b5d98f6ea4ecdd0bcd3829edb7a7 | [
"Intel"
] | null | null | null | import copy
import math
import operator
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.ao.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.ao.quantization.quantize_fx import (
convert_fx,
prepare_fx,
prepare_qat_fx,
)
from torch.testing._internal.common_quantization import (
ConvBnModel,
ConvBnReLUModel,
ConvModel,
QuantizationTestCase,
skipIfNoFBGEMM,
SingleLayerLinearDynamicModel,
SingleLayerLinearModel,
LSTMwithHiddenDynamicModel,
SparseNNModel,
skip_if_no_torchvision,
)
from torch.ao.quantization.quantization_mappings import (
get_default_static_quant_module_mappings,
get_default_dynamic_quant_module_mappings,
get_default_float_to_quantized_operator_mappings,
)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.ao.quantization.fx.pattern_utils import get_default_quant_patterns
import torch.ao.quantization.fx.quantization_patterns as qp
from torch.ao.ns.fx.pattern_utils import (
get_type_a_related_to_b,
)
from torch.ao.ns.fx.graph_matcher import (
get_matching_subgraph_pairs,
GraphMatchingException,
)
from torch.ao.ns.fx.utils import (
compute_sqnr,
compute_normalized_l2_error,
compute_cosine_similarity,
)
from torch.ao.ns.fx.mappings import (
get_node_type_to_io_type_map,
get_unmatchable_types_map,
get_base_name_to_sets_of_related_ops,
get_base_name_for_op,
add_op_to_sets_of_related_ops,
)
from torch.ao.ns.fx.weight_utils import (
get_op_to_type_to_weight_extraction_fn,
)
from torch.ao.ns._numeric_suite_fx import (
extract_weights,
_extract_weights_impl,
add_loggers,
_add_loggers_impl,
OutputLogger,
add_shadow_loggers,
_add_shadow_loggers_impl,
extract_logger_info,
extract_shadow_logger_info,
extend_logger_results_with_comparison,
)
# Note: these models are not for use outside of this file. While it's good
# to reuse code, we also need to be able to iterate on tests
# quickly when debugging. If a test model has a large number of callsites
# across various different files, speed of debugging on individual test cases
# decreases.
class LinearReluFunctional(nn.Module):
    """Functional linear layer followed by a functional relu."""
    def __init__(self):
        super().__init__()
        self.w1 = nn.Parameter(torch.empty(4, 4))
        self.b1 = nn.Parameter(torch.zeros(4))
        # Same default weight init that nn.Linear applies.
        torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
    def forward(self, x):
        return F.relu(F.linear(x, self.w1, self.b1))
class LinearFunctional(nn.Module):
    """Single functional linear layer, no activation."""
    def __init__(self):
        super().__init__()
        self.w1 = nn.Parameter(torch.empty(4, 4))
        self.b1 = nn.Parameter(torch.zeros(4))
        # Same default weight init that nn.Linear applies.
        torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
    def forward(self, x):
        return F.linear(x, self.w1, self.b1)
class LinearReluLinearFunctional(nn.Module):
    """linear -> relu -> linear, sharing one weight/bias pair."""
    def __init__(self):
        super().__init__()
        self.w = nn.Parameter(torch.Tensor(4, 4))
        self.b = nn.Parameter(torch.zeros(4))
        torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
    def forward(self, x):
        hidden = F.relu(F.linear(x, self.w, self.b))
        return F.linear(hidden, self.w, self.b)
class AddMulFunctional(nn.Module):
    """Exercises add/mul with a scalar on each side, then tensor-tensor."""
    def forward(self, x, y):
        out = (x + 1.0) * 1.0       # scalar on the right
        out = 1.0 * (1.0 + out)     # scalar on the left
        out = (out + y) * y         # tensor-tensor
        return out
class AllConvAndLinearFusionModules(torch.nn.Module):
    # One instance of every conv/linear module pattern that quantization can
    # fuse: bare op, op+relu, op+bn, op+bn+relu (bn variants matter for QAT
    # only). Used by weight-extraction tests that expect 14 matchable weights.
    # NOTE: submodule construction order also fixes the RNG consumption order
    # for seeded tests -- do not reorder.
    def __init__(self):
        super().__init__()
        # conv1d
        self.conv1d_0 = nn.Conv1d(1, 1, 1)
        # conv1d - relu
        self.conv1d_1 = nn.Conv1d(1, 1, 1)
        self.relu_0 = nn.ReLU()
        # conv1d - bn (qat only)
        self.conv1d_2 = nn.Conv1d(1, 1, 1)
        self.bn1d_0 = nn.BatchNorm1d(1)
        # conv1d - bn - relu (qat only)
        self.conv1d_3 = nn.Conv1d(1, 1, 1)
        self.bn1d_1 = nn.BatchNorm1d(1)
        self.relu_4 = nn.ReLU()
        # conv2d
        self.conv2d_0 = nn.Conv2d(1, 1, 1)
        # conv2d - relu
        self.conv2d_1 = nn.Conv2d(1, 1, 1)
        self.relu_1 = nn.ReLU()
        # conv2d - bn (qat only)
        self.conv2d_2 = nn.Conv2d(1, 1, 1)
        self.bn2d_0 = nn.BatchNorm2d(1)
        # conv2d - bn - relu (qat only)
        self.conv2d_3 = nn.Conv2d(1, 1, 1)
        self.bn2d_1 = nn.BatchNorm2d(1)
        self.relu_5 = nn.ReLU()
        # conv3d
        self.conv3d_0 = nn.Conv3d(1, 1, 1)
        # conv3d - relu
        self.conv3d_1 = nn.Conv3d(1, 1, 1)
        self.relu_2 = nn.ReLU()
        # conv3d - bn (qat only)
        self.conv3d_2 = nn.Conv3d(1, 1, 1)
        self.bn3d_0 = nn.BatchNorm3d(1)
        # conv3d - bn - relu (qat only)
        self.conv3d_3 = nn.Conv3d(1, 1, 1)
        self.bn3d_1 = nn.BatchNorm3d(1)
        self.relu_6 = nn.ReLU()
        # linear
        self.linear_0 = nn.Linear(1, 1)
        # linear - relu
        self.linear_1 = nn.Linear(1, 1)
        self.relu_3 = nn.ReLU()
    def forward(self, x):
        # The reshapes between sections adapt the single-element tensor to
        # the rank each op family expects (3d -> 4d -> 5d -> 2d).
        # conv1d
        x = self.conv1d_0(x)
        x = self.conv1d_1(x)
        x = self.relu_0(x)
        x = self.conv1d_2(x)
        x = self.bn1d_0(x)
        x = self.conv1d_3(x)
        x = self.bn1d_1(x)
        x = self.relu_4(x)
        # conv2d
        x = x.reshape(1, 1, 1, 1)
        x = self.conv2d_0(x)
        x = self.conv2d_1(x)
        x = self.relu_1(x)
        x = self.conv2d_2(x)
        x = self.bn2d_0(x)
        x = self.conv2d_3(x)
        x = self.bn2d_1(x)
        x = self.relu_5(x)
        # conv3d
        x = x.reshape(1, 1, 1, 1, 1)
        x = self.conv3d_0(x)
        x = self.conv3d_1(x)
        x = self.relu_2(x)
        x = self.conv3d_2(x)
        x = self.bn3d_0(x)
        x = self.conv3d_3(x)
        x = self.bn3d_1(x)
        x = self.relu_6(x)
        # linear
        x = x.reshape(1, 1)
        x = self.linear_0(x)
        x = self.linear_1(x)
        x = self.relu_3(x)
        return x
class AllConvFunctional(torch.nn.Module):
    # Functional (F.conv*) counterpart of the fusion-module test model:
    # for each of conv1d/2d/3d it runs conv, then conv+relu, using weights
    # and biases supplied by the caller and stored as Parameters so that
    # weight-extraction tests can find them (6 convs total).
    def __init__(self, weight1d, weight2d, weight3d, bias1d, bias2d, bias3d):
        super().__init__()
        self.weight1d = torch.nn.Parameter(weight1d)
        self.weight2d = torch.nn.Parameter(weight2d)
        self.weight3d = torch.nn.Parameter(weight3d)
        self.bias1d = torch.nn.Parameter(bias1d)
        self.bias2d = torch.nn.Parameter(bias2d)
        self.bias3d = torch.nn.Parameter(bias3d)
        # Identity-ish conv hyperparameters per dimensionality.
        self.stride1d = 1
        self.padding1d = 0
        self.dilation1d = 1
        self.stride2d = (1, 1)
        self.padding2d = (0, 0)
        self.dilation2d = (1, 1)
        self.groups = 1
        self.stride3d = (1, 1, 1)
        self.padding3d = (0, 0, 0)
        self.dilation3d = (1, 1, 1)
    def forward(self, x):
        x = F.conv1d(
            x, self.weight1d, self.bias1d, self.stride1d, self.padding1d,
            self.dilation1d, self.groups)
        x = F.conv1d(
            x, self.weight1d, self.bias1d, self.stride1d, self.padding1d,
            self.dilation1d, self.groups)
        x = F.relu(x)
        x = F.conv2d(
            x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
            self.dilation2d, self.groups)
        x = F.conv2d(
            x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
            self.dilation2d, self.groups)
        x = F.relu(x)
        x = F.conv3d(
            x, self.weight3d, self.bias3d, self.stride3d, self.padding3d,
            self.dilation3d, self.groups)
        x = F.conv3d(
            x, self.weight3d, self.bias3d, self.stride3d, self.padding3d,
            self.dilation3d, self.groups)
        x = F.relu(x)
        return x
# The helpers below are decorated with torch.fx.wrap so that FX symbolic
# tracing treats each one as an opaque leaf call instead of tracing into
# its body -- this lets tests exercise user-defined-function handling.
@torch.fx.wrap
def _wrapped_hardswish(x):
    return F.hardswish(x)
@torch.fx.wrap
def _wrapped_hardswish_fp16(x):
    # Simulates an fp16 kernel: dequantize, compute in fp32, cast to fp16.
    x = x.dequantize()
    x = F.hardswish(x)
    x = x.to(torch.float16)
    return x
@torch.fx.wrap
def _wrapped_sigmoid(x):
    return F.sigmoid(x)
@torch.fx.wrap
def _wrapped_linear(x, w, b):
    return F.linear(x, w, b)
class TestFXGraphMatcher(QuantizationTestCase):
    # Tests for get_matching_subgraph_pairs: verifies that corresponding
    # subgraphs of fp32 / prepared / quantized versions of the same model
    # are matched node-by-node, and that structurally different graphs
    # raise GraphMatchingException. Result keys follow the pattern
    # 'base_op_<name>_<index>'.
    @skipIfNoFBGEMM
    def test_simple_mod(self):
        # Single conv module: prepared vs converted should match as one pair.
        m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        conv_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
        expected_types = {
            conv_name_0: ((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nnq.Conv2d, nnq.Conv2d)),
        }
        self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
    @skipIfNoFBGEMM
    def test_simple_fun(self):
        # Single functional linear: F.linear should match toq.linear.
        class M(nn.Module):
            def __init__(self):
                super().__init__()
                self.w = nn.Parameter(torch.empty(1, 4))
                self.b = nn.Parameter(torch.zeros(1))
                torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
            def forward(self, x):
                return F.linear(x, self.w, self.b)
        m = M().eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        linear_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, F.linear) + '_0'
        expected_types = {
            linear_name_0:
                ((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear, toq.linear))
        }
        self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
    @skipIfNoFBGEMM
    def test_simple_fusion(self):
        # linear+relu fuses into toq.linear_relu; the fused subgraph should
        # still match the unfused fp32 one.
        m = LinearReluFunctional().eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        linear_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, F.linear) + '_0'
        expected_types = {
            linear_name_0:
                ((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear_relu, toq.linear_relu)),
        }
        self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
    @skipIfNoFBGEMM
    def test_simple_mod_multi(self):
        # Nested modules with repeated types.
        m = nn.Sequential(
            nn.Sequential(
                nn.Conv2d(1, 1, 1),
            ),
            nn.Conv2d(1, 1, 1),
        ).eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        # assume success if no exceptions
        results = get_matching_subgraph_pairs(mp, mq)
    @skipIfNoFBGEMM
    def test_simple_tensor_ops(self):
        class M(nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y):
                z = x + y
                return z
        m = M().eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        # assume success if no exceptions
        results = get_matching_subgraph_pairs(mp, mq)
    @skipIfNoFBGEMM
    def test_matching_failure_node_count(self):
        # verify that matching graphs with matching node types but
        # different counts of matchable nodes fails
        m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
        m2 = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
        mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig})
        mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig})
        with self.assertRaises(GraphMatchingException) as ex:
            results = get_matching_subgraph_pairs(mp1, mp2)
    @skipIfNoFBGEMM
    def test_matching_failure_node_type(self):
        # verify that matching graphs with non-matching node types fails
        m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
        m2 = nn.Sequential(nn.Linear(1, 1)).eval()
        mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig})
        mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig})
        with self.assertRaises(GraphMatchingException) as ex:
            results = get_matching_subgraph_pairs(mp1, mp2)
    @skipIfNoFBGEMM
    def test_nodes_before_cat(self):
        # verify that nodes before cat get matched
        class M(nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x0):
                x1 = torch.add(x0, 1.0)
                y1 = torch.add(x0, 1.0)
                x2 = torch.cat([x1, y1])
                return x2
        m = M().eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        cat_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.cat) + '_0'
        add_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.add) + '_0'
        add_name_1 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.add) + '_1'
        expected_types = {
            cat_name_0: ((torch.cat, torch.cat), (torch.cat, torch.cat)),
            add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
            add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
        }
        self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
    @skipIfNoFBGEMM
    def test_dict_return_type(self):
        # verify that we can traverse up nodes which return dictionaries
        class M(nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x0):
                x1 = torch.add(x0, 1.0)
                y1 = torch.add(x0, 1.0)
                z1 = torch.add(x0, 1.0)
                a1 = {'x1': x1, 'y1': (y1,), 'z1': [{'key': (z1,)}]}
                return a1
        m = M().eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        add_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.add) + '_0'
        add_name_1 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.add) + '_1'
        add_name_2 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.add) + '_2'
        expected_types = {
            add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
            add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
            add_name_2: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
        }
        self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
    @skipIfNoFBGEMM
    @unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, need dtype inference support")
    def test_nodes_with_equal_types_get_matched(self):
        class M(nn.Module):
            def __init__(self):
                super().__init__()
                self.conv1 = nn.Conv2d(1, 1, 1)
                self.conv2 = nn.Conv2d(1, 1, 1)
            def forward(self, x):
                x = self.conv1(x)
                x = self.conv2(x)
                x = torch.mul(x, x)
                x = torch.sigmoid(x)
                x = F.relu(x)
                return x
        m = M().eval()
        # prevent conv2 from getting quantized, so we can test
        # modules with equal types
        qconfig_dict = {
            '': torch.ao.quantization.default_qconfig,
            'module_name': [('conv2', None)],
        }
        mp = prepare_fx(m, qconfig_dict)
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        conv_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
        conv_name_1 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, nn.Conv2d) + '_1'
        mul_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.mul) + '_0'
        relu_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.relu) + '_0'
        sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
        # all of these should be matched
        expected_types = {
            conv_name_1:
                ((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nnq.Conv2d, nnq.Conv2d)),
            conv_name_0:
                ((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nn.Conv2d, nn.Conv2d)),
            mul_name_0: ((torch.mul, torch.ao.quantization.MinMaxObserver), (toq.mul, toq.mul)),
            relu_name_0: ((F.relu, torch.ao.quantization.MinMaxObserver), (F.relu, F.relu)),
            sigmoid_name_0:
                ((torch.sigmoid, torch.sigmoid), (torch.sigmoid, torch.sigmoid)),
        }
        self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
    @unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, need dtype inference support")
    def test_methods(self):
        """
        Verify that graph matching works on methods
        """
        class M(nn.Module):
            def forward(self, x):
                x = x.sigmoid()
                return x
        m1 = M().eval()
        m2 = M().eval()
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        m1p = prepare_fx(m1, qconfig_dict)
        m2p = prepare_fx(m2, qconfig_dict)
        results = get_matching_subgraph_pairs(m1p, m2p)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
        expected_types = {
            sigmoid_name_0:
                (('sigmoid', 'sigmoid'), ('sigmoid', 'sigmoid')),
        }
        self.assert_types_for_matched_subgraph_pairs(
            results, expected_types, m1p, m2p)
    def test_op_relationship_mapping(self):
        """
        Tests that the mapping of op relationships is complete.
        """
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        type_a_related_to_b = \
            get_type_a_related_to_b(base_name_to_sets_of_related_ops)
        # 1. check static quant module mappings
        static_quant_mod_mappings = get_default_static_quant_module_mappings()
        for fp32_type, int8_type in static_quant_mod_mappings.items():
            # skip quants and dequants, for the purposes of Numerical Suite
            types_to_skip = (
                torch.ao.quantization.QuantStub,
                torch.ao.quantization.DeQuantStub,
                nnq.FloatFunctional,
            )
            if fp32_type in types_to_skip:
                continue
            # verify relatedness
            in_type_a_related_to_b = \
                (fp32_type, int8_type) in type_a_related_to_b
            self.assertTrue(
                in_type_a_related_to_b,
                f"{fp32_type} and {int8_type} need a relationship mapping")
        # 2. check static quant op mappings
        static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
        for fp32_type, int8_type in static_quant_fun_mappings.items():
            # verify relatedness
            in_type_a_related_to_b = \
                (fp32_type, int8_type) in type_a_related_to_b
            self.assertTrue(
                in_type_a_related_to_b,
                f"{fp32_type} and {int8_type} need a relationship mapping")
        # 3. check dynamic quant mappings
        dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
        for fp32_type, int8_type in dynamic_quant_mappings.items():
            # TODO(future PR): enable correct weight extraction for these
            # and remove from this list.
            types_to_skip = (
                nn.GRUCell,
                nn.GRU,
                nn.LSTMCell,
                nn.RNNCell,
            )
            if fp32_type in types_to_skip:
                continue
            # verify relatedness
            in_type_a_related_to_b = \
                (fp32_type, int8_type) in type_a_related_to_b
            self.assertTrue(
                in_type_a_related_to_b,
                f"{fp32_type} and {int8_type} need a relationship mapping")
        # 4. go through the ops mapped to each QuantizeHandler type, and verify
        # correctness.
        def _op_in_base_sets_of_related_ops(op):
            for name, ops in base_name_to_sets_of_related_ops.items():
                if op in ops:
                    return True
            return False
        unmatchable_types_map = get_unmatchable_types_map()
        FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
        MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
        METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
        def _op_is_unmatchable(op):
            return (
                op in FUNS_UNMATCHABLE or
                op in MODS_UNMATCHABLE or
                op in METHS_UNMATCHABLE
            )
        default_quant_patterns = get_default_quant_patterns()
        for pattern, qhandler_cls in default_quant_patterns.items():
            # For fusion patterns (tuples) the base op is the last element.
            base_op = None
            if isinstance(pattern, tuple):
                base_op = pattern[-1]
            elif isinstance(pattern, str):
                base_op = pattern
            else:
                base_op = pattern
            qhandler_cls_all_ops_quantizeable = [
                qp.CatQuantizeHandler,
                qp.ConvReluQuantizeHandler,
                qp.LinearReLUQuantizeHandler,
                qp.BatchNormQuantizeHandler,
                qp.EmbeddingQuantizeHandler,
                qp.RNNDynamicQuantizeHandler,
            ]
            qhandler_cls_quant_op_same_signature = [
                qp.FixedQParamsOpQuantizeHandler,
                qp.CopyNodeQuantizeHandler,
                qp.GeneralTensorShapeOpQuantizeHandler,
            ]
            if qhandler_cls == qp.BinaryOpQuantizeHandler:
                # these ops do not have quantized equivalents
                ops_to_skip = [
                    torch.bmm,
                    torch.div,
                    torch.sub,
                    operator.truediv,
                    operator.sub
                ]
                if base_op in ops_to_skip:
                    continue
                self.assertTrue(
                    _op_in_base_sets_of_related_ops(base_op),
                    f"{base_op} not in sets of related ops")
            elif qhandler_cls == qp.RNNDynamicQuantizeHandler:
                # TODO(future PR): add support for all classes in
                # RNNDynamicQuantizeHandler
                pass
            elif qhandler_cls == qp.DefaultNodeQuantizeHandler:
                # torch.sum does not have quantized equivalents
                if base_op == torch.sum:
                    continue
                self.assertTrue(
                    _op_in_base_sets_of_related_ops(base_op),
                    f"{base_op} not in sets of related ops")
            elif qhandler_cls in qhandler_cls_quant_op_same_signature:
                # these ops use the same op signature for fp32 and quantized
                # tensors
                self.assertTrue(
                    _op_in_base_sets_of_related_ops(base_op) or
                    _op_is_unmatchable(base_op),
                    f"{base_op} not in sets of related ops or unmatchable")
            elif qhandler_cls in qhandler_cls_all_ops_quantizeable:
                self.assertTrue(
                    _op_in_base_sets_of_related_ops(base_op),
                    f"{base_op} not in sets of related ops")
            else:
                # NOTE(review): "handing" below looks like a typo for
                # "handling" in the failure message.
                raise AssertionError(
                    f"handing for {qhandler_cls} not implemented")
    @skipIfNoFBGEMM
    def test_user_defined_function(self):
        """
        Verify that graph matching works on user defined functions
        """
        class M1(nn.Module):
            def forward(self, x):
                x = F.hardswish(x)
                return x
        class M2(nn.Module):
            def forward(self, x):
                x = _wrapped_hardswish(x)
                return x
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        m1 = prepare_fx(M1().eval(), qconfig_dict)
        m2 = prepare_fx(M2().eval(), qconfig_dict)
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
        add_op_to_sets_of_related_ops(
            base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
        results = get_matching_subgraph_pairs(
            m1, m2,
            base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
        hardswish_name_0 = 'base_op_' + get_base_name_for_op(
            base_name_to_sets_of_related_ops, F.hardswish) + '_0'
        expected_types = {
            hardswish_name_0:
                ((F.hardswish, torch.ao.quantization.MinMaxObserver), (_wrapped_hardswish, _wrapped_hardswish)),
        }
        self.assert_types_for_matched_subgraph_pairs(
            results, expected_types, m1, m2)
    @skipIfNoFBGEMM
    def test_results_order(self):
        # Matched pairs should come back in graph execution order.
        m = nn.Sequential(
            nn.Conv2d(1, 1, 1),
            nn.Linear(1, 1),
        ).eval()
        mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        results = get_matching_subgraph_pairs(mp, mq)
        self.assertTrue(len(results) == 2)
        results_iter = iter(results.items())
        _, (subgraph_a_0, subgraph_b_0) = next(results_iter)
        self.assertTrue(subgraph_a_0.start_node.name == '_0' and
                        subgraph_b_0.start_node.name == '_0')
        _, (subgraph_a_1, subgraph_b_1) = next(results_iter)
        self.assertTrue(subgraph_a_1.start_node.name == '_1' and
                        subgraph_b_1.start_node.name == '_1')
class TestFXGraphMatcherModels(QuantizationTestCase):
    # Smoke tests: graph matching should not raise on a real-world model
    # (mobilenet_v2 from torchvision), both for PTQ and QAT flows.
    @skipIfNoFBGEMM
    @skip_if_no_torchvision
    def test_mobilenet_v2(self):
        # verify that mobilenetv2 graph is able to be matched
        import torchvision
        m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).eval().float()
        mp = prepare_fx(copy.deepcopy(m), {'': torch.ao.quantization.default_qconfig})
        # assume success if no exceptions
        results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        # assume success if no exceptions
        results_mp_mq = get_matching_subgraph_pairs(mp, mq)
    @skipIfNoFBGEMM
    @skip_if_no_torchvision
    def test_mobilenet_v2_qat(self):
        # verify that mobilenetv2 graph is able to be matched
        import torchvision
        m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).float()
        mp = prepare_qat_fx(
            copy.deepcopy(m),
            {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')})
        # assume success if no exceptions
        results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        # assume success if no exceptions
        results_mp_mq = get_matching_subgraph_pairs(mp, mq)
class FXNumericSuiteQuantizationTestCase(QuantizationTestCase):
    # Shared drivers for the Numeric Suite FX tests. Each helper prepares
    # and converts a model, runs the NS API under test (weight extraction,
    # activation loggers, or shadow loggers), checks the number of results,
    # and extends them with sqnr / l2 / cosine comparison metrics.
    def _test_extract_weights(
        self, m, results_len=0, qconfig_dict=None, prepare_fn=prepare_fx
    ):
        # Compares weights of (fp32 vs prepared) and (prepared vs quantized).
        m = torch.fx.symbolic_trace(m)
        if qconfig_dict is None:
            qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        mp = prepare_fn(copy.deepcopy(m), qconfig_dict)
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        # test both the public API as well as the internal GraphModule API
        for extract_weights_fun in (extract_weights, _extract_weights_impl):
            # test both m vs mp and mp vs mq
            for m1, m2 in ((m, mp), (mp, mq)):
                results = extract_weights_fun('a', m1, 'b', m2)
                self.assertTrue(
                    len(results) == results_len,
                    f"expected len {results_len}, got len {len(results)}")
                self.assert_ns_compare_dict_valid(results)
                extend_logger_results_with_comparison(
                    results, 'a', 'b', compute_sqnr, 'sqnr')
                extend_logger_results_with_comparison(
                    results, 'a', 'b', compute_normalized_l2_error, 'l2_error')
                extend_logger_results_with_comparison(
                    results, 'a', 'b', compute_cosine_similarity,
                    'cosine_similarity')
    def _test_match_activations(
        self, m, data, prepared_expected_node_occurrence=None, results_len=0,
        should_log_inputs=False,
        qconfig_dict=None,
        skip_scripting=False,
        prepare_fn=prepare_fx,
    ):
        # Instruments (fp32 vs prepared) and (prepared vs quantized) model
        # pairs with output loggers, calibrates, and validates logger data.
        if qconfig_dict is None:
            qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        if prepare_fn == prepare_fx:
            m.eval()
        else:
            m.train()
        mp = prepare_fn(copy.deepcopy(m), qconfig_dict)
        mp(*data)
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        m_ns, mp_ns2 = add_loggers(
            'a', m, 'b', copy.deepcopy(mp), OutputLogger,
            should_log_inputs=should_log_inputs)
        mp_ns, mq_ns = add_loggers(
            'a', mp, 'b', mq, OutputLogger,
            should_log_inputs=should_log_inputs)
        if prepared_expected_node_occurrence:
            self.checkGraphModuleNodes(
                m_ns, expected_node_occurrence=prepared_expected_node_occurrence)
            self.checkGraphModuleNodes(
                mp_ns2, expected_node_occurrence=prepared_expected_node_occurrence)
            self.checkGraphModuleNodes(
                mp_ns, expected_node_occurrence=prepared_expected_node_occurrence)
            self.checkGraphModuleNodes(
                mq_ns, expected_node_occurrence=prepared_expected_node_occurrence)
        if not skip_scripting:
            # instrumented models must remain scriptable
            m_ns = torch.jit.script(m_ns)
            mp_ns = torch.jit.script(mp_ns)
            mq_ns = torch.jit.script(mq_ns)
        # calibrate
        m_ns(*data)
        mp_ns2(*data)
        mp_ns(*data)
        mq_ns(*data)
        # check activation result correctness
        results = []
        for m1, m2 in ((m_ns, mp_ns2), (mp_ns, mq_ns)):
            act_compare_dict = extract_logger_info(
                m1, m2, OutputLogger, 'b')
            self.assertTrue(
                len(act_compare_dict) == results_len,
                f"expected len {results_len}, got len {len(act_compare_dict)}")
            self.assert_ns_compare_dict_valid(act_compare_dict)
            extend_logger_results_with_comparison(
                act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
            extend_logger_results_with_comparison(
                act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
            extend_logger_results_with_comparison(
                act_compare_dict, 'a', 'b', compute_cosine_similarity,
                'cosine_similarity')
            results.append(act_compare_dict)
        return results
    def _test_match_shadow_activations(
        self, m, data, prepared_expected_node_occurrence=None, results_len=None,
        should_log_inputs=False, qconfig_dict=None, skip_scripting=False,
        prepare_fn=prepare_fx, compare_fp32_vs_fp32_prepared=True,
    ):
        # Same as _test_match_activations, but using shadow loggers (model b
        # shadows model a inside a single combined model).
        if qconfig_dict is None:
            qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        if prepare_fn == prepare_fx:
            m.eval()
        else:
            m.train()
        mp = prepare_fn(copy.deepcopy(m), qconfig_dict)
        mp(*data)
        mp_copy = copy.deepcopy(mp)
        mq = convert_fx(mp_copy)
        if compare_fp32_vs_fp32_prepared:
            m_shadows_mp = add_shadow_loggers(
                'a', copy.deepcopy(m), 'b', copy.deepcopy(mp),
                OutputLogger, should_log_inputs=should_log_inputs)
        mp_shadows_mq = add_shadow_loggers(
            'a', mp, 'b', mq, OutputLogger,
            should_log_inputs=should_log_inputs)
        if prepared_expected_node_occurrence:
            if compare_fp32_vs_fp32_prepared:
                self.checkGraphModuleNodes(
                    m_shadows_mp, expected_node_occurrence=prepared_expected_node_occurrence)
            self.checkGraphModuleNodes(
                mp_shadows_mq, expected_node_occurrence=prepared_expected_node_occurrence)
        if not skip_scripting:
            if compare_fp32_vs_fp32_prepared:
                m_shadows_mp = torch.jit.script(m_shadows_mp)
            mp_shadows_mq = torch.jit.script(mp_shadows_mq)
        # calibrate
        if compare_fp32_vs_fp32_prepared:
            m_shadows_mp(*data)
        mp_shadows_mq(*data)
        # check activation result correctness
        results = []
        models = (m_shadows_mp, mp_shadows_mq) if \
            compare_fp32_vs_fp32_prepared else (mp_shadows_mq,)
        for model in models:
            act_compare_dict = extract_shadow_logger_info(
                model, OutputLogger, 'b')
            if results_len is not None:
                self.assertTrue(
                    len(act_compare_dict) == results_len,
                    f"expected len {results_len}, got len {len(act_compare_dict)}")
            self.assert_ns_compare_dict_valid(act_compare_dict)
            extend_logger_results_with_comparison(
                act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
            extend_logger_results_with_comparison(
                act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
            extend_logger_results_with_comparison(
                act_compare_dict, 'a', 'b', compute_cosine_similarity,
                'cosine_similarity')
            results.append(act_compare_dict)
        return results
class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase):
    @skipIfNoFBGEMM
    def test_extract_weights_mod_ptq(self):
        # PTQ: one extracted weight per conv/linear in the fusion model (14).
        m = AllConvAndLinearFusionModules().eval()
        self._test_extract_weights(m, results_len=14)
    @skipIfNoFBGEMM
    def test_extract_weights_mod_qat(self):
        # QAT variant of test_extract_weights_mod_ptq.
        m = AllConvAndLinearFusionModules().train()
        qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
        self._test_extract_weights(
            m, results_len=14, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
    @skipIfNoFBGEMM
    def test_extract_weights_linear_fun_ptq(self):
        # PTQ: two functional linears -> two extracted weights.
        m = LinearReluLinearFunctional().eval()
        self._test_extract_weights(m, results_len=2)
    @skipIfNoFBGEMM
    def test_extract_weights_linear_fun_qat(self):
        # QAT variant of test_extract_weights_linear_fun_ptq.
        m = LinearReluLinearFunctional().train()
        qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
        self._test_extract_weights(
            m, results_len=2, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
    @skipIfNoFBGEMM
    def test_extract_weights_conv_fun_ptq(self):
        # PTQ: six functional convs (2 each of 1d/2d/3d) -> six weights.
        w1d = torch.randn(1, 1, 1)
        w2d = torch.randn(1, 1, 1, 1)
        w3d = torch.randn(1, 1, 1, 1, 1)
        b1d = torch.randn(1)
        b2d = torch.randn(1)
        b3d = torch.randn(1)
        m = AllConvFunctional(w1d, w2d, w3d, b1d, b2d, b3d).eval()
        self._test_extract_weights(m, results_len=6)
    @skipIfNoFBGEMM
    def test_extract_weights_conv_fun_qat(self):
        # QAT variant of test_extract_weights_conv_fun_ptq.
        w1d = torch.randn(1, 1, 1)
        w2d = torch.randn(1, 1, 1, 1)
        w3d = torch.randn(1, 1, 1, 1, 1)
        b1d = torch.randn(1)
        b2d = torch.randn(1)
        b3d = torch.randn(1)
        m = AllConvFunctional(w1d, w2d, w3d, b1d, b2d, b3d).train()
        qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
        self._test_extract_weights(
            m, results_len=6, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
    @skipIfNoFBGEMM
    def test_extract_weights_dynamic(self):
        # Dynamic quantization: weight extraction for a dynamically
        # quantized linear.
        # TODO(future PR): add Linear-ReLU, after #55393 is fixed.
        m = nn.Sequential(nn.Linear(1, 1)).eval()
        qconfig_dict = {
            'object_type': [
                (nn.Linear, default_dynamic_qconfig),
            ],
        }
        self._test_extract_weights(m, results_len=1, qconfig_dict=qconfig_dict)
    @skipIfNoFBGEMM
    def test_extract_weights_fqn(self):
        # Extracted results should carry the fully qualified module name of
        # each weight, matching across both models.
        m = nn.Sequential(
            nn.Sequential(nn.Conv2d(1, 1, 1)),
            nn.Conv2d(1, 1, 1),
        ).eval()
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        mp = prepare_fx(m, qconfig_dict)
        mq = convert_fx(copy.deepcopy(mp))
        results = extract_weights('a', mp, 'b', mq)
        fqn_a_0 = results['_0_0']['weight']['a'][0]['fqn']
        fqn_b_0 = results['_0_0']['weight']['b'][0]['fqn']
        self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
        fqn_a_1 = results['_1']['weight']['a'][0]['fqn']
        fqn_b_1 = results['_1']['weight']['b'][0]['fqn']
        self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
def _test_match_activations_mod_impl(self, prepare_fn=prepare_fx):
m = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self._test_match_activations(
m, (torch.randn(2, 1, 2, 2),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=2, qconfig_dict=qconfig_dict, prepare_fn=prepare_fn)
@skipIfNoFBGEMM
def test_match_activations_mod_ptq(self):
self._test_match_activations_mod_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_match_activations_mod_qat(self):
self._test_match_activations_mod_impl(prepare_fn=prepare_qat_fx)
def _test_match_activations_fun_impl(self, prepare_fn=prepare_fx):
m = LinearReluLinearFunctional().eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self._test_match_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=2, prepare_fn=prepare_fn, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_match_activations_fun_ptq(self):
self._test_match_activations_fun_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_match_activations_fun_qat(self):
self._test_match_activations_fun_impl(prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_match_activations_meth_ptq(self):
"""
Verify that add_loggers works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m = M().eval()
res = self._test_match_activations(
m, (torch.randn(4, 4),),
results_len=1)
@skipIfNoFBGEMM
def test_match_activations_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq = convert_fx(copy.deepcopy(mp))
mp_ns, mq_ns = add_loggers('a', mp, 'b', mq, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
mp_ns(datum)
mq_ns(datum)
results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'b')
fqn_a_0 = results['_0_0']['node_output']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['node_output']['a'][0]['fqn']
fqn_b_1 = results['_1']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
def _test_add_shadow_loggers_mod_impl(self, prepare_fn=prepare_fx):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
res = self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),), results_len=2,
prepare_fn=prepare_fn, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, enable after"
"dtype inference is supported")
def test_add_shadow_loggers_mod_ptq(self):
self._test_add_shadow_loggers_mod_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_mod_qat(self):
self._test_add_shadow_loggers_mod_impl(prepare_fn=prepare_qat_fx)
def _test_add_shadow_loggers_fun_impl(self, prepare_fn=prepare_fx):
m = LinearReluLinearFunctional()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),), results_len=2, prepare_fn=prepare_fn,
qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_add_shadow_loggers_fun_ptq(self):
self._test_add_shadow_loggers_fun_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_fun_qat(self):
self._test_add_shadow_loggers_fun_impl(prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, enable after"
"dtype inference is supported")
def test_add_shadow_loggers_meth_ptq(self):
"""
Verify that add_loggers works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m = M().eval()
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
results_len=1)
@skipIfNoFBGEMM
def test_add_shadow_loggers_multiple_dtype_casts(self):
"""
Verifies that for nodes where the first input arg is a list,
such as `cat`, we insert an individual dtype cast for each
arg of the list.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = torch.cat([x, x, x], dim=0)
return x
m = M().eval()
expected_occurrence = {
# 3 dequantize function calls from the 3 dtype casts for [x, x, x]
ns.call_module(torch.nn.Identity): 3,
# 1 dequantize method call for module output
ns.call_method("dequantize"): 1,
}
self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1, compare_fp32_vs_fp32_prepared=False)
@skipIfNoFBGEMM
def test_shadow_activations_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq = convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers('a', mp, 'b', mq, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
mp_shadows_mq(datum)
results = extract_shadow_logger_info(mp_shadows_mq, OutputLogger, 'b')
fqn_a_0 = results['_0_0']['node_output']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['node_output']['a'][0]['fqn']
fqn_b_1 = results['_1']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
@skipIfNoFBGEMM
def test_logging_inputs(self):
"""
Verifies that logging inputs works correctly
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = torch.cat([x, x], dim=0)
return x
m = M().eval()
self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),),
results_len=2,
should_log_inputs=True)
@skipIfNoFBGEMM
def test_ops_with_same_fp32_and_int8_signature(self):
"""
Verifies that we can match pairs of ops which have the same aten
signature for fp32 and int8 tensors.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.max_pool_2d = nn.MaxPool2d(2)
def forward(self, x):
x = self.max_pool_2d(x)
x = F.relu(x)
return x
m = M().eval()
self._test_match_activations(
m, (torch.randn(1, 1, 2, 2),),
results_len=2)
@skipIfNoFBGEMM
def test_add_mul_inputs_activations(self):
m = AddMulFunctional().eval()
res = self._test_match_activations(
m, (torch.randn(2, 2), torch.randn(2, 2)),
results_len=6, should_log_inputs=True)
@skipIfNoFBGEMM
def test_linear_fp16_weights(self):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
self._test_extract_weights(m, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_linear_fp16_activations(self):
for should_log_inputs in (True, False):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
num_loggers = 2 if should_log_inputs else 1
expected_occurrence = {
ns.call_module(OutputLogger): num_loggers,
}
res = self._test_match_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1,
qconfig_dict=qconfig_dict,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_linear_fp16_shadow_activations(self):
for should_log_inputs in (True, False):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
num_loggers = 4 if should_log_inputs else 2
expected_occurrence = {
ns.call_module(OutputLogger): num_loggers,
}
res2 = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1,
qconfig_dict=qconfig_dict,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_linear_fp16_vs_linear_fp16_shadow_activations(self):
m = LinearFunctional().eval()
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq1 = convert_fx(copy.deepcopy(mp))
mq2 = convert_fx(copy.deepcopy(mp))
mq1_shadows_mq2 = _add_shadow_loggers_impl(
'a', mq1, 'b', mq2, OutputLogger, should_log_inputs=False)
mq1_shadows_mq2(torch.randn(4, 4))
act_compare_dict = extract_shadow_logger_info(
mq1_shadows_mq2, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
@unittest.skip("TODO: broken by https://github.com/pytorch/pytorch/pull/61687, will enable later")
def test_op_with_either_fp32_or_int8_input(self):
"""
Verify that shadowing works with ops which accept either fp32 or
int8 inputs.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(x)
x = F.relu(x)
return x
m = M()
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
results_len=2)
def _test_int8_shadows_int8_impl(self, m):
"""
Verify that shadowing works where both modules are int8
"""
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mp(torch.randn(4, 1, 4, 4))
mq1 = convert_fx(copy.deepcopy(mp))
mq2 = convert_fx(mp)
mq1_shadows_mq2 = add_shadow_loggers('a', mq1, 'b', mq2, OutputLogger)
mq1_shadows_mq2(torch.randn(4, 1, 4, 4))
act_compare_dict = extract_shadow_logger_info(
mq1_shadows_mq2, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_int8_shadows_int8_mod(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
self._test_int8_shadows_int8_impl(m)
@skipIfNoFBGEMM
def test_int8_shadows_int8_fun(self):
m = LinearFunctional().eval()
self._test_int8_shadows_int8_impl(m)
@skipIfNoFBGEMM
def test_user_module_scriptable(self):
# Logging of the output of this class is not supported, because it is
# neither a tensor or an RNN return type.
class M1(nn.Module):
def forward(self, x):
x1 = x * 2
x2 = x * 4
return (x1, x2)
class M2(nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
x1, x2 = self.m1(x)
return x1, x2
m = M2().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
prepare_custom_config_dict = {
'non_traceable_module_class': [M1],
}
mp1 = prepare_fx(m, qconfig_dict, prepare_custom_config_dict)
mp2 = copy.deepcopy(mp1)
unmatchable_types_map = get_unmatchable_types_map()
unmatchable_types_map['mods_unmatchable'].add(M1)
mp1_ns, mp2_ns = _add_loggers_impl(
'a', mp1, 'b', mp2, OutputLogger, should_log_inputs=False,
unmatchable_types_map=unmatchable_types_map)
# Scripting a model with loggers should succeed. If it fails because of
# incorrect dtypes, we can blocklist the associated types from being instrumented.
mp1_ns_scripted = torch.jit.script(mp1_ns)
mp2_ns_scripted = torch.jit.script(mp2_ns)
@skipIfNoFBGEMM
def test_user_module(self):
"""
For user defined modules,
1. weight extraction should not crash
2. unshadowed activations should only have loggers for known types
3. shadowed activations should only have loggers for known types with
known dtypes
"""
class UserModule(nn.Module):
def forward(self, x):
return x
class M(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
self.user_module = UserModule()
def forward(self, x):
x = self.linear(x)
x = self.user_module(x)
return x
m = M().eval()
# quantize without tracing through UserModule
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
prepare_custom_config_dict = {'non_traceable_module_name': ['user_module']}
mp = prepare_fx(m, qconfig_dict, prepare_custom_config_dict)
mp(torch.randn(1, 1, 1))
mq = convert_fx(copy.deepcopy(mp))
# weight extraction should not crash
weights = _extract_weights_impl('fp32_prepared', mp, 'int8', mq)
# unshadowed activations should have loggers
# add loggers, without retracing
# note: converting again because we cannot copy a quantized linear
mp_ns, mq_ns = _add_loggers_impl(
'fp32_prepared', copy.deepcopy(mp), 'int8',
convert_fx(copy.deepcopy(mp)), OutputLogger,
should_log_inputs=True)
# both fp32 and int8 models should have 2 loggers each, 2 for I/O
# of linear, and 0 for I/O of user_module
unshadowed_expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self.checkGraphModuleNodes(
mp_ns, expected_node_occurrence=unshadowed_expected_occurrence)
self.checkGraphModuleNodes(
mq_ns, expected_node_occurrence=unshadowed_expected_occurrence)
# shadowed activations should only have loggers for nodes where
# the types are known and we can do a dtype cast
# add shadow loggers, without retracing
mp_shadows_mq_ns = _add_shadow_loggers_impl(
'fp32_prepared', mp, 'int8', mq, OutputLogger,
should_log_inputs=True)
# 4 loggers for I/O of linear, 0 loggers for I/O of user_module
shadowed_expected_occurrence = {
ns.call_module(OutputLogger): 4,
}
self.checkGraphModuleNodes(
mp_shadows_mq_ns, expected_node_occurrence=shadowed_expected_occurrence)
def test_op_io_dtype_coverage(self):
"""
Tests that all the ops quantization cares about have input and output
dtypes defined.
"""
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# TODO(future PR): clean this up
node_type_to_io_type_map = get_node_type_to_io_type_map()
FUNS_IO_TYPE_FP32 = node_type_to_io_type_map['funs_io_type_fp32']
FUNS_IO_TYPE_INT8 = node_type_to_io_type_map['funs_io_type_int8']
FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['funs_io_type_fp32_or_int8']
MODS_IO_TYPE_FP32 = node_type_to_io_type_map['mods_io_type_fp32']
MODS_IO_TYPE_INT8 = node_type_to_io_type_map['mods_io_type_int8']
MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['mods_io_type_fp32_or_int8']
METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['meths_io_type_fp32_or_int8']
unmatchable_types_map = get_unmatchable_types_map()
FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
# 1. check static quant module mappings
static_quant_mod_mappings = get_default_static_quant_module_mappings()
for fp32_type, int8_type in static_quant_mod_mappings.items():
types_to_skip = (
torch.ao.quantization.QuantStub,
torch.ao.quantization.DeQuantStub,
nnq.FloatFunctional,
# TODO(future PR): look into whether shadowing embeddings
# makes sense
nn.Embedding,
nn.EmbeddingBag,
)
if fp32_type in types_to_skip:
continue
self.assertTrue(
fp32_type in MODS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type}")
self.assertTrue(
int8_type in MODS_IO_TYPE_INT8,
f"missing IO type handling for f{int8_type}")
# 2. check static quant op mappings
static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
for fp32_type, int8_type in static_quant_fun_mappings.items():
self.assertTrue(
fp32_type in FUNS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type}")
self.assertTrue(
int8_type in FUNS_IO_TYPE_INT8,
f"missing IO type handling for f{int8_type}")
# 3. check dynamic quant mappings
dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
for fp32_type1, fp32_type2 in dynamic_quant_mappings.items():
# TODO(future PR): verify correct I/O for these and remove from
# this list.
types_to_skip = (
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
# TODO(future PR): look into whether shadowing embeddings
# makes sense
nn.Embedding,
nn.EmbeddingBag,
)
if fp32_type1 in types_to_skip:
continue
self.assertTrue(
fp32_type1 in MODS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type1}")
self.assertTrue(
fp32_type2 in MODS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type2}")
# 4. go through the ops mapped to each QuantizeHandler type, and verify
# correctness.
default_quant_patterns = get_default_quant_patterns()
for pattern, qhandler_cls in default_quant_patterns.items():
base_op = None
if isinstance(pattern, tuple):
base_op = pattern[-1]
elif isinstance(pattern, str):
base_op = pattern
else:
base_op = pattern
if (
qhandler_cls in (
qp.BinaryOpQuantizeHandler,
qp.RNNDynamicQuantizeHandler,
)
):
# TODO(future PR): implement shadowing for binary ops
# TODO(future PR): implement shadowing for RNN ops
continue
elif qhandler_cls == qp.CatQuantizeHandler:
self.assertTrue(
base_op in FUNS_IO_TYPE_FP32_OR_INT8,
f"missing IO type handling for {base_op}")
elif (
qhandler_cls in (
qp.ConvReluQuantizeHandler,
qp.LinearReLUQuantizeHandler,
qp.BatchNormQuantizeHandler,
qp.DefaultNodeQuantizeHandler,
)
):
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32) or (base_op in MODS_IO_TYPE_FP32),
f"missing IO type handling for {base_op}")
elif (
qhandler_cls in (
qp.FixedQParamsOpQuantizeHandler,
qp.CopyNodeQuantizeHandler,
qp.GeneralTensorShapeOpQuantizeHandler,
)
):
if (
base_op in FUNS_UNMATCHABLE or
base_op in MODS_UNMATCHABLE or
base_op in METHS_UNMATCHABLE
):
continue
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32_OR_INT8) or
(base_op in MODS_IO_TYPE_FP32_OR_INT8) or
(base_op in METHS_IO_TYPE_FP32_OR_INT8),
f"missing IO type handling for {base_op}")
elif qhandler_cls == qp.EmbeddingQuantizeHandler:
# embedding shadowing is not implemented, for now
continue
else:
raise AssertionError(
f"handing for {qhandler_cls} not implemented")
@skipIfNoFBGEMM
def test_user_defined_function(self):
"""
Verify that NS APIs work on user defined functions
"""
class M1(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.hardswish(x)
x = x.sigmoid()
x = F.linear(x, self.w1, self.b1)
return x
class M2(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = _wrapped_hardswish(x)
x = _wrapped_sigmoid(x)
x = _wrapped_linear(x, self.w1, self.b1)
return x
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
m1 = prepare_fx(M1().eval(), qconfig_dict)
m2 = prepare_fx(M2().eval(), qconfig_dict)
data = torch.randn(1, 1)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_sigmoid, F.sigmoid)
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_linear, F.linear)
op_to_type_to_weight_extraction_fn = \
get_op_to_type_to_weight_extraction_fn()
op_to_type_to_weight_extraction_fn['call_function'][_wrapped_linear] = \
torch.ao.ns.fx.weight_utils.get_linear_fun_weight
# test compare weights
results = extract_weights(
'a', m1, 'b', m2,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
op_to_type_to_weight_extraction_fn=op_to_type_to_weight_extraction_fn)
self.assertTrue(len(results) == 1)
self.assertTrue(len(results['_wrapped_linear']['weight']) == 2)
# test unshadowed activations
m1_ns, m2_ns = _add_loggers_impl(
'a', copy.deepcopy(m1), 'b', copy.deepcopy(m2), OutputLogger,
should_log_inputs=False,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
# calibrate
m1_ns(data)
m2_ns(data)
# check activation result correctness
act_compare_dict = extract_logger_info(m1_ns, m2_ns, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 3)
self.assert_ns_compare_dict_valid(act_compare_dict)
# test shadowed activations
node_type_to_io_type_map = get_node_type_to_io_type_map()
node_type_to_io_type_map['funs_io_type_fp32'].add(_wrapped_hardswish)
node_type_to_io_type_map['funs_io_type_fp32'].add(_wrapped_sigmoid)
m2_shadows_m1_ns = _add_shadow_loggers_impl(
'a', m2, 'b', m1, OutputLogger,
should_log_inputs=False,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
node_type_to_io_type_map=node_type_to_io_type_map)
# calibrate
m2_shadows_m1_ns(data)
# check activation result correctness
act_compare_dict = extract_shadow_logger_info(
m2_shadows_m1_ns, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 2)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, enable after"
"dtype inference is supported")
def test_layer_names(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
nn.Sigmoid(),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
# extract weights
results = extract_weights('fp32', mp, 'int8', mq)
mq_node_names = [node.name for node in mq.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
# match activations
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_ns, mq_ns = add_loggers(
'fp32', copy.deepcopy(mp), 'int8', mq, OutputLogger)
data = torch.randn(1, 1, 1, 1)
mp_ns(data)
mq_ns(data)
results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'int8')
mq_node_names = [node.name for node in mq_ns.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
# match shadow activations
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'fp32', mp, 'int8', mq, OutputLogger)
mp_shadows_mq(data)
results = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'int8')
mq_node_names = [node.name for node in mp_shadows_mq.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
@skipIfNoFBGEMM
def test_extend_logger_results_with_comparison(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
# extract weights
results = extract_weights('fp32', mp, 'int8', mq)
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_sqnr, 'sqnr_int8_vs_fp32')
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_normalized_l2_error, 'l2_error_int8_vs_fp32')
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_cosine_similarity,
'cosine_similarity_int8_vs_fp32')
for layer_name, layer_results in results.items():
assert 'sqnr_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
assert 'l2_error_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
assert 'cosine_similarity_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
@skipIfNoFBGEMM
def test_int8_shadows_fp32_simple(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1), nn.ReLU()).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mq_ref = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'int8', mq, 'fp32', mp, OutputLogger)
# verify that scale and zp were extracted correctly
# for the first op, the scale+zp live as attributes on the module
scale_0 = mp_shadows_mq._0_input_scale_0
scale_0_ref = getattr(mq_ref, '0_input_scale_0')
self.assertEqual(scale_0, scale_0_ref)
zp_0 = mp_shadows_mq._0_input_zero_point_0
zp_0_ref = getattr(mq_ref, '0_input_zero_point_0')
self.assertEqual(zp_0, zp_0_ref)
# for the second op, the scale and zp of input to second op
# must equal to scale and zp of output of first op
scale_1 = mp_shadows_mq._1_input_scale_0
scale_1_ref = getattr(mq_ref, '0').scale
self.assertEqual(scale_1, scale_1_ref)
zp_1 = mp_shadows_mq._1_input_zero_point_0
zp_1_ref = getattr(mq_ref, '0').zero_point
self.assertEqual(zp_1, zp_1_ref)
# verify running data works
mp_shadows_mq(torch.randn(1, 1, 1, 1))
act_compare_dict = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'fp32')
self.assertTrue(len(act_compare_dict) == 2)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_int8_shadows_fp32_coverage(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.adaptive_avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.adaptive_avg_pool(x)
# input qparams of conv will be input qparams of adaptive_avg_pool
x = self.conv(x)
x = torch.mul(x, x)
x = self.conv(x)
x = torch.add(x, x)
x = F.relu(x)
x = self.conv(x)
return x
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mq_ref = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'int8', mq, 'fp32', mp, OutputLogger)
mp_shadows_mq(torch.randn(1, 1, 1, 1))
act_compare_dict = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'fp32')
self.assertTrue(len(act_compare_dict) == 4)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_loggers_preserve_qat_numerics(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
mp = prepare_qat_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mc = convert_fx(copy.deepcopy(mp))
mp.apply(torch.ao.quantization.disable_observer)
datum = torch.randn(1, 1, 1, 1)
ref_fp32 = mp(datum)
ref_int8 = mc(datum)
mp_ns, mc_ns = add_loggers('fp32', mp, 'int8', mc, OutputLogger)
ref_fp32_ns = mp_ns(datum)
ref_int8_ns = mc_ns(datum)
self.assertEqual(ref_fp32, ref_fp32_ns)
self.assertEqual(ref_int8, ref_int8_ns)
@skipIfNoFBGEMM
def test_shadow_loggers_preserve_qat_numerics(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
mp = prepare_qat_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mc = convert_fx(copy.deepcopy(mp))
mp.apply(torch.ao.quantization.disable_observer)
datum = torch.randn(1, 1, 1, 1)
ref_fp32 = mp(datum)
ref_int8 = mc(datum)
mc_shadows_mp = add_shadow_loggers('int8', mc, 'fp32', mp, OutputLogger)
ref_shadow = mc_shadows_mp(datum)
self.assertEqual(ref_fp32, ref_shadow)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_extract_weights_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
results = extract_weights('a', m1, 'b', m2)
extend_logger_results_with_comparison(
results, 'a', 'b', compute_sqnr, 'sqnr')
self.assert_ns_compare_dict_valid(results)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_add_loggers_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m1_ns, m2_ns = add_loggers('a', m1, 'b', m2, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
datum = datum.cuda()
m1_ns(datum)
m2_ns(datum)
act_compare_dict = extract_logger_info(m1_ns, m2_ns, OutputLogger, 'b')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_add_shadow_loggers_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m1_shadows_m2 = add_shadow_loggers('a', m1, 'b', m2, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
datum = datum.cuda()
m1_shadows_m2(datum)
act_compare_dict = extract_shadow_logger_info(m1_shadows_m2, OutputLogger, 'b')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
class TestFXNumericSuiteCoreAPIsModels(FXNumericSuiteQuantizationTestCase):
    """
    Tests numeric suite core APIs on non-toy models.
    """
    @skipIfNoFBGEMM
    def test_compare_weights_conv(self):
        # Weight extraction on conv, conv-bn, and conv-bn-relu models.
        test_cases = (
            (ConvModel(),),
            (ConvBnModel(),),
            (ConvBnReLUModel(),),
        )
        for m, in test_cases:
            m.eval()
            self._test_extract_weights(m, results_len=1)
    @skipIfNoFBGEMM
    def test_compare_weights_linear(self):
        # Weight extraction on static and dynamic linear models.
        test_cases = (
            (SingleLayerLinearModel(), None),
            (
                SingleLayerLinearDynamicModel(),
                {"object_type": [(nn.Linear, default_dynamic_qconfig)]},
            ),
        )
        for m, qconfig_dict in test_cases:
            m.eval()
            res = self._test_extract_weights(
                m, results_len=1, qconfig_dict=qconfig_dict)
    @skipIfNoFBGEMM
    def test_compare_weights_lstm_dynamic(self):
        # Weight extraction on a dynamically quantized LSTM.
        qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
        m = LSTMwithHiddenDynamicModel().eval()
        res = self._test_extract_weights(
            m, results_len=1, qconfig_dict=qconfig_dict)
    @skipIfNoFBGEMM
    def test_compare_activations_conv(self):
        # Unshadowed activation matching on conv variants.
        test_cases = (
            (ConvModel(),),
            (ConvBnModel(),),
            (ConvBnReLUModel(),),
        )
        for m, in test_cases:
            m.eval()
            res = self._test_match_activations(
                m, (torch.randn(1, 3, 4, 4),), results_len=1)
    @skipIfNoFBGEMM
    def test_compare_activations_linear(self):
        # Unshadowed activation matching on static and dynamic linear models.
        test_cases = (
            (SingleLayerLinearModel(), None),
            (
                SingleLayerLinearDynamicModel(),
                {"object_type": [(nn.Linear, default_dynamic_qconfig)]},
            ),
        )
        for m, qconfig_dict in test_cases:
            m.eval()
            res = self._test_match_activations(
                m, (torch.randn(5, 5),), results_len=1, qconfig_dict=qconfig_dict)
    @skipIfNoFBGEMM
    def test_compare_activations_lstm_dynamic(self):
        # Unshadowed activation matching on a dynamically quantized LSTM.
        qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
        m = LSTMwithHiddenDynamicModel().eval()
        lstm_input = torch.rand((1, 1, 2))
        lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
        # TODO(future PR): enable scripting (quant prepared LSTM not scriptable)
        res = self._test_match_activations(
            m, (lstm_input, lstm_hidden), results_len=1, qconfig_dict=qconfig_dict,
            skip_scripting=True)
    @skipIfNoFBGEMM
    def test_compare_shadow_activations_conv(self):
        # Shadowed activation matching on conv variants.
        test_cases = (
            (ConvModel(),),
            (ConvBnModel(),),
            (ConvBnReLUModel(),),
        )
        for m, in test_cases:
            m.eval()
            res = self._test_match_shadow_activations(
                m, (torch.randn(1, 3, 4, 4),), results_len=1)
    @skipIfNoFBGEMM
    def test_compare_shadow_activations_linear(self):
        # Shadowed activation matching on static and dynamic linear models.
        test_cases = (
            (SingleLayerLinearModel(), None),
            (
                SingleLayerLinearDynamicModel(),
                {"object_type": [(nn.Linear, default_dynamic_qconfig)]},
            ),
        )
        for m, qconfig_dict in test_cases:
            m.eval()
            res = self._test_match_shadow_activations(
                m, (torch.randn(5, 5),), results_len=1, qconfig_dict=qconfig_dict)
    @skipIfNoFBGEMM
    def test_compare_shadow_activations_lstm_dynamic(self):
        # Shadowed activation matching on a dynamically quantized LSTM.
        qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
        m = LSTMwithHiddenDynamicModel().eval()
        lstm_input = torch.rand((1, 1, 2))
        lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
        # TODO(future PR): enable scripting (quant prepared LSTM not scriptable)
        res = self._test_match_shadow_activations(
            m, (lstm_input, lstm_hidden), results_len=1, qconfig_dict=qconfig_dict,
            skip_scripting=True)
    @skipIfNoFBGEMM
    def test_sparsenn_compare_activations(self):
        # Unshadowed activation matching on a sparse (embedding-bag) model,
        # with and without input logging.
        for should_log_inputs in (True, False):
            sparse_nn = SparseNNModel().eval()
            idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
            offsets = torch.LongTensor([0, 4])
            x = torch.randn(2, 4)
            self._test_match_activations(
                sparse_nn, (idx, offsets, x),
                results_len=5,
                should_log_inputs=should_log_inputs)
    @skipIfNoFBGEMM
    def test_sparsenn_shadow(self):
        # Shadowed activation matching on a sparse (embedding-bag) model,
        # with and without input logging.
        for should_log_inputs in (True, False):
            sparse_nn = SparseNNModel().eval()
            idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
            offsets = torch.LongTensor([0, 4])
            x = torch.randn(2, 4)
            self._test_match_shadow_activations(
                sparse_nn, (idx, offsets, x),
                results_len=4,
                should_log_inputs=should_log_inputs)
    @skip_if_no_torchvision
    @skipIfNoFBGEMM
    @unittest.skip("TODO: broken by https://github.com/pytorch/pytorch/pull/61687, will enable later")
    def test_resnet18(self):
        # End-to-end shadow logging on torchvision resnet18.
        import torchvision
        m = torchvision.models.quantization.resnet18(pretrained=True, quantize=False).eval()
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        self._test_match_shadow_activations(
            m, (torch.randn(1, 3, 224, 224),),
            qconfig_dict=qconfig_dict,
            should_log_inputs=False)
    @skip_if_no_torchvision
    @skipIfNoFBGEMM
    @unittest.skip("TODO: broken by https://github.com/pytorch/pytorch/pull/61687, will enable later")
    def test_mobilenet_v2(self):
        # End-to-end shadow logging on torchvision mobilenet_v2.
        import torchvision
        m = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=False).eval()
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        self._test_match_shadow_activations(
            m, (torch.randn(1, 3, 224, 224),),
            qconfig_dict=qconfig_dict,
            should_log_inputs=False)
| 39.230508 | 112 | 0.611892 |
ace85be9921ae63febcaca98e93886d4ed6d1be4 | 7,073 | py | Python | apps/profile/migrations/0023_auto__add_field_paymenthistory_payment_identifier.py | starsep/NewsBlur | 6c59416ca82377ca1bbc7d044890bdead3eba904 | [
"MIT"
] | 1 | 2019-07-15T09:12:35.000Z | 2019-07-15T09:12:35.000Z | apps/profile/migrations/0023_auto__add_field_paymenthistory_payment_identifier.py | starsep/NewsBlur | 6c59416ca82377ca1bbc7d044890bdead3eba904 | [
"MIT"
] | 7 | 2021-02-08T20:32:31.000Z | 2022-03-11T23:50:47.000Z | apps/profile/migrations/0023_auto__add_field_paymenthistory_payment_identifier.py | starsep/NewsBlur | 6c59416ca82377ca1bbc7d044890bdead3eba904 | [
"MIT"
] | 1 | 2020-11-21T08:43:15.000Z | 2020-11-21T08:43:15.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add PaymentHistory.payment_identifier."""
        # Adding field 'PaymentHistory.payment_identifier'
        # null=True, so existing rows need no backfill or default value.
        db.add_column(u'profile_paymenthistory', 'payment_identifier',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop PaymentHistory.payment_identifier."""
        # Deleting field 'PaymentHistory.payment_identifier'
        db.delete_column(u'profile_paymenthistory', 'payment_identifier')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profile.paymenthistory': {
'Meta': {'ordering': "['-payment_date']", 'object_name': 'PaymentHistory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_amount': ('django.db.models.fields.IntegerField', [], {}),
'payment_date': ('django.db.models.fields.DateTimeField', [], {}),
'payment_identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'payment_provider': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['auth.User']"})
},
u'profile.profile': {
'Meta': {'object_name': 'Profile'},
'collapsed_folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'dashboard_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feed_pane_size': ('django.db.models.fields.IntegerField', [], {'default': '242'}),
'has_found_friends': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_setup_feeds': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_trained_intelligence': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'hide_getting_started': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_premium': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen_ip': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_seen_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'preferences': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'premium_expire': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'secret_token': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'send_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'stripe_4_digits': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'timezone': ('vendor.timezones.fields.TimeZoneField', [], {'default': "'America/New_York'", 'max_length': '100'}),
'tutorial_finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'view_settings': ('django.db.models.fields.TextField', [], {'default': "'{}'"})
}
}
complete_apps = ['profile'] | 75.244681 | 187 | 0.578397 |
ace85c56cdca46c5d9b7befc3ce95a134f45a2b0 | 748 | py | Python | accounts/forms.py | sekar-srinivasan/sac | e52451ec0622c7f6531ce250b7355dfb00ce7a31 | [
"MIT"
] | null | null | null | accounts/forms.py | sekar-srinivasan/sac | e52451ec0622c7f6531ce250b7355dfb00ce7a31 | [
"MIT"
] | null | null | null | accounts/forms.py | sekar-srinivasan/sac | e52451ec0622c7f6531ce250b7355dfb00ce7a31 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class RegistrationForm(UserCreationForm):
    """Sign-up form: the stock ``UserCreationForm`` plus a required email
    address and first/last name fields."""

    email = forms.EmailField(required=True)

    class Meta:
        model = User
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            'password1',
            'password2',
        )

    def save(self, commit=True):
        """Copy the extra cleaned fields onto the user before saving."""
        user = super(RegistrationForm, self).save(commit=False)
        for field_name in ('first_name', 'last_name', 'email'):
            setattr(user, field_name, self.cleaned_data[field_name])
        if commit:
            user.save()
        return user
| 26.714286 | 63 | 0.59893 |
ace85c60b6588693c753174cd09c64695310adac | 2,498 | py | Python | car_accidents/car_accidents/favicon_urls.py | WojBor87/car-accidents | 858e6d841413386dcbd5e530fbdd583bb8a23e90 | [
"MIT",
"Unlicense"
] | null | null | null | car_accidents/car_accidents/favicon_urls.py | WojBor87/car-accidents | 858e6d841413386dcbd5e530fbdd583bb8a23e90 | [
"MIT",
"Unlicense"
] | 1 | 2021-02-28T08:46:25.000Z | 2021-02-28T08:46:25.000Z | car_accidents/car_accidents/favicon_urls.py | WojBor87/car-accidents | 858e6d841413386dcbd5e530fbdd583bb8a23e90 | [
"MIT",
"Unlicense"
] | 1 | 2021-02-27T12:30:47.000Z | 2021-02-27T12:30:47.000Z | from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import path
from django.views.generic.base import RedirectView
# All favicon/touch-icon assets live under this static subdirectory.
_FAVICON_DIR = "frontend/images/favicons/"

# File names served at the site root, in the order they were registered.
_FAVICON_FILES = [
    'apple-icon-57x57.png',
    'apple-icon-60x60.png',
    'apple-icon-72x72.png',
    'apple-icon-76x76.png',
    'apple-icon-114x114.png',
    'apple-icon-120x120.png',
    'apple-icon-144x144.png',
    'apple-icon-152x152.png',
    'apple-icon-180x180.png',
    'android-icon-192x192.png',
    'favicon-32x32.png',
    'favicon-96x96.png',
    'favicon-16x16.png',
    'manifest.json',
    'ms-icon-144x144.png',
]

# One root-level redirect per asset. Generated from the list above instead
# of fifteen copy-pasted path() entries; routes and order are unchanged.
favicon_patterns = [
    path(
        name,
        RedirectView.as_view(url=staticfiles_storage.url(_FAVICON_DIR + name)),
    )
    for name in _FAVICON_FILES
]
| 38.430769 | 119 | 0.683747 |
ace85d6f9d4312d763acbaf589077322575ad521 | 1,317 | py | Python | examples/python/route_guide/route_guide_resources.py | samotarnik/grpc | 3278bdceda8030d5aa130f12765e5f07263c860d | [
"Apache-2.0"
] | 36,552 | 2015-02-26T17:30:13.000Z | 2022-03-31T22:41:33.000Z | examples/python/route_guide/route_guide_resources.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 23,536 | 2015-02-26T17:50:56.000Z | 2022-03-31T23:39:42.000Z | examples/python/route_guide/route_guide_resources.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 11,050 | 2015-02-26T17:22:10.000Z | 2022-03-31T10:12:35.000Z | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common resources used in the gRPC route guide example."""
import json
import route_guide_pb2
def read_route_guide_database():
    """Reads the route guide database.

    Returns:
      The full contents of the route guide database as a sequence of
        route_guide_pb2.Features.
    """
    with open("route_guide_db.json") as db_file:
        raw_entries = json.load(db_file)
    # Build one Feature message per JSON record.
    return [
        route_guide_pb2.Feature(
            name=entry["name"],
            location=route_guide_pb2.Point(
                latitude=entry["location"]["latitude"],
                longitude=entry["location"]["longitude"]))
        for entry in raw_entries
    ]
| 34.657895 | 74 | 0.69552 |
ace85d913f6bacc977f1965366c610335885b44b | 2,245 | py | Python | Contents/Libraries/Shared/json_tricks/utils.py | jippo015/Sub-Zero.bundle | 734e0f7128c05c0f639e11e7dfc77daa1014064b | [
"MIT"
] | 1,553 | 2015-11-09T02:17:06.000Z | 2022-03-31T20:24:52.000Z | Contents/Libraries/Shared/json_tricks/utils.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | [
"MIT"
] | 691 | 2015-11-05T21:32:26.000Z | 2022-03-17T10:52:45.000Z | Contents/Libraries/Shared/json_tricks/utils.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | [
"MIT"
] | 162 | 2015-11-06T19:38:55.000Z | 2022-03-16T02:42:41.000Z |
from collections import OrderedDict
class hashodict(OrderedDict):
	"""
	An ordered dictionary that supports hashing. Instances must be treated as
	immutable once created -- mutating one after use leads to all kinds of
	weird bugs. This is not enforced; it is only used during encoding.
	"""
	def __hash__(self):
		frozen_items = frozenset(self.items())
		return hash(frozen_items)
try:
	# Python 3.3+: inspect.signature is the preferred introspection API.
	from inspect import signature
except ImportError:
	try:
		# Python 3.0-3.2: fall back to getfullargspec (knows kwonly args).
		from inspect import getfullargspec
	except ImportError:
		# Python 2: only getargspec exists (no keyword-only arguments).
		from inspect import getargspec
		def get_arg_names(callable):
			# Return the set of parameter names `callable` accepts.
			argspec = getargspec(callable)
			return set(argspec.args)
	else:
		#todo: this is not covered in test case (py 3+ uses `signature`, py2 `getfullargspec`); consider removing it
		def get_arg_names(callable):
			# Return the set of parameter names, including keyword-only ones.
			argspec = getfullargspec(callable)
			return set(argspec.args) | set(argspec.kwonlyargs)
else:
	def get_arg_names(callable):
		# Return the set of parameter names via the signature object.
		sig = signature(callable)
		return set(sig.parameters.keys())
def call_with_optional_kwargs(callable, *args, **optional_kwargs):
	"""Invoke `callable`, forwarding only the keyword arguments it accepts."""
	accepted = get_arg_names(callable)
	filtered = {name: value for name, value in optional_kwargs.items() if name in accepted}
	return callable(*args, **filtered)
class NoNumpyException(Exception):
	""" Raised when numpy functionality is requested but numpy cannot be found. """
class NoPandasException(Exception):
	""" Raised when pandas functionality is requested but pandas cannot be found. """
def get_scalar_repr(npscalar):
	"""Return the json-tricks dict representation of a numpy scalar."""
	pairs = (
		('__ndarray__', npscalar.item()),
		('dtype', str(npscalar.dtype)),
		('shape', ()),
	)
	return hashodict(pairs)
def encode_scalars_inplace(obj):
	"""
	Recursively replace numpy scalars inside a structure of lists, tuples,
	dicts and sets by their dictionary representation, which json-tricks can
	load. Lists and dicts are modified in-place (pass a copy to keep the
	original); tuples and sets are rebuilt.
	"""
	from numpy import generic, complex64, complex128
	if isinstance(obj, (generic, complex64, complex128)):
		return get_scalar_repr(obj)
	if isinstance(obj, dict):
		# Snapshot the keys so reassignment during iteration is safe.
		for key in tuple(obj.keys()):
			obj[key] = encode_scalars_inplace(obj[key])
		return obj
	if isinstance(obj, list):
		for index in range(len(obj)):
			obj[index] = encode_scalars_inplace(obj[index])
		return obj
	if isinstance(obj, (tuple, set)):
		return type(obj)(encode_scalars_inplace(item) for item in obj)
	return obj
| 27.378049 | 110 | 0.738085 |
ace85e6d39f6cde99fbbbb39ecb693b47d43c844 | 6,233 | py | Python | driving.py | bombermon/Driving_pract | 5d85b43435d094b64ef42ea8f7561f8aa21c5e78 | [
"MIT"
] | null | null | null | driving.py | bombermon/Driving_pract | 5d85b43435d094b64ef42ea8f7561f8aa21c5e78 | [
"MIT"
] | null | null | null | driving.py | bombermon/Driving_pract | 5d85b43435d094b64ef42ea8f7561f8aa21c5e78 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import math
# НАЧАЛО ВСПОМОГАТЕЛЬНЫХ ------------------------------------------------------------------------------
def get_length(point1, point2):
    """Return the Euclidean distance between two 2-D points (x, y) tuples."""
    # math.hypot is the idiomatic (and overflow-safe) form of
    # sqrt(dx**2 + dy**2).
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
def get_triangle_area(point1, point2, point3):
    """Area of the triangle spanned by three points, via Heron's formula."""
    side_a = get_length(point1, point2)
    side_b = get_length(point2, point3)
    side_c = get_length(point3, point1)
    semi = (side_a + side_b + side_c) / 2
    return math.sqrt(semi * (semi - side_a) * (semi - side_b) * (semi - side_c))
def get_any_area(figure):
    """Area of a simple polygon (sequence of (x, y) vertices), computed with
    the shoelace formula."""
    n = len(figure)
    signed_twice_area = 0
    for i in range(n):
        x_cur, y_cur = figure[i][0], figure[i][1]
        x_nxt, y_nxt = figure[(i + 1) % n][0], figure[(i + 1) % n][1]
        signed_twice_area += x_cur * y_nxt - y_cur * x_nxt
    return abs(signed_twice_area) / 2
def get_perimeter(figure):
    """Perimeter of a polygon: sum of its edge lengths, closing the loop."""
    n = len(figure)
    return sum(
        get_length(figure[i], figure[(i + 1) % n])
        for i in range(n)
    )
# КОНЕЦ ВСПОМОГАТЕЛЬНЫХ -------------------------------------------------------------------------------
def gen_rectangle():
    """Yield unit squares laid out left to right along the x axis, one
    square of empty space between consecutive squares."""
    shift = 0
    while True:
        yield (shift, 0), (shift, 1), (shift + 1, 1), (shift + 1, 0)
        shift += 2
def gen_triangle():
    """Yield width-2 triangles laid out along the x axis, one unit apart."""
    base = 0
    while True:
        yield (base, 0), (base + 1, 1), (base + 2, 0)
        base += 3
def gen_hexagon():
    """Yield flattened hexagons laid out along the x axis, step 2."""
    low = 0.067
    high = 1 - 0.067
    offset = 0
    while True:
        yield ((offset, 0.5), (offset + 0.25, low), (offset + 0.75, low),
               (offset + 1, 0.5), (offset + 0.75, high), (offset + 0.25, high))
        offset += 2
def tr_translate(figure, x, y):
    """Return the figure translated by the vector (x, y)."""
    return tuple((px + x, py + y) for px, py in figure)
def tr_rotate(figure, x0, y0, radian):
    """Rotate the figure by `radian` radians around the point (x0, y0)."""
    sin_a = math.sin(radian)
    cos_a = math.cos(radian)
    rotated = []
    for px, py in figure:
        # Standard 2-D rotation about (x0, y0).
        rotated.append(((px - x0) * cos_a - (py - y0) * sin_a + x0,
                        (px - x0) * sin_a + (py - y0) * cos_a + y0))
    return tuple(rotated)
def tr_homothety(figure, x0, y0, k):
    """Scale the figure about the center (x0, y0) by factor k (homothety)."""
    scaled = []
    for px, py in figure:
        # Scale the vector from the center, then shift back.
        scaled.append(((px - x0) * k + x0, (py - y0) * k + y0))
    return tuple(scaled)
def tr_symmetry(figure, p1x, p1y, p2x, p2y):
    """Reflect the figure across the line through (p1x, p1y) and (p2x, p2y)."""
    dx = p2x - p1x
    dy = p2y - p1y
    mirrored = []
    for p0x, p0y in figure:
        denom = -1 * dx * dx - dy * dy
        # Foot of the perpendicular from (p0x, p0y) onto the axis line.
        # NOTE(review): the `(dy * p1y - dx * p1y)` factor looks suspicious
        # (both terms use p1y); kept exactly as-is to preserve behavior --
        # verify for axis lines that do not pass through the origin.
        ax = (((dx * p0x + dy * p0y) * -1 * dx) - (dy * (dy * p1x - dx * p1y))) / denom
        ay = ((dx * (dy * p1y - dx * p1y)) - (dy * (dx * p0x + dy * p0y))) / denom
        # Mirror: step from the original point through the foot, twice.
        mirrored.append((ax + (ax - p0x), ay + (ay - p0y)))
    return tuple(mirrored)
def flt_angle_point(figure, point):
    """True if `point` coincides with one of the figure's vertices."""
    return any(vertex[0] == point[0] and vertex[1] == point[1] for vertex in figure)
def flt_square(figure, area):
    """True if the figure's area is strictly less than `area`."""
    return get_any_area(figure) < area
def flt_short_side(figure, side):
    """True if any edge of the figure is strictly shorter than `side`."""
    n = len(figure)
    return any(
        get_length(figure[i], figure[(i + 1) % n]) < side
        for i in range(n)
    )
def flt_point_inside(figure, point):
    """Even-odd ray-casting test: True if `point` lies inside the polygon."""
    x, y = point
    inside = False
    for i in range(len(figure)):
        prev = figure[i - 1]
        cur = figure[i]
        # Does the horizontal ray from `point` cross the edge (prev, cur)?
        crosses_level = ((cur[1] <= y and y < prev[1]) or
                         (prev[1] <= y and y < cur[1]))
        if crosses_level and (
                x > (prev[0] - cur[0]) * (y - cur[1]) / (prev[1] - cur[1]) + cur[0]):
            inside = not inside
    return inside
def flt_convex_polygon(figure):
    """True if the polygon is convex: every non-collinear turn shares the
    same orientation (all clockwise or all counter-clockwise)."""
    def turn(a, b, c):
        # z-component of the cross product of (a - b) x (b - c)
        return (a[0] - b[0]) * (b[1] - c[1]) - (a[1] - b[1]) * (b[0] - c[0])

    n = len(figure)
    if n < 4:
        # A triangle (or degenerate figure) is always convex.
        return True
    orientation = None  # None until the first non-collinear turn fixes it
    for i in range(n):
        value = turn(figure[i], figure[(i + 1) % n], figure[(i + 2) % n])
        if value == 0:
            continue  # collinear triple carries no orientation information
        clockwise = value < 0
        if orientation is None:
            orientation = clockwise
        elif clockwise != orientation:
            return False
    return True
def flt_polygon_angles_inside(figure, figure_to_cheek):
    """True if any vertex of `figure_to_cheek` lies inside `figure`.

    (Parameter name kept as-is for compatibility; likely a typo of "check".)
    """
    return any(flt_point_inside(figure, vertex) for vertex in figure_to_cheek)
def agr_origin_nearest(origin_nearest_point, figure):
    """Fold step: the point nearest the origin, comparing the running
    candidate with every vertex of `figure` (candidate wins ties)."""
    best = origin_nearest_point
    best_dist = get_length(best, (0, 0))
    for vertex in figure:
        dist = get_length(vertex, (0, 0))
        if dist < best_dist:
            best = vertex
            best_dist = dist
    return best
def agr_max_side(max_side, figure):
    """Fold step: the larger of `max_side` and the figure's longest edge."""
    n = len(figure)
    # Seed with the running maximum so an empty figure leaves it unchanged.
    return max([max_side] +
               [get_length(figure[i], figure[(i + 1) % n]) for i in range(n)])
def agr_min_area(min_area, figure):
    """Fold step: the smaller of `min_area` and this figure's area."""
    return min(min_area, get_any_area(figure))
def agr_perimeter(previous_perimeters_sum, figure):
    """Fold step: extend the running perimeter total by this figure's."""
    return get_perimeter(figure) + previous_perimeters_sum
def agr_area(previous_area_sum, figure):
    """Fold step: extend the running area total by this figure's area."""
    return get_any_area(figure) + previous_area_sum
def plot(a):
    """Draw every figure in `a` as a closed black outline via matplotlib."""
    for figure in tuple(a):
        count = len(figure)
        for j in range(count):
            start = figure[j]
            # Wrap around so the last vertex connects back to the first.
            end = figure[(j + 1) % count]
            plt.plot((start[0], end[0]), (start[1], end[1]), color="black")
def clean():
    # Clear the current matplotlib figure so the next plot starts fresh.
    plt.clf()
def show():
    # Open the matplotlib window and display everything drawn so far.
    plt.show()
| 26.636752 | 110 | 0.528638 |
ace85f4f435af4b58629ede00b86cf2d6bb5a2b1 | 6,345 | py | Python | web_app/web_app.py | jbronyah/dog-breed-classifier | d849b9641930ca8e0ecb4d7be6c395ca690315f7 | [
"MIT"
] | null | null | null | web_app/web_app.py | jbronyah/dog-breed-classifier | d849b9641930ca8e0ecb4d7be6c395ca690315f7 | [
"MIT"
] | null | null | null | web_app/web_app.py | jbronyah/dog-breed-classifier | d849b9641930ca8e0ecb4d7be6c395ca690315f7 | [
"MIT"
] | null | null | null | '''
DESCRIPTION
This module is able to process an image given to it and detect either a dog or human
It further is able to classify the detected dog breed using a trained model with 82% accuracy
The module then deploys this as a webapp using Flask
INPUTS
img_path - path to an image
OUTPUTS
Returns a dog breed description if a dog is detected
If a human is detected it outputs the closest resemblace to a dog
SCRIPT EXECUTION SAMPLE
python web_app.py
'''
from flask import Flask, request, jsonify, render_template
import cv2
import torch
from torchvision import models, transforms
from torch import nn
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
def face_detector(img_path):
    '''
    DESCRIPTION
        Detects whether at least one human face is present in an image,
        using the pre-loaded Haar cascade classifier

    INPUTS
        img_path - path to an image

    OUTPUTS
        Boolean - returns True if face detected else False
    '''
    frame = cv2.imread(img_path)
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale)
    return len(detections) > 0
# define RESNET50 model, pretrained on ImageNet (used later for dog detection)
RESNET50 = models.resnet50(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    RESNET50 = RESNET50.cuda()
def RESNET50_predict(img_path):
    '''
    Use pre-trained ResNet-50 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to the ResNet-50 model's prediction
    '''
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    image = Image.open(img_path)
    # Deterministic eval-time preprocessing: RandomResizedCrop would make
    # predictions on the same image vary between calls, so use CenterCrop.
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    image_tensor = transform(image)
    image_tensor.unsqueeze_(0)
    RESNET50.eval()
    if use_cuda:
        image_tensor = image_tensor.cuda()
    # Inference only: no gradients needed.
    with torch.no_grad():
        output = RESNET50(image_tensor)
    pred_value, pred_idx = torch.max(output, 1)
    return pred_idx.item()  # predicted class index
def dog_detector_resnet(img_path):
    '''
    DESCRIPTION
        Detects dog(s) within a given image using a pretrained Resnet model

    INPUTS
        img_path - path to an image

    OUTPUTS
        Boolean - True when the predicted ImageNet index falls in the
        dog-breed range (151-268 inclusive), else False
    '''
    predicted_index = RESNET50_predict(img_path)
    return 151 <= predicted_index <= 268  # true/false
def predict_breed_transfer(img_path, model_transfer):
    '''
    DESCRIPTION
        Uses a model passed to it to predict a dog breed of a given image

    INPUTS
        img_path - path to an image
        model_transfer - trained classifier producing one logit per breed

    OUTPUT
        pred_out_name - the dog breed identified by the model
    '''
    # Close the breed-list file deterministically (it was previously left
    # open, leaking the handle on every call).
    with open('dog_breed_list.txt') as f:
        class_names = [name[:-1] for name in f.readlines()]

    # load the image and return the predicted breed
    image_t = Image.open(img_path)
    # Deterministic eval-time preprocessing: RandomResizedCrop would make
    # repeated predictions on the same image disagree, so use CenterCrop.
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    image_t_tensor = transform(image_t)
    image_t_tensor.unsqueeze_(0)
    model_transfer.eval()
    if use_cuda:
        image_t_tensor = image_t_tensor.cuda()
    # Inference only: no gradients needed.
    with torch.no_grad():
        output_t = model_transfer(image_t_tensor)
    pred_value_t, pred_idx_t = torch.max(output_t, 1)
    return class_names[pred_idx_t.item()]
# Webapp deployment
app = Flask(__name__)
# get the app root folder
root_path = os.path.dirname(os.path.abspath(__file__))
@app.route('/')
@app.route('/index')
def index():
    # Landing page: renders the upload form template.
    return render_template('master.html')
@app.route('/upload', methods=['POST'])
def upload_file():
    '''
    Handle an uploaded image: validate its extension, save it under static/,
    detect a dog or human, classify the breed, and render the result page.
    '''
    # path to save uploaded picture
    target_path = os.path.join(root_path, 'static/')
    # create folder if missing
    if not os.path.isdir(target_path):
        os.mkdir(target_path)
    # get uploaded image from form
    file = request.files['file']
    # since app does not work with all image formats checking is done here
    image_ext = ['bmp', 'jpe', 'jpg', 'jpeg', 'xpm', 'ief', 'pbm', 'tif', 'gif']
    # Use splitext so names without a dot no longer crash (split('.')[1]
    # raised IndexError) and multi-dot names like "my.photo.jpg" are judged
    # by their real extension; compare case-insensitively ("PHOTO.JPG").
    extension = os.path.splitext(file.filename)[1].lstrip('.').lower()
    if extension not in image_ext:
        return jsonify('Image file format not supported at the moment. Please return to the previous page')
    # save file to destination folder
    filename = file.filename
    destination_path = "/".join([target_path, filename])
    file.save(destination_path)
    img_path = destination_path
    # defining model and loading trained model
    # NOTE(review): the weights are re-loaded on every request, which is
    # slow; consider caching the model at module scope.
    model_transfer = models.densenet121(pretrained=True)
    num_inputs = model_transfer.classifier.in_features
    final_layer = nn.Linear(num_inputs, 133)
    model_transfer.classifier = final_layer
    model_transfer.load_state_dict(torch.load('model_transfer.pt', map_location='cpu'))
    if dog_detector_resnet(img_path) == True:
        dog_breed = predict_breed_transfer(img_path, model_transfer)
        desc_txt = "Dog detected....It is a " + dog_breed
    elif face_detector(img_path) == True:
        dog_breed = predict_breed_transfer(img_path, model_transfer)
        desc_txt = "Human detected....you look like a " + dog_breed
    else:
        desc_txt = "No human or dog detected. Please provide an image with either a dog or human"
    return render_template('go.html', description=desc_txt, image=filename)
def main():
    # Run the Flask development server on port 3001 (debug mode: dev only).
    app.run(port=3001, debug=True)
if __name__ == '__main__':
main() | 23.943396 | 107 | 0.638613 |
ace85ffa9705dc92685b67cb1e41bdf25c27f2b3 | 128 | py | Python | ex010.py | 89Alberth/Aprendendo-a-programar | 37ae0a60369a0b39c733ba15220efecf674b6447 | [
"MIT"
] | null | null | null | ex010.py | 89Alberth/Aprendendo-a-programar | 37ae0a60369a0b39c733ba15220efecf674b6447 | [
"MIT"
] | null | null | null | ex010.py | 89Alberth/Aprendendo-a-programar | 37ae0a60369a0b39c733ba15220efecf674b6447 | [
"MIT"
] | null | null | null | n = float(input('Quantos reais você tem? R$'))
# Convert the amount n (Brazilian reais, read above) to US dollars at a
# hard-coded rate of R$ 5.19 per US$ 1, then report both values.
s = n / 5.19
print('Você tem {} R$ e você pode comprar {:.2f} US$'.format(n, s))
| 32 | 67 | 0.601563 |
ace86094f7e7efc1d308970581407e5a2c4534e2 | 6,412 | py | Python | pygame2exe.py | joeization/musicgame | 024fa5976c7511e6ee9e2a6abccc6f0b12eb675f | [
"MIT"
] | 1 | 2019-06-16T16:13:11.000Z | 2019-06-16T16:13:11.000Z | pygame2exe.py | joeization/musicgame | 024fa5976c7511e6ee9e2a6abccc6f0b12eb675f | [
"MIT"
] | null | null | null | pygame2exe.py | joeization/musicgame | 024fa5976c7511e6ee9e2a6abccc6f0b12eb675f | [
"MIT"
] | null | null | null | # This will create a dist directory containing the executable file, all the data
# directories. All Libraries will be bundled in executable file.
#
# Run the build process by entering 'pygame2exe.py' or
# 'python pygame2exe.py' in a console prompt.
#
# To build exe, python, pygame, and py2exe have to be installed. After
# building exe none of this libraries are needed.
#Please Note have a backup file in a different directory as if it crashes you
#will loose it all!(I lost 6 months of work because I did not do this)
try:
from distutils.core import setup
import py2exe, pygame
from modulefinder import Module
import glob, fnmatch
import sys, os, shutil
import operator
except ImportError, message:
raise SystemExit, "Unable to load module. %s" % message
#hack which fixes the pygame mixer and pygame font
origIsSystemDLL = py2exe.build_exe.isSystemDLL # save the orginal before we edit it
def isSystemDLL(pathname):
    # Treat the freetype/ogg/SDL_ttf DLLs as application DLLs so py2exe
    # bundles them instead of assuming they are system libraries.
    bundled_dlls = ("libfreetype-6.dll", "libogg-0.dll", "sdl_ttf.dll")  # "sdl_ttf.dll" added by arit.
    if os.path.basename(pathname).lower() in bundled_dlls:
        return 0
    return origIsSystemDLL(pathname)  # defer to the original check
py2exe.build_exe.isSystemDLL = isSystemDLL # override the default function with this one
class pygame2exe(py2exe.build_exe.py2exe): #This hack makes sure that pygame's default font is copied: no need to modify code for specifying a default font
    def copy_extensions(self, extensions):
        #Get pygame default font: locate it inside the installed pygame package
        pygamedir = os.path.split(pygame.base.__file__)[0]
        pygame_default_font = os.path.join(pygamedir, pygame.font.get_default_font())
        #Add font to list of extensions to be copied into the build
        extensions.append(Module("pygame.font", pygame_default_font))
        py2exe.build_exe.py2exe.copy_extensions(self, extensions)
class BuildExe:
    def __init__(self):
        # Build configuration for the py2exe run; edit these values per project.
        #Name of starting .py
        self.script = "aa.py"
        #Name of program
        self.project_name = "SP"
        #Project url
        self.project_url = "about:none"
        #Version of program
        self.project_version = "0.1"
        #License of the program
        self.license = "GPL License"
        #Author of program
        self.author_name = "KCL"
        self.author_email = "k.c.l.852967@gmail.com"
        self.copyright = "Copyright (c) 2015 KCL."
        #Description
        self.project_description = "Description"
        #Icon file (None will use pygame default icon)
        self.icon_file = None
        #Extra files/dirs copied to game
        self.extra_datas = []
        #Extra/excludes python modules
        self.extra_modules = []
        self.exclude_modules = []
        #DLL Excludes
        self.exclude_dll = ['']
        #python scripts (strings) to be included, separated by a comma
        self.extra_scripts = []
        #Zip file name (None will bundle files in exe instead of zip file)
        self.zipfile_name = None
        #Dist directory (output folder for the build)
        self.dist_dir ='cat'
## Code from DistUtils tutorial at http://wiki.python.org/moin/Distutils/Tutorial
## Originally borrowed from wxPython's setup and config files
def opj(self, *args):
path = os.path.join(*args)
return os.path.normpath(path)
def find_data_files(self, srcdir, *wildcards, **kw):
# get a list of all files under the srcdir matching wildcards,
# returned in a format to be used for install_data
def walk_helper(arg, dirname, files):
if '.svn' in dirname:
return
names = []
lst, wildcards = arg
for wc in wildcards:
wc_name = self.opj(dirname, wc)
for f in files:
filename = self.opj(dirname, f)
if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
names.append(filename)
if names:
lst.append( (dirname, names ) )
file_list = []
recursive = kw.get('recursive', True)
if recursive:
os.path.walk(srcdir, walk_helper, (file_list, wildcards))
else:
walk_helper((file_list, wildcards),
srcdir,
[os.path.basename(f) for f in glob.glob(self.opj(srcdir, '*'))])
return file_list
def run(self):
if os.path.isdir(self.dist_dir): #Erase previous destination dir
shutil.rmtree(self.dist_dir)
#Use the default pygame icon, if none given
if self.icon_file == None:
path = os.path.split(pygame.__file__)[0]
self.icon_file = os.path.join(path, 'pygame.ico')
#List all data files to add
extra_datas = []
for data in self.extra_datas:
if os.path.isdir(data):
extra_datas.extend(self.find_data_files(data, '*'))
else:
extra_datas.append(('.', [data]))
setup(
cmdclass = {'py2exe': pygame2exe},
version = self.project_version,
description = self.project_description,
name = self.project_name,
url = self.project_url,
author = self.author_name,
author_email = self.author_email,
license = self.license,
# targets to build
windows = [{
'script': self.script,
'icon_resources': [(0, self.icon_file)],
'copyright': self.copyright
}],
options = {'py2exe': {'optimize': 2, 'bundle_files': 1, 'compressed': True, \
'excludes': self.exclude_modules, 'packages': self.extra_modules, \
'dll_excludes': self.exclude_dll,
'includes': self.extra_scripts} },
zipfile = self.zipfile_name,
data_files = extra_datas,
dist_dir = self.dist_dir
)
if os.path.isdir('build'): #Clean up build dir
shutil.rmtree('build')
if __name__ == '__main__':
    # Default to the "py2exe" command when none was given on the command line.
    # (Plain comparison instead of the needlessly indirect operator.lt.)
    if len(sys.argv) < 2:
        sys.argv.append('py2exe')
    BuildExe().run() #Run generation
    # raw_input is Python 2; this whole module is Python 2 only.
    raw_input("Press any key to continue") #Pause to let user see that things ends
| 37.27907 | 150 | 0.60184 |
ace86198332c46098efe7efe6540d5c4e0b41e26 | 357 | py | Python | experiments/heat-3d/tmp_files/4094.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/heat-3d/tmp_files/4094.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/heat-3d/tmp_files/4094.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
# CHILL transformation recipe: tile the Polybench heat-3d stencil kernel.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/4094.c')
procedure('kernel_heat_3d')
loop(0)
known('n>3')
# Tile statements 0 and 1 at loop depths 2 and 4 with tile size 16.
# NOTE(review): tile() argument semantics come from CHILL's API — verify
# (stmt, level, size, outer_level) against the CHILL documentation.
tile(0,2,16,2)
tile(0,4,16,3)
tile(1,2,16,2)
tile(1,4,16,3)
| 25.5 | 116 | 0.753501 |
ace861d659213e735db106d8c840f5116ff88dcd | 84 | py | Python | mod2.py | chidanandpujar/Python_scripts | 0ee70e07ef4ab4d8c04955466ea9b305bdac0a53 | [
"Unlicense"
] | null | null | null | mod2.py | chidanandpujar/Python_scripts | 0ee70e07ef4ab4d8c04955466ea9b305bdac0a53 | [
"Unlicense"
] | null | null | null | mod2.py | chidanandpujar/Python_scripts | 0ee70e07ef4ab4d8c04955466ea9b305bdac0a53 | [
"Unlicense"
] | null | null | null | from mod1 import A
from mod1 import mfn
from mod1 import a
print(A.b)
A.fn(1)
mfn()
| 12 | 20 | 0.72619 |
ace8629b17347976ba2566b3b577fb40a6de114e | 593 | py | Python | solutions/1005_maximize_sum_of_array_after_k_negations.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/1005_maximize_sum_of_array_after_k_negations.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/1005_maximize_sum_of_array_after_k_negations.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | class Solution:
def largestSumAfterKNegations(self, A: List[int], K: int) -> int:
a_sum = sum(A)
pos, neg = [], []
min_abs_v = abs(A[0])
for a in A:
if a > 0:
pos.append(a)
elif a < 0:
neg.append(a)
min_abs_v = min(min_abs_v, abs(a))
if K <= len(neg):
return a_sum - 2 * sum(sorted(neg)[:K])
else:
a_sum = a_sum - 2 * sum(neg)
if (K - len(neg)) % 2 == 1:
a_sum -= 2 * min_abs_v
return a_sum
| 28.238095 | 69 | 0.409781 |
ace862afa7b977c5f0c5bb0c3137bf30b6f67f62 | 4,964 | py | Python | indico/modules/rb/event/controllers.py | jgrigera/indico | b5538f2755bc38a02313d079bac831ee3dfb44ab | [
"MIT"
] | 1 | 2018-11-12T21:29:26.000Z | 2018-11-12T21:29:26.000Z | indico/modules/rb/event/controllers.py | jgrigera/indico | b5538f2755bc38a02313d079bac831ee3dfb44ab | [
"MIT"
] | 9 | 2020-09-08T09:25:57.000Z | 2022-01-13T02:59:05.000Z | indico/modules/rb/event/controllers.py | jgrigera/indico | b5538f2755bc38a02313d079bac831ee3dfb44ab | [
"MIT"
] | 3 | 2020-07-20T09:09:44.000Z | 2020-10-19T00:29:49.000Z | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import jsonify
from sqlalchemy.orm import joinedload
from indico.modules.events.contributions import Contribution
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.timetable import TimetableEntry
from indico.modules.rb.controllers import RHRoomBookingBase
from indico.modules.rb.event.forms import BookingListForm
from indico.modules.rb.models.reservations import Reservation, ReservationLink
from indico.modules.rb.util import get_booking_params_for_event
from indico.modules.rb.views import WPEventBookingList
from indico.util.date_time import format_datetime, now_utc
from indico.util.string import to_unicode
def _contrib_query(event):
    """Query the event's scheduled contributions that start in the future.

    Eager-loads the timetable entry and orders by friendly id.
    """
    return (Contribution.query
            .with_parent(event)
            .filter(Contribution.is_scheduled,
                    Contribution.timetable_entry.has(TimetableEntry.start_dt > now_utc()))
            .options(joinedload('timetable_entry'))
            .order_by(Contribution.friendly_id))
def _session_block_query(event):
    """Query the event's session blocks that start in the future.

    Eager-loads the timetable entry and orders by session friendly id,
    session title, then block title.
    """
    return (SessionBlock.query
            .filter(SessionBlock.session.has(event=event),
                    SessionBlock.timetable_entry.has(TimetableEntry.start_dt > now_utc()))
            .options(joinedload('timetable_entry'))
            .join(Session)
            .order_by(Session.friendly_id, Session.title, SessionBlock.title))
class RHRoomBookingEventBase(RHManageEventBase, RHRoomBookingBase):
    """Base handler requiring both event-management and room-booking access."""

    def _check_access(self):
        # Both parent checks must pass; each raises on failure.
        RHManageEventBase._check_access(self)
        RHRoomBookingBase._check_access(self)
class RHEventBookingList(RHRoomBookingEventBase):
    """Render the event's room-booking list page."""

    def _process(self):
        form = BookingListForm(event=self.event)
        has_contribs = _contrib_query(self.event).has_rows()
        has_session_blocks = _session_block_query(self.event).has_rows()
        # All non-cancelled reservations linked to this event, with the
        # room, session block and contribution eagerly loaded.
        links = (ReservationLink.query.with_parent(self.event)
                 .options(joinedload('reservation').joinedload('room'),
                          joinedload('session_block'),
                          joinedload('contribution'))
                 .filter(~ReservationLink.reservation.has(Reservation.is_cancelled))
                 .join(Reservation)
                 .order_by(Reservation.start_dt)
                 .all())
        # Start/end times keyed by id, serialized for the client-side widget.
        contribs_data = {c.id: {'start_dt': c.start_dt.isoformat(), 'end_dt': c.end_dt.isoformat()}
                         for c in _contrib_query(self.event)}
        session_blocks_data = {sb.id: {'start_dt': sb.start_dt.isoformat(), 'end_dt': sb.end_dt.isoformat()}
                               for sb in _session_block_query(self.event)}
        is_past_event = self.event.end_dt < now_utc()
        event_rb_params = get_booking_params_for_event(self.event)
        return WPEventBookingList.render_template('booking_list.html', self.event,
                                                  form=form,
                                                  links=links,
                                                  has_contribs=has_contribs,
                                                  contribs_data=contribs_data,
                                                  has_session_blocks=has_session_blocks,
                                                  session_blocks_data=session_blocks_data,
                                                  event_rb_params=event_rb_params,
                                                  is_past_event=is_past_event)
class RHListLinkableContributions(RHManageEventBase):
    """AJAX endpoint that lists all contributions in the event."""

    def _process(self):
        # Serialize each upcoming scheduled contribution for the picker widget.
        payload = []
        for contrib in _contrib_query(self.event):
            payload.append({'id': contrib.id,
                            'friendly_id': contrib.friendly_id,
                            'title': contrib.title,
                            'full_title': contrib.verbose_title})
        return jsonify(payload)
class RHListLinkableSessionBlocks(RHManageEventBase):
    """AJAX endpoint that lists all session blocks in the event."""

    def _process(self):
        query = _session_block_query(self.event)
        # full_title example: "#12: Morning block (1 Jan 2020 09:00)"
        result = [{'id': session_block.id,
                   'friendly_id': session_block.session.friendly_id,
                   'title': session_block.full_title,
                   'full_title': '#{}: {} ({})'.format(
                       session_block.session.friendly_id, session_block.full_title,
                       to_unicode(format_datetime(session_block.timetable_entry.start_dt)))}
                  for session_block in query]
        return jsonify(result)
| 45.541284 | 108 | 0.637994 |
ace862b47cd4cb07e1a8faf78212e24821beee28 | 285 | py | Python | Chapter03/process_in_subclass.py | ibiscum/Python-Parallel-Programming-Cookbook-Second-Edition | 8fd583019778b4d797d4f948d091b5564e23f732 | [
"MIT"
] | null | null | null | Chapter03/process_in_subclass.py | ibiscum/Python-Parallel-Programming-Cookbook-Second-Edition | 8fd583019778b4d797d4f948d091b5564e23f732 | [
"MIT"
] | null | null | null | Chapter03/process_in_subclass.py | ibiscum/Python-Parallel-Programming-Cookbook-Second-Edition | 8fd583019778b4d797d4f948d091b5564e23f732 | [
"MIT"
] | null | null | null | import multiprocessing
class MyProcess(multiprocessing.Process):
    """Process subclass whose run() simply announces the worker's name."""

    def run(self):
        message = 'called run method in %s' % self.name
        print(message)
        return
if __name__ == '__main__':
    # Launch ten workers; join() right after start() waits for each one,
    # so the processes run sequentially rather than concurrently.
    for i in range(10):
        process = MyProcess()
        process.start()
        process.join()
ace86310d3fefdb72e0a3f682a7e2cb5265be639 | 1,753 | py | Python | photodetector/__main__.py | bebaek/photodetector | 3d66d52a3ee586b945b48d1b78e976e27ee937ed | [
"MIT"
] | null | null | null | photodetector/__main__.py | bebaek/photodetector | 3d66d52a3ee586b945b48d1b78e976e27ee937ed | [
"MIT"
] | null | null | null | photodetector/__main__.py | bebaek/photodetector | 3d66d52a3ee586b945b48d1b78e976e27ee937ed | [
"MIT"
] | null | null | null | import argparse
import logging
from .image_processor import ImageProcessor
def main():
    """Entry point.

    Parses CLI options, configures logging (DEBUG when --diagnose), then
    runs the project's ImageProcessor over the given paths and reports.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('path', help='file or directory', nargs='+')
    parser.add_argument('--outdir', help='output directory')
    parser.add_argument('--threshold', type=int, default=220,
                        help='threshold in grayscale')
    parser.add_argument('--min-area', type=int, default=50000, help='min area')
    parser.add_argument('--left-trim', type=int, default=0,
                        help='left edge thickness to trim')
    parser.add_argument('--right-trim', type=int, default=0,
                        help='right edge thickness to trim')
    parser.add_argument('--top-trim', type=int, default=0,
                        help='top edge thickness to trim')
    parser.add_argument('--no-close', help='do not close speckles',
                        action='store_true')
    parser.add_argument('--no-suppress-overlap',
                        help='do not suppress overlapping contours',
                        action='store_true')
    parser.add_argument('--diagnose', help='diagnose mode',
                        action='store_true')
    args = parser.parse_args()

    level = logging.DEBUG if args.diagnose else logging.INFO
    logging.basicConfig(level=level)

    # Note the flag inversions: --no-close disables closing, so close=True
    # is the default behavior.
    processor = ImageProcessor(
        outdir=args.outdir,
        thresh=args.threshold,
        min_area=args.min_area,
        left_trim=args.left_trim,
        right_trim=args.right_trim,
        top_trim=args.top_trim,
        close=not args.no_close,
        no_suppress_overlap=args.no_suppress_overlap,
        diagnose=args.diagnose,
    )
    processor.run(args.path)
    processor.report()
ace8634153eef067d46aa73abedd9746fc93d7a8 | 5,881 | py | Python | restrain_jit/vm/am.py | thautwarm/restrain-jit | f76b3e9ae8a34d2eef87a42cc87197153f14634c | [
"MIT"
] | 116 | 2019-09-18T15:43:09.000Z | 2022-02-18T15:28:08.000Z | restrain_jit/vm/am.py | thautwarm/restrain-jit | f76b3e9ae8a34d2eef87a42cc87197153f14634c | [
"MIT"
] | 6 | 2019-09-18T16:12:49.000Z | 2021-02-03T13:01:42.000Z | restrain_jit/vm/am.py | thautwarm/restrain-jit | f76b3e9ae8a34d2eef87a42cc87197153f14634c | [
"MIT"
] | 8 | 2019-09-19T07:15:05.000Z | 2022-01-19T19:40:10.000Z | import abc
import types
import typing as t
import bytecode
from restrain_jit.jit_info import PyCodeInfo, PyFuncInfo
from dataclasses import dataclass
@dataclass
class Symbol:
    """Wrapper marking a string as a symbol (distinct from a plain str)."""
    # NOTE(review): exact symbol semantics are defined by consumers
    # elsewhere in the package — verify before relying on this doc.
    s: str


@dataclass
class ValSymbol:
    """Wrapper marking a string as a value symbol."""
    # NOTE(review): distinction from `Symbol` is not evident here — verify.
    s: str
Instr = t.TypeVar("Instr")
Repr = t.TypeVar("Repr")
class AM(t.Generic[Instr, Repr]):
    """Abstract machine interface for RestrainJIT back ends.

    ``Instr`` is the back end's instruction type and ``Repr`` its runtime
    value representation.  The module-level combinators below defer each
    operation to whichever concrete ``AM`` drives ``run_machine``.

    Fix: the abstract bodies previously did ``raise NotImplemented``.
    ``NotImplemented`` is a sentinel *value*, not an exception class, so
    raising it produces a confusing ``TypeError: exceptions must derive
    from BaseException``.  Every body now raises ``NotImplementedError``.
    """

    @abc.abstractmethod
    def yield_return(self, val: Repr):
        raise NotImplementedError

    @abc.abstractmethod
    def set_lineno(self, lineno: int):
        raise NotImplementedError

    @abc.abstractmethod
    def require_global(self, s: str):
        raise NotImplementedError

    @abc.abstractmethod
    def meta(self) -> dict:
        raise NotImplementedError

    @abc.abstractmethod
    def pop_exception(self, must: bool) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def push_block(self, end_label: str) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def pop_block(self) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def last_block_end(self) -> str:
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def reg_of(cls, n: str):
        raise NotImplementedError

    @abc.abstractmethod
    def from_higher(self, qualifier: str, name: str):
        raise NotImplementedError

    @abc.abstractmethod
    def from_lower(self, qualifier: str, name: str):
        raise NotImplementedError

    @abc.abstractmethod
    def release(self, name: Repr) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def alloc(self) -> str:
        raise NotImplementedError

    @abc.abstractmethod
    def add_instr(self, tag: t.Union[None, str], instr: Instr) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def pop(self) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def push(self, r: Repr) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def label(self, n: str) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def jump_if(self, n: str, cond: Repr) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def jump_if_push(self, n: str, cond: Repr, leave: Repr) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def jump(self, n: str) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def peek(self, n: int) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def assign(self, reg: str, v: Repr):
        raise NotImplementedError

    @abc.abstractmethod
    def load(self, reg: str) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def store(self, reg: str, val: Repr) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def app(self, f: Repr, args: t.List[Repr]) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def const(self, val: object) -> Repr:
        raise NotImplementedError

    @abc.abstractmethod
    def from_const(self, val: Repr) -> object:
        raise NotImplementedError

    @abc.abstractmethod
    def ret(self, val: Repr) -> None:
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def code_info(cls, code: bytecode.Bytecode) -> PyCodeInfo[Instr]:
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def func_info(cls, func: types.FunctionType):
        raise NotImplementedError

    @abc.abstractmethod
    def get_module(self) -> types.ModuleType:
        raise NotImplementedError
def code_info(code: bytecode.Bytecode):
    # Combinator: defer `code_info` analysis to whichever VM runs the binder.
    return lambda vm: vm.code_info(code)
def func_info(fn: types.FunctionType):
    """Combinator: defer ``func_info`` analysis of *fn* to the running VM.

    Fix: the original returned ``lambda vm: vm.code_info(fn)`` — a
    copy-paste of ``code_info`` above that bypassed ``AM.func_info``
    entirely and fed a function object to the code-object API.
    """
    return lambda vm: vm.func_info(fn)
# --- AM combinators: each returns a binder `vm -> result` that defers the
# --- same-named AM method to whichever VM executes it (see run_machine).
def pop_exception(must: bool = False) -> Repr:
    return lambda vm: vm.pop_exception(must)


def require_global(a: str):
    return lambda vm: vm.require_global(a)


def meta():
    return lambda vm: vm.meta()


def last_block_end():
    return lambda vm: vm.last_block_end()


def push_block(r: str):
    return lambda vm: vm.push_block(r)


def pop_block():
    return lambda vm: vm.pop_block()


def from_const(r: Repr):
    return lambda vm: vm.from_const(r)


def ret(val: Repr):
    return lambda vm: vm.ret(val)


def const(val: object):
    return lambda vm: vm.const(val)


def reg_of(name: str):
    return lambda vm: vm.reg_of(name)


def release(name: Repr):
    return lambda vm: vm.release(name)


def alloc():
    return lambda vm: vm.alloc()
def add_instr(instr: Instr):
    """Combinator: allocate a register, emit *instr* tagged with it, and
    return the value representation of that register.

    Fix: the result is now looked up with ``vm.reg_of(a)`` — the freshly
    allocated register name.  The original called ``vm.reg_of(vm)``,
    passing the VM object itself where ``reg_of`` expects a register name
    string, so the emitted instruction's result could never be retrieved.
    """

    def apply(vm):
        a = vm.alloc()
        vm.add_instr(a, instr)
        return vm.reg_of(a)

    return apply
# --- More AM combinators (binders deferring to the running VM). ---
def from_higher(qualifier: str, name: str):
    return lambda vm: vm.from_higher(qualifier, name)


def from_lower(qualifier: str, name: str):
    return lambda vm: vm.from_lower(qualifier, name)


def pop() -> Repr:
    return lambda vm: vm.pop()


def push(r: Repr):
    return lambda vm: vm.push(r)


def label(n: str):
    return lambda vm: vm.label(n)


def jump_if(n: str, cond: Repr):
    return lambda vm: vm.jump_if(n, cond)


def jump_if_push(n: str, cond: Repr, leave: Repr):
    return lambda vm: vm.jump_if_push(n, cond, leave)


def jump(n: str):
    return lambda vm: vm.jump(n)


def peek(n: int):
    return lambda vm: vm.peek(n)


def assign(reg: str, v: Repr):
    return lambda vm: vm.assign(reg, v)


def load(reg: str) -> Repr:
    return lambda vm: vm.load(reg)


def store(reg: str, val: Repr):
    return lambda vm: vm.store(reg, val)


def app(f: Repr, args: t.List[Repr]):
    return lambda vm: vm.app(f, args)


def get_module():
    return lambda vm: vm.get_module()


def set_lineno(i):
    return lambda vm: vm.set_lineno(i)


def yield_return(val):
    return lambda vm: vm.yield_return(val)
def run_machine(gen: t.Generator, vm: AM):
    """Top level of abstract interpretation.

    Repeatedly sends the previous result into *gen*; each value the
    generator yields is a binder (``vm -> result``) that is applied to
    *vm*.  When the generator finishes, its return value is the result
    of the whole run.
    """
    result = None
    try:
        while True:
            binder = gen.send(result)
            result = binder(vm)
    except StopIteration as stop:
        return stop.value
| 19.935593 | 71 | 0.6504 |
ace8635a25757ed1ccd8135625d3e3ad5e7c676f | 4,458 | py | Python | mmdet/models/necks/nanodet_pafpn.py | Bo396543018/Picodet_Pytorch | 276ecbf6f4f7eefbf046d1bccc25293acf28ba25 | [
"Apache-2.0"
] | 16 | 2022-02-08T13:20:30.000Z | 2022-03-28T10:30:58.000Z | mmdet/models/necks/nanodet_pafpn.py | Bo396543018/picodet_repro | 276ecbf6f4f7eefbf046d1bccc25293acf28ba25 | [
"Apache-2.0"
] | 3 | 2021-11-21T07:41:14.000Z | 2022-01-15T06:29:24.000Z | mmdet/models/necks/nanodet_pafpn.py | Bo396543018/picodet_repro | 276ecbf6f4f7eefbf046d1bccc25293acf28ba25 | [
"Apache-2.0"
] | 4 | 2021-11-19T07:59:23.000Z | 2021-12-25T11:38:36.000Z | # Modification 2020 RangiLyu
# Copyright 2018-2019 Open-MMLab.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn.functional as F
import torch.nn as nn
from mmcv.runner import BaseModule
from ..builder import NECKS
from ..module.conv import ConvModule
from ..module.init_weights import xavier_init
@NECKS.register_module()
class NanoDetPAN(BaseModule):
    """Path Aggregation Network for Instance Segmentation.

    This is an implementation of the `PAN in Path Aggregation Network
    <https://arxiv.org/abs/1803.01534>`_.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        activation (str): Config dict for activation layer in ConvModule.
            Default: None.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        num_outs,
        start_level=0,
        end_level=-1,
        conv_cfg=None,
        norm_cfg=None,
        activation=None,
    ):
        super(NanoDetPAN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.fp16_enabled = False

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level

        # One 1x1 lateral conv per used backbone level, projecting each
        # input scale down to a common `out_channels`.
        self.lateral_convs = nn.ModuleList()

        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=activation,
                inplace=False,
            )

            self.lateral_convs.append(l_conv)
        self.init_weights()

    # default init_weights for conv(msra) and norm in ConvModule
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution="uniform")

    def forward(self, inputs):
        """Forward function.

        NOTE(review): unlike classic FPN, there are no extra 3x3 smoothing
        convs — fusion is pure interpolation + addition; confirm this is
        intentional for the NanoDet design.
        """
        assert len(inputs) == len(self.in_channels)

        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]

        # build top-down path: upsample coarser levels and add into finer ones
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            laterals[i - 1] += F.interpolate(
                laterals[i], scale_factor=2, mode="bilinear"
            )

        # build outputs
        # part 1: from original levels
        inter_outs = [laterals[i] for i in range(used_backbone_levels)]

        # part 2: add bottom-up path (downsample finer levels into coarser ones)
        for i in range(0, used_backbone_levels - 1):
            inter_outs[i + 1] += F.interpolate(
                inter_outs[i], scale_factor=0.5, mode="bilinear"
            )

        outs = []
        outs.append(inter_outs[0])
        outs.extend([inter_outs[i] for i in range(1, used_backbone_levels)])
return tuple(outs) | 34.55814 | 79 | 0.628533 |
ace8637a8f3e5495c5fea4686275e83a869e87b8 | 7,223 | py | Python | src/mgard-x/Testing/MDR/plot_encoding.py | JasonRuonanWang/MGARD | 70d3399f6169c8a369da9fe9786c45cb6f3bb9f1 | [
"Apache-2.0"
] | null | null | null | src/mgard-x/Testing/MDR/plot_encoding.py | JasonRuonanWang/MGARD | 70d3399f6169c8a369da9fe9786c45cb6f3bb9f1 | [
"Apache-2.0"
] | null | null | null | src/mgard-x/Testing/MDR/plot_encoding.py | JasonRuonanWang/MGARD | 70d3399f6169c8a369da9fe9786c45cb6f3bb9f1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import subprocess
import csv
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import math
SMALL_SIZE = 12
MEDIUM_SIZE = 18
BIGGER_SIZE = 14
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=MEDIUM_SIZE) # fontsize of the figure title
def read_csv(filename):
    """Parse *filename* as comma-separated rows of floats.

    Returns a list of rows, each a list of floats.  The ``header`` counter
    preserves the original hook for skipping leading rows (it starts at 0
    here, so every row is parsed).

    Fix: the file handle was opened and never closed; a ``with`` block now
    guarantees it is released (works on both Python 2.6+ and 3).
    """
    data = []
    header = 0
    with open(filename) as f:
        r = csv.reader(f, delimiter=',')
        for row in r:
            if header == 0:
                row_data = [float(token) for token in row]
                data.append(row_data)
            else:
                header -= 1
    return data
def plot_line(title, data, x_ticks, y_max, y_step, xlabel, ylabel, legend, output):
    """Draw one log-scale line per series in *data* and save to <output>.png.

    NOTE(review): this module is Python 2 (bare ``print`` statements below);
    keep running it under Python 2.
    """
    fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(12,5))
    ax1.set_title(title)
    # Use the longer of the first two series to size the x axis.
    if (len(data[0]) > len(data[1])):
        x_idx = np.array(range(len(data[0])))
    else:
        x_idx = np.array(range(len(data[1])))
    y_idx = np.arange(0, y_max, y_step)
    style = ['b-s', 'g-o', 'b-v']
    ps = []
    for i in range(len(legend)):
        p, = ax1.plot(np.array(range(len(data[i]))), data[i], style[i], linewidth=4, markersize=15)
        ps.append(p)
    ax1.set_xticks(x_idx)
    ax1.set_xticklabels(x_ticks)
    ax1.set_xlabel(xlabel)
    ax1.tick_params(axis='x', rotation=0)
    ax1.set_yticks(y_idx)
    ax1.set_yscale('log')
    # ax1.set_yticklabels([str(round(float(label), 2)) for label in y_idx])
    ax1.set_ylabel(ylabel)
    ax1.grid(which='major', axis='y')
    ax1.legend(tuple(ps), legend)
    plt.tight_layout()
    plt.savefig('{}.png'.format(output))
def plot_bar(title, data, x_ticks, y_max, y_step, xlabel, ylabel, legend, output):
    """Draw grouped bars (one group offset per series) and save to <output>.png."""
    fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(8,5))
    bar_width = 0.25
    offset = 0
    ax1.set_title(title)
    if (len(data[0]) > len(data[1])):
        x_idx = np.array(range(len(data[0])))
    else:
        x_idx = np.array(range(len(data[1])))
    y_idx = np.arange(0, y_max, y_step)
    colors = ['b', 'g', 'r']
    ps = []
    for i in range(len(legend)):
        # Debug output (Python 2 print statement).
        print np.array(range(len(data[i])))+offset
        p = ax1.bar(np.array(range(len(data[i])))+offset, data[i], color=colors[i], width=bar_width)
        ps.append(p)
        offset += bar_width
    ax1.set_xticks(x_idx + bar_width/2)
    ax1.set_xticklabels(x_ticks)
    ax1.set_xlabel(xlabel)
    ax1.tick_params(axis='x', rotation=0)
    ax1.set_yticks(y_idx)
    # ax1.set_yscale('log')
    ax1.set_yticklabels([str(round(float(label), 2)) for label in y_idx])
    ax1.set_ylabel(ylabel)
    ax1.grid(which='major', axis='y')
    ax1.legend(tuple(ps), legend)
    plt.tight_layout()
    plt.savefig('{}.png'.format(output))
def get_filename(encoding_type_bits,
                 decoding_type_bits,
                 n,
                 encoding_num_bitplanes,
                 decoding_num_bitplanes,
                 BinaryType,
                 DataEncodingAlgorithm,
                 ErrorCollectingAlgorithm,
                 DataDecodingAlgorithm):
    """Build the results-CSV path encoding every experiment parameter."""
    filename = "encoding_perf_results/pref_{}_{}_{}_{}_{}_{}_{}_{}_{}.csv".format(encoding_type_bits,
                decoding_type_bits,
                n,
                encoding_num_bitplanes,
                decoding_num_bitplanes,
                BinaryType,
                DataEncodingAlgorithm,
                ErrorCollectingAlgorithm,
                DataDecodingAlgorithm)
    # Debug output (Python 2 print statement).
    print filename
    return filename;
def large_data_different_num_bitplanes(BinaryType):
    """Plot CPU/GPU encode+decode throughput vs. number of bitplanes.

    Reads one result CSV per bitplane count for a fixed data size of
    512M coefficients.

    NOTE(review): data is collected for range(1,32) (31 points) but
    x_ticks uses range(1,33) (32 labels) — possible off-by-one; confirm
    against the generated figures.
    """
    DataEncodingAlgorithm = 1
    ErrorCollectingAlgorithm = 1
    DataDecodingAlgorithm = 1
    cpu_encoding = []
    cpu_decoding = []
    gpu_encoding = []
    gpu_decoding = []
    for num_bitplane in range(1,32,1):
        csv_data = read_csv(get_filename(32, 32, 512*1024*1024, num_bitplane, num_bitplane,
                                         BinaryType, DataEncodingAlgorithm, ErrorCollectingAlgorithm,
                                         DataDecodingAlgorithm))
        cpu_encoding.append(csv_data[0][0])
        cpu_decoding.append(csv_data[0][1])
        gpu_encoding.append(csv_data[0][2])
        gpu_decoding.append(csv_data[0][3])
    # Debug output (Python 2 print statements).
    print cpu_encoding
    print cpu_decoding
    print gpu_encoding
    print gpu_decoding

    x_ticks = []
    for num_bitplane in range(1,33,1):
        x_ticks.append("{}".format(num_bitplane))
    plot_line("Bitplane Encoding (Num. of Coefficients = $2^{29}$)", [cpu_encoding, gpu_encoding], x_ticks, 100, 5,
              "Number of Encoding Bitplanes", "Throughput (GB/s)", ["CPU", "GPU"], "encoding_num_bitplanes_binarytype_{}".format(BinaryType))
    plot_line("Bitplane Decoding (Num. of Coefficients = $2^{29}$)", [cpu_decoding, gpu_decoding], x_ticks, 100, 5,
              "Number of Decoding Bitplanes", "Throughput (GB/s)", ["CPU", "GPU"], "decoding_num_bitplanes_binarytype_{}".format(BinaryType))
def max_num_bitplane_different_data_sizes(BinaryType):
    """Plot CPU/GPU encode+decode throughput vs. data size at 32 bitplanes.

    NOTE(review): data is collected for i in range(1,20) (19 points) but
    x_ticks uses range(10,30) (20 labels, starting at 2^10) — the sizes
    read are 2^i*1024 = 2^(i+10); confirm the tick alignment.
    """
    DataEncodingAlgorithm = 1
    ErrorCollectingAlgorithm = 1
    DataDecodingAlgorithm = 1
    num_bitplane = 32
    cpu_encoding = []
    cpu_decoding = []
    gpu_encoding = []
    gpu_decoding = []
    for i in range(1,20,1):
        csv_data = read_csv(get_filename(32, 32, 2**i*1024, num_bitplane, num_bitplane,
                                         BinaryType, DataEncodingAlgorithm, ErrorCollectingAlgorithm,
                                         DataDecodingAlgorithm))
        cpu_encoding.append(csv_data[0][0])
        cpu_decoding.append(csv_data[0][1])
        gpu_encoding.append(csv_data[0][2])
        gpu_decoding.append(csv_data[0][3])
    # Debug output (Python 2 print statements).
    print cpu_encoding
    print cpu_decoding
    print gpu_encoding
    print gpu_decoding

    x_ticks = []
    for i in range(10,30,1):
        x_ticks.append("$2^{" +str(i) + "}$")
    plot_line("Bitplane Encoding (Num. of bitplanes = 32)", [cpu_encoding, gpu_encoding], x_ticks, 100, 5,
              "Number of Coefficients", "Throughput (GB/s)", ["CPU", "GPU"], "encoding_data_sizes_binarytype_{}".format(BinaryType))
    plot_line("Bitplane Decoding (Num. of bitplanes = 32)", [cpu_decoding, gpu_decoding], x_ticks, 100, 5,
              "Number of Coefficients", "Throughput (GB/s)", ["CPU", "GPU"], "decoding_data_sizes_binarytype_{}".format(BinaryType))
def end_to_end():
    """Plot hard-coded end-to-end timings (seconds); row 0 = CPU, row 1 = GPU.

    NOTE(review): numbers were presumably measured offline — no provenance
    here.  The output name "reconstuct" is a typo kept for compatibility
    with any downstream consumers of the PNG filename.
    """
    refactor = [[0, 10.8476,0], [0,2.25562,0]]
    reconstruct = [[4.32562, 3.18212, 3.06498], [0.769606, 0.304263, 0.249241]]
    plot_bar("Refactoring", refactor, [""], 15, 5, "", "Time (s)", ["CPU", "GPU"], "refactor")
    plot_bar("Progressive Reconstruction", reconstruct, ["125" ,"145", "158"], 5, 1, "PSNR", "Time (s)", ["CPU", "GPU"], "reconstuct")
# large_data_different_num_bitplanes(0)
# max_num_bitplane_different_data_sizes(0)
# large_data_different_num_bitplanes(1)
# max_num_bitplane_different_data_sizes(1)
end_to_end()
| 33.910798 | 141 | 0.633809 |
ace865845c7189c39dae6d1c5fa19dea26adf7ae | 3,272 | py | Python | herbarium/model_train.py | rafelafrance/herbarium_phenology | b33889090fcaf9482927cb1d089f03316dbe86be | [
"MIT"
] | null | null | null | herbarium/model_train.py | rafelafrance/herbarium_phenology | b33889090fcaf9482927cb1d089f03316dbe86be | [
"MIT"
] | 13 | 2021-09-08T20:15:40.000Z | 2022-03-03T21:22:30.000Z | herbarium/model_train.py | rafelafrance/herbarium_phenology | b33889090fcaf9482927cb1d089f03316dbe86be | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Train a model to classify herbarium traits."""
import argparse
import textwrap
from pathlib import Path
from pylib import db
from pylib import model_util as mu
from pylib.const import ALL_TRAITS
from pylib.herbarium_model import BACKBONES
from pylib.herbarium_model import HerbariumModel
from pylib.herbarium_runner import HerbariumTrainingRunner
def parse_args():
    """Process command-line arguments.

    Returns the parsed argparse namespace after cross-validating the
    split-run selection via ``mu.validate_split_runs``.
    """
    description = """Train a herbarium phenology classifier model."""

    arg_parser = argparse.ArgumentParser(
        description=textwrap.dedent(description), fromfile_prefix_chars="@"
    )

    arg_parser.add_argument(
        "--database",
        "--db",
        type=Path,
        metavar="PATH",
        required=True,
        help="""Path to the SQLite3 database (angiosperm data).""",
    )

    arg_parser.add_argument(
        "--save-model",
        type=Path,
        metavar="PATH",
        required=True,
        help="""Save best models to this path.""",
    )

    arg_parser.add_argument(
        "--split-run",
        metavar="NAME",
        required=True,
        help="""Which data split to use. Splits are saved in the database and each
            one is used for a specific purpose.""",
    )

    arg_parser.add_argument(
        "--trait",
        choices=ALL_TRAITS,
        required=True,
        help="""Train to classify this trait.""",
    )

    arg_parser.add_argument(
        "--backbone",
        choices=list(BACKBONES.keys()),
        default=list(BACKBONES.keys())[0],
        help="""Which neural network backbone to use.""",
    )

    arg_parser.add_argument(
        "--load-model",
        type=Path,
        metavar="PATH",
        help="""Continue training with weights from this model.""",
    )

    arg_parser.add_argument(
        "--log-dir",
        type=Path,
        metavar="PATH",
        help="""Output log files to this directory.""",
    )

    arg_parser.add_argument(
        "--learning-rate",
        "--lr",
        type=float,
        metavar="FLOAT",
        default=0.001,
        help="""Initial learning rate. (default: %(default)s)""",
    )

    arg_parser.add_argument(
        "--batch-size",
        type=int,
        metavar="INT",
        default=16,
        help="""Input batch size. (default: %(default)s)""",
    )

    arg_parser.add_argument(
        "--workers",
        type=int,
        metavar="INT",
        default=4,
        help="""Number of workers for loading data. (default: %(default)s)""",
    )

    arg_parser.add_argument(
        "--epochs",
        type=int,
        metavar="INT",
        default=100,
        help="""How many epochs to train. (default: %(default)s)""",
    )

    arg_parser.add_argument(
        "--limit",
        type=int,
        metavar="INT",
        help="""Limit the input to this many records.""",
    )

    args = arg_parser.parse_args()

    # Raises/exits if the chosen split-run is not valid for this database.
    mu.validate_split_runs(args)

    return args
def main():
    """Entry point: wire the database, model and runner together."""
    args = parse_args()
    # The classifier output layer is sized by the taxonomic orders on file.
    order_names = db.select_all_orders(args.database)
    classifier = HerbariumModel(order_names, args.backbone, args.load_model)
    trainer = HerbariumTrainingRunner(classifier, order_names, args)
    trainer.run()


if __name__ == "__main__":
    main()
| 24.237037 | 82 | 0.592298 |
ace865d9ded6e26bcfc3d3dc7f19574ee23b0ca5 | 303 | py | Python | elevator_service/resources/test_elevator.py | Kalimaha/ElevatorSimulatorServices | ff905b7cd7c9cfed9f39c876646e71194dcd732a | [
"MIT"
] | null | null | null | elevator_service/resources/test_elevator.py | Kalimaha/ElevatorSimulatorServices | ff905b7cd7c9cfed9f39c876646e71194dcd732a | [
"MIT"
] | null | null | null | elevator_service/resources/test_elevator.py | Kalimaha/ElevatorSimulatorServices | ff905b7cd7c9cfed9f39c876646e71194dcd732a | [
"MIT"
] | null | null | null | elevator_1 = {
"id": "A",
"session": "alpha",
"time": 1,
"floor": 1,
"people": 0,
"direction": "stationary",
"stops": []
}
# Second test fixture: elevator "B", idle on floor 1 with nobody aboard.
elevator_2 = dict(
    id="B",
    session="alpha",
    time=1,
    floor=1,
    people=0,
    direction="stationary",
    stops=[],
)
| 15.15 | 30 | 0.442244 |
ace865e2b078a4505e233f1308192d89788cbead | 300 | py | Python | src/annalist_root/annalist/views/fields/__init__.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 18 | 2015-02-20T23:09:13.000Z | 2020-11-13T06:06:43.000Z | src/annalist_root/annalist/views/fields/__init__.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 30 | 2015-01-03T09:56:28.000Z | 2021-06-10T20:58:55.000Z | src/annalist_root/annalist/views/fields/__init__.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 5 | 2015-02-02T09:01:23.000Z | 2018-06-14T20:05:28.000Z | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist field renderers
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# End.
| 23.076923 | 64 | 0.736667 |
ace865fa9cd563b98647e68e7448ab7423209757 | 8,113 | py | Python | scripts/softp.py | Hedlen/wood-detection-system | 650da7d503c7de5968dc795c042fdea92b5a7eca | [
"Apache-2.0"
] | 5 | 2019-12-27T08:08:55.000Z | 2021-06-28T02:25:36.000Z | scripts/softp.py | Hedlen/wood-detection-system | 650da7d503c7de5968dc795c042fdea92b5a7eca | [
"Apache-2.0"
] | null | null | null | scripts/softp.py | Hedlen/wood-detection-system | 650da7d503c7de5968dc795c042fdea92b5a7eca | [
"Apache-2.0"
] | 2 | 2020-08-07T12:56:19.000Z | 2021-08-30T03:44:19.000Z | # '''''''''''''
# Author: Dylan
# Date:14/08/2019
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import uuid
import time
import datetime
class SoftwareProtecttion(object):
    '''
    Time-limit and MAC-address based software protection helper.

    Elapsed-time protection writes the first run and every later run
    timestamp into three redundant hidden marker files (two in $HOME, one
    in the current working directory) so that deleting a single file does
    not reset the licence period.  MAC protection records the machine MAC
    address in three hidden files the same way.  ``is_over_time`` checks
    the recorded data against the current clock / hardware.
    '''
    def __init__(self, elapsed_time, elapsed_time_flag=False, mac_protection_flag=False):
        """Create the protection object and seed any missing marker files.

        :param elapsed_time: allowed usage period, in whole days.
        :param elapsed_time_flag: enable the elapsed-time check.
        :param mac_protection_flag: enable the MAC-address check.
        """
        self.elapsed_time = elapsed_time
        self.elapsed_time_flag = elapsed_time_flag
        self.mac_protection_flag = mac_protection_flag
        # Three redundant copies of each marker file.
        self.home_path = os.environ['HOME'] + '/' + '.frt'
        self.home_path_2 = os.environ['HOME'] + '/' + '.grt'
        self.cwd = os.getcwd() + '/' + '.crt'
        self.mac_home_path = os.environ['HOME'] + '/' + '.fmac'
        self.mac_home_path_2 = os.environ['HOME'] + '/' + '.gmac'
        self.mac_cwd = os.getcwd() + '/' + '.cmac'
        self.c_time = datetime.datetime.now().replace(microsecond=0)
        self.c_mac = self.get_mac_address()
        if self.elapsed_time_flag:
            # Seed every missing time marker with the current timestamp.
            for path in (self.home_path, self.home_path_2, self.cwd):
                if not os.path.exists(path):
                    with open(path, 'a') as f:
                        f.write(str(self.c_time) + '\n')
        if self.mac_protection_flag:
            # Seed every missing MAC marker with the current MAC address.
            for path in (self.mac_home_path, self.mac_home_path_2, self.mac_cwd):
                if not os.path.exists(path):
                    with open(path, 'a') as f:
                        f.write(str(self.c_mac))

    def is_over_time(self):
        """Run the enabled protection checks.

        Returns False while usage is still permitted, True when the licence
        period expired, the clock was moved backwards, or the MAC check
        fires.  Returns False when no protection is enabled.
        """
        if not self.elapsed_time_flag and not self.mac_protection_flag:
            return False
        if self.mac_protection_flag:
            current_txt = self.time_file_handle(False)
            with open(current_txt, 'r') as f:
                mac_l = f.readline()
            mac_c = self.get_mac_address()
            # NOTE(review): mirrors the original logic, which reports True
            # when the stored MAC *matches* the current machine -- verify
            # that this polarity is intended.
            if mac_c == mac_l:
                return True
        if self.elapsed_time_flag:
            current_txt = self.time_file_handle(True)
            with open(current_txt, 'r') as f:
                lines = f.readlines()
            first_time = datetime.datetime.strptime(
                lines[0].rstrip('\n'), '%Y-%m-%d %H:%M:%S')
            latest_time = datetime.datetime.strptime(
                lines[-1].rstrip('\n'), '%Y-%m-%d %H:%M:%S')
            n_time = datetime.datetime.now().replace(microsecond=0)
            if n_time >= latest_time:
                # Clock is monotonic w.r.t. the last recorded run: allow
                # use while the elapsed whole days stay within the licence.
                days = self.calc_days(first_time, n_time)
                if days <= self.elapsed_time:
                    self.set_frt(n_time)
                    return False
                return True
            # Current time is before the last recorded run: the system
            # clock was tampered with, so fail the check.
            return True
        # MAC-only protection and the MAC did not match the stored value.
        # (The original fell off the end here and returned None.)
        return False

    def time_file_handle(self, time_or_mac_flag):  # time True; mac False
        """Pick which marker file to read.

        For the time markers, when all three copies agree on line count the
        $HOME copy is used, otherwise the copy with the most recorded runs
        wins (copies diverge if some were deleted and re-seeded).  For the
        MAC markers the CWD copy is always used, as in the original code.
        """
        if time_or_mac_flag:
            txt_list = [self.home_path, self.home_path_2, self.cwd]
            with open(self.home_path, 'r') as f:
                len1 = len(f.readlines())
            with open(self.home_path_2, 'r') as f:
                len2 = len(f.readlines())
            with open(self.cwd, 'r') as f:
                # BUGFIX: the original assigned this to len2, leaving len3
                # permanently 0 so the equality test below could never pass.
                len3 = len(f.readlines())
            if len1 == len2 and len1 == len3:
                return self.home_path
            len_list = [len1, len2, len3]
            return txt_list[len_list.index(max(len_list))]
        return self.mac_cwd

    def set_frt(self, n_time):
        """Append *n_time* to every time-marker file (records this run)."""
        if self.elapsed_time_flag:
            for path in (self.home_path, self.home_path_2, self.cwd):
                with open(path, 'a') as f:
                    f.write(str(n_time) + '\n')

    def get_mac_address(self):
        """Return the machine MAC address formatted as ``aa:bb:cc:dd:ee:ff``."""
        mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
        return ":".join([mac[e:e + 2] for e in range(0, 11, 2)])

    def calc_days(self, time1, time2):
        """Return the number of calendar days from *time1* to *time2*.

        Replaces the original hand-rolled month/leap-year walk with plain
        ``date`` arithmetic, which yields the same result for valid inputs
        (time-of-day is ignored, only the date boundary counts).
        """
        return (time2.date() - time1.date()).days

    def isLeapYear(self, year):
        """Return True if *year* is a Gregorian leap year."""
        return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
| 37.734884 | 90 | 0.505732 |
ace867400f69e6439ffca5c55eebed79db0526e5 | 1,934 | py | Python | venv/Lib/site-packages/pyrogram/raw/base/geo_point.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/base/geo_point.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/base/geo_point.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from typing import Union
from pyrogram import raw
from pyrogram.raw.core import TLObject
GeoPoint = Union[raw.types.GeoPoint, raw.types.GeoPointEmpty]
# noinspection PyRedeclaration
class GeoPoint:  # type: ignore
    """Base type with 2 available constructors.

    Constructors:
        .. hlist::
            :columns: 2

            - :obj:`GeoPoint <pyrogram.raw.types.GeoPoint>`
            - :obj:`GeoPointEmpty <pyrogram.raw.types.GeoPointEmpty>`
    """

    QUALNAME = "pyrogram.raw.base.GeoPoint"

    def __init__(self):
        # Base types are abstract markers for type checking only; any
        # attempt to instantiate one is rejected with a descriptive error.
        message = (
            "Base types can only be used for type checking purposes: "
            "you tried to use a base type instance as argument, "
            "but you need to instantiate one of its constructors instead. "
            "More info: https://docs.pyrogram.org/telegram/base/geo-point"
        )
        raise TypeError(message)
| 37.921569 | 87 | 0.637539 |
ace8684672151465d346175188f1b00693e36c4d | 4,228 | py | Python | placement/conf/database.py | sapcc/placement | 471e4cc8b78d4382820bbb5245e15d648e7cc136 | [
"Apache-2.0"
] | null | null | null | placement/conf/database.py | sapcc/placement | 471e4cc8b78d4382820bbb5245e15d648e7cc136 | [
"Apache-2.0"
] | null | null | null | placement/conf/database.py | sapcc/placement | 471e4cc8b78d4382820bbb5245e15d648e7cc136 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import options as oslo_db_options
_ENRICHED = False
def enrich_help_text(alt_db_opts):
    """Prepend oslo.db's help text onto same-named options in *alt_db_opts*."""
    # Locate the generic 'database' option group from oslo.db (empty list
    # when the group is absent).
    db_opts = next(
        (opts for group_name, opts in oslo_db_options.list_opts()
         if group_name == 'database'),
        [],
    )
    for db_opt in db_opts:
        for alt_db_opt in alt_db_opts:
            if alt_db_opt.name == db_opt.name:
                # NOTE(markus_z): We can append alternative DB specific help
                # texts here if needed.
                alt_db_opt.help = db_opt.help + alt_db_opt.help
# NOTE(markus_z): We cannot simply do:
# conf.register_opts(oslo_db_options.database_opts, 'placement_database')
# If we reuse a db config option for two different groups ("placement_database"
# and "database") and deprecate or rename a config option in one of these
# groups, "oslo.config" cannot correctly determine which one to update.
# That's why we copied & pasted these config options for the
# "placement_database" group here. See nova commit ba407e3 ("Add support
# for multiple database engines") for more details.
# TODO(cdent): Consider our future options of using 'database' instead of
# 'placement_database' for the group. This is already loose in the wild,
# explicit, and safe if there will ever be more than one database, so may
# be good to leave it.
placement_db_group = cfg.OptGroup('placement_database',
title='Placement API database options',
help="""
The *Placement API Database* is a the database used with the placement
service. If the connection option is not set, the placement service will
not start.
""")
placement_db_opts = [
cfg.StrOpt('connection',
help='',
required=True,
secret=True),
cfg.StrOpt('connection_parameters',
default='',
help=''),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help=''),
cfg.StrOpt('slave_connection',
secret=True,
help=''),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help=''),
cfg.IntOpt('connection_recycle_time',
default=3600,
help=''),
cfg.IntOpt('max_pool_size',
help=''),
cfg.IntOpt('max_retries',
default=10,
help=''),
cfg.IntOpt('retry_interval',
default=10,
help=''),
cfg.IntOpt('max_overflow',
help=''),
cfg.IntOpt('connection_debug',
default=0,
help=''),
cfg.BoolOpt('connection_trace',
default=False,
help=''),
cfg.IntOpt('pool_timeout',
help=''),
cfg.BoolOpt('sync_on_startup',
default=False,
help='If True, database schema migrations will be attempted when the'
' web service starts.'),
] # noqa
def register_opts(conf):
    """Register the placement database options on *conf* under the
    [placement_database] group."""
    conf.register_opts(placement_db_opts, group=placement_db_group)
def list_opts():
    """Return the option groups exposed for config-sample generation.

    Only the 'placement_database' group is listed.  The generic oslo.db
    options are deliberately omitted: listing them here would make them
    appear twice in the generated sample file, since they are already
    emitted under the "oslo.db" namespace by the config generator.
    """
    global _ENRICHED
    if not _ENRICHED:
        # Augment the option help text with oslo.db's wording exactly once
        # per process.
        enrich_help_text(placement_db_opts)
        _ENRICHED = True
    return {placement_db_group: placement_db_opts}
| 34.942149 | 79 | 0.659177 |
ace86896a7795f82bedc628ace862a48caf63b38 | 1,257 | py | Python | floodsystem/flood.py | negsrahimi/monke | ec2c953c6f10103eb2b45dc68160246a6ee5a473 | [
"MIT"
] | null | null | null | floodsystem/flood.py | negsrahimi/monke | ec2c953c6f10103eb2b45dc68160246a6ee5a473 | [
"MIT"
] | null | null | null | floodsystem/flood.py | negsrahimi/monke | ec2c953c6f10103eb2b45dc68160246a6ee5a473 | [
"MIT"
] | null | null | null | # A module with functions related to flooding
from .utils import sorted_by_key
from .stationdata import update_water_levels
def stations_in_desc_order(stations):
    """Return (station, relative_level) pairs sorted by level, descending.

    Water levels are refreshed from the live service first; stations whose
    latest relative water level is unavailable (None) are skipped.
    """
    update_water_levels(stations)
    pairs = []
    for station in stations:
        level = station.relative_water_level()
        # Compare with `is not None`: a level of 0.0 is valid data.
        if level is not None:
            pairs.append((station, level))
    return sorted_by_key(pairs, 1, reverse=True)
def stations_level_over_threshold(stations, tol):
    """This function takes a list of stations and a tolerance value. It returns a list of tuples, where each tuple holds (i) a station (object) at which the latest
    relative water level is over the tolerance and (ii) the relative water level at the station. The returned list is sorted by the relative level in descending order."""
    pairs = stations_in_desc_order(stations)
    for i, (_station, level) in enumerate(pairs):
        if level <= tol:
            # The list is sorted descending, so everything before index i
            # is strictly above the tolerance.
            return pairs[:i]
    # BUGFIX: when every station is above the tolerance the original loop
    # completed without returning anything (implicit None); return them all.
    return pairs
def stations_highest_rel_level(stations, N):
    """Return the N stations with the highest relative water level,
    sorted in descending order by relative level."""
    top_pairs = stations_in_desc_order(stations)[:N]
    return [station for station, _level in top_pairs]
ace868fe2b5511357c3b11f2e1e9d7fd97708920 | 13,289 | py | Python | GCode_A3200.py | mauderliEmbotech/robodk_postprocessors | 32f47272bc28dc8d2bfd95c2237b945de009b0e5 | [
"Apache-2.0"
] | 41 | 2017-03-07T11:00:23.000Z | 2022-03-01T19:05:04.000Z | GCode_A3200.py | mauderliEmbotech/robodk_postprocessors | 32f47272bc28dc8d2bfd95c2237b945de009b0e5 | [
"Apache-2.0"
] | 7 | 2017-05-11T09:31:24.000Z | 2021-07-24T16:16:43.000Z | GCode_A3200.py | mauderliEmbotech/robodk_postprocessors | 32f47272bc28dc8d2bfd95c2237b945de009b0e5 | [
"Apache-2.0"
] | 33 | 2017-04-28T03:23:15.000Z | 2022-03-21T21:41:09.000Z | # Copyright 2017 - RoboDK Software S.L. - http://www.robodk.com/
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------
# This file is a sample POST PROCESSOR script to generate robot programs for a B&R Automation controller
#
# To edit/test this POST PROCESSOR script file:
# Select "Program"->"Add/Edit Post Processor", then select your post or create a new one.
# You can edit this file using any text editor or Python editor. Using a Python editor allows to quickly evaluate a sample program at the end of this file.
# Python should be automatically installed with RoboDK
#
# You can also edit the POST PROCESSOR manually:
# 1- Open the *.py file with Python IDLE (right click -> Edit with IDLE)
# 2- Make the necessary changes
# 3- Run the file to open Python Shell: Run -> Run module (F5 by default)
# 4- The "test_post()" function is called automatically
# Alternatively, you can edit this file using a text editor and run it with Python
#
# To use a POST PROCESSOR file you must place the *.py file in "C:/RoboDK/Posts/"
# To select one POST PROCESSOR for your robot in RoboDK you must follow these steps:
# 1- Open the robot panel (double click a robot)
# 2- Select "Parameters"
# 3- Select "Unlock advanced options"
# 4- Select your post as the file name in the "Robot brand" box
#
# To delete an existing POST PROCESSOR script, simply delete this file (.py file)
#
# ----------------------------------------------------
# More information about RoboDK Post Processors and Offline Programming here:
# http://www.robodk.com/help#PostProcessor
# http://www.robodk.com/doc/en/PythonAPI/postprocessor.html
# ----------------------------------------------------
# ----------------------------------------------------
# Import RoboDK tools
from robodk import *
# Axis names used when emitting joint targets for the A3200 controller.
JOINT_DATA = ['X', 'Y', 'Z', 'PH', 'RH', 'RZ']

# ----------------------------------------------------
def pose_2_str(pose, joints=None):
    """Format a target as an A3200 axis-word string.

    When *joints* is provided, the joint values are emitted using the axis
    names from JOINT_DATA; otherwise *pose* is converted to XYZABC (KUKA
    convention) and emitted as ``X .. Y .. Z .. A .. B .. C ..``.
    """
    if joints is not None:
        # Delegate to joints_2_str (identical output to the original loop);
        # the pose is only converted when it is actually needed -- the
        # original called Pose_2_KUKA even when the result was discarded.
        return joints_2_str(joints)
    [x, y, z, r, p, w] = Pose_2_KUKA(pose)
    return 'X %.3f Y %.3f Z %.3f A %.3f B %.3f C %.3f' % (x, y, z, r, p, w)

def joints_2_str(joints):
    """Format a joint target as space-separated ``NAME VALUE`` pairs."""
    # join() replaces the original manual accumulation that also shadowed
    # the builtin `str` and trimmed a trailing space with [:-1].
    return ' '.join('%s %.6f' % (JOINT_DATA[i], j) for i, j in enumerate(joints))
# ----------------------------------------------------
# Object class that handles the robot instructions/syntax
class RobotPost(object):
    """Robot post object.

    Accumulates A3200-style G-code lines in ``self.PROG`` as the RoboDK
    driver calls the movement/IO methods, then writes the program to disk
    in ``ProgSave`` and optionally uploads it via FTP in ``ProgSendRobot``.
    """
    PROG_EXT = 'cnc' # set the program extension
    # other variables
    ROBOT_POST = ''        # post processor name, set by __init__
    ROBOT_NAME = ''        # robot name, set by __init__
    PROG_FILES = []        # path(s) of the last saved program file
    SPEED_MMS = 100        # current linear speed in mm/s (F word)
    PROG = ''              # generated program text, one line per instruction
    LOG = ''               # accumulated warnings shown after generation
    nAxes = 6              # number of robot axes
    nId = 0                # counter incremented on each setTool call
    REF_FRAME = eye(4)     # active reference frame (4x4 pose from robodk)
    def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs):
        """Initialize the post; ``lines_x_prog`` kwarg caps lines per program."""
        self.ROBOT_POST = robotpost
        self.ROBOT_NAME = robotname
        self.PROG = ''
        self.LOG = ''
        self.nAxes = robot_axes
        for k,v in kwargs.items():
            if k == 'lines_x_prog':
                self.MAX_LINES_X_PROG = v
    def ProgStart(self, progname):
        """Begin a program: emit a header comment and enable velocity mode."""
        self.addline('// program: %s' % progname)
        self.addline('VELOCITY ON')
    def ProgFinish(self, progname):
        """End a program with a trailing comment."""
        self.addline('// end of program ' + progname)
    def ProgSave(self, folder, progname, ask_user=False, show_result=False):
        """Write the accumulated program to disk and optionally open it.

        When ask_user is True (or the folder is missing) a save dialog is
        shown via robodk's getSaveFile; returning without saving aborts.
        """
        progname = progname + '.' + self.PROG_EXT
        if ask_user or not DirExists(folder):
            filesave = getSaveFile(folder, progname, 'Save program as...')
            if filesave is not None:
                filesave = filesave.name
            else:
                # User cancelled the save dialog.
                return
        else:
            filesave = folder + '/' + progname
        fid = open(filesave, "w")
        fid.write(self.PROG)
        fid.close()
        print('SAVED: %s\n' % filesave)
        self.PROG_FILES = filesave
        #---------------------- show result
        if show_result:
            if type(show_result) is str:
                # Open file with provided application
                import subprocess
                p = subprocess.Popen([show_result, filesave])
            elif type(show_result) is list:
                # show_result is a command list; append the file as argument.
                import subprocess
                p = subprocess.Popen(show_result + [filesave])
            else:
                # open file with default application
                # NOTE(review): os.startfile is Windows-only.
                import os
                os.startfile(filesave)
            if len(self.LOG) > 0:
                mbox('Program generation LOG:\n\n' + self.LOG)
    def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass):
        """Send a program to the robot using the provided parameters. This method is executed right after ProgSave if we selected the option "Send Program to Robot".
        The connection parameters must be provided in the robot connection menu of RoboDK"""
        UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass)
    def MoveJ(self, pose, joints, conf_RLF=None):
        """Add a joint movement"""
        self.addline('JOINTS %s F%.2f' % (joints_2_str(joints), self.SPEED_MMS))
    def MoveL(self, pose, joints, conf_RLF=None):
        """Add a linear movement"""
        # Target is expressed in the active reference frame.
        self.addline('LINEAR %s F%.2f' % (pose_2_str(self.REF_FRAME*pose, joints), self.SPEED_MMS))
    def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None):
        """Add a circular movement"""
        # Two CIRCULAR lines: intermediate point then end point.
        self.addline('CIRCULAR %s F%.2f' % (pose_2_str(self.REF_FRAME*pose1, joints1), self.SPEED_MMS))
        self.addline('CIRCULAR %s F%.2f' % (pose_2_str(self.REF_FRAME*pose2, joints2), self.SPEED_MMS))
    def setFrame(self, pose, frame_id=None, frame_name=None):
        """Change the robot reference frame"""
        self.REF_FRAME = pose
        self.addline('// Reference frame set to: ' + pose_2_str(pose))
    def setTool(self, pose, tool_id=None, tool_name=None):
        """Change the robot TCP"""
        # Only a comment is emitted; the counter tracks how many tools were set.
        self.nId = self.nId + 1
        self.addline('// Tool frame set to: ' + pose_2_str(pose))
    def Pause(self, time_ms):
        """Pause the robot program"""
        # Negative time means an indefinite pause (PAUSE); otherwise DWELL
        # for the given number of seconds.
        if time_ms < 0:
            self.addline('PAUSE')
        else:
            self.addline('DWELL %.3f' % (time_ms*0.001))
    def setSpeed(self, speed_mms):
        """Changes the robot speed (in mm/s)"""
        # Stored and emitted as the F word on subsequent move lines.
        self.SPEED_MMS = speed_mms
        #self.addline('F%.3f' % (speed_mms*60))
    def setAcceleration(self, accel_mmss):
        """Changes the robot acceleration (in mm/s2)"""
        # Not supported by this post; intentionally a no-op.
        #self.addlog('setAcceleration not defined')
        pass
    def setSpeedJoints(self, speed_degs):
        """Changes the robot joint speed (in deg/s)"""
        # Not supported by this post; intentionally a no-op.
        #self.addlog('setSpeedJoints not defined')
        pass
    def setAccelerationJoints(self, accel_degss):
        """Changes the robot joint acceleration (in deg/s2)"""
        # Not supported by this post; intentionally a no-op.
        #self.addlog('setAccelerationJoints not defined')
        pass
    def setZoneData(self, zone_mm):
        """Changes the rounding radius (aka CNT, APO or zone data) to make the movement smoother"""
        # Only on/off is supported; the radius value itself is not emitted.
        if zone_mm > 0:
            self.addline('ROUNDING ON')
        else:
            self.addline('ROUNDING OFF')
    def setDO(self, io_var, io_value):
        """Sets a variable (digital output) to a given value"""
        if type(io_var) != str: # set default variable name if io_var is a number
            io_var = 'OUT[%s]' % str(io_var)
        if type(io_value) != str: # set default variable value if io_value is a number
            if io_value > 0:
                io_value = 'TRUE'
            else:
                io_value = 'FALSE'
        # at this point, io_var and io_value must be string values
        self.addline('%s=%s' % (io_var, io_value))
    def waitDI(self, io_var, io_value, timeout_ms=-1):
        """Waits for a variable (digital input) io_var to attain a given value io_value. Optionally, a timeout can be provided."""
        if type(io_var) != str: # set default variable name if io_var is a number
            io_var = 'IN[%s]' % str(io_var)
        if type(io_value) != str: # set default variable value if io_value is a number
            if io_value > 0:
                io_value = 'TRUE'
            else:
                io_value = 'FALSE'
        # at this point, io_var and io_value must be string values
        if timeout_ms < 0:
            self.addline('WAIT FOR %s==%s' % (io_var, io_value))
        else:
            self.addline('WAIT FOR %s==%s TIMEOUT=%.1f' % (io_var, io_value, timeout_ms))
    def RunCode(self, code, is_function_call = False):
        """Adds code or a function call"""
        if is_function_call:
            # NOTE(review): the result of replace() is discarded; this was
            # likely meant to be code = code.replace(' ', '_').
            code.replace(' ','_')
            if not code.endswith(')'):
                code = code + '()'
            self.addline("CALL " + code)
        else:
            # Raw code is inserted verbatim.
            self.addline(code)
    def RunMessage(self, message, iscomment = False):
        """Display a message in the robot controller screen (teach pendant)"""
        if iscomment:
            self.addline('// ' + message)
        else:
            self.addline('MSGBOX (DF_MSGBOX_OKONLY),"%s" ' % message)
    # ------------------ private ----------------------
    def addline(self, newline):
        """Add a program line"""
        self.PROG = self.PROG + newline + '\n'
    def addlog(self, newline):
        """Add a log message"""
        self.LOG = self.LOG + newline + '\n'
# -------------------------------------------------
# ------------ For testing purposes ---------------
def Pose(xyzrpw):
    """Build a 4x4 pose Mat from ``[x, y, z, a, b, c]`` (angles in degrees),
    using the same Euler-angle convention as the original helper."""
    x, y, z, r, p, w = xyzrpw
    # Convert degrees to radians with the exact same expression as before
    # so the floating-point results are bit-identical.
    a = r*math.pi/180
    b = p*math.pi/180
    c = w*math.pi/180
    ca, sa = math.cos(a), math.sin(a)
    cb, sb = math.cos(b), math.sin(b)
    cc, sc = math.cos(c), math.sin(c)
    return Mat([
        [cb*ca, ca*sc*sb - cc*sa, sc*sa + cc*ca*sb, x],
        [cb*sa, cc*ca + sc*sb*sa, cc*sb*sa - ca*sc, y],
        [-sb, cb*sc, cc*cb, z],
        [0, 0, 0, 1],
    ])
def test_post():
    """Test the post with a basic program"""
    robot = RobotPost()
    robot.ProgStart("Program")
    robot.RunMessage("Program generated by RoboDK using a custom post processor", True)
    # Set up a reference frame and tool, then trace a short demo toolpath.
    robot.setFrame(Pose([807.766544, -963.699898, 41.478944, 0, 0, 0]))
    robot.setTool(Pose([62.5, -108.253175, 100, -60, 90, 0]))
    robot.MoveJ(Pose([200, 200, 500, 180, 0, 180]), [-46.18419, -6.77518, -20.54925, 71.38674, 49.58727, -302.54752] )
    robot.MoveL(Pose([200, 250, 348.734575, 180, 0, -150]), [-41.62707, -8.89064, -30.01809, 60.62329, 49.66749, -258.98418] )
    robot.MoveL(Pose([200, 200, 262.132034, 180, 0, -150]), [-43.73892, -3.91728, -35.77935, 58.57566, 54.11615, -253.81122] )
    robot.RunMessage("Setting air valve 1 on")
    robot.RunCode("TCP_On", True)
    robot.Pause(1000)
    robot.MoveL(Pose([200, 250, 348.734575, 180, 0, -150]), [-41.62707, -8.89064, -30.01809, 60.62329, 49.66749, -258.98418] )
    robot.MoveL(Pose([250, 300, 278.023897, 180, 0, -150]), [-37.52588, -6.32628, -34.59693, 53.52525, 49.24426, -251.44677] )
    robot.MoveL(Pose([250, 250, 191.421356, 180, 0, -150]), [-39.75778, -1.04537, -40.37883, 52.09118, 54.15317, -246.94403] )
    robot.RunMessage("Setting air valve off")
    robot.RunCode("TCP_Off", True)
    robot.Pause(1000)
    robot.MoveL(Pose([250, 300, 278.023897, 180, 0, -150]), [-37.52588, -6.32628, -34.59693, 53.52525, 49.24426, -251.44677] )
    robot.MoveL(Pose([250, 200, 278.023897, 180, 0, -150]), [-41.85389, -1.95619, -34.89154, 57.43912, 52.34162, -253.73403] )
    robot.MoveL(Pose([250, 150, 191.421356, 180, 0, -150]), [-43.82111, 3.29703, -40.29493, 56.02402, 56.61169, -249.23532] )
    # Joint move back to the start configuration (pose intentionally None).
    robot.MoveJ(None, [-46.18419, -6.77518, -20.54925, 71.38674, 49.58727, -302.54752] )
    robot.ProgFinish("Program")
    # Saving to disk is disabled for this smoke test; the generated program
    # text is printed instead.
    # robot.ProgSave(".","Program",True)
    print(robot.PROG)
    if len(robot.LOG) > 0:
        mbox('Program generation LOG:\n\n' + robot.LOG)
    input("Press Enter to close...")
if __name__ == "__main__":
"""Function to call when the module is executed by itself: test"""
test_post()
| 42.867742 | 166 | 0.565656 |
ace86981a1b52ef81f59967a633d20e9792808d3 | 1,479 | py | Python | setup.py | BackrndSource/pyduinocoin | 98982370e493c0676dff8bd316bf5782bcdff77d | [
"MIT"
] | 6 | 2022-01-05T19:46:09.000Z | 2022-02-11T21:54:34.000Z | setup.py | BackrndSource/pyduinocoin | 98982370e493c0676dff8bd316bf5782bcdff77d | [
"MIT"
] | null | null | null | setup.py | BackrndSource/pyduinocoin | 98982370e493c0676dff8bd316bf5782bcdff77d | [
"MIT"
] | null | null | null | import setuptools
# Use the project README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata for the pyduinocoin distribution.
setuptools.setup(
    name="pyduinocoin",
    version="1.0.5",
    author="Sergio Contreras Agustí (backrndsource)",
    author_email="backrndsource@gmail.com",
    description="pyDuinoCoin is a simple python integration for the DuinoCoin REST API, that allows developers to communicate with DuinoCoin Master Server.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/backrndsource/pyduinocoin",
    project_urls={
        "Bug Tracker": "https://github.com/backrndsource/pyduinocoin/issues",
    },
    keywords=["DUCO", "DuinoCoin", "api-rest", "python3", "duino", "duino-coin", "api", "client", "REST", "crypto", "coin"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    python_requires=">=3.5",
)
| 41.083333 | 157 | 0.647735 |
ace86a09fcc537724152942889bbb72acd0de862 | 3,027 | py | Python | Face-Emotions-Recognition/face-emotions-recognition-using-deep-learning/src/dataset_prepare.py | swapnilgarg7/Face-X | fab21bf667fa7387b8e73e5a1d72fcba4fba2818 | [
"MIT"
] | 685 | 2017-08-07T13:47:31.000Z | 2022-03-31T22:29:10.000Z | Face-Emotions-Recognition/face-emotions-recognition-using-deep-learning/src/dataset_prepare.py | swapnilgarg7/Face-X | fab21bf667fa7387b8e73e5a1d72fcba4fba2818 | [
"MIT"
] | 704 | 2020-09-30T10:44:13.000Z | 2022-03-30T07:18:28.000Z | Face-Emotions-Recognition/face-emotions-recognition-using-deep-learning/src/dataset_prepare.py | swapnilgarg7/Face-X | fab21bf667fa7387b8e73e5a1d72fcba4fba2818 | [
"MIT"
] | 346 | 2017-11-18T21:35:45.000Z | 2022-03-28T12:04:48.000Z | import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
import os
# convert string to integer
def atoi(s):
    """Convert a string of decimal digits to its integer value.

    Delegates to the built-in ``int()`` instead of the original manual
    ``ord()`` arithmetic (same result for all-digit input, and raises
    ``ValueError`` on malformed input instead of silently computing
    garbage). Preserves the original behaviour of returning 0 for an
    empty string, which ``int('')`` would reject.
    """
    return int(s) if s else 0
# making folders: data/<split>/<emotion>/ for the two splits and seven classes
outer_names = ['test', 'train']
inner_names = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral']
os.makedirs('data', exist_ok=True)
for outer_name in outer_names:
    os.makedirs(os.path.join('data', outer_name), exist_ok=True)
    for inner_name in inner_names:
        os.makedirs(os.path.join('data', outer_name, inner_name), exist_ok=True)

# Per-(split, emotion) counters used to number the output files, replacing
# the original fourteen parallel counter variables.
counters = {split: {name: 0 for name in inner_names} for split in outer_names}

df = pd.read_csv('./fer2013.csv')
# Reusable 48x48 grayscale buffer; each CSV row is one flattened image.
mat = np.zeros((48, 48), dtype=np.uint8)
print("Saving images...")

# read the csv file line by line; the first 28709 rows are the FER2013
# training split, the remainder the (public+private) test split.
for i in tqdm(range(len(df))):
    words = df['pixels'][i].split()
    # the image size is 48x48, stored row-major as 2304 space-separated pixels
    for j in range(2304):
        mat[j // 48][j % 48] = atoi(words[j])
    img = Image.fromarray(mat)

    split = 'train' if i < 28709 else 'test'
    # FER2013 encodes labels 0-6 in exactly the inner_names order
    # (0=angry ... 6=neutral); values outside 0-6 are assumed absent
    # from the CSV — TODO confirm against the dataset.
    emotion = inner_names[df['emotion'][i]]

    count = counters[split][emotion]
    # BUG FIX: the original saved to '<split>/<emotion>/...' although the
    # directories were created under 'data/', so every save failed with
    # FileNotFoundError. Save into the directories actually created above.
    img.save(os.path.join('data', split, emotion, 'im{}.png'.format(count)))
    counters[split][emotion] = count + 1
print("Done!") | 29.105769 | 87 | 0.545755 |
ace86abed2489703307078908c4206086e78b882 | 1,750 | py | Python | actions/ncm_execute_script.py | jschoewe/stackstorm-orion | a5fdb805ff70c3911cb4c74be3f299f9a1c2625f | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | actions/ncm_execute_script.py | jschoewe/stackstorm-orion | a5fdb805ff70c3911cb4c74be3f299f9a1c2625f | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | actions/ncm_execute_script.py | EncoreTechnologies/stackstorm-orion | ed6f54ab7a25885ba1313fe52c9bc0d243164aa2 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OrionBaseAction
class NcmExecuteScript(OrionBaseAction):
    """StackStorm action that runs an NCM script against one Orion node."""

    def run(self, node, script):
        """
        Execute an Orion NCM script on a node.

        Args:
            node: identifier of the node to target.
            script: the NCM script text to execute.

        Returns:
            dict with the NCM 'job_id' and the 'transfer' results
            reported for that job.

        Raises:
            ValueError: if the node is not present in NPM or in NCM.
        """
        self.connect()

        orion_node = self.get_node(node)

        # Guard clauses: the node must be known to both NPM and NCM
        # before NCM can execute anything against it.
        if not orion_node.npm:
            raise ValueError("Node not found in NPM {}".format(
                orion_node.caption))
        if not orion_node.ncm:
            raise ValueError("Node not found in NCM {}".format(
                orion_node.caption))

        job_output = self.invoke(
            "Cirrus.ConfigArchive",
            "ExecuteScript",
            [orion_node.ncm_id],
            script)

        # ExecuteScript returns the job id first; poll NCM for the
        # transfer outcome of that job.
        job_id = job_output[0]
        return {
            'job_id': job_id,
            'transfer': self.get_ncm_transfer_results(job_id),
        }
| 30.172414 | 74 | 0.636 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.