repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
bdowning/aiotools | tests/test_timer.py | import asyncio
import pytest
import aiotools
@pytest.mark.asyncio
async def test_timer():
    """
    Verify that create_timer() invokes the callback once per interval under
    a virtual clock, for both the default policy and TimerDelayPolicy.CANCEL.
    """
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():
        count = 0

        async def counter(interval):
            # The timer passes its own tick interval to the callback.
            assert interval == 0.1
            nonlocal count
            await asyncio.sleep(0)
            count += 1

        count = 0
        timer = aiotools.create_timer(counter, 0.1)
        # 0.22 virtual seconds is expected to yield exactly 3 ticks.
        await asyncio.sleep(0.22)
        timer.cancel()
        await timer
        assert count == 3

        count = 0
        timer = aiotools.create_timer(counter, 0.1, aiotools.TimerDelayPolicy.CANCEL)
        await asyncio.sleep(0.22)
        timer.cancel()
        await timer
        # should have same results
        assert count == 3
@pytest.mark.asyncio
async def test_timer_leak_default():
    """
    Test if the timer-fired tasks are cleaned up properly
    even when each timer-fired task takes longer than the timer interval.
    (In this case they will accumulate indefinitely!)
    """
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():
        spawn_count = 0
        cancel_count = 0
        done_count = 0

        async def delayed(interval):
            # Each tick spawns a task that outlives the 1s interval (5s sleep).
            nonlocal spawn_count, cancel_count, done_count
            spawn_count += 1
            try:
                await asyncio.sleep(5)
                done_count += 1
            except asyncio.CancelledError:
                cancel_count += 1

        task_count = len(aiotools.compat.all_tasks())
        timer = aiotools.create_timer(delayed, 1)
        await asyncio.sleep(9.9)
        timer.cancel()
        await timer
        # No unbounded accumulation of tasks beyond the timer itself.
        assert task_count + 1 >= len(aiotools.compat.all_tasks())
        # Every spawned task either completed or was cancelled on shutdown.
        assert spawn_count == done_count + cancel_count
        assert spawn_count == 10
        assert cancel_count == 5
@pytest.mark.asyncio
async def test_timer_leak_cancel():
    """
    Test the effect of TimerDelayPolicy.CANCEL which always
    cancels any pending previous tasks on each interval.
    """
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():
        spawn_count = 0
        cancel_count = 0
        done_count = 0

        async def delayed(interval):
            # Sleeps much longer than the 0.01s interval, so every task
            # should be cancelled by the next tick under the CANCEL policy.
            nonlocal spawn_count, cancel_count, done_count
            spawn_count += 1
            try:
                await asyncio.sleep(1)
            except asyncio.CancelledError:
                cancel_count += 1
            else:
                done_count += 1

        task_count = len(aiotools.compat.all_tasks())
        timer = aiotools.create_timer(
            delayed, 0.01, aiotools.TimerDelayPolicy.CANCEL,
        )
        await asyncio.sleep(0.1)
        timer.cancel()
        await timer
        await asyncio.sleep(0)
        assert task_count + 1 >= len(aiotools.compat.all_tasks())
        assert spawn_count == cancel_count + done_count
        # All ticks were cancelled; none ran to completion.
        assert cancel_count == 10
        assert done_count == 0
@pytest.mark.asyncio
async def test_timer_leak_nocancel():
    """
    Test that TimerDelayPolicy.CANCEL leaves tasks alone when they
    complete within the interval: nothing should actually get cancelled.
    """
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():
        spawn_count = 0
        cancel_count = 0
        done_count = 0

        async def delayed(interval):
            # Finishes immediately (sleep(0)), well within the 0.01s interval.
            nonlocal spawn_count, cancel_count, done_count
            spawn_count += 1
            try:
                await asyncio.sleep(0)
            except asyncio.CancelledError:
                cancel_count += 1
            else:
                done_count += 1

        task_count = len(aiotools.compat.all_tasks())
        timer = aiotools.create_timer(
            delayed, 0.01, aiotools.TimerDelayPolicy.CANCEL,
        )
        await asyncio.sleep(0.096)
        timer.cancel()
        await timer
        await asyncio.sleep(0)
        assert task_count + 1 >= len(aiotools.compat.all_tasks())
        assert spawn_count == cancel_count + done_count
        # Every tick completed before the next one; no cancellations.
        assert cancel_count == 0
        assert done_count == 10
|
bdowning/aiotools | src/aiotools/func.py | <gh_stars>100-1000
import collections
import functools
from .compat import get_running_loop
__all__ = (
'apartial',
'lru_cache',
)
_CacheEntry = collections.namedtuple('_CacheEntry', 'value expire_at')
def apartial(coro, *args, **kwargs):
    """
    Wraps a coroutine function with pre-defined arguments (including keyword
    arguments) -- an asynchronous counterpart of :func:`functools.partial`.

    The pre-bound positional arguments come before any call-time positional
    arguments, and pre-bound keyword arguments are merged with call-time ones.
    """
    @functools.wraps(coro)
    async def _bound(*extra_args, **extra_kwargs):
        return await coro(*args, *extra_args, **kwargs, **extra_kwargs)

    return _bound
def lru_cache(maxsize: int = 128,
              typed: bool = False,
              expire_after: float = None):  # semantically Optional[float]
    """
    A simple LRU cache just like :func:`functools.lru_cache`, but it works for
    coroutines. This is not as heavily optimized as :func:`functools.lru_cache`
    which uses an internal C implementation, as it targets async operations
    that take a long time.

    It follows the same API that the standard functools provides. The wrapped
    function has ``cache_clear()`` method to flush the cache manually, but
    leaves ``cache_info()`` for statistics unimplemented.

    Note that calling the coroutine multiple times with the same arguments
    before the first call returns may incur duplicate executions.

    This function is not thread-safe.

    Args:
        maxsize: The maximum number of cached entries.

        typed: Cache keys in different types separately (e.g., ``3`` and ``3.0`` will
               be different keys).

        expire_after: Re-calculate the value if the configured time has passed even
                      when the cache is hit. When re-calculation happens the
                      expiration timer is also reset.
    """
    if maxsize is not None and not isinstance(maxsize, int):
        raise TypeError('Expected maxsize to be an integer or None')

    def wrapper(coro):
        sentinel = object()  # unique object to distinguish None as result
        cache = collections.OrderedDict()
        # Pre-bound method references avoid repeated attribute lookups.
        cache_get = cache.get
        cache_del = cache.__delitem__
        cache_set = cache.__setitem__
        cache_len = cache.__len__
        cache_move = cache.move_to_end
        make_key = functools._make_key
        # We don't use explicit locks like the standard functools,
        # because this lru_cache is intended for use in asyncio coroutines.
        # The only context interleaving happens when calling the user-defined
        # coroutine, so there is no need to add extra synchronization guards.

        @functools.wraps(coro)
        async def wrapped(*args, **kwargs):
            now = get_running_loop().time()
            k = make_key(args, kwargs, typed)
            entry = cache_get(k, sentinel)
            if entry is not sentinel:
                # NOTE(review): a cache hit does not call move_to_end(), so
                # eviction below follows insertion order (FIFO-like) rather
                # than strict least-recently-*used* order -- confirm whether
                # this is intentional (the project's own tests rely on it).
                if entry.expire_at is None:
                    return entry.value
                if entry.expire_at >= now:
                    return entry.value
                # Expired: drop the stale entry and fall through to recompute.
                cache_del(k)
            result = await coro(*args, **kwargs)
            if maxsize is not None and cache_len() >= maxsize:
                # Evict the oldest entry to make room for the new one.
                cache.popitem(last=False)
            if expire_after is not None:
                expire_at = now + expire_after
            else:
                expire_at = None
            cache_set(k, _CacheEntry(result, expire_at))
            # A freshly inserted key is already at the end; this is a no-op
            # kept for clarity.
            cache_move(k, last=True)
            return result

        def cache_clear():
            # Flush all cached entries.
            cache.clear()

        def cache_info():
            # Statistics are intentionally unimplemented.
            raise NotImplementedError

        wrapped.cache_clear = cache_clear
        wrapped.cache_info = cache_info
        return wrapped

    return wrapper
|
bdowning/aiotools | examples/locking.py | <filename>examples/locking.py
import asyncio
import aiotools
lock = asyncio.Lock()
@aiotools.actxmgr
async def mygen(input_value):
    # Async context manager built from a generator: everything before
    # `yield` runs on __aenter__, everything after (the finally block
    # here) runs on __aexit__ -- even when the body raises.
    print(input_value)
    await lock.acquire()
    print('The lock is acquired.')
    try:
        yield 'return_value'  # the value bound by `async with ... as ...`
    finally:
        lock.release()
        print('The lock is released.')
async def run():
    # Demonstrates that exceptions raised inside the `async with` body
    # still trigger the context manager's cleanup before propagating.
    try:
        async with mygen('input_value') as return_value:
            print(return_value)
            raise RuntimeError
    except RuntimeError:
        print('RuntimeError is caught!')  # you can catch exceptions here.
if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(run())
    finally:
        # close() (rather than stop()) releases the loop's resources;
        # stop() is only meaningful for a loop that is currently running.
        loop.close()
|
bdowning/aiotools | examples/zmqserver.py | <reponame>bdowning/aiotools<filename>examples/zmqserver.py
import logging
import os
from typing import Set
import aiotools
import zmq, zmq.asyncio
# Number of worker processes; also used to emit one shutdown sentinel per worker.
num_workers = 4
# Logger names that have already been configured, to avoid duplicate handlers.
log_init_states: Set[str] = set()
def get_logger(name: str, pid: int) -> logging.Logger:
    """
    Return the logger for *name*, attaching a stream handler (tagged with
    *pid*) only on the first call for each distinct logger name.
    """
    log = logging.getLogger(name)
    if name in log_init_states:
        # Already configured: hand back the cached logger untouched.
        return log
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        f'%(relativeCreated).3f %(name)s[{pid}] %(levelname)s: %(message)s'))
    log.addHandler(handler)
    log.propagate = False
    log.setLevel(logging.INFO)
    log_init_states.add(name)
    return log
def router_main(_, pidx, args):
    # Extra (non-worker) process entrypoint: a blocking PULL -> PUSH proxy
    # that fans incoming TCP messages out to the workers over an IPC socket.
    log = get_logger('examples.zmqserver.extra', pidx)
    zctx = zmq.Context()
    zctx.linger = 0  # drop pending messages on close instead of blocking
    in_sock = zctx.socket(zmq.PULL)
    in_sock.bind('tcp://*:5033')
    out_sock = zctx.socket(zmq.PUSH)
    out_sock.bind('ipc://example-events')
    try:
        log.info('router proxy started')
        zmq.proxy(in_sock, out_sock)  # blocks until interrupted
    except KeyboardInterrupt:
        pass
    except Exception:
        log.exception('unexpected error')
    finally:
        for _ in range(num_workers):
            out_sock.send(b'')  # empty-message sentinel: one per worker
        log.info('router proxy terminated')
        in_sock.close()
        out_sock.close()
        zctx.term()
        os.unlink('example-events')  # remove the leftover ipc socket file
@aiotools.actxmgr
async def worker_main(loop, pidx, args):
    # Worker entrypoint: consume events from the router's IPC socket
    # until the empty sentinel message arrives.
    log = get_logger('examples.zmqserver.worker', pidx)
    zctx = zmq.asyncio.Context()
    router = zctx.socket(zmq.PULL)
    router.connect('ipc://example-events')

    async def process_incoming(router):
        while True:
            data = await router.recv()
            if not data:
                # Empty message is the shutdown sentinel from router_main().
                return
            log.info(data)

    task = loop.create_task(process_incoming(router))
    log.info('started')
    try:
        yield  # run until the server framework asks us to terminate
    finally:
        await task  # wait for the sentinel to be consumed
        router.close()
        zctx.term()
        log.info('terminated')
if __name__ == '__main__':
    # This example must be run with multiprocessing.
    # Spawns `num_workers` asyncio workers plus one extra router process.
    server = aiotools.start_server(
        worker_main,
        use_threading=False,
        num_workers=num_workers,
        extra_procs=[router_main],
        start_method='spawn',
    )
|
bdowning/aiotools | src/aiotools/compat.py | <gh_stars>100-1000
import asyncio

# Prefer the top-level asyncio APIs introduced in Python 3.7, falling back
# to their pre-3.7 locations.  EAFP is used so the fallback attribute is
# never evaluated on interpreters where it has been removed.
try:
    get_running_loop = asyncio.get_running_loop
except AttributeError:
    get_running_loop = asyncio.get_event_loop

try:
    all_tasks = asyncio.all_tasks
except AttributeError:
    all_tasks = asyncio.Task.all_tasks  # type: ignore

try:
    current_task = asyncio.current_task
except AttributeError:
    current_task = asyncio.Task.current_task  # type: ignore
|
bdowning/aiotools | tests/test_taskgroup.py | <reponame>bdowning/aiotools
import asyncio
import sys
import warnings
import pytest
from aiotools import (
TaskGroup,
TaskGroupError,
VirtualClock,
)
@pytest.mark.asyncio
async def test_delayed_subtasks():
    """Exiting the TaskGroup context waits for all subtasks and keeps results."""
    with VirtualClock().patch_loop():
        async with TaskGroup() as tg:
            t1 = tg.create_task(asyncio.sleep(3, 'a'))
            t2 = tg.create_task(asyncio.sleep(2, 'b'))
            t3 = tg.create_task(asyncio.sleep(1, 'c'))
        # __aexit__ blocked until the longest sleep (3s) completed.
        assert t1.done()
        assert t2.done()
        assert t3.done()
        assert t1.result() == 'a'
        assert t2.result() == 'b'
        assert t3.result() == 'c'
@pytest.mark.asyncio
@pytest.mark.skipif(
    sys.version_info < (3, 7),
    reason='contextvars is available only in Python 3.7 or later',
)
async def test_contextual_taskgroup():
    """Subtasks observe the task group they were spawned from via contextvar."""
    from aiotools import current_taskgroup

    refs = []

    async def check_tg(delay):
        await asyncio.sleep(delay)
        refs.append(current_taskgroup.get())

    with VirtualClock().patch_loop():
        async with TaskGroup() as outer_tg:
            ot1 = outer_tg.create_task(check_tg(0.1))
            async with TaskGroup() as inner_tg:
                it1 = inner_tg.create_task(check_tg(0.2))
                # Spawned via the *outer* group while the inner one is
                # current -- must still see the outer group.
                ot2 = outer_tg.create_task(check_tg(0.3))
        assert ot1.done()
        assert ot2.done()
        assert it1.done()
        # Delays order the appends: outer (0.1), inner (0.2), outer (0.3).
        assert refs == [outer_tg, inner_tg, outer_tg]

        with pytest.raises(LookupError):
            # outside of any taskgroup, this is an error.
            current_taskgroup.get()
@pytest.mark.skipif(
    sys.version_info < (3, 7),
    reason='contextvars is available only in Python 3.7 or later',
)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.asyncio
async def test_contextual_taskgroup_spawning():
    """Tasks may spawn siblings into the current task group via the contextvar."""
    from aiotools import current_taskgroup

    total_jobs = 0

    async def job():
        nonlocal total_jobs
        await asyncio.sleep(0)
        total_jobs += 1

    async def spawn_job():
        # Looks up the ambient task group and adds a sibling job to it.
        await asyncio.sleep(0)
        tg = current_taskgroup.get()
        tg.create_task(job())

    async def inner_tg_job():
        # Runs its job inside a nested, self-contained task group.
        await asyncio.sleep(0)
        async with TaskGroup() as tg:
            tg.create_task(job())

    with VirtualClock().patch_loop():
        total_jobs = 0
        with pytest.raises(TaskGroupError), pytest.warns(RuntimeWarning):
            # When the taskgroup terminates immediately after spawning subtasks,
            # the spawned subtasks may not be allowed to proceed because the parent
            # taskgroup is already in the terminating procedure.
            async with TaskGroup() as tg:
                t = tg.create_task(spawn_job())
                assert not t.done()
        assert total_jobs == 0

        total_jobs = 0
        async with TaskGroup() as tg:
            tg.create_task(inner_tg_job())
            tg.create_task(spawn_job())
            tg.create_task(inner_tg_job())
            tg.create_task(spawn_job())
            # Give the subtasks chances to run.
            await asyncio.sleep(1)
        assert total_jobs == 4
@pytest.mark.asyncio
async def test_taskgroup_cancellation():
    """Cancelling the group body cancels still-pending subtasks only."""
    with VirtualClock().patch_loop():

        async def do_job(delay, result):
            # NOTE: replacing do_job directly with asyncio.sleep
            # results future-pending-after-loop-closed error,
            # because asyncio.sleep() is not a task but a future.
            await asyncio.sleep(delay)
            return result

        with pytest.raises(asyncio.CancelledError):
            async with TaskGroup() as tg:
                t1 = tg.create_task(do_job(0.3, 'a'))
                t2 = tg.create_task(do_job(0.6, 'b'))
                await asyncio.sleep(0.5)
                raise asyncio.CancelledError

        # t1 finished before the cancellation at t=0.5; t2 did not.
        assert t1.done()
        assert t2.cancelled()
        assert t1.result() == 'a'
@pytest.mark.asyncio
async def test_subtask_cancellation():
    """A subtask raising CancelledError cancels only itself, not its siblings."""
    results = []

    async def do_job():
        await asyncio.sleep(1)
        results.append('a')

    async def do_cancel():
        await asyncio.sleep(0.5)
        raise asyncio.CancelledError

    with VirtualClock().patch_loop():
        async with TaskGroup() as tg:
            t1 = tg.create_task(do_job())
            t2 = tg.create_task(do_cancel())
            t3 = tg.create_task(do_job())
        assert t1.done()
        assert t2.cancelled()
        assert t3.done()
        # Both sibling jobs completed despite t2's self-cancellation.
        assert results == ['a', 'a']
@pytest.mark.asyncio
async def test_taskgroup_error():
    """A failing subtask cancels later siblings and surfaces as TaskGroupError."""
    with VirtualClock().patch_loop():

        async def do_job(delay, result):
            await asyncio.sleep(delay)
            if result == 'x':
                raise ZeroDivisionError('oops')
            else:
                return 99

        with pytest.raises(TaskGroupError) as e:
            async with TaskGroup() as tg:
                t1 = tg.create_task(do_job(0.3, 'a'))
                t2 = tg.create_task(do_job(0.5, 'x'))
                t3 = tg.create_task(do_job(0.7, 'a'))
        # Only t2's error is aggregated.
        assert len(e.value.__errors__) == 1
        assert type(e.value.__errors__[0]).__name__ == 'ZeroDivisionError'
        # t1 completed before the failure at t=0.5.
        assert t1.done()
        assert await t1 == 99
        assert t1.result() == 99
        assert t1.exception() is None
        # t2 failed; its exception is observable in all the usual ways.
        assert t2.done()
        with pytest.raises(ZeroDivisionError):
            await t2
        with pytest.raises(ZeroDivisionError):
            t2.result()
        assert type(t2.exception()).__name__ == 'ZeroDivisionError'
        # t3 (due at t=0.7) was cancelled by the group teardown.
        assert t3.cancelled()
@pytest.mark.asyncio
async def test_taskgroup_error_weakref():
    """Error aggregation works even when callers drop their task references."""
    with VirtualClock().patch_loop():
        results = []

        async def do_job(delay, result):
            await asyncio.sleep(delay)
            if result == 'x':
                results.append('x')
                raise ZeroDivisionError('oops')
            else:
                results.append('o')
                return 99

        with pytest.raises(TaskGroupError) as e:
            async with TaskGroup() as tg:
                # We don't keep the reference to the tasks,
                # but they should behave the same way
                # regardless of usage of WeakSet in the implementation.
                tg.create_task(do_job(0.3, 'a'))
                tg.create_task(do_job(0.5, 'x'))
                tg.create_task(do_job(0.7, 'a'))
        assert len(e.value.__errors__) == 1
        assert type(e.value.__errors__[0]).__name__ == 'ZeroDivisionError'
        # The third job (t=0.7) was cancelled, so it never appended.
        assert results == ['o', 'x']
@pytest.mark.asyncio
async def test_taskgroup_memoryleak_with_persistent_tg():
    """A long-lived TaskGroup must drop references to finished subtasks."""
    with VirtualClock().patch_loop(), \
         warnings.catch_warnings():
        warnings.simplefilter("ignore")

        async def do_job(delay):
            await asyncio.sleep(delay)
            return 1

        async with TaskGroup() as tg:
            # Spawn one 10-second task per virtual second for 1000 seconds.
            for count in range(1000):
                await asyncio.sleep(1)
                tg.create_task(do_job(10))
                if count == 100:
                    # 10 ongoing tasks + 1 just spawned task
                    assert len(tg._tasks) == 11
            # Let the remaining in-flight tasks drain.
            await asyncio.sleep(10.1)
            assert len(tg._tasks) == 0
|
bdowning/aiotools | examples/zmqclient.py | import zmq
if __name__ == '__main__':
    # Push 100 messages to the zmqserver example's PULL socket on port 5033.
    ctx = zmq.Context()
    s = ctx.socket(zmq.PUSH)
    s.connect('tcp://127.0.0.1:5033')
    for _ in range(100):
        s.send(b'hello world')
    s.close()
|
bdowning/aiotools | examples/socketserver.py | import asyncio
import aiotools
async def echo(reader, writer):
    """Echo back up to 100 bytes from the client, then close the connection."""
    payload = await reader.read(100)
    writer.write(payload)
    await writer.drain()
    writer.close()
@aiotools.actxmgr
async def worker_main(loop, pidx, args):
    # Create a listening socket with SO_REUSEPORT option so that each worker
    # process can share the same listening port and the kernel balances
    # incoming connections across multiple worker processes.
    # NOTE: the `loop` argument of asyncio.start_server() was deprecated in
    # Python 3.8 and removed in 3.10; the server binds to the running loop,
    # which is the per-worker loop aiotools hands us anyway.
    server = await asyncio.start_server(echo, '0.0.0.0', 8888,
                                        reuse_port=True)
    print(f'[{pidx}] started')
    yield  # wait until terminated
    server.close()
    await server.wait_closed()
    print(f'[{pidx}] terminated')
if __name__ == '__main__':
    # Run the above server using 4 worker processes.
    aiotools.start_server(worker_main, num_workers=4)
|
bdowning/aiotools | tests/test_func.py | import asyncio
import pytest
from aiotools.func import apartial, lru_cache
async def do(a, b, *, c=1, d=2):
    '''hello world'''
    # NOTE: the docstring text above is asserted by test_apartial_wraps,
    # so it must stay exactly as-is.
    return a, b, c, d
@pytest.mark.asyncio
async def test_apartial_orig():
    """apartial() with no bound arguments behaves like the original coroutine."""
    do2 = apartial(do)
    ret = await do2(1, 2, c=3, d=4)
    assert ret == (1, 2, 3, 4)
@pytest.mark.asyncio
async def test_apartial_args():
    """Pre-bound positional arguments come before call-time ones."""
    do2 = apartial(do, 9)
    ret = await do2(2, c=5, d=6)
    assert ret == (9, 2, 5, 6)
@pytest.mark.asyncio
async def test_apartial_kwargs():
    """Pre-bound keyword arguments are merged with call-time ones."""
    do2 = apartial(do, c=8)
    ret = await do2(1, 2, d=4)
    assert ret == (1, 2, 8, 4)
@pytest.mark.asyncio
async def test_apartial_args_kwargs():
    """Positional and keyword pre-binding work together."""
    do2 = apartial(do, 9, c=8)
    ret = await do2(7, d=6)
    assert ret == (9, 7, 8, 6)
@pytest.mark.asyncio
async def test_apartial_wraps():
    """apartial() preserves the wrapped coroutine's metadata via functools.wraps."""
    do2 = apartial(do)
    assert do2.__doc__.strip() == 'hello world'
    assert do2.__doc__ == do.__doc__
    assert do.__name__ == 'do'
    assert do2.__name__ == 'do'
@pytest.mark.asyncio
async def test_lru_cache():
    """Basic caching, eviction at maxsize, and cache_clear() behavior."""
    calc_count = 0

    @lru_cache(maxsize=2)
    async def calc(n):
        '''testing'''
        nonlocal calc_count
        await asyncio.sleep(0)
        calc_count += 1
        return n * n

    # Metadata is preserved through the decorator.
    assert calc.__name__ == 'calc'
    assert calc.__doc__ == 'testing'
    assert (await calc(1)) == 1
    assert calc_count == 1
    assert (await calc(2)) == 4
    assert calc_count == 2
    assert (await calc(1)) == 1  # cache hit: no recomputation
    assert calc_count == 2
    assert (await calc(3)) == 9  # exceeds maxsize=2, evicts oldest entry
    assert calc_count == 3
    assert (await calc(1)) == 1  # evicted and re-executed
    assert calc_count == 4
    assert (await calc(1)) == 1  # cached again
    assert calc_count == 4
    with pytest.raises(NotImplementedError):
        calc.cache_info()
    calc.cache_clear()
    # After a flush, everything recomputes.
    assert (await calc(1)) == 1
    assert calc_count == 5
    assert (await calc(3)) == 9
    assert calc_count == 6
@pytest.mark.asyncio
async def test_lru_cache_with_expiration():
    """Entries recompute after expire_after elapses; without it they persist."""
    calc_count = 0

    @lru_cache(maxsize=2)
    async def calc_no_exp(n):
        nonlocal calc_count
        await asyncio.sleep(0)
        calc_count += 1
        return n * n

    assert (await calc_no_exp(3)) == 9
    assert calc_count == 1
    assert (await calc_no_exp(3)) == 9
    assert calc_count == 1
    await asyncio.sleep(0.1)
    # No expiration configured: still served from cache after the sleep.
    assert (await calc_no_exp(3)) == 9
    assert calc_count == 1

    calc_count = 0

    @lru_cache(maxsize=2, expire_after=0.05)
    async def calc_exp(n):
        nonlocal calc_count
        await asyncio.sleep(0)
        calc_count += 1
        return n * n

    assert (await calc_exp(3)) == 9
    assert calc_count == 1
    assert (await calc_exp(3)) == 9
    assert calc_count == 1
    await asyncio.sleep(0.1)
    # 0.1s > expire_after=0.05s: the entry expired and is recomputed.
    assert (await calc_exp(3)) == 9
    assert calc_count == 2
|
bdowning/aiotools | src/aiotools/fork.py | <filename>src/aiotools/fork.py
"""
This module implements a simple :func:`os.fork()`-like interface,
but in an asynchronous way with full support for PID file descriptors
on Python 3.9 or higher and the Linux kernel 5.4 or higher.
It internally synchronizes the beginning and readiness status of child processes
so that the users may assume that the child process is completely interruptible after
:func:`afork()` returns.
"""
import asyncio
import ctypes
import errno
# import functools
import logging
import os
# import resource
import signal
from abc import ABCMeta, abstractmethod
# from ctypes import (
# CFUNCTYPE,
# byref,
# c_int,
# c_char_p,
# c_void_p,
# cast,
# )
from typing import Callable, Tuple
from .compat import get_running_loop
__all__ = (
'AbstractChildProcess',
'PosixChildProcess',
'PidfdChildProcess',
'afork',
)
logger = logging.getLogger(__name__)

_libc = ctypes.CDLL(None)
_syscall = _libc.syscall
_default_stack_size = (8 * (2**20))  # 8 MiB

# Feature-detect PID file descriptor support (requires Python 3.9+ and a
# sufficiently recent Linux kernel).
_has_pidfd = False
if hasattr(signal, 'pidfd_send_signal'):
    try:
        # Probe with fd 0 (not a pidfd): EBADF means the syscall itself
        # exists and rejected the fd, i.e. the kernel supports pidfds.
        signal.pidfd_send_signal(0, 0)  # type: ignore
    except OSError as e:
        if e.errno == errno.EBADF:
            _has_pidfd = True
        # if the kernel does not support this,
        # it will say errno.ENOSYS or errno.EPERM
class AbstractChildProcess(metaclass=ABCMeta):
    """
    The abstract interface to control and monitor a forked child process.

    Concrete implementations are :class:`PosixChildProcess` (waitpid-based)
    and :class:`PidfdChildProcess` (PID file descriptor-based).
    """

    @abstractmethod
    def send_signal(self, signum: int) -> None:
        """
        Send a UNIX signal to the child process.
        If the child process is already terminated, it will log
        a warning message and return.
        """
        raise NotImplementedError

    @abstractmethod
    async def wait(self) -> int:
        """
        Wait until the child process terminates or reclaim the child process' exit
        code if already terminated.
        If there are other coroutines that has waited the same process, it may
        return 255 and log a warning message.
        """
        raise NotImplementedError
class PosixChildProcess(AbstractChildProcess):
    """
    A POSIX-compatible version of :class:`AbstractChildProcess` that reaps
    the child with a blocking :func:`os.waitpid` call run in the default
    thread-pool executor.
    """

    def __init__(self, pid: int) -> None:
        self._pid = pid
        self._returncode = None  # filled in by wait()
        self._terminated = False

    def send_signal(self, signum: int) -> None:
        """Send a UNIX signal to the child; warn and return if it already exited."""
        if self._terminated:
            logger.warning(
                "PosixChildProcess(%d).send_signal(%d): "
                "The process has already terminated.",
                self._pid,
                signum,
            )
            return
        os.kill(self._pid, signum)

    async def wait(self) -> int:
        """
        Wait for the child to exit and return its exit code.

        Returns 255 (with a warning) when the child was already reaped
        elsewhere; a negative value -N means termination by signal N.
        """
        loop = get_running_loop()
        try:
            _, status = await loop.run_in_executor(None, os.waitpid, self._pid, 0)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            self._returncode = 255
            logger.warning(
                "child process pid %d exit status already read: "
                " will report returncode 255",
                self._pid)
        else:
            if os.WIFSIGNALED(status):
                self._returncode = -os.WTERMSIG(status)
            elif os.WIFEXITED(status):
                self._returncode = os.WEXITSTATUS(status)
            else:
                self._returncode = status
        # FIX: these two lines used to live in a ``finally:`` block with the
        # ``return`` inside it, which silently swallowed any unexpected
        # exception (e.g. CancelledError) raised while awaiting the executor.
        self._terminated = True
        return self._returncode
class PidfdChildProcess(AbstractChildProcess):
    """
    A PID file descriptor-based version of :class:`AbstractChildProcess`.

    The pidfd becomes readable when the child exits, so the event loop's
    reader callback reaps the child without any blocking wait.
    """

    def __init__(self, pid: int, pidfd: int) -> None:
        self._pid = pid
        self._pidfd = pidfd
        self._returncode = None
        self._wait_event = asyncio.Event()
        self._terminated = False
        loop = get_running_loop()
        # The pidfd polls readable upon child termination.
        loop.add_reader(self._pidfd, self._do_wait)

    def send_signal(self, signum: int) -> None:
        """Signal the child via its pidfd; warn and return if already exited."""
        if self._terminated:
            logger.warning(
                "PidfdChildProcess(%d, %d).send_signal(%d): "
                "The process has already terminated.",
                self._pid,
                self._pidfd,
                signum,
            )
            return
        signal.pidfd_send_signal(self._pidfd, signum)  # type: ignore

    def _do_wait(self):
        # Reader callback: reap the child, record its return code, and wake
        # any wait() callers.
        loop = get_running_loop()
        try:
            # The flag is WEXITED | __WALL from linux/wait.h
            # (__WCLONE value is out of range of int...)
            status_info = os.waitid(
                os.P_PIDFD,
                self._pidfd,
                os.WEXITED | 0x40000000,
            )
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            self._returncode = 255
            logger.warning(
                "child process %d exit status already read: "
                " will report returncode 255",
                self._pid)
        else:
            if status_info.si_code == os.CLD_KILLED:
                self._returncode = -status_info.si_status  # signal number
            elif status_info.si_code == os.CLD_EXITED:
                self._returncode = status_info.si_status
            elif status_info.si_code == os.CLD_DUMPED:
                self._returncode = -status_info.si_status  # signal number
            else:
                logger.warning(
                    "unexpected si_code %d and si_status %d for child process %d",
                    status_info.si_code, status_info.si_status, self._pid)
                self._returncode = 255
        finally:
            loop.remove_reader(self._pidfd)
            os.close(self._pidfd)
            self._terminated = True
            self._wait_event.set()

    async def wait(self) -> int:
        """Wait for the reader callback to reap the child; return its exit code."""
        await self._wait_event.wait()
        assert self._returncode is not None
        return self._returncode
def _child_main(init_func, init_pipe, child_func: Callable[[], int]) -> int:
if init_func is not None:
init_func()
# notify the parent that the child is ready to execute the requested function.
os.write(init_pipe, b"\0")
os.close(init_pipe)
return child_func()
async def _fork_posix(child_func: Callable[[], int]) -> int:
    """
    Fork with plain :func:`os.fork` and wait -- without blocking the event
    loop -- until the child signals readiness through a pipe.

    Returns the child's PID.
    """
    loop = get_running_loop()
    init_pipe = os.pipe()
    init_event = asyncio.Event()
    loop.add_reader(init_pipe[0], init_event.set)
    pid = os.fork()
    if pid == 0:
        ret = 0
        try:
            ret = _child_main(None, init_pipe[1], child_func)
        except KeyboardInterrupt:
            ret = -signal.SIGINT
        finally:
            # Never fall back into the parent's stack frames.
            os._exit(ret)
    # FIX: close the parent's copy of the write end; only the child writes
    # to it.  The original code leaked this fd on every fork.
    os.close(init_pipe[1])
    # Wait for the child's readiness notification
    await init_event.wait()
    loop.remove_reader(init_pipe[0])
    os.read(init_pipe[0], 1)
    os.close(init_pipe[0])
    return pid
async def _clone_pidfd(child_func: Callable[[], int]) -> Tuple[int, int]:
    """
    Fork like :func:`_fork_posix` but also obtain a PID file descriptor
    for the child via :func:`os.pidfd_open`.

    Returns a ``(pid, pidfd)`` tuple.
    """
    loop = get_running_loop()
    init_pipe = os.pipe()
    init_event = asyncio.Event()
    loop.add_reader(init_pipe[0], init_event.set)
    pid = os.fork()
    if pid == 0:
        ret = 0
        try:
            ret = _child_main(None, init_pipe[1], child_func)
        except KeyboardInterrupt:
            ret = -signal.SIGINT
        finally:
            # Never fall back into the parent's stack frames.
            os._exit(ret)
    # FIX: close the parent's copy of the write end; only the child writes
    # to it.  The original code leaked this fd on every fork.
    os.close(init_pipe[1])
    # Get the pidfd.
    fd = os.pidfd_open(pid, 0)  # type: ignore
    # Wait for the child's readiness notification
    await init_event.wait()
    loop.remove_reader(init_pipe[0])
    os.read(init_pipe[0], 1)
    os.close(init_pipe[0])
    return pid, fd
# The below commneted-out version guarantees the PID reusing issue is prevented
# regardless of SIGCHLD handler configurations.
# However, in complicated real-world applications, it seems to have some
# hard-to-debug side effects when cleaning up... :(
# async def _clone_pidfd(child_func: Callable[[], int]) -> Tuple[int, int]:
# # reference: os_fork_impl() in the CPython source code
# fd = c_int()
# loop = get_running_loop()
#
# # prepare the stack memory
# stack_size = resource.getrlimit(resource.RLIMIT_STACK)[0]
# if stack_size <= 0:
# stack_size = _default_stack_size
# stack = c_char_p(b"\0" * stack_size)
#
# init_pipe = os.pipe()
# init_event = asyncio.Event()
# loop.add_reader(init_pipe[0], init_event.set)
#
# func = CFUNCTYPE(c_int)(
# functools.partial(
# _child_main,
# ctypes.pythonapi.PyOS_AfterFork_Child,
# init_pipe[1],
# child_func,
# )
# )
# stack_top = c_void_p(cast(stack, c_void_p).value + stack_size) # type: ignore
# ctypes.pythonapi.PyOS_BeforeFork()
# # The flag value is CLONE_PIDFD from linux/sched.h
# pid = _libc.clone(func, stack_top, 0x1000, 0, byref(fd))
# ctypes.pythonapi.PyOS_AfterFork_Parent()
#
# # Wait for the child's readiness notification
# await init_event.wait()
# loop.remove_reader(init_pipe[0])
# os.read(init_pipe[0], 1)
# os.close(init_pipe[0])
#
# if pid == -1:
# raise OSError("failed to fork")
# return pid, fd.value
async def afork(child_func: Callable[[], int]) -> AbstractChildProcess:
    """
    Fork the current process and execute the given function in the child.

    The return value of the function will become the exit code of the child
    process.

    Args:
        child_func: A function that represents the main function of the child and
                    returns an integer as its exit code.
                    Note that the function must set up a new event loop if it
                    wants to run asyncio codes.
    """
    # Prefer the pidfd-based implementation when the platform supports it
    # (detected once at import time).
    if _has_pidfd:
        pid, pidfd = await _clone_pidfd(child_func)
        return PidfdChildProcess(pid, pidfd)
    else:
        pid = await _fork_posix(child_func)
        return PosixChildProcess(pid)
|
bdowning/aiotools | tests/test_ptaskgroup.py | <reponame>bdowning/aiotools<filename>tests/test_ptaskgroup.py
import aiotools
import asyncio
import sys
import pytest
@pytest.mark.asyncio
async def test_ptaskgroup_all_done():
    """All subtasks run to completion and the group drops their references."""
    count = 0
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():

        async def subtask():
            nonlocal count
            await asyncio.sleep(0.1)
            count += 1

        async with aiotools.PersistentTaskGroup() as tg:
            assert tg.name.startswith("PTaskGroup-")
            for idx in range(10):
                t = tg.create_task(subtask(), name=f"Task-{idx}")
                if sys.version_info >= (3, 8):
                    # Task naming is only available on Python 3.8+.
                    assert t.get_name() == f"Task-{idx}"
                del t  # to prevent ref-leak after loop
            assert len(tg._tasks) == 10
            # all done
            await asyncio.sleep(0.2)
            assert count == 10
            assert len(tg._tasks) == 0
        assert count == 10
@pytest.mark.asyncio
async def test_ptaskgroup_cancel_after_schedule():
    """Group exit cancels subtasks that were scheduled but not yet finished."""
    count = 0
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():

        async def subtask():
            nonlocal count
            await asyncio.sleep(0.1)
            count += 1

        async with aiotools.PersistentTaskGroup() as tg:
            for _ in range(10):
                tg.create_task(subtask())
            # Let the tasks get scheduled onto the loop before exiting.
            await asyncio.sleep(0)
            assert len(tg._tasks) == 10
        # all cancelled after scheduled
        assert count == 0
        assert len(tg._tasks) == 10
        await asyncio.sleep(0)
        # after cancellation, all refs should be gone
        assert len(tg._tasks) == 0
@pytest.mark.asyncio
async def test_ptaskgroup_cancel_before_schedule():
    """Group exit cancels subtasks even before they were ever scheduled."""
    count = 0
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():

        async def subtask():
            nonlocal count
            await asyncio.sleep(0.1)
            count += 1

        async with aiotools.PersistentTaskGroup() as tg:
            for _ in range(10):
                tg.create_task(subtask())
            # NOTE: no yield point here -- the tasks never start running.
            assert len(tg._tasks) == 10
        # all cancelled before scheduled
        assert count == 0
        assert len(tg._tasks) == 10
        await asyncio.sleep(0)
        # after cancellation, all refs should be gone
        assert len(tg._tasks) == 0
@pytest.mark.asyncio
async def test_ptaskgroup_exc_handler():
    """Subtask exceptions are routed to the group's exception_handler."""
    count = 0
    error_count = 0
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():

        async def subtask():
            nonlocal count
            await asyncio.sleep(0.1)
            1 / 0  # deliberate ZeroDivisionError
            count += 1  # never reached

        async def handler(e):
            nonlocal error_count
            assert isinstance(e, ZeroDivisionError)
            error_count += 1

        async with aiotools.PersistentTaskGroup(exception_handler=handler) as tg:
            for _ in range(10):
                tg.create_task(subtask())
            assert len(tg._tasks) == 10
            await asyncio.sleep(1.0)
            assert count == 0
            assert error_count == 10
            # after handling error, all refs should be gone
            assert len(tg._tasks) == 0
@pytest.mark.asyncio
async def test_ptaskgroup_cancel_with_await():
    """Cancellation handlers that themselves await are still run to completion."""
    count = 0
    vclock = aiotools.VirtualClock()
    with vclock.patch_loop():

        async def subtask():
            nonlocal count
            try:
                await asyncio.sleep(0.1)
                count += 1  # should not be executed
            except asyncio.CancelledError:
                await asyncio.sleep(0.1)
                count += 10  # should be executed

        async with aiotools.PersistentTaskGroup() as tg:
            for _ in range(10):
                tg.create_task(subtask())
            assert len(tg._tasks) == 10
            # close it immediately after starting subtasks
            await asyncio.sleep(0)
        # ensure that awaits in all cancellation handling blocks have been executed
        # (10 tasks x +10 each)
        assert count == 100
        # after handling error, all refs should be gone
        assert len(tg._tasks) == 0
|
bdowning/aiotools | tests/test_server.py | <filename>tests/test_server.py
import pytest
import asyncio
import functools
import glob
import logging.config
import multiprocessing as mp
import os
import signal
import sys
import tempfile
import time
from typing import List, Sequence
import aiotools
# These tests drive real process-group signals; on older Pythons in CI this
# has killed the agent process, so skip the whole module there.
if os.environ.get('CI', '') and sys.version_info < (3, 9, 0):
    pytest.skip(
        'skipped to prevent kill CI agents due to signals on CI environments',
        allow_module_level=True,
    )
@pytest.fixture
def restore_signal():
    """
    Snapshot the handlers of every signal the tests touch and restore them
    afterwards, so one test cannot leak handlers into the next.
    """
    os.setpgrp()
    old_alrm = signal.getsignal(signal.SIGALRM)
    old_intr = signal.getsignal(signal.SIGINT)
    old_term = signal.getsignal(signal.SIGTERM)
    # FIX: this used to overwrite old_intr, losing the original SIGINT
    # handler, and SIGUSR1 was restored to SIGTERM's old handler below.
    old_usr1 = signal.getsignal(signal.SIGUSR1)
    yield
    signal.signal(signal.SIGALRM, old_alrm)
    signal.signal(signal.SIGINT, old_intr)
    signal.signal(signal.SIGTERM, old_term)
    signal.signal(signal.SIGUSR1, old_usr1)
@pytest.fixture
def set_timeout():
    # Yields a helper that arms a one-shot SIGALRM-based timer which
    # invokes `callback` after `sec` seconds of real time.
    def make_timeout(sec, callback):
        def _callback(signum, frame):
            signal.alarm(0)  # disarm so the callback fires only once
            callback()
        signal.signal(signal.SIGALRM, _callback)
        signal.setitimer(signal.ITIMER_REAL, sec)
    yield make_timeout
@pytest.fixture
def exec_recorder():
    # File-based event recorder that works across forked processes: each
    # process appends to its own "<prefix>.<pid>" file and read() merges
    # all of them.  The temp file itself is used only for its unique name.
    f = tempfile.NamedTemporaryFile(
        mode='w', encoding='utf8',
        prefix='aiotools.tests.server.',
    )
    f.close()

    def write(msg: str) -> None:
        path = f"{f.name}.{os.getpid()}"
        with open(path, 'a', encoding='utf8') as writer:
            writer.write(msg + '\n')

    def read() -> Sequence[str]:
        lines: List[str] = []
        for path in glob.glob(f"{f.name}.*"):
            with open(path, 'r', encoding='utf8') as reader:
                lines.extend(line.strip() for line in reader.readlines())
        return lines

    yield write, read
    # Clean up every per-process record file.
    for path in glob.glob(f"{f.name}.*"):
        os.unlink(path)
def interrupt():
    # Deliver SIGINT to the whole process group (pid 0 == caller's group).
    os.kill(0, signal.SIGINT)


def interrupt_usr1():
    # Deliver SIGUSR1 to the current process only.
    os.kill(os.getpid(), signal.SIGUSR1)
@aiotools.server  # type: ignore
async def myserver_simple(loop, proc_idx, args):
    # Minimal worker: records its own start/stop through the exec_recorder
    # writer passed via args.
    write = args[0]
    await asyncio.sleep(0)
    write(f'started:{proc_idx}')
    yield  # run until the framework requests termination
    await asyncio.sleep(0)
    write(f'terminated:{proc_idx}')
def test_server_singleproc(set_timeout, restore_signal, exec_recorder):
    """A single worker starts and terminates cleanly upon SIGINT."""
    write, read = exec_recorder
    # Interrupt the (blocking) server loop shortly after startup.
    set_timeout(0.2, interrupt)
    aiotools.start_server(
        myserver_simple,
        args=(write,),
    )
    lines = set(read())
    assert 'started:0' in lines
    assert 'terminated:0' in lines
def test_server_multiproc(set_timeout, restore_signal, exec_recorder):
    """All three workers start and terminate cleanly upon SIGINT."""
    write, read = exec_recorder
    set_timeout(0.2, interrupt)
    aiotools.start_server(
        myserver_simple,
        num_workers=3,
        args=(write,),
    )
    lines = set(read())
    assert lines == {
        'started:0', 'started:1', 'started:2',
        'terminated:0', 'terminated:1', 'terminated:2',
    }
@aiotools.server  # type: ignore
async def myserver_signal(loop, proc_idx, args):
    """Worker that records which stop-signal number it received at shutdown."""
    write = args[0]
    await asyncio.sleep(0)
    write(f'started:{proc_idx}')
    received_signum = yield  # aiotools sends the stop signal number here
    await asyncio.sleep(0)
    write(f'terminated:{proc_idx}:{received_signum}')
def test_server_multiproc_custom_stop_signals(
    set_timeout,
    restore_signal,
    exec_recorder,
):
    """A custom stop signal (SIGUSR1) is delivered to every worker's yield."""
    write, read = exec_recorder
    set_timeout(0.2, interrupt_usr1)
    aiotools.start_server(
        myserver_signal,
        num_workers=2,
        stop_signals={signal.SIGUSR1},
        args=(write,),
    )
    lines = set(read())
    assert {'started:0', 'started:1'} < lines
    assert {
        f'terminated:0:{int(signal.SIGUSR1)}',
        f'terminated:1:{int(signal.SIGUSR1)}',
    } < lines
@aiotools.server  # type: ignore
async def myserver_worker_init_error(loop, proc_idx, args):
    """Worker whose even-indexed instances crash during startup.

    Routes the 'aiotools' logger through the shared recorder so log output
    (including the ZeroDivisionError traceback) can be asserted on later.
    """
    write = args[0]
    class _LogAdaptor:
        # file-like shim: forwards each log record to the recorder
        def __init__(self, writer):
            self.writer = writer
        def write(self, msg):
            msg = msg.strip().replace('\n', ' ')
            self.writer(f'log:{proc_idx}:{msg}')
    log_stream = _LogAdaptor(write)
    logging.config.dictConfig({
        'version': 1,
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'stream': log_stream,
                'level': 'DEBUG',
            },
        },
        'loggers': {
            'aiotools': {
                'handlers': ['console'],
                'level': 'DEBUG',
            },
        },
    })
    log = logging.getLogger('aiotools')
    write(f'started:{proc_idx}')
    log.debug('hello')
    if proc_idx in (0, 2):
        # delay until other workers start normally.
        await asyncio.sleep(0.1 * proc_idx)
        raise ZeroDivisionError('oops')
    yield
    # should not be reached if errored.
    await asyncio.sleep(0)
    write(f'terminated:{proc_idx}')
def test_server_worker_init_error(restore_signal, exec_recorder):
    """Workers that crash during startup must abort the whole server cleanly."""
    write, read = exec_recorder
    aiotools.start_server(
        myserver_worker_init_error,
        num_workers=4,
        args=(write,),
    )
    lines = set(read())
    assert sum(1 if line.startswith('started:') else 0 for line in lines) == 4
    # workers who did not raise errors have already started,
    # and they should have terminated normally
    # when the erroneous worker interrupted the main loop.
    assert sum(1 if line.startswith('terminated:') else 0 for line in lines) == 2
    assert sum(1 if 'hello' in line else 0 for line in lines) == 4
    assert sum(1 if 'ZeroDivisionError: oops' in line else 0 for line in lines) == 2
def test_server_user_main(set_timeout, restore_signal):
    """The user-main generator runs around the workers; its yielded value is
    prepended to every worker's args tuple."""
    main_enter = False
    main_exit = False
    @aiotools.main
    def mymain_user_main():
        nonlocal main_enter, main_exit
        main_enter = True
        yield 987
        main_exit = True
    @aiotools.server  # type: ignore
    async def myworker_user_main(loop, proc_idx, args):
        assert args[0] == 987  # first arg from user main
        assert args[1] == 123  # second arg from start_server args
        yield
    set_timeout(0.2, interrupt)
    aiotools.start_server(
        myworker_user_main,
        mymain_user_main,
        num_workers=3,
        args=(123,),
    )
    assert main_enter
    assert main_exit
def test_server_user_main_custom_stop_signals(set_timeout, restore_signal):
    """A custom stop signal (SIGUSR1) must reach both the user-main generator
    and every worker's yield point.

    Fix: removed the unused local ``noop`` signal-handler stub that was never
    registered anywhere.
    """
    main_enter = False
    main_exit = False
    main_signal = None
    # shared array so worker processes can report the signal they received
    worker_signals = mp.Array('i', 3)
    @aiotools.main
    def mymain():
        nonlocal main_enter, main_exit, main_signal
        main_enter = True
        main_signal = yield  # receives the stop signal number
        main_exit = True
    @aiotools.server
    async def myworker(loop, proc_idx, args):
        worker_signals = args[0]
        worker_signals[proc_idx] = yield
    set_timeout(0.2, interrupt_usr1)
    aiotools.start_server(
        myworker,
        mymain,
        num_workers=3,
        stop_signals={signal.SIGUSR1},
        args=(worker_signals,),
    )
    assert main_enter
    assert main_exit
    assert main_signal == signal.SIGUSR1
    assert list(worker_signals) == [signal.SIGUSR1] * 3
def test_server_user_main_tuple(set_timeout, restore_signal):
    """A tuple yielded by user main is unpacked in front of the worker args."""
    main_enter = False
    main_exit = False
    @aiotools.main
    def mymain():
        nonlocal main_enter, main_exit
        main_enter = True
        yield 987, 654
        main_exit = True
    @aiotools.server
    async def myworker(loop, proc_idx, args):
        assert args[0] == 987  # first arg from user main
        assert args[1] == 654  # second arg from user main
        assert args[2] == 123  # third arg from start_server args
        yield
    set_timeout(0.2, interrupt)
    aiotools.start_server(
        myworker,
        mymain,
        num_workers=3,
        args=(123,),
    )
    assert main_enter
    assert main_exit
def test_server_extra_proc(set_timeout, restore_signal):
    """Extra (non-asyncio) processes run alongside workers and are
    interrupted together with the server."""
    extras = mp.Array('i', [0, 0])
    def extra_proc(key, _, pidx, args):
        assert _ is None
        extras[key] = 980 + key  # mark "running"
        try:
            while True:
                time.sleep(0.1)
        except KeyboardInterrupt:
            print(f'extra[{key}] interrupted', file=sys.stderr)
        except Exception as e:
            print(f'extra[{key}] exception', e, file=sys.stderr)
        finally:
            print(f'extra[{key}] finish', file=sys.stderr)
            extras[key] = 990 + key  # mark "finished"
    @aiotools.server
    async def myworker(loop, pidx, args):
        yield
    set_timeout(0.2, interrupt)
    aiotools.start_server(myworker, extra_procs=[
        functools.partial(extra_proc, 0),
        functools.partial(extra_proc, 1)],
        num_workers=3, args=(123, ))
    assert extras[0] == 990
    assert extras[1] == 991
def test_server_extra_proc_custom_stop_signal(set_timeout, restore_signal):
    """Extra processes observe custom stop signals via InterruptedBySignal."""
    received_signals = mp.Array('i', [0, 0])
    def extra_proc(key, _, pidx, args):
        received_signals = args[0]
        try:
            while True:
                time.sleep(0.1)
        except aiotools.InterruptedBySignal as e:
            # e.args[0] carries the signal number that stopped the server
            received_signals[key] = e.args[0]
    @aiotools.server
    async def myworker(loop, pidx, args):
        yield
    set_timeout(0.3, interrupt_usr1)
    aiotools.start_server(myworker, extra_procs=[
        functools.partial(extra_proc, 0),
        functools.partial(extra_proc, 1)],
        stop_signals={signal.SIGUSR1},
        args=(received_signals, ),
        num_workers=3)
    assert received_signals[0] == signal.SIGUSR1
    assert received_signals[1] == signal.SIGUSR1
|
yuanlonghao/reranking | reranking/metrics.py | <gh_stars>1-10
import math
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
# Public API of the metrics module.
__all__ = [
    "proportion",
    "skew",
    "skew_static",
    "kld",
    "ndcg_diff",
    "ndkl",
    "infeasible",
]
# Additive smoothing term keeping log-ratios finite when a proportion is 0.
EPSILON = 1e-12
def proportion(
    item_attributes: List[Any], attributes: Optional[Any] = None
) -> List[float]:
    """Return the share of each attribute among the recommended items.

    item_attributes: the attribute of each recommended item, in rank order
    attributes: the attributes to report on; defaults to the distinct
        attributes observed in ``item_attributes`` (arbitrary order)
    """
    if attributes is None:
        attributes = list(set(item_attributes))
    n_items = len(item_attributes)
    shares: List[float] = []
    for attr in attributes:
        shares.append(item_attributes.count(attr) / n_items)
    return shares
def skew(p_1: float, p_2: float) -> float:
    """Return the log-ratio skew between two proportions of one attribute.

    Both proportions are smoothed by EPSILON so a zero never reaches log().
    """
    numerator = p_1 + EPSILON
    denominator = p_2 + EPSILON
    return math.log(numerator / denominator)
def skew_static(
    distr_1: List[float], distr_2: List[float]
) -> Tuple[float, float, float]:
    """Return (min skew, max skew, mean absolute skew) over all attributes."""
    skews = [
        math.log((p1 + EPSILON) / (p2 + EPSILON))
        for p1, p2 in zip(distr_1, distr_2)
    ]
    mean_abs_skew = sum(abs(v) for v in skews) / len(skews)
    return min(skews), max(skews), mean_abs_skew
def kld(distr_1: List[float], distr_2: List[float]) -> float:
    """Return the KL divergence of distr_1 from distr_2 (EPSILON-smoothed)."""
    terms = [
        p1 * math.log((p1 + EPSILON) / (p2 + EPSILON))
        for p1, p2 in zip(distr_1, distr_2)
    ]
    return sum(terms)
def ndcg_diff(reranked_ranking: List[int], k_max: Optional[int] = None) -> float:
    """Calculate the NDCG of the ranking change.

    The ranking before re-ranking is the identity [0, 1, 2, ...]; an item of
    ``reranked_ranking`` counts as relevant iff it came from the original
    top-``k_max``.

    reranked_ranking: new ranking after the re-ranking process
    k_max: evaluate only the top-k_max positions (defaults to the full list)

    Returns 0.0 when no item from the original top-k remains (IDCG == 0).
    Fix: membership is now tested against a set instead of a list, turning an
    accidental O(k_max**2) scan into O(k_max).
    """
    if k_max is None:
        k_max = len(reranked_ranking)
    original_ranking = set(range(k_max))  # O(1) membership tests
    # NOTE(review): assumes len(reranked_ranking) >= k_max, as the original
    # code did — a shorter list breaks the element-wise division below.
    reranked_ranking = reranked_ranking[:k_max]
    pred_list = np.array([1 if i in original_ranking else 0 for i in reranked_ranking])
    cg_factor = np.log2(np.arange(2, k_max + 2))
    pred_list_sorted = np.sort(pred_list)[::-1]  # ideal ordering of gains
    dcg = np.sum(pred_list / cg_factor)
    idcg = np.sum(pred_list_sorted / cg_factor)
    ndcg: float = dcg / idcg if idcg != 0 else 0.0
    return ndcg
def ndkl(item_attributes: List[Any], dict_p: Dict[Any, float]) -> float:
    """Normalized discounted cumulative KL-divergence (NDKL).

    item_attributes: the attribute of each recommended item, in rank order
    dict_p: mapping from attribute to its desired proportion
    """
    n_items = len(item_attributes)
    # normalization constant: sum of the discount weights 1/log2(k+1)
    Z = np.sum(1 / (np.log2(np.arange(1, n_items + 1) + 1)))
    attrs = list(dict_p.keys())
    desired = list(dict_p.values())
    total = 0.0
    for k in range(1, n_items + 1):
        top_k = item_attributes[:k]
        observed = [top_k.count(attr) / len(top_k) for attr in attrs]
        total += kld(observed, desired) * (1 / math.log2(k + 1))
    res: float = (1 / Z) * total
    return res
def infeasible(
    item_attributes: List[Any],
    dict_p: Dict[Any, float],
    k_max: Optional[int] = None,
) -> Tuple[int, int]:
    """Calculate the infeasible_index and infeasible_count.

    For each prefix length k in [1, k_max), an attribute whose observed count
    falls below floor(k * desired_share) increments ``infeasible_count``;
    ``infeasible_index`` is additionally incremented when that attribute is
    present in the prefix at all.
    """
    # (this method is not general used though...)
    if k_max is None:
        k_max = len(item_attributes)
    infeasible_index = 0
    infeasible_count = 0
    for k in range(1, k_max):
        prefix_counts = pd.Series(item_attributes[:k]).value_counts().to_dict()
        for attr, desired_share in dict_p.items():
            observed = prefix_counts.get(attr, 0)
            if observed < math.floor(desired_share * k):
                infeasible_count += 1
                if observed != 0:
                    infeasible_index += 1
    return infeasible_index, infeasible_count
|
yuanlonghao/reranking | reranking/reranker.py | import math
from logging import getLogger
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import pandas as pd
logger = getLogger(__name__)
## TODO: deal with this situation: attributes = ["f1", ["f1", "f2"], "f2", ...]
class Reranker:
    """
    Re-ranking algorithms in the paper: Fairness-Aware Ranking in Search
    & Recommendation Systems with Application to LinkedIn Talent Search
    Attributes:
        item_attr:
            The attributes in order of each item ranked by recommendation system
            or search engine, from top to bottom. The values in the list can be
            feature index (int) or feature name (str).
        distr:
            The desired distribution for each attribute. The keys can be
            feature index (int) or feature name (str); values are proportions.
        max_na:
            The maximum number of different values of the attributes in distribution.
            The constraint is achieved by merging the n lowest probabilities attributes.
    """
    def __init__(
        self,
        item_attribute: List[Any],
        desired_distribution: Dict[Any, float],
        max_na: Optional[int] = None,
    ) -> None:
        self.item_attr = item_attribute
        self.distr = desired_distribution
        self.max_na = max_na
    def __call__(
        self,
        k_max: Optional[int] = None,
        algorithm: str = "det_greedy",
        verbose: bool = False,
    ) -> Union[List[int], pd.DataFrame]:
        """
        Re-ranks items by the four algorithms in the paper.
        Args:
            k_max: top k_max re-ranking items
            algorithm: name of one of the four algorithms
            verbose: if True, the output is dataframe with more information
        Attributes:
            df_formatted:
                Dataframe containing all the processed information.
            data:
                In format of {(`attribute index`, `rank in the attribute`): overall rank}.
            p:
                Desired distribution in list format.
        Reliability of the algorithms:
            According to the paper,
            1. `det_greedy`, `det_cons` and `det_relaxed` are guaranteed to be feasible if the category of
            the attributes is <=3.
            2. `det_greedy` is NOT guaranteed to be feasible if the category of the attributes is >=4.
            3. `det_const_sort` is guaranteed to be feasible.
        """
        k_max_ = len(self.item_attr) if k_max is None else k_max
        # invalid inputs degrade gracefully to the identity (model) ranking
        try:
            self.df_formatted, self.data, self.p = self._format_alg_input()
        except (ValueError, NameError) as e:
            logger.debug(f"Returning default ranking by the exception: `{e}`")
            return list(range(min(k_max_, len(self.item_attr))))
        if algorithm in ["det_greedy", "det_cons", "det_relaxed"]:
            return self.rerank_greedy(k_max_, algorithm, verbose)
        elif algorithm == "det_const_sort":
            return self.rerank_ics(k_max_, verbose)
        else:
            raise NotImplementedError(f"Invalid algorithm name: {algorithm}.")
    def _mask_item_attr_and_distr(self) -> Tuple[List[Any], Dict[Any, float]]:
        """
        Processes input item attributes and desired distribution to the proper form.
        Remark:
            set_1: attributes in `self.item_attr` and `self.distr`
            set_2: attributes in `self.item_attr` but not in `self.distr`
            set_3: attributes in `self.distr` but not in `self.item_attr`
        Process logic:
            1. Sum of the `self.distr` values is larger than 1: ValueError
            2. Merge the lowest value attributes in `self.distr` to meet `self.max_na`
            3. set_1 False: NameError
            4. set_1 True:
                - set_2 True, set_3 True: mask set_2 in `self.item_attr` and set_3 in `self.distr`
                - set_2 False, set_3 True: NameError (`self.distr` contains `self.item_attr`,
                  lack attribute in `self.item_attr`)
                - set_2 False, set_3 False: pass (`self.item_attr` set equals `self.distr` set)
                - set_2 True, set_3 False: mask set_2 in `self.item_attr`
        """
        item_attr = self.item_attr.copy()
        distr = self.distr.copy()
        distri_sum = sum(distr.values())
        if distri_sum > 1 + 1e-6:
            raise ValueError("Sum of desired attribute distribution larger than 1.")
        if self.max_na is not None:
            n_merge = len(distr) - self.max_na
            if n_merge > 0:
                # merge the (n_merge + 1) smallest shares so the resulting
                # distribution has exactly max_na keys including "masked"
                sorted_attrs = sorted(distr, key=lambda x: distr[x])
                distr = self._mask_distr(distr, sorted_attrs[: n_merge + 1])
        attr_in_item_in_distr = set(item_attr) & set(distr)  # set_1
        attr_in_item_not_distr = set(item_attr) - attr_in_item_in_distr  # set_2
        attr_in_distr_not_item = set(distr) - attr_in_item_in_distr  # set_3
        if attr_in_item_in_distr:
            if attr_in_distr_not_item and not attr_in_item_not_distr:
                raise NameError(
                    f"Wrong attribute in distribution: {attr_in_distr_not_item}."
                )
            elif not attr_in_distr_not_item and attr_in_item_not_distr:
                item_attr = [
                    "masked" if i in attr_in_item_not_distr else i for i in item_attr
                ]
            elif attr_in_distr_not_item and attr_in_item_not_distr:
                item_attr = [
                    "masked" if i in attr_in_item_not_distr else i for i in item_attr
                ]
                distr = self._mask_distr(distr, list(attr_in_distr_not_item))
            else:
                pass
        else:
            raise NameError("Item attribute and distribution have no intersection.")
        return (item_attr, distr)
    def _mask_distr(
        self, distr: Dict[Any, float], mask_attr: List[Any]
    ) -> Dict[Any, float]:
        """Masks distribution by specified attributes.

        The removed attributes' probability mass is pooled into one
        "masked" bucket so the distribution still sums to 1.
        """
        valid_distr = {k: v for k, v in distr.items() if k not in mask_attr}
        valid_distr["masked"] = 1.0 - sum(valid_distr.values())
        return valid_distr
    def _format_alg_input(
        self,
    ) -> Tuple[pd.DataFrame, Dict[Tuple[int, int], int], List[float]]:
        """Formats inputs of algorithms.
        Returns:
            df: dataframe contains processed information.
            data: in {(attribute_index, ranking in the attribute): overall ranking, ...} format.
            p: List[float] of distributions of each attribute.
        """
        item_attr, distr = self._mask_item_attr_and_distr()
        df = pd.DataFrame(
            {
                "model_rank": list(range(len(item_attr))),
                "attribute": item_attr,
                "attribute_enc": pd.factorize(item_attr)[0],
            }
        )
        df.sort_values(
            ["attribute_enc", "model_rank"], ascending=[True, True], inplace=True
        )
        df.reset_index(drop=True, inplace=True)
        # per-attribute rank: 0..len(group)-1 within each attribute, then the
        # per-group lists are concatenated by .sum() in attribute_enc order
        df["attri_rank"] = (
            df.groupby("attribute_enc")
            .apply(lambda x: [i for i in range(len(x))])
            .sum()
        )
        df_distr = pd.DataFrame({"attribute": distr.keys(), "distr": distr.values()})
        df = df.merge(df_distr, on="attribute", how="left").fillna(
            0.0
        )  # NaN distr values become 0.0
        data = {
            (attribute_enc, attri_rank): model_rank
            for attribute_enc, attri_rank, model_rank in zip(
                df.attribute_enc,
                df.attri_rank,
                df.model_rank,
            )
        }
        p = df.drop_duplicates("attribute_enc").distr.tolist()
        return (df, data, p)
    def rerank_greedy(
        self,
        k_max: int,
        algorithm: str,
        verbose: bool,
    ) -> Union[List[int], pd.DataFrame]:
        """Implements the greedy-based algorithms: `DetGreedy`, `DetCons`, `DetRelaxed`."""
        re_ranked_ranking: List[int] = []
        counts = {i: 0 for i in range(len(self.p))}  # items emitted per attribute
        for k in range(1, k_max + 1):
            below_min = {
                attr
                for attr, cnt in counts.items()
                if cnt < math.floor(k * self.p[attr])
            }  # minimum requirement violation
            below_max = {
                attr
                for attr, cnt in counts.items()
                if cnt >= math.floor(k * self.p[attr])
                and cnt < math.ceil(k * self.p[attr])
            }  # maximum requirement violation
            if below_min or below_max:
                try:
                    next_attr = self._process_violated_attributes(
                        below_min, below_max, counts, k, algorithm
                    )
                except KeyError as ke:  # not enough item of desired attribute
                    attr_short = ke.args[0][0]
                    logger.debug(
                        f"Lack of item of attribute {attr_short}, input the current top-rank item."
                    )
                    next_attr = self._get_top_rank_attr(re_ranked_ranking)
            else:  # below_min and below_max are empty
                next_attr = self._get_top_rank_attr(re_ranked_ranking)
            re_ranked_ranking.append(self.data[(next_attr, counts[next_attr])])
            counts[next_attr] += 1
        if verbose:
            return self._get_verbose(re_ranked_ranking)
        else:
            return re_ranked_ranking
    def rerank_ics(self, k_max: int, verbose: bool) -> Union[List[int], pd.DataFrame]:
        """Implements `DetConstSort` algorithm."""
        counts = {i: 0 for i in range(len(self.p))}
        min_counts = {i: 0 for i in range(len(self.p))}
        re_ranked_ranking_dict = {}  # position -> overall (model) rank
        max_indices_dict = {}  # position -> latest k allowed for that item
        last_empty = 0
        k = 0
        while last_empty < k_max:
            k += 1
            temp_min_counts = {
                idx: math.floor(k * self.p[idx]) for idx in range(len(self.p))
            }
            # attributes whose minimum requirement just increased at this k
            changed_mins = {a for a, s in min_counts.items() if s < temp_min_counts[a]}
            if changed_mins:
                vals = {}
                for a in changed_mins:
                    try:
                        vals[a] = self.data[(a, counts[a])]
                    except KeyError:  # not enough item of desired attribute
                        pass
                if vals:
                    # insert attributes in decreasing (rank, attr) order
                    ord_changed_mins = np.asarray(
                        (
                            sorted(
                                vals.items(),
                                key=lambda kv: (kv[1], kv[0]),
                                reverse=True,
                            )
                        )
                    )[:, 0].tolist()
                    for a in ord_changed_mins:
                        re_ranked_ranking_dict[last_empty] = self.data[(a, counts[a])]
                        max_indices_dict[last_empty] = k
                        start = last_empty
                        # bubble the new item up while the swap keeps every
                        # item within its allowed maximum index
                        while (
                            start > 0
                            and max_indices_dict[start - 1] >= start
                            and re_ranked_ranking_dict[start - 1]
                            > re_ranked_ranking_dict[start]
                        ):
                            self._swap_dict_values(max_indices_dict, start - 1, start)
                            self._swap_dict_values(
                                re_ranked_ranking_dict, start - 1, start
                            )
                            start -= 1
                        counts[a] += 1
                        last_empty += 1
            min_counts = temp_min_counts.copy()
        re_ranked_ranking = list(re_ranked_ranking_dict.values())
        if verbose:
            return self._get_verbose(re_ranked_ranking)
        else:
            return re_ranked_ranking
    def _process_violated_attributes(
        self,
        below_min: Set[int],
        below_max: Set[int],
        counts: Dict[int, int],
        k: int,
        algorithm: str,
    ) -> int:
        """
        Finds the attribute of the next item based on the violations criterias.

        Raises KeyError (caught by the caller) when the chosen attribute has
        no items left in `self.data`.
        """
        s: Dict[int, Union[int, float]] = {}
        if below_min:
            for i in below_min:
                s[i] = self.data[(i, counts[i])]
            # Get the desired attribute with toppest rank
            next_attr = min(s, key=lambda x: s[x])
        else:
            if algorithm == "det_greedy":
                for i in below_max:
                    s[i] = self.data[(i, counts[i])]
                next_attr = min(s, key=lambda x: s[x])
            elif algorithm == "det_cons":
                for i in below_max:
                    # prefer the attribute whose ceiling constraint binds last
                    s[i] = math.ceil(k * self.p[i]) / self.p[i]
                    self.data[(i, counts[i])]  # catch KeyError exception
                next_attr = max(s, key=lambda x: s[x])
            elif algorithm == "det_relaxed":
                ns = {}
                for i in below_max:
                    ns[i] = math.ceil(math.ceil(k * self.p[i]) / self.p[i])
                temp = min(ns.values())
                next_attr_set = [key for key in ns if ns[key] == temp]
                # break ties by the attribute whose next item ranks highest
                for i in next_attr_set:
                    s[i] = self.data[(i, counts[i])]
                next_attr = min(s, key=lambda x: s[x])
            else:
                raise NotImplementedError("Invalid name for the greedy algorithms.")
        return next_attr
    def _get_top_rank_attr(self, re_ranked_ranking: List[int]) -> int:
        """
        Gets next attribute which is of the toppest rank item in the remained items.
        Use when not enough item for the required attribute or both `below_min` and
        `below_max` are empty.
        """
        data_rest: Dict[Tuple[int, int], int] = {
            idx: rank
            for idx, rank in self.data.items()
            if rank not in re_ranked_ranking
        }
        next_attr = min(data_rest, key=lambda k: data_rest[k])[0]
        return next_attr
    def _get_verbose(self, re_ranked_ranking: List[int]) -> pd.DataFrame:
        """Join the re-ranked positions back onto the formatted dataframe."""
        df_verbose = pd.DataFrame(
            {
                "model_rank": re_ranked_ranking,
                "re_rank": [i for i in range(len(re_ranked_ranking))],
            }
        )
        df_verbose = df_verbose.merge(self.df_formatted, on="model_rank", how="left")
        df_verbose.drop(["attribute_enc", "attri_rank"], axis=1, inplace=True)
        return df_verbose
    @staticmethod
    def _swap_dict_values(
        dic: Dict[int, Any], key_1: int, key_2: int
    ) -> Dict[int, Any]:
        """Swaps values of two keys in a dictionary (in place)."""
        dic[key_1], dic[key_2] = dic[key_2], dic[key_1]
        return dic
|
yuanlonghao/reranking | tests/test_reranker.py | <filename>tests/test_reranker.py
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import pytest
from reranking.reranker import Reranker
class TestReranking:
    """Unit tests for reranking.reranker.Reranker."""
    @pytest.fixture
    def genders(self) -> List[str]:
        # fixed seed keeps the 70/30 male/female shuffle reproducible
        np.random.seed(seed=43)
        genders: List[str] = np.random.choice(
            ["male"] * 70 + ["female"] * 30, 100, replace=False
        ).tolist()
        return genders
    @pytest.mark.parametrize(
        "item_attributes, distribution",
        [
            ([1, 2, 3], {1: 0.3, 2: 0.4, 3: 0.5}),  # sum of distribution larger than 1
            ([1, 2, 3], {4: 0.3, 5: 0.4, 6: 0.3}),  # no intersection
            ([4, 5], {4: 0.3, 5: 0.4, 6: 0.3}),  # distr contains item attr
        ],
    )
    def test_exception_returns(
        self, item_attributes: List[Any], distribution: Dict[Any, float]
    ) -> None:
        """Invalid inputs raise internally but __call__ falls back to identity."""
        r = Reranker(item_attributes, distribution)
        with pytest.raises((NameError, ValueError)):
            r._format_alg_input()
        default_ranking = list(range(len(item_attributes)))
        reranking = r()
        assert default_ranking == reranking
    @pytest.mark.parametrize(
        "item_attributes, distribution, max_na, expected_item_attr, expected_distr_key, expected_data, expected_p",
        [
            (
                [2, 4, 5],
                {4: 0.3, 5: 0.4, 6: 0.3},
                None,
                ["masked", 4, 5],
                ["masked", 4, 5],
                {(0, 0): 0, (1, 0): 1, (2, 0): 2},
                [0.3, 0.3, 0.4],
            ),  # mask both
            (
                [4, 5, 6],
                {4: 0.3, 5: 0.4, 6: 0.3},
                None,
                [4, 5, 6],
                [4, 5, 6],
                {(0, 0): 0, (1, 0): 1, (2, 0): 2},
                [0.3, 0.4, 0.3],
            ),  # mask nothing
            (
                [4, 5, 6, 7],
                {4: 0.3, 5: 0.4, 6: 0.3},
                None,
                [4, 5, 6, "masked"],
                [
                    4,
                    5,
                    6,
                ],
                {(0, 0): 0, (1, 0): 1, (2, 0): 2, (3, 0): 3},
                [0.3, 0.4, 0.3, 0.0],
            ),  # mask item attr
            (
                [2, 3, 4],
                {2: 0.3, 3: 0.2, 4: 0.1, 5: 0.1, 6: 0.1},
                3,
                [2, 3, "masked"],
                [2, 3, "masked"],
                {(0, 0): 0, (1, 0): 1, (2, 0): 2},
                [0.3, 0.2, 0.5],
            ),  # max_na constraint case 1
            (
                [1, 2, 3, 4],
                {2: 0.3, 3: 0.2, 4: 0.1, 5: 0.1, 6: 0.1},
                3,
                ["masked", 2, 3, "masked"],
                [2, 3, "masked"],
                {(0, 0): 0, (0, 1): 3, (1, 0): 1, (2, 0): 2},
                [0.5, 0.3, 0.2],
            ),  # max_na constraint case 2
        ],
    )
    def test__format_alg_input(
        self,
        item_attributes: List[Any],
        distribution: Dict[Any, float],
        max_na: Optional[int],
        expected_item_attr: List[Any],
        expected_distr_key: List[Any],
        expected_data: Dict[Tuple[int, int], int],
        expected_p: List[float],
    ) -> None:
        """Masking and algorithm-input formatting produce the expected shapes."""
        r = Reranker(item_attributes, distribution, max_na=max_na)
        # test masking
        acutal_item_attr, acutal_distr = r._mask_item_attr_and_distr()
        assert set(expected_item_attr) == set(acutal_item_attr)
        assert set(expected_distr_key) == set(acutal_distr)
        # test algorithm inputs
        _, actual_data, actual_p = r._format_alg_input()
        assert expected_data == actual_data
        np.testing.assert_almost_equal(expected_p, actual_p)
    @pytest.mark.parametrize(
        "distribution, k_max, algorithm",
        [
            # test performce
            ({"female": 0.5, "male": 0.5}, 10, "det_greedy"),
            ({"female": 0.0, "male": 1.0}, 10, "det_greedy"),
            ({"female": 0.5, "male": 0.5}, 10, "det_cons"),
            ({"female": 0.0, "male": 1.0}, 10, "det_cons"),
            ({"female": 0.5, "male": 0.5}, 10, "det_relaxed"),
            ({"female": 0.0, "male": 1.0}, 10, "det_relaxed"),
            ({"female": 0.5, "male": 0.5}, 10, "det_const_sort"),
            ({"female": 0.0, "male": 1.0}, 10, "det_const_sort"),
            # test edge conditions
            ({"female": 0.5, "male": 0.5}, 70, "det_greedy"),
            ({"female": 0.5, "male": 0.5}, 100, "det_greedy"),
            ({"female": 0.5, "male": 0.5}, 70, "det_cons"),
            ({"female": 0.5, "male": 0.5}, 100, "det_cons"),
            ({"female": 0.5, "male": 0.5}, 70, "det_relaxed"),
            ({"female": 0.5, "male": 0.5}, 100, "det_relaxed"),
            ({"female": 0.5, "male": 0.5}, 70, "det_const_sort"),
            ({"female": 0.5, "male": 0.5}, 100, "det_const_sort"),
        ],
    )
    def test_algorithms(
        self,
        genders: List[Any],
        distribution: Dict[Any, float],
        k_max: int,
        algorithm: str,
    ) -> None:
        """Each algorithm meets the target counts, including exhaustion cases."""
        ranker = Reranker(genders, distribution)
        reranking = ranker(algorithm=algorithm, k_max=k_max)
        re_features = [genders[i] for i in reranking]
        acutal_male = re_features.count("male")
        actual_female = re_features.count("female")
        if k_max == 70:  # not enough items for female
            assert acutal_male == k_max - genders.count("female")
            assert actual_female == genders.count("female")
        elif k_max == 100:  # result distribution should be the overall distribution
            assert acutal_male == genders.count("male")
            assert actual_female == genders.count("female")
        else:  # enough item for each attribute
            assert acutal_male == distribution["male"] * k_max
            assert actual_female == distribution["female"] * k_max
|
yuanlonghao/reranking | setup.py | """
Publish steps:
- Confirm version number in `setup.py`.
- Delete the old version files in `/dist`.
- Wrap package: `python setup.py sdist bdist_wheel`
- Upload: `twine upload --repository-url https://upload.pypi.org/legacy/ dist/*` (username&password required)
"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="reranking",
version="0.3.6",
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yuanlonghao/reranking",
packages=setuptools.find_packages(),
python_requires=">=3.6",
)
|
yuanlonghao/reranking | tests/test_metrics.py | <gh_stars>1-10
from typing import Any, Dict, List
import pytest
from reranking.metrics import (
infeasible,
kld,
ndcg_diff,
ndkl,
proportion,
skew,
skew_static,
)
class TestMetrics:
    """Smoke tests for reranking.metrics.

    Fix: ``test_skew_static`` previously passed ``dict_p.keys()`` (the
    attribute labels) as the second distribution; ``skew_static`` expects two
    probability distributions, so it now receives ``dict_p.values()``.
    """
    @pytest.fixture
    def item_attributes(self) -> List[Any]:
        return [1, 1, 2, 3, 4]
    @pytest.fixture
    def dict_p(self) -> Dict[Any, float]:
        return {1: 0.5, 2: 0.2, 3: 0.1, 4: 0.3}
    def test_proportion(
        self,
        item_attributes: List[Any],
        dict_p: Dict[Any, float],
    ) -> None:
        """proportion() returns one float share per requested attribute."""
        actual = proportion(item_attributes, list(dict_p.keys()))
        assert all(isinstance(i, float) for i in actual)
    def test_skew(self) -> None:
        assert isinstance(skew(0.5, 0.5), float)
    def test_skew_static(
        self,
        item_attributes: List[Any],
        dict_p: Dict[Any, float],
    ) -> None:
        """skew_static() compares the observed and desired distributions."""
        item_distr = proportion(item_attributes)
        # compare against the desired *proportions*, not the attribute labels
        actual = skew_static(item_distr, list(dict_p.values()))
        assert all(isinstance(i, float) for i in actual)
    def test_kld(self) -> None:
        assert isinstance(kld([0.1, 0.3, 0.5, 0.0], [0.0, 0.5, 0.4, 0.1]), float)
    def test_ndcg_diff(self) -> None:
        assert isinstance(ndcg_diff([0, 1, 4, 3], 4), float)
        assert ndcg_diff([0, 1, 2, 3], 4) == 1.0
        assert ndcg_diff([4, 5, 6, 7], 4) == 0.0
    def test_ndkl(
        self,
        item_attributes: List[Any],
        dict_p: Dict[Any, float],
    ) -> None:
        assert isinstance(ndkl(item_attributes, dict_p), float)
    def test_infeasible(
        self,
        item_attributes: List[Any],
        dict_p: Dict[Any, float],
    ) -> None:
        infeasible_index, infeasible_count = infeasible(item_attributes, dict_p, 5)
        assert isinstance(infeasible_index, int)
        assert isinstance(infeasible_count, int)
|
yuanlonghao/reranking | reranking/__init__.py | <reponame>yuanlonghao/reranking
from typing import Any, Dict, List, Optional, Union
import pandas as pd
from .metrics import *
from .reranker import Reranker
def rerank(
    item_attribute: List[Any],
    desired_distribution: Dict[Any, float],
    max_na: Optional[int] = None,
    k_max: Optional[int] = None,
    algorithm: str = "det_greedy",
    verbose: bool = False,
) -> Union[List[int], pd.DataFrame]:
    """Convenience wrapper: build a Reranker and run it once."""
    reranker = Reranker(item_attribute, desired_distribution, max_na)
    return reranker(k_max, algorithm, verbose)
|
Jaraxxus-Me/UAVpytrackers_for_Odroid | bacflibs/bacffeatures.py |
import numpy as np
import pickle
import os
import cv2
from bacflibs import _gradient
from bacflibs import otb_hc_config
def mround(x):
x_ = x.copy()
idx = (x - np.floor(x)) >= 0.5
x_[idx] = np.floor(x[idx]) + 1
idx = ~idx
x_[idx] = np.floor(x[idx])
return x_
def fhog(I, bin_size=8, num_orients=9, clip=0.2, crop=False):
    """Felzenszwalb HOG features computed by the compiled `_gradient` module.

    I: input image, converted to float32 before the gradient pass.
    NOTE(review): `crop` is accepted but never used here — confirm intent.
    """
    # -1 is forwarded as the soft_bin flag of the C implementation;
    # presumably it selects soft binning — TODO confirm against _gradient.
    soft_bin = -1
    M, O = _gradient.gradMag(I.astype(np.float32), 0, True)
    H = _gradient.fhog(M, O, bin_size, num_orients, soft_bin, clip)
    return H
class Feature:
    """Base class for hand-crafted feature extractors (patch sampling,
    size bookkeeping, and normalization shared by subclasses)."""
    def __init__(self,config=otb_hc_config.OTBHCConfig()):
        # NOTE(review): the default config object is created once at class
        # definition time and shared by every instance — confirm this
        # sharing is intended.
        self.config=config
    def init_size(self, img_sample_sz, cell_size=None):
        """Snap img_sample_sz so each cell size divides it; prefer odd dims."""
        if cell_size is not None:
            max_cell_size = max(cell_size)
            new_img_sample_sz = (1 + 2 * mround(img_sample_sz / ( 2 * max_cell_size))) * max_cell_size
            feature_sz_choices = np.array([(new_img_sample_sz.reshape(-1, 1) + np.arange(0, max_cell_size).reshape(1, -1)) // x for x in cell_size])
            # pick the offset producing the most odd feature-map dimensions
            num_odd_dimensions = np.sum((feature_sz_choices % 2) == 1, axis=(0,1))
            best_choice = np.argmax(num_odd_dimensions.flatten())
            img_sample_sz = mround(new_img_sample_sz + best_choice)
        self.sample_sz = img_sample_sz
        # _cell_size is expected to be set by the subclass (e.g. TableFeature)
        self.data_sz = [img_sample_sz // self._cell_size]
        return img_sample_sz
    def _sample_patch(self, im, pos, sample_sz, output_sz):
        """Crop a patch centered at pos (replicating out-of-image borders)
        and resize it to output_sz."""
        pos = np.floor(pos)
        sample_sz = np.maximum(mround(sample_sz), 1)
        xs = np.floor(pos[1]) + np.arange(0, sample_sz[1]+1) - np.floor((sample_sz[1]+1)/2)
        ys = np.floor(pos[0]) + np.arange(0, sample_sz[0]+1) - np.floor((sample_sz[0]+1)/2)
        xmin = max(0, int(xs.min()))
        xmax = min(im.shape[1], int(xs.max()))
        ymin = max(0, int(ys.min()))
        ymax = min(im.shape[0], int(ys.max()))
        # extract image
        im_patch = im[ymin:ymax, xmin:xmax, :]
        left = right = top = down = 0
        if xs.min() < 0:
            left = int(abs(xs.min()))
        if xs.max() > im.shape[1]:
            right = int(xs.max() - im.shape[1])
        if ys.min() < 0:
            top = int(abs(ys.min()))
        if ys.max() > im.shape[0]:
            down = int(ys.max() - im.shape[0])
        if left != 0 or right != 0 or top != 0 or down != 0:
            im_patch = cv2.copyMakeBorder(im_patch, top, down, left, right, cv2.BORDER_REPLICATE)
        # im_patch = cv2.resize(im_patch, (int(output_sz[0]), int(output_sz[1])))
        im_patch = cv2.resize(im_patch, (int(output_sz[1]), int(output_sz[0])), cv2.INTER_CUBIC)
        if len(im_patch.shape) == 2:
            # keep a trailing channel axis for grayscale patches
            im_patch = im_patch[:, :, np.newaxis]
        return im_patch
    def _feature_normalization(self, x):
        """Apply the power/size normalization configured in self.config,
        always returning float32."""
        if hasattr(self.config, 'normalize_power') and self.config.normalize_power > 0:
            if self.config.normalize_power == 2:
                x = x * np.sqrt((x.shape[0]*x.shape[1]) ** self.config.normalize_size * (x.shape[2] ** self.config.normalize_dim) / (x ** 2).sum(axis=(0, 1, 2)))
            else:
                x = x * ((x.shape[0]*x.shape[1]) ** self.config.normalize_size) * (x.shape[2] ** self.config.normalize_dim) / ((np.abs(x) ** (1. / self.config.normalize_power)).sum(axis=(0, 1, 2)))
        if self.config.square_root_normalization:
            x = np.sign(x) * np.sqrt(np.abs(x))
        return x.astype(np.float32)
class TableFeature(Feature):
    """Lookup-table features (e.g. Color Names): quantized pixel values index
    a pickled table; results are average-pooled over cells."""
    def __init__(self, fname, compressed_dim, table_name, use_for_color, cell_size=1,config=otb_hc_config.OTBHCConfig()):
        super(TableFeature,self).__init__(config)
        self.fname = fname
        self._table_name = table_name
        self._color = use_for_color
        self._cell_size = cell_size
        self._compressed_dim = [compressed_dim]
        self._factor = 32  # quantization levels per RGB channel (256 / _den)
        self._den = 8  # quantization step per channel
        # load table
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self._table = pickle.load(open(os.path.join(dir_path, "lookup_tables", self._table_name+".pkl"), "rb"))
        self.num_dim = [self._table.shape[1]]
        self.min_cell_size = self._cell_size
        self.penalty = [0.]
        self.sample_sz = None
        self.data_sz = None
    def integralVecImage(self, img):
        """Channel-wise integral image with a zero first row/column."""
        w, h, c = img.shape
        intImage = np.zeros((w+1, h+1, c), dtype=img.dtype)
        intImage[1:, 1:, :] = np.cumsum(np.cumsum(img, 0), 1)
        return intImage
    def average_feature_region(self, features, region_size):
        """Average-pool over non-overlapping region_size x region_size cells
        using the integral image."""
        region_area = region_size ** 2
        if features.dtype == np.float32:
            maxval = 1.
        else:
            maxval = 255
        intImage = self.integralVecImage(features)
        i1 = np.arange(region_size, features.shape[0]+1, region_size).reshape(-1, 1)
        i2 = np.arange(region_size, features.shape[1]+1, region_size).reshape(1, -1)
        region_image = (intImage[i1, i2, :] - intImage[i1, i2-region_size,:] - intImage[i1-region_size, i2, :] + intImage[i1-region_size, i2-region_size, :]) / (region_area * maxval)
        return region_image
    def get_features(self, img, pos, sample_sz, scales,normalization=True):
        """Extract table features for each scale; scale results are stacked
        along a new 4th axis and returned inside a one-element list."""
        feat = []
        if not isinstance(scales, list) and not isinstance(scales, np.ndarray):
            scales = [scales]
        for scale in scales:
            patch = self._sample_patch(img, pos, sample_sz*scale, sample_sz)
            h, w, c = patch.shape
            if c == 3:
                RR = patch[:, :, 0].astype(np.int32)
                GG = patch[:, :, 1].astype(np.int32)
                BB = patch[:, :, 2].astype(np.int32)
                # quantize RGB into one flat table index per pixel
                index = RR // self._den + (GG // self._den) * self._factor + (BB // self._den) * self._factor * self._factor
                features = self._table[index.flatten()].reshape((h, w, self._table.shape[1]))
            else:
                # grayscale: pixel value indexes the table directly
                features = self._table[patch.flatten()].reshape((h, w, self._table.shape[1]))
            if self._cell_size > 1:
                features = self.average_feature_region(features, self._cell_size)
            feat.append(features)
        feat=np.stack(feat, axis=3)
        if normalization is True:
            feat = self._feature_normalization(feat)
        return [feat]
|
Jaraxxus-Me/UAVpytrackers_for_Odroid | lib/eco/config/gpu_config.py | class GPUConfig:
use_gpu=False
gpu_id=2
gpu_config=GPUConfig() |
Jaraxxus-Me/UAVpytrackers_for_Odroid | autotrack.py | <gh_stars>0
"""
Python re-implemented of "Learning Spatial-Temporal Regularized Correlation Filters for Visual Tracking"
@inproceedings{li2018learning,
title={Learning spatial-temporal regularized correlation filters for visual tracking},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={4904--4913},
year={2018}
}
"""
import numpy as np
import cv2
# import time
from bacflibs.utils import cos_window,gaussian2d_rolled_labels
from bacflibs.fft_tools import fft2,ifft2
from numpy.fft import fft, ifft
# from bacflibs.feature import extract_hog_feature,extract_pyhog_feature,extract_cn_feature
from bacflibs.feature import extract_hog_feature,extract_cn_feature
import autotrack_config
from bacflibs.cf_utils import resp_newton,mex_resize,resize_dft2,circShift
# from bacflibs.scale_estimator import DSSTScaleEstimator
class AutoTrack(object):
def __init__(self, config=None):
    """Build an AutoTrack tracker from a configuration object.

    config: an ``autotrack_config.AutoTrackConfig``; when omitted, a fresh
    default config is created per instance. (The old signature evaluated
    ``AutoTrackConfig()`` once at import time and shared that single mutable
    object between all trackers.)
    """
    if config is None:
        config = autotrack_config.AutoTrackConfig()
    # Fixed: ``super(AutoTrack).__init__()`` created an unbound super proxy
    # and never invoked the base-class initializer.
    super().__init__()
    # sample and feature parameters
    self.name = 'AutoTrack'
    self.hog_cell_size = config.hog_cell_size
    self.hog_n_dim = config.hog_n_dim
    self.gray_cell_size = config.gray_cell_size
    self.cn_use_for_gray = config.cn_use_for_gray
    self.cn_cell_size = config.cn_cell_size
    self.cn_n_dim = config.cn_n_dim
    self.cell_size = self.hog_cell_size
    self.search_area_shape = config.search_area_shape
    self.search_area_scale = config.search_area_scale
    self.min_image_sample_size = config.min_image_sample_size
    self.max_image_sample_size = config.max_image_sample_size
    self.feature_downsample_ratio = config.feature_downsample_ratio
    self.reg_window_max = config.reg_window_max
    self.reg_window_min = config.reg_window_min
    # detection parameters
    self.refinement_iterations = config.refinement_iterations
    self.newton_iterations = config.newton_iterations
    self.clamp_position = config.clamp_position
    # learning parameters
    self.output_sigma_factor = config.output_sigma_factor
    self.nu = config.nu
    self.zeta = config.zeta
    self.delta = config.delta
    self.epsilon = config.epsilon
    self.lam = config.admm_lambda
    # ADMM parameters
    self.admm_max_iterations = config.max_iterations
    self.init_penalty_factor = config.init_penalty_factor
    self.max_penalty_factor = config.max_penalty_factor
    self.penalty_scale_step = config.penalty_scale_step
    # scale parameters
    self.number_of_scales = config.number_of_scales
    self.scale_step = config.scale_step
    self.use_mex_resize = True
    self.scale_type = config.scale_type
    self.scale_config = config.scale_config
    self.normalize_power = config.normalize_power
    self.normalize_size = config.normalize_size
    self.normalize_dim = config.normalize_dim
    self.square_root_normalization = config.square_root_normalization
    self.config = config
def init(self,first_frame,bbox):
    """Initialize tracker state from the first frame and train the first filter.

    first_frame: BGR image of shape (H, W, 3).
    bbox: initial target box (x0, y0, w, h).
    Raises ValueError for an unknown ``search_area_shape``.
    """
    bbox = np.array(bbox).astype(np.int64)
    x0, y0, w, h = tuple(bbox)
    self._center = (int(x0 + w / 2),int(y0 + h / 2))
    self.target_sz=(w,h)
    # Clamp the initial scale so the sampled search region stays inside the
    # configured min/max pixel area.
    search_area=self.target_sz[0]*self.search_area_scale*self.target_sz[1]*self.search_area_scale
    self.sc=np.clip(1,a_min=np.sqrt(search_area/self.max_image_sample_size),a_max=np.sqrt(search_area/self.min_image_sample_size))
    self.base_target_sz=(self.target_sz[0]/self.sc,self.target_sz[1]/self.sc)
    # Geometry of the search region around the target.
    if self.search_area_shape=='proportional':
        self.crop_size=(int(self.base_target_sz[0]*self.search_area_scale),int(self.base_target_sz[1]*self.search_area_scale))
    elif self.search_area_shape=='square':
        w=int(np.sqrt(self.base_target_sz[0]*self.base_target_sz[1])*self.search_area_scale)
        self.crop_size=(w,w)
    elif self.search_area_shape=='fix_padding':
        tmp=int(np.sqrt(self.base_target_sz[0]*self.search_area_scale+(self.base_target_sz[1]-self.base_target_sz[0])/4))+\
            (self.base_target_sz[0]+self.base_target_sz[1])/2
        self.crop_size=(self.base_target_sz[0]+tmp,self.base_target_sz[1]+tmp)
    else:
        raise ValueError
    # Gaussian label bandwidth, proportional to the target area in cells.
    output_sigma = np.sqrt(np.floor(self.base_target_sz[0]/self.cell_size)*np.floor(self.base_target_sz[1]/self.cell_size))*\
        self.output_sigma_factor
    # Round the crop to a whole number of feature cells.
    self.crop_size = (int(round(self.crop_size[0] / self.cell_size) * self.cell_size),
                      int(round(self.crop_size[1] / self.cell_size) * self.cell_size))
    self.feature_map_sz = (self.crop_size[0] // self.cell_size, self.crop_size[1] // self.cell_size)
    y=gaussian2d_rolled_labels(self.feature_map_sz,output_sigma)
    self.cosine_window=(cos_window((y.shape[1],y.shape[0])))
    self.yf=fft2(y)
    # Spatial regularization window sized to the (downsampled) target.
    self.reg_scale=(int(np.floor(self.base_target_sz[0]/self.feature_downsample_ratio)),
                    int(np.floor(self.base_target_sz[1] / self.feature_downsample_ratio)))
    use_sz = self.feature_map_sz
    self.interp_sz=use_sz
    self.range_h,self.range_w,self.reg_window=self.create_reg_window_const(self.reg_scale,use_sz,self.reg_window_max,self.reg_window_min)
    # Shifted frequency index grids used by the Newton sub-pixel refinement.
    self.ky = np.roll(np.arange(-int(np.floor((self.feature_map_sz[1] - 1) / 2)),
                                int(np.ceil((self.feature_map_sz[1] - 1) / 2 + 1))),
                      -int(np.floor((self.feature_map_sz[1] - 1) / 2)))
    self.kx = np.roll(np.arange(-int(np.floor((self.feature_map_sz[0] - 1) / 2)),
                                int(np.ceil((self.feature_map_sz[0] - 1) / 2 + 1))),
                      -int(np.floor((self.feature_map_sz[0] - 1) / 2)))
    if self.number_of_scales>0:
        # Bounds so the target never shrinks below 5 px or grows past the frame.
        self._min_scale_factor = self.scale_step ** np.ceil(
            np.log(np.max(5 / np.array(([self.crop_size[0], self.crop_size[1]])))) / np.log(self.scale_step))
        self._max_scale_factor = self.scale_step ** np.floor(np.log(np.min(
            first_frame.shape[:2] / np.array([self.base_target_sz[1], self.base_target_sz[0]]))) / np.log(
            self.scale_step))
    # Dedicated DSST correlation filter for scale estimation (defined later
    # in this file).
    self.scale_estimator = DSSTScaleEstimator(self.target_sz, config=self.scale_config)
    self.scale_estimator.init(first_frame, self._center, self.base_target_sz, self.sc)
    # Extract features, window them, and train the initial filter via ADMM.
    patch = self.get_sub_window(first_frame, self._center, model_sz=self.crop_size,
                                scaled_sz=(int(np.round(self.crop_size[0] * self.sc)),
                                           int(np.round(self.crop_size[1] * self.sc))))
    patch=patch[:,:,[2,1,0]]   # BGR -> RGB
    xl_hc = self.extrac_hc_feature(patch, self.cell_size)
    xlf_hc = fft2(xl_hc * self.cosine_window[:, :, None])
    mu=0   # no temporal regularization on the first frame
    self.occ=False
    self.frame_id=1
    self.g_pre=np.zeros_like(xlf_hc)
    if self.occ==False:
        self.ADMM(xlf_hc,mu)
def update(self,current_frame,frame_id,vis=False):
    """Track the target in ``current_frame`` and update the filter.

    current_frame: BGR image of shape (H, W, 3).
    frame_id: 1-based frame counter (temporal regularization kicks in
        for frame_id > 2).
    vis: kept for interface compatibility; unused here.
    Returns the estimated box as [x, y, w, h].
    """
    self.frame_id=frame_id
    assert len(current_frame.shape) == 3 and current_frame.shape[2] == 3
    # --- detection: correlate the filter with features around the old center
    sample_pos=(int(np.round(self._center[0])),int(np.round(self._center[1])))
    sub_window = self.get_sub_window(current_frame, sample_pos, model_sz=self.crop_size,
                                     scaled_sz=(int(round(self.crop_size[0] * self.sc)),
                                                int(round(self.crop_size[1] * self.sc))))
    sub_window=sub_window[:,:,[2,1,0]]   # BGR -> RGB
    xt_hc = self.extrac_hc_feature(sub_window, self.cell_size)
    xtw_hc=xt_hc*self.cosine_window[:,:,None]
    xtf_hc=fft2(xtw_hc)
    responsef_hc=np.sum(np.conj(self.g_f)[:,:,:]*xtf_hc,axis=2)
    responsef_padded=resize_dft2(responsef_hc,self.interp_sz)
    response = np.real(ifft2(responsef_padded))
    # Sub-pixel peak localization via Newton iterations.
    disp_row,disp_col,sind=resp_newton(response,responsef_padded,self.newton_iterations,self.ky,self.kx,self.feature_map_sz)
    if frame_id>2:
        # Align current and previous responses on their peaks, then use the
        # local response variation to adapt the regularization and detect
        # occlusion.
        response_shift=circShift(response,[-int(np.floor(disp_row)),-int(np.floor(disp_col))])
        response_pre_shift=circShift(self.response_pre,[-int(np.floor(self.disp_row_pre)),-int(np.floor(self.disp_col_pre))])
        response_diff=np.abs(np.abs(response_shift-response_pre_shift)/response_pre_shift)
        self.ref_mu, self.occ = self.updateRefmu(response_diff,self.zeta,self.nu,frame_id)
        response_diff=circShift(response_diff,[int(np.floor(response_diff.shape[0]/2)),int(np.floor(response_diff.shape[1]/2))])
        varience=self.delta*np.log(response_diff[self.range_h[0]:(self.range_h[-1]+1), self.range_w[0]:(self.range_w[-1]+1)]+1)
        self.reg_window[self.range_h[0]:(self.range_h[-1]+1), self.range_w[0]:(self.range_w[-1]+1)] = varience
    self.response_pre=response
    self.disp_row_pre=disp_row
    self.disp_col_pre=disp_col
    # Convert the cell-level displacement to pixels and move the center.
    dx, dy = (disp_col * self.cell_size*self.sc), (disp_row * self.cell_size*self.sc)
    self._center = (np.round(sample_pos[0] +dx), np.round(sample_pos[1] + dy))
    # --- scale search and clamping
    self.sc = self.scale_estimator.update(current_frame, self._center, self.base_target_sz,
                                          self.sc)
    if self.scale_type == 'normal':
        self.sc = np.clip(self.sc, a_min=self._min_scale_factor,
                          a_max=self._max_scale_factor)
    # --- training on the newly localized patch
    patch = self.get_sub_window(current_frame, self._center, model_sz=self.crop_size,
                                scaled_sz=(int(np.round(self.crop_size[0] * self.sc)),
                                           int(np.round(self.crop_size[1] * self.sc))))
    patch=patch[:,:,[2,1,0]]
    # Fixed: a redundant ``self.extrac_feature_test(patch, (50, 50))`` call
    # ran a full debug feature extraction whose result was immediately
    # overwritten by the line below; it has been removed (the unused
    # ``trans_vec``/``shift_sample_pos`` locals that only fed commented-out
    # code were dropped as well).
    xl_hc = self.extrac_hc_feature(patch, self.cell_size)
    xlw_hc = xl_hc * self.cosine_window[:, :, None]
    xlf_hc = fft2(xlw_hc)
    mu = self.zeta
    if not self.occ:
        self.ADMM(xlf_hc,mu)
    target_sz=(self.base_target_sz[0]*self.sc,self.base_target_sz[1]*self.sc)
    return [(self._center[0] - (target_sz[0]) / 2), (self._center[1] -(target_sz[1]) / 2), target_sz[0],target_sz[1]]
def extrac_hc_feature(self, patch, cell_size, normalization=False):
    """Extract hand-crafted features (HOG + Color Names) for an image patch."""
    hog_map = extract_hog_feature(patch, cell_size)
    cn_map = extract_cn_feature(patch, cell_size)
    combined = np.concatenate((hog_map, cn_map), axis=2)
    if normalization is True:
        combined = self._feature_normalization(combined)
    return combined
def get_sub_window(self, img, center, model_sz, scaled_sz=None):
    """Crop a patch centered at ``center`` and bring it to ``model_sz``.

    When ``scaled_sz`` is given, a patch of that size is cropped first and
    then resized to ``model_sz``. Returns a uint8 image.
    """
    model_sz = (int(model_sz[0]), int(model_sz[1]))
    crop_sz = model_sz if scaled_sz is None else scaled_sz
    # never crop smaller than 2x2 pixels
    crop_sz = (max(int(crop_sz[0]), 2), max(int(crop_sz[1]), 2))
    patch = cv2.getRectSubPix(img, crop_sz, center)
    if scaled_sz is not None:
        patch = mex_resize(patch, model_sz)
    return patch.astype(np.uint8)
def ADMM(self,xlf,mu):
    """Train the correlation filter ``self.g_f`` by ADMM.

    xlf: windowed training features in the Fourier domain, shape (h, w, c).
    mu: temporal-regularization weight (0 on the first frame).
    Iterates over the g / h subproblems with Lagrange multiplier ``l_f``
    and an increasing penalty ``gamma``; stores the result in ``self.g_f``
    and keeps it as ``self.g_pre`` for the next frame.
    """
    model_xf = xlf
    self.l_f = np.zeros_like(model_xf)
    self.g_f = np.zeros_like(self.l_f)
    self.h_f = np.zeros_like(self.l_f)
    self.gamma =self.init_penalty_factor
    self.gamma_max =self.max_penalty_factor
    self.gamma_scale_step = self.penalty_scale_step
    T = self.feature_map_sz[0] * self.feature_map_sz[1]   # number of spatial samples
    # Cross-channel energy terms reused across iterations.
    S_xx = np.sum(np.conj(model_xf) * model_xf, axis=2)
    Sg_pre_f = np.sum(np.conj(model_xf) * self.g_pre, axis=2)
    Sgx_pre_f = model_xf * Sg_pre_f[:, :, None]
    iter = 1
    while iter <= self.admm_max_iterations:
        # subproblem g: closed-form per-pixel solution
        B = S_xx + T * (self.gamma + mu)
        Slx_f = np.sum(np.conj(model_xf) * self.l_f, axis=2)
        Shx_f = np.sum(np.conj(model_xf) * self.h_f, axis=2)
        tmp0 = (1 / (T * (self.gamma + mu)) * (self.yf[:, :, None] * model_xf)) - ((1 / (self.gamma + mu)) * self.l_f) + (
            self.gamma / (self.gamma + mu)) * self.h_f + \
               (mu / (self.gamma + mu)) * self.g_pre
        tmp1 = 1 / (T * (self.gamma + mu)) * (model_xf * ((S_xx * self.yf)[:, :, None]))
        tmp2 = mu / (self.gamma + mu) * Sgx_pre_f
        tmp3 = 1 / (self.gamma + mu) * (model_xf * (Slx_f[:, :, None]))
        tmp4 = self.gamma / (self.gamma + mu) * (model_xf * Shx_f[:, :, None])
        self.g_f = (tmp0 - (tmp1 + tmp2 - tmp3 +tmp4) / B[:, :, None]).astype(np.complex64)
        # subproblem h: spatially regularized scaling in the image domain
        self.h_f = fft2(self.argmin_g(self.reg_window, self.gamma, T, ifft2(self.gamma * (self.g_f + self.l_f))))
        # subproblem mu: adapt the temporal weight from the filter change.
        # NOTE(review): ``mu`` is overwritten inside the channel loop, so
        # only the last channel's value survives — confirm this is intended.
        if self.frame_id>2 and iter<self.admm_max_iterations:
            for i in range(self.g_f.shape[2]):
                z=np.power(np.linalg.norm((self.g_f[:,:,i]-self.g_pre[:,:,i]),2),2)/(2*self.epsilon)
                mu=self.ref_mu-z
        # update the Lagrange multiplier
        self.l_f = self.l_f + (self.gamma * (self.g_f - self.h_f))
        # increase the penalty factor, capped at gamma_max
        self.gamma = min(self.gamma_scale_step * self.gamma, self.gamma_max)
        iter += 1
    self.g_pre=self.g_f
def argmin_g(self, w0, zeta, T, X):
    """Closed-form solution of the h-subproblem: elementwise scaling of X."""
    denom = self.lam * w0 ** 2 + T * zeta
    return (T / denom)[:, :, None] * X
# def create_reg_window(self,reg_scale,use_sz,p,reg_window_max,reg_window_min,alpha,beta):
# range_ = np.zeros((2, 2))
# for j in range(len(use_sz)):
# if use_sz[0]%2==1 and use_sz[1]%2==1:
# if int(reg_scale[j]) % 2 == 1:
# range_[j, :] = np.array([-np.floor(use_sz[j] / 2), np.floor(use_sz[j] / 2)])
# else:
# range_[j, :] = np.array([-(use_sz[j] / 2 - 1), (use_sz[j] / 2)])
# else:
# if int(reg_scale[j]) % 2 == 1:
# range_[j, :] = np.array([-np.floor(use_sz[j] / 2), (np.floor((use_sz[j] - 1) / 2))])
# else:
# range_[j, :] = np.array([-((use_sz[j] - 1) / 2),((use_sz[j] - 1) / 2)])
# wrs = np.arange(range_[1, 0], range_[1, 1] + 1)
# wcs = np.arange(range_[0, 0], range_[0, 1] + 1)
# wrs, wcs = np.meshgrid(wrs, wcs)
# res = (np.abs(wrs) / reg_scale[1]) ** p + (np.abs(wcs) / reg_scale[0]) ** p
# reg_window = reg_window_max / (1 + np.exp(-1. * alpha * (np.power(res, 1. / p) -beta))) +reg_window_min
# reg_window=reg_window.T
# return reg_window
def create_reg_window_const(self, reg_scale, use_sz, reg_window_max, reg_window_min):
    """Build a constant spatial regularization window.

    The window is ``reg_window_max`` everywhere except a centered
    ``reg_scale``-sized rectangle (the target area) set to
    ``reg_window_min``.
    Returns (row index range, column index range, window) where the window
    has shape (use_sz[1], use_sz[0]).
    """
    reg_window = np.ones((use_sz[1], use_sz[0])) * reg_window_max
    range_ = np.zeros((2, 2))
    for j in range(2):
        range_[j, :] = np.array([0, reg_scale[j] - 1]) - np.floor(reg_scale[j] / 2)
    cx = int(np.floor((use_sz[0] + 1) / 2)) + (use_sz[0] + 1) % 2 - 1
    cy = int(np.floor((use_sz[1] + 1) / 2)) + (use_sz[1] + 1) % 2 - 1
    # Fixed: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is
    # the equivalent dtype.
    range_h = np.arange(cy + range_[1, 0], cy + range_[1, 1] + 1).astype(int)
    range_w = np.arange(cx + range_[0, 0], cx + range_[0, 1] + 1).astype(int)
    a_h, a_w = np.meshgrid(range_h, range_w)
    reg_window[a_h, a_w] = reg_window_min
    return range_h, range_w, reg_window
def _feature_normalization(self, x):
    """Normalize a feature array by the configured power/size/dim factors.

    Duplicate of the helper in the feature module; see the NOTE below.
    Returns the normalized array as float32.
    """
    if hasattr(self.config, 'normalize_power') and self.config.normalize_power > 0:
        if self.config.normalize_power == 2:
            # L2 scaling toward the configured spatial/channel size factors.
            x = x * np.sqrt((x.shape[0]*x.shape[1]) ** self.config.normalize_size * (x.shape[2] ** self.config.normalize_dim) / (x ** 2).sum(axis=(0, 1, 2)))
        else:
            # NOTE(review): exponent is 1/normalize_power with no outer root
            # — differs from a textbook p-norm; confirm against the
            # reference implementation.
            x = x * ((x.shape[0]*x.shape[1]) ** self.config.normalize_size) * (x.shape[2] ** self.config.normalize_dim) / ((np.abs(x) ** (1. / self.config.normalize_power)).sum(axis=(0, 1, 2)))
    if self.config.square_root_normalization:
        # Signed square root compresses dynamic range while keeping sign.
        x = np.sign(x) * np.sqrt(np.abs(x))
    return x.astype(np.float32)
def shift_sample(self, xf, shift, kx, ky):
    """Apply a spatial shift to Fourier-domain samples via phase rotation.

    xf: iterable of 2-D Fourier feature maps; shift: (dy, dx) phase shift;
    kx, ky: frequency index vectors. Returns the list of shifted maps.
    """
    phase_y = [np.exp(1j * shift[0] * k).astype(np.complex64) for k in ky]
    phase_x = [np.exp(1j * shift[1] * k).astype(np.complex64) for k in kx]
    shifted = []
    for sample, py, px in zip(xf, phase_y, phase_x):
        shifted.append(sample * py.reshape(1, -1) * px.reshape((-1, 1)))
    return shifted
def updateRefmu(self, response_dif, init_mu, p, frame):
    """Derive the reference mu from the response variation; flag occlusion.

    A small variation keeps tracking with a dampened mu; a large one
    signals occlusion and returns a fixed large mu (50).
    Returns (mu as float16, occlusion flag).
    """
    threshold = 0.3
    variation = np.linalg.norm(response_dif, 2) / 10000
    if variation < threshold:
        ref_mu = init_mu / (1 + np.log(p * variation + 1))
        occluded = False
    else:
        ref_mu = 50
        occluded = True
    return np.float16(ref_mu), occluded
def extrac_feature_test(self, patch, sz):
    """Debug feature extractor: replicate a downscaled intensity map 42 times."""
    total_dim = 42
    intensity = np.sum(patch, axis=2) / 300
    resized = cv2.resize(intensity, sz, interpolation=cv2.INTER_AREA)
    rows, cols = resized.shape
    stacked = np.zeros([rows, cols, total_dim])
    for channel in range(total_dim):
        stacked[:, :, channel] = resized
    return stacked
# def init_regwindow(self,sz,target_sz):
# reg_scale=target_sz
# use_sz=sz
# reg_window=np.ones(use_sz)*self.reg_window_max
# ran=np.zeros(reg_scale.size,2)
#
# for j in range(reg_scale.size):
# ran[j,:]=[0,reg_scale[j]-1]-np.floor(reg_scale[j]/2)
# center=np.floor((use_sz)/2)+np.mod(use_sz+1,2)
# range_h=range((center[0]+ran[0,0]),(center[0]+ran[0,1]))
# range_w=range((center[1]+ran[1,0]),(center[1]+ran[1,1]))
# reg_window[range_h,range_w]=self.reg_window_min
#     return range_h,range_w,reg_window
import matplotlib.pyplot as plt
class DSSTScaleEstimator:
def __init__(self,target_sz,config):
init_target_sz = np.array([target_sz[0],target_sz[1]])
self.config=config
num_scales = self.config.number_of_scales_filter
scale_step = self.config.scale_step_filter
scale_sigma = np.sqrt(self.config.number_of_scales_filter) * self.config.scale_sigma_factor
scale_exp = np.arange(-np.floor(num_scales - 1)/2,
np.ceil(num_scales-1)/2+1,
dtype=np.float32)
# interp_scale_exp = np.arange(-np.floor((self.config.number_of_interp_scales - 1) / 2),
# np.ceil((self.config.number_of_interp_scales - 1) / 2) + 1,
# dtype=np.float32)
self.scale_size_factors = scale_step ** (-scale_exp)
# self.interp_scale_factors = scale_step ** interp_scale_exp_shift
ys = np.exp(-0.5 * (scale_exp ** 2) / (scale_sigma ** 2))
self.yf = fft(ys)
self.window = np.hanning(ys.shape[0]).T.astype(np.float32)
# make sure the scale model is not to large, to save computation time
self.num_scales = num_scales
self.scale_step = scale_step
if self.config.scale_model_factor ** 2 * np.prod(init_target_sz) > self.config.scale_model_max_area:
scale_model_factor = np.sqrt(self.config.scale_model_max_area / np.prod(init_target_sz))
else:
scale_model_factor = self.config.scale_model_factor
# set the scale model size
self.scale_model_sz = np.floor(init_target_sz * scale_model_factor)
# self.max_scale_dim = self.config.s_num_compressed_dim == 'MAX'
# if self.max_scale_dim:
# self.s_num_compressed_dim = len(self.scale_size_factors)
# else:
# self.s_num_compressed_dim = self.config.s_num_compressed_dim
def init(self,im,pos,base_target_sz,current_scale_factor):
    """Train the initial scale filter from the first frame.

    im: first frame; pos: target center; base_target_sz: unscaled target
    size; current_scale_factor: current target scale.
    Stores the filter numerator/denominator in ``sf_num`` / ``sf_den``.
    """
    scales = current_scale_factor * self.scale_size_factors
    xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz,self.window)
    # Correlation-filter training along the scale axis (per feature row).
    xsf = np.fft.fft(xs, axis=1)
    self.sf_num = self.yf * np.conj(xsf)
    # denominator: per-scale spectral energy summed over features
    new_sf_den = np.sum(xsf*np.conj(xsf), 0)
    self.sf_den = new_sf_den
def update(self, im, pos, base_target_sz, current_scale_factor):
    """Estimate the new scale factor and update the scale filter.

    Returns the refined scale factor for the current frame.
    """
    scales = current_scale_factor * self.scale_size_factors
    xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz,self.window)
    # get scores and pick the best-responding scale
    xsf = np.fft.fft(xs, axis=1)
    scale_responsef = np.sum(self.sf_num * xsf, 0) / (self.sf_den + self.config.scale_lambda)
    recovered_scale_index = np.argmax(np.real(ifft(scale_responsef)))
    current_scale_factor=current_scale_factor*self.scale_size_factors[recovered_scale_index]
    # Re-sample at the refined scale before updating the filter.
    scales = current_scale_factor * self.scale_size_factors
    xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz,self.window)
    xsf = np.fft.fft(xs, axis=1)
    new_sf_num = self.yf * np.conj(xsf)
    new_sf_den = np.sum(xsf*np.conj(xsf), 0)
    # Exponential moving average of the filter numerator/denominator.
    self.sf_num = (1 - self.config.scale_learning_rate) * self.sf_num + self.config.scale_learning_rate * new_sf_num
    self.sf_den = (1 - self.config.scale_learning_rate) * self.sf_den + self.config.scale_learning_rate * new_sf_den
    return current_scale_factor
def _extract_scale_sample(self, im, pos, base_target_sz, scale_factors, scale_model_sz, window):
    """Build the DSST scale sample matrix.

    For each scale, crops a ``base_target_sz * scale`` patch around ``pos``,
    resizes it to ``scale_model_sz``, extracts HOG features and flattens
    them into one window-weighted column.
    Returns an array of shape (feature_len, n_scales).
    """
    scale_sample = []
    base_target_sz=np.array([base_target_sz[0],base_target_sz[1]])
    for idx, scale in enumerate(scale_factors):
        patch_sz = np.floor(base_target_sz * scale)
        im_patch=cv2.getRectSubPix(im,(int(patch_sz[0]),int(patch_sz[1])),pos)
        # Choose interpolation for up- vs. down-scaling.
        # NOTE(review): this compares the model *width* against the patch
        # *height* (patch_sz[1]) — possibly a w/h mix-up carried over from
        # the reference code; confirm the intended comparison.
        if scale_model_sz[0] > patch_sz[1]:
            interpolation = cv2.INTER_LINEAR
        else:
            interpolation = cv2.INTER_AREA
        im_patch_resized = cv2.resize(im_patch, (int(scale_model_sz[0]),int(scale_model_sz[1])), interpolation=interpolation).astype(np.uint8)
        im_patch_resized=im_patch_resized[:,:,[2,1,0]]   # BGR -> RGB
        temp=extract_hog_feature(im_patch_resized,cell_size=4)
        scale_sample.append(temp.reshape((-1,1),order="F")*window[idx])
    scale_sample = np.concatenate(scale_sample, axis=1)
    return scale_sample
def _shift_scale_sample(self,im, pos, base_target_sz, xs, recovered_scale, scaleFactors,scale_window, scale_model_sz):
    """Shift the cached scale samples by the recovered scale offset.

    Columns of ``xs`` that remain valid after the shift are reused (their
    window weight is swapped); only scales that fell outside the previous
    range are freshly sampled from the image.
    NOTE(review): the only call site in this file is commented out and
    tagged "(error)" — treat this helper as unverified.
    """
    nScales=len(scaleFactors)
    base_target_sz=np.array([base_target_sz[0],base_target_sz[1]])
    out=[]
    shift_pos=int(recovered_scale-np.ceil(nScales/2))
    if shift_pos==0:
        # no shift: reuse the cached samples unchanged
        out=xs
    elif shift_pos>0:
        # shift toward larger scales: keep the tail of xs, sample the rest
        for j in range(nScales-shift_pos):
            xin=np.expand_dims(xs[:,j+shift_pos],axis=1)
            # undo the old window weight (epsilon avoids division by zero)
            # and apply the new one
            out.append(xin/(scale_window[j+shift_pos]+0.00001)*scale_window[j])
        for i in range(shift_pos):
            patch_sz = np.floor(base_target_sz * scaleFactors[nScales-shift_pos+i])
            im_patch=cv2.getRectSubPix(im,(int(patch_sz[0]),int(patch_sz[1])),pos)
            if scale_model_sz[0] > patch_sz[1]:
                interpolation = cv2.INTER_LINEAR
            else:
                interpolation = cv2.INTER_AREA
            im_patch_resized = cv2.resize(im_patch, (int(scale_model_sz[0]),int(scale_model_sz[1])), interpolation=interpolation).astype(np.uint8)
            im_patch_resized=im_patch_resized[:,:,[2,1,0]]
            temp=extract_hog_feature(im_patch_resized,cell_size=4)
            out.append(temp.reshape((-1, 1),order="F")*scale_window[nScales-shift_pos+i])
        out=np.concatenate(out, axis=1)
    else:
        # shift toward smaller scales: sample the head, keep the rest of xs
        for i in range(-shift_pos):
            patch_sz = np.floor(base_target_sz * scaleFactors[i])
            im_patch=cv2.getRectSubPix(im,(int(patch_sz[0]),int(patch_sz[1])),pos)
            if scale_model_sz[0] > patch_sz[1]:
                interpolation = cv2.INTER_LINEAR
            else:
                interpolation = cv2.INTER_AREA
            im_patch_resized = cv2.resize(im_patch, (int(scale_model_sz[0]),int(scale_model_sz[1])), interpolation=interpolation).astype(np.uint8)
            im_patch_resized=im_patch_resized[:,:,[2,1,0]]
            temp=extract_hog_feature(im_patch_resized,cell_size=4)
            out.append(temp.reshape((-1, 1),order="F")*scale_window[i])
        for j in range(nScales+shift_pos):
            xin=np.expand_dims(xs[:,j],axis=1)
            out.append(xin/(scale_window[j]+0.00001)*scale_window[j-shift_pos])
        out=np.concatenate(out, axis=1)
    return out
def extrac_feature_test(self, patch, sz, dim):
    """Debug feature extractor: replicate a downscaled intensity map ``dim`` times."""
    intensity = np.sum(patch, axis=2) / 300
    resized = cv2.resize(intensity, sz, interpolation=cv2.INTER_AREA)
    rows, cols = resized.shape
    stacked = np.zeros([rows, cols, dim])
    for channel in range(dim):
        stacked[:, :, channel] = resized
    return stacked
|
Jaraxxus-Me/UAVpytrackers_for_Odroid | lib/utils.py | import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
def APCE(response_map):
    """Average Peak-to-Correlation Energy of a response map."""
    peak = np.max(response_map)
    floor = np.min(response_map)
    energy = np.mean((response_map - floor) ** 2)
    return (peak - floor) ** 2 / energy
def PSR(response):
    """Peak-to-Sidelobe Ratio of a response map.

    An 11x11 neighborhood around the peak is zeroed out, then the peak is
    compared against the mean/std of the remaining positive values.
    """
    side = response.copy()
    row, col = np.unravel_index(np.argmax(side, axis=None), side.shape)
    peak = np.max(side)
    side[row - 5:row + 6, col - 5:col + 6] = 0.
    lobe = side[side > 0]
    return (peak - np.mean(lobe)) / np.std(lobe)
def to_color_map(score, sz):
    """Resize a score map to ``sz`` and render it as a JET heatmap image."""
    heat = cv2.resize(score, sz)
    heat -= heat.min()
    heat = heat / heat.max()          # normalize into [0, 1]
    heat = (heat * 255).astype(np.uint8)
    return cv2.applyColorMap(heat, cv2.COLORMAP_JET)
def calAUC(value_list):
    """Trapezoidal area under a curve sampled uniformly on [0, 1].

    value_list: sequence of curve values at evenly spaced points.
    Returns 0.0 for fewer than two samples (the previous version raised
    ZeroDivisionError on a single-sample or empty input).
    """
    length = len(value_list)
    if length < 2:
        return 0.
    delta = 1. / (length - 1)
    area = 0.
    for prev, cur in zip(value_list, value_list[1:]):
        area += delta * ((prev + cur) / 2)
    return area
def cos_window(sz):
    """Return a (height, width) Hann window for ``sz`` = (width, height).

    The window is built two samples larger per axis and the border is
    trimmed so no returned value is exactly zero.
    """
    col = np.hanning(int(sz[1]) + 2)[:, np.newaxis]
    row = np.hanning(int(sz[0]) + 2)[np.newaxis, :]
    return col.dot(row)[1:-1, 1:-1]
def get_img_list(img_dir):
    """Return the sorted full paths of all ``.jpg`` files in ``img_dir``."""
    return [os.path.join(img_dir, name)
            for name in sorted(os.listdir(img_dir))
            if os.path.splitext(name)[1] == '.jpg']
def get_ground_truthes(anno_path):
    """Parse a ground-truth annotation file into an (N, 4) float32 array.

    Each line holds one box, with fields separated by commas or whitespace.
    """
    gt_path = os.path.join(anno_path)
    boxes = []
    with open(gt_path, 'r') as f:
        for line in f:
            fields = line.split(',') if ',' in line else line.split()
            boxes.append([float(v) for v in fields])
    return np.array(boxes, dtype=np.float32)
def get_init_gt(anno_path):
    """Read the first annotation line and return it as a tuple of ints."""
    with open(os.path.join(anno_path), 'r') as f:
        first = f.readline()
    fields = first.split(',') if ',' in first else first.split()
    return tuple(int(float(v)) for v in fields)
def gaussian2d_labels(sz, sigma):
    """Gaussian label map of shape (h, w) peaking at the image center.

    sz: (width, height); sigma: Gaussian bandwidth in pixels.
    """
    width, height = sz
    xs, ys = np.meshgrid(np.arange(width), np.arange(height))
    dist = ((xs - width / 2) ** 2 + (ys - height / 2) ** 2) / (sigma ** 2)
    return np.exp(-0.5 * dist)
"""
max val at the top left loc
"""
def gaussian2d_rolled_labels(sz, sigma):
    """Gaussian label map rolled so the peak sits at index (0, 0).

    sz: (width, height); sigma: Gaussian bandwidth in pixels.
    """
    width, height = sz
    xs, ys = np.meshgrid(np.arange(width) - width // 2, np.arange(height) - height // 2)
    labels = np.exp(-0.5 * (xs ** 2 + ys ** 2) / (sigma ** 2))
    labels = np.roll(labels, -int(np.floor(width / 2)), axis=1)
    return np.roll(labels, -int(np.floor(height / 2)), axis=0)
def plot_precision(gts, preds, save_path):
    """Draw a precision plot (center-location-error curve) and save it.

    gts, preds: (N, 4) arrays of [x, y, w, h] boxes; the legend shows the
    precision at the 20-pixel threshold.
    """
    thresholds, precisions = get_thresh_precision_pair(gts, preds)
    idx20 = [i for i, t in enumerate(thresholds) if t == 20][0]
    legend_label = str(precisions[idx20])[:5]
    plt.plot(thresholds, precisions, label=legend_label)
    plt.title('Precision Plots')
    plt.legend()
    plt.savefig(save_path)
    plt.show()
def get_thresh_precision_pair(gts, preds):
    """Compute (thresholds, precisions) for the center-location-error curve.

    gts, preds: (N, 4) arrays of [x, y, w, h] boxes.
    Thresholds run from 0 to 50 pixels in 101 steps; precision is the
    fraction of frames whose center distance is strictly below the threshold.
    """
    n = min(len(gts), len(preds))
    gts = gts[:n, :]
    preds = preds[:n, :]
    gt_cx = gts[:, 0] + gts[:, 2] / 2
    gt_cy = gts[:, 1] + gts[:, 3] / 2
    pr_cx = preds[:, 0] + preds[:, 2] / 2
    pr_cy = preds[:, 1] + preds[:, 3] / 2
    dists = np.sqrt((gt_cx - pr_cx) ** 2 + (gt_cy - pr_cy) ** 2)
    threshes = []
    precisions = []
    for thresh in np.linspace(0, 50, 101):
        hits = len(np.where(dists < thresh)[0])
        threshes.append(thresh)
        precisions.append(hits / len(dists))
    return threshes, precisions
def plot_success(gts, preds, save_path):
    """Draw a success plot (IoU curve) and save it; legend shows the AUC."""
    thresholds, success_rates = get_thresh_success_pair(gts, preds)
    auc_label = str(calAUC(success_rates))[:5]
    plt.plot(thresholds, success_rates, label=auc_label)
    plt.title('Success Plot')
    plt.legend()
    plt.savefig(save_path)
    plt.show()
def get_thresh_success_pair(gts, preds):
    """Compute (thresholds, success rates) for the IoU overlap curve.

    gts, preds: (N, 4) arrays of [x, y, w, h] boxes.
    Thresholds run from 0 to 1 in 101 steps; success is the fraction of
    frames whose IoU is strictly above the threshold.
    """
    n = min(len(gts), len(preds))
    gts = gts[:n, :]
    preds = preds[:n, :]
    tl_x = np.max((gts[:, 0], preds[:, 0]), axis=0)
    tl_y = np.max((gts[:, 1], preds[:, 1]), axis=0)
    br_x = np.min((gts[:, 0] + gts[:, 2], preds[:, 0] + preds[:, 2]), axis=0)
    br_y = np.min((gts[:, 1] + gts[:, 3], preds[:, 1] + preds[:, 3]), axis=0)
    inter_w = br_x - tl_x
    inter_w[inter_w < 0] = 0
    inter_h = br_y - tl_y
    inter_h[inter_h < 0] = 0
    inter_area = inter_h * inter_w
    union = gts[:, 2] * gts[:, 3] + preds[:, 2] * preds[:, 3] - inter_area
    ious = inter_area / union
    threshes = []
    successes = []
    for thresh in np.linspace(0, 1, 101):
        wins = len(np.where(ious > thresh)[0])
        threshes.append(thresh)
        successes.append(wins / len(ious))
    return threshes, successes
|
Jaraxxus-Me/UAVpytrackers_for_Odroid | examples/uavdataset_config.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 12 09:18:12 2020
@author: Jaraxxus
"""
import os
class UAVDatasetConfig:
    """Indexes a UAV dataset directory: one sub-directory per video sequence.

    Attributes:
        path: the dataset root directory.
        frames: dict mapping sequence name -> [first_frame, last_frame],
            where last_frame is the number of files in the sequence folder.
    """
    def __init__(self, vediopath):
        self.path = vediopath
        frames = {}
        for name in os.listdir(vediopath):
            # Fixed: ``os.join`` does not exist — the correct call is
            # ``os.path.join`` (the original raised AttributeError here).
            frames[name] = [1, len(os.listdir(os.path.join(self.path, name)))]
        # Fixed: the mapping was previously built and then discarded;
        # expose it so callers can actually use the frame index.
        self.frames = frames
Jaraxxus-Me/UAVpytrackers_for_Odroid | cf_demo.py | <filename>cf_demo.py
import os
import time
from autotrack import AutoTrack
from arcf import ARCF
import glob
import cv2
import numpy as np
def get_init_gt(anno_path):
    """Return the first ground-truth box of the annotation file as an int tuple."""
    with open(os.path.join(anno_path), 'r') as f:
        line = f.readline()
    fields = line.split(',') if ',' in line else line.split()
    values = []
    for field in fields:
        values.append(int(float(field)))
    return tuple(values)
def get_img_list(img_dir):
    """Collect sorted full paths of the ``.jpg`` frames in ``img_dir``."""
    frames = []
    for entry in sorted(os.listdir(img_dir)):
        _, ext = os.path.splitext(entry)
        if ext == '.jpg':
            frames.append(os.path.join(img_dir, entry))
    return frames
def get_ground_truthes(anno_path):
    """Load every ground-truth box from ``anno_path`` as an (N, 4) float32 array."""
    rows = []
    with open(os.path.join(anno_path), 'r') as f:
        while True:
            line = f.readline()
            if not line:           # EOF: readline returns '' only at end of file
                return np.array(rows, dtype=np.float32)
            fields = line.split(',') if ',' in line else line.split()
            rows.append([float(v) for v in fields])
def main(visulization=False,track=AutoTrack):
    """Benchmark the given tracker over every UAVDT sequence and report FPS.

    visulization: if True, show per-frame results in an OpenCV window
                  (ground truth in green, tracker output in yellow).
    track:        tracker class to benchmark; must expose .name, .init(frame, box)
                  and .update(frame, frame_id).
    Results are written one text file per sequence under results/<tracker.name>/.
    NOTE(review): dataset paths below are hard-coded to a specific machine.
    """
    data_dir='/home/v4r/Dataset/UAVDT/data_seq'
    anno_dir='/home/v4r/Dataset/UAVDT/anno'
    data_names=sorted(os.listdir(data_dir))
    print(data_names)
    tracker = track()
    # setup experiments
    video_paths = sorted(glob.glob(os.path.join(data_dir, '*')))
    video_num = len(video_paths)
    output_dir = os.path.join('results', tracker.name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # overall_performance = []
    overall_FPS=[]
    # run tracking experiments and report performance
    for video_id, video_path in enumerate(video_paths, start=1):
        data_name=data_names[video_id-1]
        gt_path=os.path.join(anno_dir,data_name+'_gt.txt')
        frame_list = get_img_list(video_path)
        frame_list.sort()
        init_rect=get_init_gt(gt_path)
        gts=get_ground_truthes(gt_path)
        out_res = []
        ut=0  # accumulated tracker compute time (seconds) for this sequence
        for frame_id in range(len(frame_list)):
            frame=cv2.imread(frame_list[frame_id])
            if frame_id == 0:
                # first frame: time tracker initialisation
                s=time.time()
                tracker.init(frame, init_rect) # initialization
                delta=time.time()-s
                ut+=delta
                # tracker.initialize(frame, init_rect)
                out = init_rect
                out_res.append(init_rect)
            else:
                # subsequent frames: time the per-frame update
                s=time.time()
                out = tracker.update(frame,frame_id) # tracking
                delta=time.time()-s
                ut+=delta
                # out = tracker.track(frame)
                out_res.append(out)
            if visulization:
                _gt = gts[frame_id]
                # _exist = label_res['exist'][frame_id]
                # if _exist:
                # ground truth drawn in green
                cv2.rectangle(frame, (int(_gt[0]), int(_gt[1])), (int(_gt[0] + _gt[2]), int(_gt[1] + _gt[3])),
                              (0, 255, 0))
                FPS=str(1/delta)
                cv2.putText(frame, FPS,
                            (frame.shape[1] // 2 - 20, 30), 1, 2, (0, 255, 0), 2)
                # tracker output drawn in yellow
                cv2.rectangle(frame, (int(out[0]), int(out[1])), (int(out[0] + out[2]), int(out[1] + out[3])),
                              (0, 255, 255))
                cv2.imshow(data_name,frame)
                cv2.waitKey(1)
            frame_id += 1  # no effect: range() controls the loop variable
        if visulization:
            cv2.destroyAllWindows()
        overall_FPS.append(len(frame_list)/ut)
        # save result
        output_file = os.path.join(output_dir, '%s.txt' % (data_name))
        with open(output_file, 'w') as f:
            np.savetxt(f,np.array(out_res).astype(np.int16))
        # mixed_measure = eval(out_res, gts)
        # overall_performance.append(mixed_measure)
        print('[%03d/%03d] %20s FPS: %.03f' % (video_id, video_num, data_name, len(frame_list)/ut))
    print('[Overall] FPS: %.03f\n' % (np.mean(overall_FPS)))
if __name__ == '__main__':
    # Run the benchmark with visualization enabled, tracking with AutoTrack.
    main(True,AutoTrack)
Jaraxxus-Me/UAVpytrackers_for_Odroid | arcf.py | """
Python re-implementation of "Learning Background-Aware Correlation Filters for Visual Tracking"
@article{Galoogahi2017Learning,
title={Learning Background-Aware Correlation Filters for Visual Tracking},
author={Galoogahi, <NAME> and <NAME> and <NAME>},
year={2017},
}
"""
import numpy as np
import cv2
#import torch
#import torch.nn.functional as F
from bacflibs.utils import cos_window,gaussian2d_rolled_labels
#libs.dcf hann2d & label
from bacflibs.fft_tools import fft2,ifft2,ifft2_sym,cifft2
#libs.fourier helpers (NOTE: the original trailing comment here was mojibake —
#it appears to concern FFT sign conventions / limits; verify against upstream)
from bacflibs.feature import extract_hog_feature,extract_cn_feature
#maybe in 'features'
# from bacflibs.bacf_config import BACFConfig
#BACF configuration-parameters
from bacflibs.cf_utils import mex_resize,resp_newton,resize_dft2
#libs.optimization need to figure out what 'resize' means
from bacflibs.scale_estimator import LPScaleEstimator
#No dependency here
from collections import namedtuple
class ARCF(object):
    """Aberrance Repressed Correlation Filter tracker.

    Hand-crafted (HOG) features, ADMM-optimised filter, pure NumPy on CPU.
    Usage: tracker.init(first_frame, (x, y, w, h)); then
    tracker.update(frame, frame_id) per frame, which returns the new box.
    """

    def __init__(self, net_path=None, **kargs):
        # `net_path` is unused; kept for API parity with learned trackers.
        self.name = 'ARCF'
        self.params = self.parse_args(**kargs)
        # setup GPU device if available
        # self.cuda = torch.cuda.is_available()
        self.device = 'cpu'

    def parse_args(self, **kargs):
        """Merge keyword overrides into the default parameter set.

        Returns an immutable namedtuple so parameters cannot be mutated
        accidentally after construction.  Unknown keys are ignored.
        """
        # default parameters
        params = {
            # inference parameters
            'cell_size': 4,
            'cell_selection_thresh': 0.75**2,
            'search_area_shape': 'square',
            'search_area_scale': 5,
            'filter_max_area': 50**2,
            'interp_factor': 0.0190,
            'output_sigma_factor': 1./16,
            'interpolate_response': 4,
            'newton_iterations': 50,
            'number_of_scales': 5,
            'scale_step': 1.01,
            'admm_iterations': 2,
            'admm_lambda': 0.01,
            'admm_gamma': 0.71}
        for key, val in kargs.items():
            if key in params:
                params.update({key: val})
        return namedtuple('GenericDict', params.keys())(**params)

    def init(self, image, box):
        """Initialise all tracker state on the first frame.

        image: first video frame (assumed H x W x 3 uint8 — TODO confirm).
        box:   initial target bounding box (x, y, w, h) in pixels.
        """
        self.cell_size=self.params.cell_size
        self.cell_selection_thresh=self.params.cell_selection_thresh
        self.search_area_shape = self.params.search_area_shape
        self.search_area_scale=self.params.search_area_scale
        self.filter_max_area = self.params.filter_max_area
        self.interp_factor=self.params.interp_factor
        self.output_sigma_factor = self.params.output_sigma_factor
        self.interpolate_response =self.params.interpolate_response
        self.newton_iterations =self.params.newton_iterations
        self.number_of_scales =self.params.number_of_scales
        self.scale_step = self.params.scale_step
        self.admm_iterations = self.params.admm_iterations
        self.admm_lambda = self.params.admm_lambda
        self.admm_gamma = self.params.admm_gamma
        self.frame = 1
        # self.learning_rate_scale = self.params.learning_rate_scale
        # self.scale_sz_window = self.params.scale_sz_window
        # class ScaleConfig:
        #     learning_rate_scale = self.learning_rate_scale
        #     scale_sz_window = self.scale_sz_window
        # self.scale_config = ScaleConfig()
        # Get target position and size
        # state = info['init_bbox']
        state = box
        bbox = np.array(state).astype(np.int64)
        x, y, w, h = tuple(bbox)
        self._center = (x + w / 2, y + h / 2)
        # self._center = tuple(np.floor(self._center))
        self.w, self.h = w, h
        self.feature_ratio=self.cell_size
        # search area measured in feature-map cells
        self.search_area=(self.w/self.feature_ratio*self.search_area_scale)*\
                         (self.h/self.feature_ratio*self.search_area_scale)
        # shrink the cell size for small targets so the filter keeps enough cells
        if self.search_area<self.cell_selection_thresh*self.filter_max_area:
            self.cell_size=int(min(self.feature_ratio,max(1,int(np.ceil(np.sqrt(
                self.w*self.search_area_scale/(self.cell_selection_thresh*self.filter_max_area)*\
                self.h*self.search_area_scale))))))
            self.feature_ratio=self.cell_size
            self.search_area = (self.w / self.feature_ratio * self.search_area_scale) * \
                               (self.h / self.feature_ratio * self.search_area_scale)
        # cap the filter area by globally rescaling the problem
        if self.search_area>self.filter_max_area:
            self.current_scale_factor=np.sqrt(self.search_area/self.filter_max_area)
        else:
            self.current_scale_factor=1.
        self.base_target_sz=(self.w/self.current_scale_factor,self.h/self.current_scale_factor)
        # self.target_sz=self.base_target_sz
        if self.search_area_shape=='proportional':
            self.crop_size=(int(self.base_target_sz[0]*self.search_area_scale),int(self.base_target_sz[1]*self.search_area_scale))
        elif self.search_area_shape=='square':
            w= np.sqrt(self.base_target_sz[0]*self.base_target_sz[1])*self.search_area_scale
            self.crop_size=(w,w)
        elif self.search_area_shape=='fix_padding':
            tmp=int(np.sqrt(self.base_target_sz[0]*self.search_area_scale+(self.base_target_sz[1]-self.base_target_sz[0])/4))+\
                (self.base_target_sz[0]+self.base_target_sz[1])/2
            self.crop_size=(self.base_target_sz[0]+tmp,self.base_target_sz[1]+tmp)
        else:
            raise ValueError
        # round crop size to a whole number of feature cells
        self.crop_size=(int(round(self.crop_size[0]/self.feature_ratio)*self.feature_ratio),int(round(self.crop_size[1]/self.feature_ratio)*self.feature_ratio))
        self.feature_map_sz=(self.crop_size[0]//self.feature_ratio,self.crop_size[1]//self.feature_ratio)
        # desired Gaussian regression target, stored in the Fourier domain
        output_sigma=np.sqrt(np.floor(self.base_target_sz[0]/self.feature_ratio)*np.floor(self.base_target_sz[1]/self.feature_ratio))*self.output_sigma_factor
        y=gaussian2d_rolled_labels(self.feature_map_sz, output_sigma)
        self.yf=fft2(y)
        if self.interpolate_response==1:
            self.interp_sz=(self.feature_map_sz[0]*self.feature_ratio,self.feature_map_sz[1]*self.feature_ratio)
        else:
            self.interp_sz=(self.feature_map_sz[0],self.feature_map_sz[1])
        self._window=cos_window(self.feature_map_sz)
        if self.number_of_scales>0:
            scale_exp=np.arange(-int(np.floor((self.number_of_scales-1)/2)),int(np.ceil((self.number_of_scales-1)/2))+1)
            self.scale_factors=self.scale_step**scale_exp
            self.min_scale_factor=self.scale_step**(np.ceil(np.log(max(5/self.crop_size[0],5/self.crop_size[1]))/np.log(self.scale_step)))
            self.max_scale_factor=self.scale_step**(np.floor(np.log(min(image.shape[0]/self.base_target_sz[1],
                                                                        image.shape[1]/self.base_target_sz[0]))/np.log(self.scale_step)))
        if self.interpolate_response>=3:
            # frequency index vectors used by the Newton sub-pixel refinement
            self.ky=np.roll(np.arange(-int(np.floor((self.feature_map_sz[1]-1)/2)),int(np.ceil((self.feature_map_sz[1]-1)/2+1))),
                            -int(np.floor((self.feature_map_sz[1]-1)/2)))
            self.kx=np.roll(np.arange(-int(np.floor((self.feature_map_sz[0]-1)/2)),int(np.ceil((self.feature_map_sz[0]-1)/2+1))),
                            -int(np.floor((self.feature_map_sz[0]-1)/2))).T
            # self.kx = self.kx.reshape(self.kx.shape[0],1)
            # self.kx=np.roll(np.arange(-int(np.floor((self.feature_map_sz[1]-1)/2)),int(np.ceil((self.feature_map_sz[1]-1)/2+1))),
            #                 -int(np.floor((self.feature_map_sz[1]-1)/2)),axis=0)
            # self.ky=np.roll(np.arange(-int(np.floor((self.feature_map_sz[0]-1)/2)),int(np.ceil((self.feature_map_sz[0]-1)/2+1))),
            #                 -int(np.floor((self.feature_map_sz[0]-1)/2)),axis=0)
        self.M_prev = np.zeros(self.feature_map_sz)
        self.small_filter_sz=(int(np.floor(self.base_target_sz[0]/self.feature_ratio)),int(np.floor(self.base_target_sz[1]/self.feature_ratio)))
        # self.scale_estimator = LPScaleEstimator(self.target_sz, config=self.scale_config)
        # self.scale_estimator.init(image, self._center, self.base_target_sz, self.current_scale_factor)
        pixels=self.get_sub_window(image,self._center,model_sz=self.crop_size,
                                   scaled_sz=(int(np.round(self.crop_size[0]*self.current_scale_factor)),
                                              int(np.round(self.crop_size[1]*self.current_scale_factor))))
        feature=self.extract_hc_feture(pixels, cell_size=self.feature_ratio)
        self.model_xf=fft2(self._window[:,:,None]*feature)
        self.g_f=self.ADMM(self.model_xf)
        #self.model_xf = self.model_xf[:,:,:,np.newaxis]
        responsef = np.sum(np.conj(self.g_f)*self.model_xf,axis=2)
        if len(np.shape(responsef)) == 4:
            responsef = np.transpose(responsef,(0,1,3,2))
        # responsef = responsef[:,:,np.newaxis]  (the extra dim is added inside the helper instead)
        responsef_padded = resize_dft2(responsef,self.interp_sz)
        # response in the spatial domain
        # MATLAB: response = ifft2(responsef_padded, 'symmetric')
        response = np.real(ifft2(responsef_padded))
        # response = np.real(cifft2(responsef_padded))
        # response = ifft2_sym(responsef_padded)
        # previous response map, consumed by the aberrance-repression term in ADMM
        self.M_prev = np.squeeze(np.fft.fftshift(response))
        # self.M_prev = np.fft.fftshift(response)
        # self.M_prev = np.sum(self.M_prev,axis=2)
        self.max_M_prev = self.M_prev.max()
        id_max_prev = np.argwhere(self.M_prev == self.max_M_prev)
        self.id_ymax_prev = id_max_prev[:,0]
        self.id_xmax_prev = id_max_prev[:,1]
        # target_sz= tuple(np.floor((self.target_sz[0]*self.current_scale_factor,self.target_sz[1]*self.current_scale_factor)))
        # new_state = [self._center[0]-np.floor(target_sz[0]/2),self._center[1]-np.floor(target_sz[1]/2),target_sz[0],target_sz[1]]
        # out = {'target_bbox': new_state}
        # return out

    def update(self, image, frame_id):
        """Track the target into a new frame.

        Returns the new bounding box as a numpy array [x, y, w, h].
        """
        self.frame = frame_id
        x=None
        # gather features over all candidate scales, stacked along axis 3
        for scale_ind in range(self.number_of_scales):
            current_scale=self.current_scale_factor*self.scale_factors[scale_ind]
            sub_window=self.get_sub_window(image,self._center,model_sz=self.crop_size,
                                           scaled_sz=(int(round(self.crop_size[0]*current_scale)),
                                                      int(round(self.crop_size[1]*current_scale))))
            feature= self.extract_hc_feture(sub_window, self.cell_size)[:, :, :, np.newaxis]
            if x is None:
                x=feature
            else:
                x=np.concatenate((x,feature),axis=3)
        xtf=fft2(x*self._window[:,:,None,None])
        # correlate the learned filter with every scale's features
        responsef=np.sum(np.conj(self.g_f)[:,:,:,None]*xtf,axis=2)
        if len(np.shape(responsef)) == 4:
            responsef = np.transpose(responsef,(0,1,3,2))
        if self.interpolate_response==2:
            self.interp_sz=(int(np.floor(self.yf.shape[1]*self.feature_ratio*self.current_scale_factor)),
                            int(np.floor(self.yf.shape[0]*self.feature_ratio*self.current_scale_factor)))
        responsef_padded=resize_dft2(responsef,self.interp_sz)
        response=np.real(ifft2(responsef_padded))
        # response = np.real(cifft2(responsef_padded))
        # response = ifft2_sym(responsef_padded)
        if self.interpolate_response==3:
            raise ValueError
        elif self.interpolate_response==4:
            # sub-pixel / sub-scale peak localisation via Newton iterations
            disp_row,disp_col,sind=resp_newton(response,responsef_padded,self.newton_iterations, self.ky,self.kx,self.feature_map_sz)
        else:
            row,col,sind=np.unravel_index(np.argmax(response,axis=None),response.shape)
            disp_row = (row - 1 + int(np.floor(self.interp_sz[1] - 1) / 2)) % self.interp_sz[1] - int(np.floor((self.interp_sz[1] - 1) / 2))
            disp_col = (col - 1 + int(np.floor(self.interp_sz[0] - 1) / 2)) % self.interp_sz[0] - int(np.floor((self.interp_sz[0] - 1) / 2))
        # convert the feature-map displacement into image pixels
        if self.interpolate_response==0 or self.interpolate_response==3 or self.interpolate_response==4:
            factor=self.feature_ratio*self.current_scale_factor*self.scale_factors[sind]
        elif self.interpolate_response==1:
            factor=self.current_scale_factor*self.scale_factors[sind]
        elif self.interpolate_response==2:
            factor=self.scale_factors[sind]
        else:
            raise ValueError
        dx,dy=int(np.round(disp_col*factor)),int(np.round(disp_row*factor))
        # update and clamp the scale estimate
        self.current_scale_factor=self.current_scale_factor*self.scale_factors[sind]
        self.current_scale_factor=max(self.current_scale_factor,self.min_scale_factor)
        self.current_scale_factor=min(self.current_scale_factor,self.max_scale_factor)
        # self.current_scale_factor = self.scale_estimator.update(image, self._center, self.base_target_sz,
        #                                                         self.current_scale_factor)
        self._center=(self._center[0]+dx,self._center[1]+dy)
        # find peak in the map
        self.M_curr = np.fft.fftshift(response[:,:,sind])
        self.max_M_curr = self.M_curr.max()
        id_max_curr = np.argwhere(self.M_curr == self.max_M_curr)
        # self.id_xmax_curr = id_max_curr[0][0]
        # self.id_ymax_curr = id_max_curr[0][1]
        self.id_ymax_curr = id_max_curr[:,0]
        self.id_xmax_curr = id_max_curr[:,1]
        # do shifting of previous response map
        shift_x = self.id_xmax_curr - self.id_xmax_prev
        shift_y = self.id_ymax_curr - self.id_ymax_prev
        sz_shift_y = len(shift_y)
        sz_shift_x = len(shift_x)
        # if the max is attained at several positions, keep only the first
        if sz_shift_y > 1:
            shift_y = shift_y[0]
        if sz_shift_x > 1:
            shift_x = shift_x[0]
        self.M_prev = np.roll(self.M_prev,shift_y,0)
        self.M_prev = np.roll(self.M_prev,shift_x,1)
        # self.M_prev = np.roll(self.M_prev,shift_x,0)
        # self.M_prev = np.roll(self.M_prev,shift_y,1)
        # map difference
        # map_diff(frame) = norm(abs(M_prev - M_curr))
        pixels=self.get_sub_window(image,self._center,model_sz=self.crop_size,
                                   scaled_sz=(int(round(self.crop_size[0]*self.current_scale_factor)),
                                              int(round(self.crop_size[1]*self.current_scale_factor))))
        feature=self.extract_hc_feture(pixels, cell_size=self.cell_size)
        #feature=cv2.resize(pixels,self.feature_map_sz)/255-0.5
        xf=fft2(feature*self._window[:,:,None])
        ##
        # exponential moving-average model update, then re-solve the filter
        self.model_xf=(1-self.interp_factor)*self.model_xf+self.interp_factor*xf
        self.g_f = self.ADMM(self.model_xf)
        self.M_prev = self.M_curr
        self.max_M_prev = self.max_M_curr
        self.id_xmax_prev = self.id_xmax_curr
        self.id_ymax_prev = self.id_ymax_curr
        # target_sz= tuple(np.floor((self.base_target_sz[0]*self.current_scale_factor,self.base_target_sz[1]*self.current_scale_factor)))
        target_sz=(self.base_target_sz[0]*self.current_scale_factor,self.base_target_sz[1]*self.current_scale_factor)
        # new_state = [self._center[0]-np.floor(target_sz[0]/2),self._center[1]-np.floor(target_sz[1]/2),target_sz[0],target_sz[1]]
        new_state = [self._center[0]-target_sz[0]/2,self._center[1]-target_sz[1]/2,target_sz[0],target_sz[1]]
        # out = {'target_bbox': new_state}
        box = np.array(new_state)
        return box

    def get_subwindow_no_window(self,img,pos,sz):
        """Crop a (w, h)=sz patch centred at pos with border clamping.

        Returns the clamped x/y index grids together with the patch so the
        caller can scatter values back to the same locations (used by ADMM).
        """
        h,w=sz[1],sz[0]
        xs = (np.floor(pos[0]) + np.arange(w) - np.floor(w / 2)).astype(np.int64)
        ys = (np.floor(pos[1]) + np.arange(h) - np.floor(h / 2)).astype(np.int64)
        # clamp out-of-image indices to the border
        xs[xs < 0] = 0
        ys[ys < 0] = 0
        xs[xs >= img.shape[1]] = img.shape[1] - 1
        ys[ys >= img.shape[0]] = img.shape[0] - 1
        out = img[ys, :][:, xs]
        xs,ys=np.meshgrid(xs,ys)
        return xs,ys,out

    def ADMM(self,xf):
        """Solve for the correlation filter g_f via ADMM.

        xf: FFT of the windowed feature stack.  Alternates the closed-form
        g update, the spatially-cropped h update, and the Lagrange
        multiplier update; the penalty mu grows geometrically (capped).
        The self.M_prev term implements the aberrance-repression penalty.
        """
        g_f = np.zeros_like(xf)
        h_f = np.zeros_like(g_f)
        l_f = np.zeros_like(g_f)
        mu = 1
        beta = 10
        mumax = 10000
        i = 1
        T = self.feature_map_sz[0] * self.feature_map_sz[1]
        S_xx = np.sum(np.conj(xf) * xf, 2)
        while i <= self.admm_iterations:
            A = mu / (self.admm_gamma + 1)
            B = S_xx + T * A
            S_lx = np.sum(np.conj(xf) * l_f, axis=2)
            S_hx = np.sum(np.conj(xf) * h_f, axis=2)
            # tmp0 = (1 / (T * A) * (self.yf[:, :, None] * xf)) + (self.admm_gamma / A) * (self.M_prev[:, :, None] * xf) - ((1 / A) * l_f) + (mu/A)*h_f
            # tmp1 = 1 / (T * A) * (xf * ((S_xx * self.yf)[:, :, None])) + (self.admm_gamma/A) * (xf * ((S_xx[:,:,np.newaxis] * self.M_prev[:, :, None])))
            tmp0 = ((1 / (T * A)) * (self.yf[:, :, None] * xf)) + (self.admm_gamma / A) * (self.M_prev[:,:,None] * xf) - ((1 / A) * l_f) + (mu/A)*h_f
            tmp1 = 1 / (T * A) * (xf * ((S_xx * self.yf)[:, :, None])) + (self.admm_gamma/A) * (xf * ((S_xx * self.M_prev)[:,:,None]))
            tmp2 = (1 / A) * (xf * (S_lx[:, :, None]))
            tmp3 = (mu/A) * xf * S_hx[:, :, None]
            # solve for g
            g_f = (1 / (1 + self.admm_gamma)) * (tmp0 - (tmp1 - tmp2 + tmp3) / B[:, :, None])
            # solve for h
            h = (T / ((mu * T) + self.admm_lambda)) * ifft2(mu * g_f + l_f)
            # crop h to the small (target-sized) support and zero-pad back
            xs, ys, h = self.get_subwindow_no_window(h,
                                                     (np.floor(self.feature_map_sz[0] / 2), np.floor(self.feature_map_sz[1] / 2)),
                                                     self.small_filter_sz)
            t = np.zeros((self.feature_map_sz[1], self.feature_map_sz[0], h.shape[2]),dtype=np.complex64)
            # if len(np.shape(h)) == 4:
            #     h = np.sum(h,axis=3)
            t[ys,xs,:] = h
            # t[ys,:][:,xs] = h
            h_f = fft2(t)
            # Lagrange multiplier and penalty updates
            l_f = l_f + (mu * (g_f - h_f))
            mu = min(beta * mu, mumax)
            i += 1
        return g_f

    def get_sub_window(self, img, center, model_sz, scaled_sz=None):
        """Crop a scaled_sz (or model_sz) patch around `center`, clamping
        out-of-image indices to the border, and resize it to model_sz."""
        model_sz = (int(model_sz[0]), int(model_sz[1]))
        if scaled_sz is None:
            sz = model_sz
        else:
            sz = scaled_sz
        sz = (max(int(sz[0]), 2), max(int(sz[1]), 2))
        # without padding
        xs = (np.floor(center[0]) + np.arange(sz[0])+1 - np.floor(sz[0]/2)).astype(np.int64)-1
        ys = (np.floor(center[1]) + np.arange(sz[1])+1 - np.floor(sz[1]/2)).astype(np.int64)-1
        # %check for out-of-bounds coordinates, and set them to the values at
        # %the borders
        xs[xs < 0] = 0
        ys[ys < 0] = 0
        xs[xs >= img.shape[1]] = img.shape[1] - 1
        ys[ys >= img.shape[0]] = img.shape[0] - 1
        im_patch = img[ys, :][:, xs]
        # im_patch = cv2.getRectSubPix(img, sz, center)
        if model_sz is not None:
            im_patch = mex_resize(im_patch, model_sz)
        # if (min(xs)<1 and min(ys)<1) or (max(xs)>img.shape[1] and max(ys)>img.shape[0]):
        #     cv2.imwrite('test.jpg',im_patch.astype(np.uint8))
        return im_patch.astype(np.uint8)

    def extract_hc_feture(self,patch,cell_size):
        """Extract hand-crafted features (currently HOG only) from a patch."""
        # patch = cv2.cvtColor(patch, cv2.COLOR_RGB2BGR)
        hog_feature=extract_hog_feature(patch,cell_size)
        # cn_feature=extract_cn_feature(patch,cell_size)
        # hc_feature=np.concatenate((hog_feature,cn_feature),axis=2)
        # return hc_feature
        return hog_feature
|
Jaraxxus-Me/UAVpytrackers_for_Odroid | lib/eco/features/__init__.py | <reponame>Jaraxxus-Me/UAVpytrackers_for_Odroid
from .features import GrayFeature,FHogFeature, TableFeature, fhog, mround |
Jaraxxus-Me/UAVpytrackers_for_Odroid | bacflibs/scale_estimator.py | import numpy as np
import cv2
from numpy.fft import fft, ifft
from .feature import extract_hog_feature
#from .feature import extract_pyhog_feature
from .utils import cos_window
from .fft_tools import ifft2,fft2
class DSSTScaleEstimator:
    """fDSST-style scale estimator: a 1-D correlation filter over a pyramid
    of target-sized patches at different scales."""

    def __init__(self,target_sz,config):
        """Precompute the scale factors, the Gaussian scale label (in the
        Fourier domain) and the Hann window for the scale filter."""
        init_target_sz = np.array([target_sz[0],target_sz[1]])
        self.config=config
        num_scales = self.config.number_of_scales_filter
        scale_step = self.config.scale_step_filter
        scale_sigma = np.sqrt(self.config.number_of_scales_filter) * self.config.scale_sigma_factor
        scale_exp = np.arange(-np.floor(num_scales - 1)/2,
                              np.ceil(num_scales-1)/2+1,
                              dtype=np.float32)
        # interp_scale_exp = np.arange(-np.floor((self.config.number_of_interp_scales - 1) / 2),
        #                              np.ceil((self.config.number_of_interp_scales - 1) / 2) + 1,
        #                              dtype=np.float32)
        self.scale_size_factors = scale_step ** (-scale_exp)
        # self.interp_scale_factors = scale_step ** interp_scale_exp_shift
        # Gaussian regression target over the scale dimension
        ys = np.exp(-0.5 * (scale_exp ** 2) / (scale_sigma ** 2))
        self.yf = fft(ys)
        self.window = np.hanning(ys.shape[0]).T.astype(np.float32)
        # make sure the scale model is not to large, to save computation time
        self.num_scales = num_scales
        self.scale_step = scale_step
        if self.config.scale_model_factor ** 2 * np.prod(init_target_sz) > self.config.scale_model_max_area:
            scale_model_factor = np.sqrt(self.config.scale_model_max_area / np.prod(init_target_sz))
        else:
            scale_model_factor = self.config.scale_model_factor
        # set the scale model size
        self.scale_model_sz = np.floor(init_target_sz * scale_model_factor)
        # self.max_scale_dim = self.config.s_num_compressed_dim == 'MAX'
        # if self.max_scale_dim:
        #     self.s_num_compressed_dim = len(self.scale_size_factors)
        # else:
        #     self.s_num_compressed_dim = self.config.s_num_compressed_dim

    def init(self,im,pos,base_target_sz,current_scale_factor):
        """Build the initial scale-filter numerator/denominator from the
        first frame."""
        # self.scale_factors = np.array([1])
        scales = current_scale_factor * self.scale_size_factors
        xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz,self.window)
        # compute projection basis
        # if self.max_scale_dim:
        #     self.basis, _ = scipy.linalg.qr(self.s_num, mode='economic')
        #     scale_basis_den, _ = scipy.linalg.qr(xs, mode='economic')
        # else:
        #     U, _, _ = np.linalg.svd(self.s_num)
        #     self.basis = U[:, :self.s_num_compressed_dim]
        #     V, _, _ = np.linalg.svd(xs)
        #     scale_basis_den = V[:, :self.s_num_compressed_dim]
        # self.basis = self.basis.T
        # compute numerator
        # feat_proj = self.basis.dot(self.s_num) * self.window
        xsf = np.fft.fft(xs, axis=1)
        self.sf_num = self.yf * np.conj(xsf)
        # update denominator
        # xs = scale_basis_den.T.dot(xs)*self.window
        new_sf_den = np.sum(xsf*np.conj(xsf), 0)
        self.sf_den = new_sf_den

    def update(self, im, pos, base_target_sz, current_scale_factor):
        """Estimate the new scale for the current frame and update the filter
        with the configured learning rate.  Returns the new scale factor."""
        # base_target_sz=np.array([base_target_sz[0],base_target_sz[1]])
        # get scale filter features
        scales = current_scale_factor * self.scale_size_factors
        xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz,self.window)
        # project
        # xs = self.basis.dot(xs) * self.window
        # get scores
        xsf = np.fft.fft(xs, axis=1)
        scale_responsef = np.sum(self.sf_num * xsf, 0) / (self.sf_den + self.config.scale_lambda)
        # interp_scale_response = np.real(ifft(resize_dft(scale_responsef, self.config.number_of_interp_scales)))
        recovered_scale_index = np.argmax(np.real(ifft(scale_responsef)))
        # if self.config.do_poly_interp:
        #     # fit a quadratic polynomial to get a refined scale estimate
        #     id1 = (recovered_scale_index - 1) % self.config.number_of_interp_scales
        #     id2 = (recovered_scale_index + 1) % self.config.number_of_interp_scales
        #     poly_x = np.array([self.interp_scale_factors[id1], self.interp_scale_factors[recovered_scale_index],
        #                        self.interp_scale_factors[id2]])
        #     poly_y = np.array(
        #         [interp_scale_response[id1], interp_scale_response[recovered_scale_index], interp_scale_response[id2]])
        #     poly_A = np.array([[poly_x[0] ** 2, poly_x[0], 1],
        #                        [poly_x[1] ** 2, poly_x[1], 1],
        #                        [poly_x[2] ** 2, poly_x[2], 1]], dtype=np.float32)
        #     poly = np.linalg.inv(poly_A).dot(poly_y.T)
        #     scale_change_factor = - poly[1] / (2 * poly[0])
        # else:
        #     scale_change_factor = self.interp_scale_factors[recovered_scale_index]
        current_scale_factor=current_scale_factor*self.scale_size_factors[recovered_scale_index]
        # re-sample at the updated scale before the model update
        scales = current_scale_factor * self.scale_size_factors
        # xs = self._shift_scale_sample(im, pos, base_target_sz, xs, recovered_scale_index, scales, self.window, self.scale_model_sz)
        xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz,self.window)
        # self.s_num = (1 - self.config.scale_learning_rate) * self.s_num + self.config.scale_learning_rate * xs
        # # compute projection basis
        # if self.max_scale_dim:
        #     self.basis, _ = scipy.linalg.qr(self.s_num, mode='economic')
        #     scale_basis_den, _ = scipy.linalg.qr(xs, mode='economic')
        # else:
        #     U, _, _ = np.linalg.svd(self.s_num)
        #     self.basis = U[:, :self.s_num_compressed_dim]
        #     V,_,_=np.linalg.svd(xs)
        #     scale_basis_den=V[:,:self.s_num_compressed_dim]
        # self.basis = self.basis.T
        #
        # # compute numerator
        # feat_proj = self.basis.dot(self.s_num) * self.window
        xsf = np.fft.fft(xs, axis=1)
        new_sf_num = self.yf * np.conj(xsf)
        new_sf_den = np.sum(xsf*np.conj(xsf), 0)
        # exponential moving-average filter update
        self.sf_num = (1 - self.config.scale_learning_rate) * self.sf_num + self.config.scale_learning_rate * new_sf_num
        self.sf_den = (1 - self.config.scale_learning_rate) * self.sf_den + self.config.scale_learning_rate * new_sf_den
        return current_scale_factor

    def _extract_scale_sample(self, im, pos, base_target_sz, scale_factors, scale_model_sz,window):
        """Extract the windowed HOG feature column for every scale; returns
        one flattened column per scale, concatenated along axis 1."""
        scale_sample = []
        base_target_sz=np.array([base_target_sz[0],base_target_sz[1]])
        for idx, scale in enumerate(scale_factors):
            patch_sz = np.floor(base_target_sz * scale)
            im_patch=cv2.getRectSubPix(im,(int(patch_sz[0]),int(patch_sz[1])),pos)
            # pick the interpolation mode based on up- vs down-sampling
            if scale_model_sz[0] > patch_sz[1]:
                interpolation = cv2.INTER_LINEAR
            else:
                interpolation = cv2.INTER_AREA
            im_patch_resized = cv2.resize(im_patch, (int(scale_model_sz[0]),int(scale_model_sz[1])), interpolation=interpolation).astype(np.uint8)
            im_patch_resized=im_patch_resized[:,:,[2,1,0]]
            temp=extract_hog_feature(im_patch_resized,cell_size=4)
            # temp=self.extrac_feature_test(im_patch, (4,8), 31)
            scale_sample.append(temp.reshape((-1,1),order="F")*window[idx])
        scale_sample = np.concatenate(scale_sample, axis=1)
        return scale_sample

    def _shift_scale_sample(self,im, pos, base_target_sz, xs, recovered_scale, scaleFactors,scale_window, scale_model_sz):
        """Reuse previously extracted scale columns by shifting them to the
        newly recovered scale, extracting fresh columns only at the ends.
        (Currently unused; update() re-extracts all columns instead.)"""
        nScales=len(scaleFactors)
        base_target_sz=np.array([base_target_sz[0],base_target_sz[1]])
        out=[]
        shift_pos=int(recovered_scale-np.ceil(nScales/2))
        if shift_pos==0:
            out=xs
        elif shift_pos>0:
            # shift left: reuse the tail, extract new columns on the right
            for j in range(nScales-shift_pos):
                xin=np.expand_dims(xs[:,j+shift_pos],axis=1)
                out.append(xin/(scale_window[j+shift_pos]+0.00001)*scale_window[j])
            for i in range(shift_pos):
                patch_sz = np.floor(base_target_sz * scaleFactors[nScales-shift_pos+i])
                im_patch=cv2.getRectSubPix(im,(int(patch_sz[0]),int(patch_sz[1])),pos)
                if scale_model_sz[0] > patch_sz[1]:
                    interpolation = cv2.INTER_LINEAR
                else:
                    interpolation = cv2.INTER_AREA
                im_patch_resized = cv2.resize(im_patch, (int(scale_model_sz[0]),int(scale_model_sz[1])), interpolation=interpolation).astype(np.uint8)
                im_patch_resized=im_patch_resized[:,:,[2,1,0]]
                temp=extract_hog_feature(im_patch_resized,cell_size=4)
                # temp=self.extrac_feature_test(im_patch, (4,8), 31)
                out.append(temp.reshape((-1, 1),order="F")*scale_window[nScales-shift_pos+i])
            out=np.concatenate(out, axis=1)
        else:
            # shift right: extract new columns on the left, reuse the head
            for i in range(-shift_pos):
                patch_sz = np.floor(base_target_sz * scaleFactors[i])
                im_patch=cv2.getRectSubPix(im,(int(patch_sz[0]),int(patch_sz[1])),pos)
                if scale_model_sz[0] > patch_sz[1]:
                    interpolation = cv2.INTER_LINEAR
                else:
                    interpolation = cv2.INTER_AREA
                im_patch_resized = cv2.resize(im_patch, (int(scale_model_sz[0]),int(scale_model_sz[1])), interpolation=interpolation).astype(np.uint8)
                im_patch_resized=im_patch_resized[:,:,[2,1,0]]
                temp=extract_hog_feature(im_patch_resized,cell_size=4)
                # temp=self.extrac_feature_test(im_patch, (8,4), 31)
                out.append(temp.reshape((-1, 1),order="F")*scale_window[i])
            for j in range(nScales+shift_pos):
                xin=np.expand_dims(xs[:,j],axis=1)
                out.append(xin/(scale_window[j]+0.00001)*scale_window[j-shift_pos])
            out=np.concatenate(out, axis=1)
        return out

    def extrac_feature_test(self,patch,sz,dim):
        """Debug feature extractor: a grayscale patch replicated across *dim*
        channels (stand-in for HOG while testing)."""
        total_dim=dim
        im=np.sum(patch,axis=2)/300
        resized_patch=cv2.resize(im,sz,interpolation = cv2.INTER_AREA)
        w,h=resized_patch.shape
        feature_pixels=np.zeros([w,h,total_dim])
        for i in range(total_dim):
            feature_pixels[:,:,i]=resized_patch
        return feature_pixels
class LPScaleEstimator:
    """Log-polar phase-correlation scale estimator: a radial shift of the
    log-polar patch corresponds to a multiplicative scale change."""

    def __init__(self,target_sz,config):
        self.learning_rate_scale=config.learning_rate_scale
        self.scale_sz_window = config.scale_sz_window
        self.target_sz=target_sz

    def init(self,im,pos,base_target_sz,current_scale_factor):
        """Build the initial log-polar HOG template from the first frame."""
        w,h=base_target_sz
        avg_dim = (w + h) / 2.5
        self.scale_sz = ((w + avg_dim) / current_scale_factor,
                         (h + avg_dim) / current_scale_factor)
        self.scale_sz0 = self.scale_sz
        self.cos_window_scale = cos_window((self.scale_sz_window[0], self.scale_sz_window[1]))
        # log-polar magnitude so the radial axis spans the patch radius
        self.mag = self.cos_window_scale.shape[0] / np.log(np.sqrt((self.cos_window_scale.shape[0] ** 2 +
                                                                    self.cos_window_scale.shape[1] ** 2) / 4))
        # scale lp
        patchL = cv2.getRectSubPix(im, (int(np.floor(current_scale_factor * self.scale_sz[0])),
                                        int(np.floor(current_scale_factor * self.scale_sz[1]))), pos)
        patchL = cv2.resize(patchL, self.scale_sz_window)
        patchLp = cv2.logPolar(patchL.astype(np.float32), ((patchL.shape[1] - 1) / 2, (patchL.shape[0] - 1) / 2),
                               self.mag, flags=cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
        self.model_patchLp = extract_hog_feature(patchLp, cell_size=4)

    def update(self,im,pos,base_target_sz,current_scale_factor):
        """Estimate the new scale factor for the current frame and blend the
        template with the configured learning rate."""
        patchL = cv2.getRectSubPix(im, (int(np.floor(current_scale_factor * self.scale_sz[0])),
                                        int(np.floor(current_scale_factor* self.scale_sz[1]))),pos)
        patchL = cv2.resize(patchL, self.scale_sz_window)
        # convert into logpolar
        patchLp = cv2.logPolar(patchL.astype(np.float32), ((patchL.shape[1] - 1) / 2, (patchL.shape[0] - 1) / 2),
                               self.mag, flags=cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS)
        patchLp = extract_hog_feature(patchLp, cell_size=4)
        tmp_sc, _, _ = self.estimate_scale(self.model_patchLp, patchLp, self.mag)
        # clamp per-frame scale change to avoid drift on bad estimates
        tmp_sc = np.clip(tmp_sc, a_min=0.6, a_max=1.4)
        scale_factor=current_scale_factor*tmp_sc
        self.model_patchLp = (1 - self.learning_rate_scale) * self.model_patchLp + self.learning_rate_scale * patchLp
        return scale_factor

    def estimate_scale(self,model,obser,mag):
        """Phase-correlate the log-polar template against the observation.

        Returns (scale, rotation, match_score): the sub-pixel x displacement
        maps to scale via exp(dx/mag), the y displacement to rotation.
        """
        def phase_correlation(src1,src2):
            # classic normalised cross-power-spectrum phase correlation,
            # summed over feature channels, with a 3x3 weighted-centroid
            # refinement around the peak
            s1f=fft2(src1)
            s2f=fft2(src2)
            num=s2f*np.conj(s1f)
            d=np.sqrt(num*np.conj(num))+2e-16
            Cf=np.sum(num/d,axis=2)
            C=np.real(ifft2(Cf))
            C=np.fft.fftshift(C,axes=(0,1))
            mscore=np.max(C)
            pty,ptx=np.unravel_index(np.argmax(C, axis=None), C.shape)
            slobe_y=slobe_x=1
            idy=np.arange(pty-slobe_y,pty+slobe_y+1).astype(np.int64)
            idx=np.arange(ptx-slobe_x,ptx+slobe_x+1).astype(np.int64)
            idy=np.clip(idy,a_min=0,a_max=C.shape[0]-1)
            idx=np.clip(idx,a_min=0,a_max=C.shape[1]-1)
            weight_patch=C[idy,:][:,idx]
            s=np.sum(weight_patch)+2e-16
            pty=np.sum(np.sum(weight_patch,axis=1)*idy)/s
            ptx=np.sum(np.sum(weight_patch,axis=0)*idx)/s
            # re-centre so zero displacement is the patch centre
            pty=pty-(src1.shape[0])//2
            ptx=ptx-(src1.shape[1])//2
            return ptx,pty,mscore
        ptx,pty,mscore=phase_correlation(model,obser)
        rotate=pty*np.pi/(np.floor(obser.shape[1]/2))
        scale = np.exp(ptx/mag)
        return scale,rotate,mscore
|
Jaraxxus-Me/UAVpytrackers_for_Odroid | autotrack_config.py |
class AutoTrackConfig:
    """Static configuration for the AutoTrack correlation-filter tracker.

    All values are class attributes read directly by the tracker; the nested
    ScaleConfig parameterises the fDSST scale filter.
    """
    # feature-extraction parameters
    hog_cell_size=4
    hog_n_dim=31
    gray_cell_size=4
    cn_use_for_gray=False
    cn_cell_size=4
    cn_n_dim=10
    search_area_shape = 'square'  # the shape of the samples
    search_area_scale = 5.0  # the scaling of the target size to get the search area
    min_image_sample_size = 150 ** 2  # minimum area of image samples
    max_image_sample_size = 200 ** 2  # maximum area of image samples
    feature_downsample_ratio=4
    reg_window_max=1e5
    reg_window_min=1e-3
    # detection parameters
    refinement_iterations = 1  # number of iterations used to refine the resulting position in a frame
    newton_iterations = 5  # the number of Netwon iterations used for optimizing the detection score
    clamp_position = False  # clamp the target position to be inside the image
    # learning parameters
    output_sigma_factor = 0.06  # label function sigma
    # ADMM params
    max_iterations=3
    init_penalty_factor=1
    max_penalty_factor=10000
    penalty_scale_step=10
    admm_lambda=1
    epsilon=1
    zeta=13
    delta=0.2
    nu=0.2
    # scale parameters
    number_of_scales = 33  # number of scales to run the detector
    scale_step = 1.03  # the scale factor
    use_scale_filter = True  # use the fDSST scale filter or not
    # scale_type='LP'
    # class ScaleConfig:
    #     learning_rate_scale = 0.015
    #     scale_sz_window = (64, 64)
    #
    # scale_config=ScaleConfig()
    scale_type = 'normal'
    class ScaleConfig:
        # parameters for the fDSST scale filter (see DSSTScaleEstimator)
        scale_sigma_factor = 0.5  # scale label function sigma
        scale_lambda=0.0001
        scale_learning_rate = 0.025  # scale filter learning rate
        number_of_scales_filter = 33  # number of scales
        number_of_interp_scales = 33  # number of interpolated scales
        scale_model_factor = 1.0  # scaling of the scale model
        scale_step_filter = 1.03  # the scale factor of the scale sample patch
        scale_model_max_area = 32 * 16  # maximume area for the scale sample patch
        scale_feature = 'HOG4'  # features for the scale filter (only HOG4 supported)
        s_num_compressed_dim = 'MAX'  # number of compressed feature dimensions in the scale filter
        lamBda = 1e-4  # scale filter regularization
        do_poly_interp = False
    scale_config = ScaleConfig()
    #
    normalize_power = 2  # Lp normalization with this p
    normalize_size = True  # also normalize with respect to the spatial size of the feature
    normalize_dim = True  # also normalize with respect to the dimensionality of the feature
    square_root_normalization = False
|
uchida-takumi/my_simple_mecab | example.py | <filename>example.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage example for the simple MeCab tokenizer wrapper.
"""
from src.my_simple_mecab import keitaiso
# NOTE(review): every Japanese string literal below is mojibake in this copy
# of the file (multi-byte UTF-8 characters were corrupted, some into embedded
# newlines).  They are preserved byte-for-byte here and must be restored from
# the original UTF-8 source before this example can run.
text = "ไปๆฅใฏไฝๆใซๅธฐใฃใฆใใพใใ๏ผใใจๅฝผๅฅณใฏๅฐใญใฆใใ"
use_PoW = ['ๅ่ฉ','ๅ่ฉ','ๅฝขๅฎน่ฉ','ๅฏ่ฉ','่จๅท',]
stop_words = ['*','ใใ','ไธ','ใฎ','ๆง','ใใกใ','้','ใจใใ','ใฏใ','\u3000',]
# build the tokenizer with a part-of-speech whitelist and a stop-word list
k = keitaiso(use_PoW=use_PoW,stop_words=stop_words)
print('ไปฅไธใmecabใไฝฟใฃใฆๅฝขๆ
็ด ่งฃๆใใพใใ')
print(text)
print('ใผใผใผใผ')
print('็ตๆ๏ผ๏ผ')
print(k.basic_tokenize(text))
print('ใผใผใผใผ')
print('็ตๆ๏ผ๏ผ')
print(k.tokenize(text))
|
uchida-takumi/my_simple_mecab | src/my_simple_mecab.py | # -*- coding: utf-8 -*-
"""
MeCabใไฝฟใฃใๅฝขๆ
็ด ่งฃๆใงใใญในใใใใฏใใซๅใใใใคใงใใ
"""
import MeCab
from collections import Counter
class keitaiso:
    """Tokenizer for Japanese text built on MeCab morphological analysis.

    Splits text into morphemes and filters the result by part of speech,
    a stop-word list, and an optional whitelist of words.  Tokens are
    ``[index, part_of_speech, base_form]`` triples.
    """

    def __init__(self, use_PoW=None, stop_words=None, use_words=None, user_dic_files=None):
        """Configure the tokenizer.

        Parameters
        ----------
        use_PoW : list of str, optional
            Parts of speech emitted by ``tokenize()``.  Defaults to
            noun/verb/adjective/adverb/symbol
            ('ๅ่ฉ','ๅ่ฉ','ๅฝขๅฎน่ฉ','ๅฏ่ฉ','่จๅท').
        stop_words : list of str, optional
            Base forms dropped from the output (default: none).
        use_words : list of str, optional
            If non-empty, only these base forms are emitted (default: all).
        user_dic_files : list of str, optional
            User dictionary files handed to MeCab via its ``-u`` option.
        """
        # Use None sentinels instead of mutable default arguments so the
        # defaults are not shared between instances.
        self.use_PoW = ['ๅ่ฉ','ๅ่ฉ','ๅฝขๅฎน่ฉ','ๅฏ่ฉ','่จๅท'] if use_PoW is None else use_PoW
        self.stop_words = [] if stop_words is None else stop_words
        self.use_words = [] if use_words is None else use_words
        user_dic_files = [] if user_dic_files is None else user_dic_files
        # MeCab expects user dictionaries as a single "-u file1,file2" option.
        if len(user_dic_files) > 0:
            arg_user_dic_files = '-u ' + ','.join(user_dic_files)
        else:
            arg_user_dic_files = ''
        self.tagger = MeCab.Tagger(arg_user_dic_files)

    def tokenize(self, text):
        """Morphologically analyse *text* and return the filtered token list.

        Applies, in order: part-of-speech filter, stop-word filter, and
        (if configured) the use-word whitelist.
        """
        processing = self.basic_tokenize(text)
        processing = self.filter_PoW(processing)
        processing = self.filter_stop_words(processing)
        processing = self.filter_use_words(processing)
        return processing

    def tokenize_to_bag_of_words(self, text):
        """Return the tokenized *text* as a ``{base_form: count}`` dictionary."""
        words = [token[2] for token in self.tokenize(text)]
        return dict(Counter(words))

    def tokenize_to_wakachi(self, text):
        """Return the tokenized *text* as a whitespace-separated string."""
        words = [token[2] for token in self.tokenize(text)]
        return ' '.join(words)

    def basic_tokenize(self, text):
        """Run MeCab on *text* and return ``[[index, PoS, base_form], ...]``.

        Non-string input yields the single placeholder token
        ``[[1, '*', '*']]`` (kept for backward compatibility).
        """
        if not isinstance(text, str):
            return [[1, '*', '*'],]
        # Drop the trailing 'EOS' line and the final empty string.
        parsed = self.tagger.parse(text).split('\n')[:-2]
        # Each line is "<surface>\t<feature-csv>"; keep the feature part.
        parsed = [p.split('\t')[1] for p in parsed]
        # Feature field 0 is the part of speech, field 6 the base form.
        return [[idx, p.split(',')[0], p.split(',')[6]] for idx, p in enumerate(parsed)]

    def filter_PoW(self, basic_tokenized,):
        """Keep only tokens whose part of speech is in ``self.use_PoW``."""
        return [token for token in basic_tokenized if token[1] in self.use_PoW]

    def filter_stop_words(self, basic_tokenized,):
        """Drop tokens whose base form appears in ``self.stop_words``."""
        return [token for token in basic_tokenized if token[2] not in self.stop_words]

    def filter_use_words(self, basic_tokenized,):
        """If a whitelist is configured, keep only whitelisted base forms."""
        if self.use_words == []:
            return basic_tokenized
        else:
            return [token for token in basic_tokenized if token[2] in self.use_words]

    def change_dict(self, tokenized,):
        """Count base-form occurrences in *tokenized*; return ``{word: count}``.

        Equivalent to the original set-and-count loop, but uses the
        ``Counter`` the module already imports.
        """
        return dict(Counter(token[2] for token in tokenized))
if __name__ == "__main__":
    # Smoke test: tokenize a sample sentence keeping noun/verb/adjective/
    # adverb/symbol parts of speech and dropping common filler tokens.
    text = "ไปๆฅใฏไฝๆใซๅธฐใฃใฆใใพใใ๏ผใใจๅฝผๅฅณใฏๅฐใญใฆใใ"
    use_PoW = ['ๅ่ฉ','ๅ่ฉ','ๅฝขๅฎน่ฉ','ๅฏ่ฉ','่จๅท',]
    stop_words = ['*','ใใ','ไธ','ใฎ','ๆง','ใใกใ','้','ใจใใ','ใฏใ','\u3000',]
    k = keitaiso(use_PoW=use_PoW,stop_words=stop_words)
    # Raw morphemes, then the filtered token list.
    print(k.basic_tokenize(text))
    print(k.tokenize(text))
|
StAugust/kaggle | titanic/test.py | <filename>titanic/test.py<gh_stars>0
import numpy as np
if __name__ == '__main__':
    # Placeholder entry point: just prints a marker when run as a script.
    print("this is the main method")
|
MLackner/NUSFSpectra | pythonmodules/pySfgProcess/spikeRemoval.py | # -*- coding: utf-8 -*-
"""
This script searches a directory and reads in the ASCII data files produced by
WinSpec, corrects for cosmic ray spikes with a rolling median filter, and
replaces the original file with the updated file if spikes are found.
The original file is placed in a new directory called preSpikeCorr
"""
#import os
import os
#Set directory here !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
os.chdir("/Users/pohno/Box Sync/Science/Data/SFG/Solstice/11192017/caf2_water_flow/run4")
#import numpy for array functionality
import numpy as np
#import shutil for moving and copying files
import shutil
#import pandas for rolling median function
import pandas
#import matplotlib for plotting
import matplotlib.pyplot as plt
#import math for floor
import math
def _nearestCleanCount(counts, spike, i, step):
    """Return the first non-spike count within five points of index *i*.

    *step* is -1 to scan left, +1 to scan right.  Returns [] when the
    array edge is reached or no clean point exists within five steps
    (the original code left the variable undefined in the latter case,
    which could raise NameError).
    """
    for j in range(1, 6):
        k = i + step * j
        if k < 0 or k >= len(spike):
            return []  # hit the edge: contribute nothing to the average
        if spike[k] == 0:
            return counts[k]
    return []


def correctSpectrum(name, threshold=200, windowSize=7):
    """Remove cosmic-ray spikes from one WinSpec ASCII data file.

    Reads the 4-column file *name* (wavelength in column 0, counts in
    column 3), flags points lying more than *threshold* counts above a
    centered rolling median of width *windowSize*, and rewrites the file
    with each spike replaced by the floored average of its nearest clean
    neighbours.  A copy of the untouched file is kept in preSpikeCorr/.
    Plots the raw (and, if spikes were found, corrected) spectrum.

    Parameters
    ----------
    name : str
        File name in the current working directory.
    threshold : int, optional
        Counts above the rolling median beyond which a point is a spike.
    windowSize : int, optional
        Width of the rolling-median window (odd number).
    """
    # Read the wavelength and counts columns.  The context manager closes
    # the handle (the original leaked it).
    wavelengths = np.array([])
    counts = np.array([])
    with open(name, 'r') as origFile:
        for line in origFile:
            fields = line.split()
            wavelengths = np.append(wavelengths, [float(fields[0])])
            counts = np.append(counts, [float(fields[3])])
    # Plot the raw spectrum.
    plt.figure()
    plt.plot(wavelengths, counts)
    # Centered rolling median; the first/last floor(windowSize/2) entries
    # are NaN and are back-filled with the first/last computed value.
    medians = pandas.Series(counts).rolling(window=windowSize, center=True).median()
    numRep = math.floor(windowSize / 2)
    for i in range(numRep):
        medians[i] = medians[numRep]
        medians[len(medians) - i - 1] = medians[len(medians) - numRep - 1]
    # A point is a spike when it exceeds its window median by threshold.
    differences = counts - medians
    spike = np.zeros(len(differences),)
    for i in range(len(differences)):
        if differences[i] > threshold:
            spike[i] = 1
            print("Spike found at point index", i, "with wavelength", wavelengths[i])
    if np.sum(spike) > 0:
        # Corrected copy of the counts, for plotting.
        countsCORR = counts.copy()
        # Rewrite the file line by line into a temp file, patching only
        # the flagged rows; both handles are closed before the rename.
        with open(name, 'r') as origFile, open("temp" + name, "w") as newFile:
            for i in range(len(spike)):
                singleLine = origFile.readline()
                if spike[i] == 1:
                    # Nearest clean neighbours on either side (or [] at an edge).
                    left = _nearestCleanCount(counts, spike, i, -1)
                    right = _nearestCleanCount(counts, spike, i, +1)
                    # Average whichever clean neighbours exist.
                    tempValArray = np.array([])
                    tempValArray = np.append(tempValArray, left)
                    tempValArray = np.append(tempValArray, right)
                    ave = tempValArray.mean()
                    # Round down to an integer number of counts.
                    countsCORR[i] = math.floor(ave)
                    # Rebuild the line with the corrected counts column.
                    singleLineList = singleLine.split()
                    singleLineList[3] = str(int(ave))
                    singleLine = (singleLineList[0] + "\t" +
                                  singleLineList[1] + "\t" +
                                  singleLineList[2] + "\t" +
                                  singleLineList[3] + "\n")
                # Write the original or modified line.
                newFile.write(singleLine)
        # Keep a copy of the untouched file in preSpikeCorr (copy2 copies;
        # the original is then overwritten by the rename below).
        shutil.copy2(name, "preSpikeCorr")
        os.rename("temp" + name, name)
        # Overlay the corrected spectrum on the raw plot.
        plt.plot(wavelengths, countsCORR)
    else:
        print("No spikes found")
    return
def correctSpectraInDir():
    """Run correctSpectrum on every 4-column .txt file in the working directory.

    Creates the preSpikeCorr/ backup directory on first use.  Files whose
    first line does not split into exactly four whitespace-separated
    columns are skipped.
    """
    # Create the folder for pre-correction backups if it does not exist.
    if not os.path.exists("preSpikeCorr"):
        os.makedirs("preSpikeCorr")
    # Go through each file in the current directory.
    for filename in os.listdir():
        # Same loose substring match as before (".txt" anywhere in the name).
        if ".txt" in filename:
            # Probe the first line, closing the handle before
            # correctSpectrum rewrites and renames the file
            # (the original leaked this handle).
            with open(filename, "r") as datafile:
                isFourColumn = len(datafile.readline().split()) == 4
            if isFourColumn:
                print("Searching for spikes in: ", filename)
                correctSpectrum(filename)
    return
# Run the spike-correction pass over the directory selected by the
# os.chdir(...) call at the top of this script.
correctSpectraInDir()
|
MLackner/NUSFSpectra | pythonmodules/pySfgProcess/pscalib.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 16:42:16 2018
@author: geiger
"""
#import dfg class to hold individual dfg acquisition
from dfg import DFG
#import numpy
import numpy as np
from scipy.optimize import curve_fit
from scipy import exp
#import matplotlib for plotting
import matplotlib.pyplot as plt
class PScalib():
    """Wavenumber calibration against a polystyrene (PS) reference spectrum.

    Loads a single DFG acquisition of polystyrene and fits Gaussians to two
    known PS peaks (literature centers 2850.3 and 3060.7 cm^-1); the fitted
    centers give the wavenumber shift to apply to sample spectra.
    """

    def __init__(self,path):
        #store path of file
        self.path = path
        #the calibration acquisition itself
        self.dfg = DFG(path,'pscalib')
        #per-peak shifts (fit center minus literature center), filled by fitPeak
        self.shifts = np.array([0.0,0.0])

    def plot(self):
        """Plot the calibration spectrum over the CH-stretch region."""
        plt.figure()
        plt.plot(self.dfg.wn,self.dfg.counts)
        plt.xlim([2750,3150])
        plt.title('PS Calibration')

    def fitPeak(self,val1,val2,peak):
        """Fit a Gaussian to one PS peak and store its shift in self.shifts.

        val1, val2: wavenumber bounds of the fit window.  Note the slicing
        below assumes val2 maps to the smaller index (wn appears to be in
        descending order -- confirm against the DFG axis).
        peak: 0 or 1, selecting the literature center 2850.3 or 3060.7 cm^-1.
        """
        #peak equals 0 or 1, with different theoretical centers
        peakCenters = np.array([2850.3,3060.7])
        #indexes of the axis points closest to the entered bounds
        idx1 = (np.abs(self.dfg.wn - val1)).argmin()
        idx2 = (np.abs(self.dfg.wn - val2)).argmin()
        #segments within bounds
        xShort = self.dfg.wn[idx2:idx1+1]
        yShort = self.dfg.counts[idx2:idx1+1]
        #gaussian function to fit the segment
        #NOTE(review): `exp` comes from `from scipy import exp` at module
        #level, which is deprecated/removed in recent scipy -- consider numpy.exp.
        def gauss(x,a,x0,sigma,y0):
            return a*exp(-(x-x0)**2/(2*sigma**2))+y0
        #initial guesses for the fit
        x0 = xShort[int(len(xShort)/2)-1] #guess mean is middle point in range
        #guess width is the 25%-to-75% span of the range
        sigma = xShort[int(len(xShort)*(3/4))] - xShort[int(len(xShort)*(1/4))]
        y0 = yShort[0] #guess y0 is first point
        a = y0-yShort[int(len(xShort)/2)] #guess amplitude
        #fit
        popt,pcov = curve_fit(gauss,xShort,yShort,p0=[a,x0,sigma,y0])
        #plot with fit and points
        plt.figure()
        plt.plot(self.dfg.wn[idx2-5:idx1+5],self.dfg.counts[idx2-5:idx1+5])
        plt.plot(xShort,gauss(xShort,*popt),'ro:',label='fit')
        plt.plot(self.dfg.wn[idx2],self.dfg.counts[idx2],'o',markersize=5)
        plt.plot(self.dfg.wn[idx1],self.dfg.counts[idx1],'o',markersize=5)
        #store fitted center minus literature center for this peak
        self.shifts[peak] = popt[1]-peakCenters[peak]
        print('Peak shift from peak',str(peak),'is',"%.2f" % self.shifts[peak])

    def evaluateShift(self):
        """Print and return the average of the two stored peak shifts."""
        print('Average shift is ', "%.2f" % np.mean(self.shifts))
        return np.mean(self.shifts)
|
MLackner/NUSFSpectra | pythonmodules/pySfgProcess/spectrum.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 17:04:26 2017
Class to hold data for a compilation of DFGs making up a whole spectrum. Methods
in this class make up the majority of processing that occurs.
The class gets initialized with the path to a folder with a set of DFGs, both
of the sample and of the background. The initializion reads in all of the DFGs,
and seperates them into a list of backgrounds (self.bgs) and actual sample DFGs
(self.dfgs).
Once an object is created, many methods on it can be called to output details
of the object to the command line, to plot data, to process it, and to write
data to text files to be further worked with or plotted using an individual's
preferred plotting program.
Description of methods:
Print information to output:
-printDFGs(): prints a list of sample DFGs to the output
-printBGs(): prints a list of BG DFGs to the output
Plot data:
-plotDFGs(): creates a plot of all the sample DFGs on one plot
-plotBGs(): creates a plot of all the background DFGs on one plot
-plotIndDFGs(): creates individual plots of each sample DFG
-plotIndBGs(): creates individual plots of each background DFG
-plotFullDFGs(): creates a plot of each DFG that has been padded against the
full wn array. padDFGs() must have been called prior to calling this.
-plotSumDFGs(): creates a plot of the sum of every padded DFG. sumFullDFGs()
must have been called prior to calling this.
-plotSmoothRawDFGs(): creates a plot of the smoothed and pre-smoothed
padded DFGs. smoothDFGs() must have been called prior to calling this.
-plotTruncatedDFGs(): creates a plot of the truncated DFGs. A gold reference
spectra must have been created and truncated.
-plotSumTruncatedDFGs(): creates a plot of the sum of the truncated DFGs.
truncateFullDFGs() must have been called prior to calling this.
Write data to file:
-writeDFGs(name): writes each of the individual sample DFGs to a file of the
name specified.
-writeFullDFGs(name): writes each of the fullDFGs that have been padded with
zeros to file. padDFGs() must have been called prior to calling this.
-writeSumDFG(name): writes the sum of the fullDFGs to file. sumFullDFGs() must
have been called prior to calling this.
-writeSmoothedDFGs(name): writes the smoothed fullDFGs to file. smoothDFGs()
must have been called prior to calling this.
-writeTruncatedDFGs(name): writes the truncated DFGs to file. A gold reference
spectra must have been created and truncated, and truncateFullDFGs() must have
been called prior to calling this.
-writeSumTruncatedDFG(name): writes the sum of the truncated DFGs to file.
sumTruncatedDFGs() must have been called prior to calling this.
Processing data:
removeCRs(threshold): finds and removes cosmic rays from the sample and background
DFGs. Cosmic rays produce spurious spikes in the spectrum that are 1 or 2 points
wide and frequently outnumber all surrounding points by 100s or 1000s of counts.
By using a rolling median filter, these outlying points are detected and replaced
with the average of the non-outlying points to their immediate left and right.
The method leaves the lists bgs and dfgs in place and simply replaces the spurious
points.
subtractBGs(): subtracts each background spectrum from the sample spectra.
The method goes through the list of sample DFGs, identifies the correct background
DFG by going through the list and finding the one that is centered around the same
wavelength, and then subtracts the background spectrum from the sample.
padDFGs(): pads each sample DFG with zeros on either side so that each DFG aligns with
the others. This allows them to be plotted against the same array, summed up, etc.
A future iteration could automatically calculate how many zeros to pad on either
side by looking at the wavenumber arrays; currently this number is preset.
sumFullDFGs(): sums up all of the sample DFGs that have been padded with zeros.
padDFGs() must be called before calling this method.
smoothDFGs(sigma): smooths the full DFGs using a guassian filter with width sigma,
the default is five and seems to be appropriate to smooth without using significant
resolution. The method copies the pre-smoothed DFGs into another list to save them
and then smooths the DFGs in the list dfgsFull.
findTruncateIndices(threshold): finds the location of where individual DFGs
should be truncated. The threshold determines at what value of the max intensity
the truncation should happen, the default is 0.05 (5%). The method saves these indices
in a list that gets attached the spectrum object, it should only be called on a
gold reference spectrum; that gold reference spectrum is then passed to other
sample spectrum which use the indices created by this method to truncate its
spectra.
truncateFullDFGs(gold): truncates a sample spectrum at the positions determined by
a gold reference spectrum. A gold reference spectrum must exist and have had the
findTruncateIndices() method called on it. This gets passed to the sample spectrum
and its indices are used to truncate the sample spectrum.
sumTruncatedDFGs(): sums up all of the truncated DFGs. truncateFullDFGs() must
have been called prior to calling this method.
@author: pohno
"""
#import os to get list of files
import os
#import dfg class to hold individual dfg acquisition
from dfg import DFG
#import matplotlib for plotting
import matplotlib.pyplot as plt
#import pandas for rolling median function
import pandas
#import numpy
import numpy as np
#import math
import math
#import fullwn array
from fullwn import FullWN
#import scipy
from scipy.ndimage.filters import gaussian_filter1d
#import copy for deep copying
import copy
class Spectrum():
    """One full SFG spectrum assembled from a directory of DFG acquisitions.

    The constructor imports every .SPE file under *path*: purely numeric
    file names become sample acquisitions (self.dfgs) and names ending in
    'bg' become backgrounds (self.bgs).  The processing methods below then
    remove cosmic rays, subtract backgrounds, pad each acquisition onto the
    shared 911-point wavenumber axis (self.fullwn), smooth, truncate
    against a gold reference spectrum, and sum the result.
    """

    def __init__(self,path,shift = 0):
        """Import sample and background DFGs from *path*.

        shift: wavenumber calibration offset (e.g. from a PS calibration)
        subtracted from the full axis and from each sample DFG's axis.
        """
        #store path of this set of DFG positions
        self.path = path
        #initialize empty lists for DFGs and backgrounds
        self.dfgs = []
        self.bgs = []
        print('Importing DFGs and BGs...')
        #change the directory to the specified directory
        # os.chdir(path)
        #go through each file/directory
        for f in os.listdir(path):
            #check if it is a .SPE file
            if f[-4:] == '.SPE':
                #look at the first part of the file name
                name = f.split('.')[0]
                if name.isdigit():
                    #if it is all digits assume it is a sample DFG
                    self.dfgs = self.dfgs + [DFG(path + '/' + f,name)]
                elif name[-2:] == 'bg':
                    #if it ends in bg assume it is a background
                    self.bgs = self.bgs + [DFG(path + '/' + f,name)]
        #sort by name
        self.dfgs.sort(key=lambda x: x.name)
        self.bgs.sort(key=lambda x: x.name)
        #create array for fullwn
        fullwn = FullWN()
        #store the calibrated full wavenumber axis
        self.fullwn = fullwn.fullwn - shift
        #calibrate dfg wns
        #NOTE(review): background wn axes are not shifted here; presumably
        #only the sample axes are used downstream -- confirm.
        for dfg in self.dfgs:
            dfg.wn = dfg.wn - shift
        #output the DFGs that have been imported
        print('Has dfgs:')
        self.printDFGs()
        #output the background DFGs that have been imported
        print('Has bgs:')
        self.printBGs()

    #OUTPUT INFO METHODS

    #print the name of each sample DFG
    def printDFGs(self):
        for dfg in self.dfgs:
            print(dfg.name)

    #print the name of each background DFG
    def printBGs(self):
        for bg in self.bgs:
            print(bg.name)

    #PLOTTING METHODS

    #plot all sample DFGs on one figure
    def plotDFGs(self):
        plt.figure()
        for dfg in self.dfgs:
            plt.plot(dfg.wn,dfg.counts)
        plt.title('DFGs')

    #plot all background DFGs on one figure
    def plotBGs(self):
        plt.figure()
        for bg in self.bgs:
            plt.plot(bg.wn,bg.counts)
        plt.title('BGs')

    #plot each sample DFG individually
    def plotIndDFGs(self):
        for dfg in self.dfgs:
            plt.figure()
            plt.plot(dfg.wn,dfg.counts)
            plt.title(dfg.name)

    #plot each background DFG individually
    def plotIndBGs(self):
        for bg in self.bgs:
            plt.figure()
            plt.plot(bg.wn,bg.counts)
            plt.title(bg.name)

    #plot each sample DFG with its associated bg dfg
    #(the gold overlay is commented out; the parameter is kept for
    #interface compatibility)
    def plotDFGandBGsandGold(self,gold):
        for dfg in self.dfgs:
            plt.figure()
            plt.plot(dfg.wn,dfg.counts)
            #identify background by matching median wavelength
            dfgMedian = int(np.median(dfg.wl))
            #tracker for seeing if you found background
            foundBG = False
            #go through each background, see if one with matching median is there
            for bg in self.bgs:
                if dfgMedian == int(np.median(bg.wl)):
                    print("For dfg",dfg.name,"found",bg.name)
                    plt.plot(bg.wn,bg.counts)
                    foundBG = True
            #if one wasn't found, print that
            if not foundBG:
                print("No bg found for dfg",dfg.name)
            #for dfgGold in gold.dfgs:
            #    if dfgMedian == int(np.median(dfgGold.wl)):
            #        plt.plot(dfgGold.wn,dfgGold.counts)
            plt.title(dfg.name)

    #overlay raw individual DFGs (blue points) with their padded versions (red lines)
    def plotIndPaddedDFGs(self):
        plt.figure()
        for dfg in self.dfgs:
            plt.plot(dfg.wn,dfg.counts,'b.')
        for dfg in self.dfgsFull:
            plt.plot(self.fullwn,dfg.counts,'r')
        plt.title('Padded and Ind DFGs')

    #plot each DFG that has been padded with zeros against fullwn
    def plotFullDFGs(self):
        plt.figure()
        for dfg in self.dfgsFull:
            plt.plot(self.fullwn,dfg.counts)
        plt.title('Padded DFGs')

    #plot each BG that has been padded with zeros against fullwn
    def plotFullBGs(self):
        plt.figure()
        for bg in self.bgsFull:
            plt.plot(self.fullwn,bg.counts)
        plt.title('Padded BGs')

    #plot the sum of all the padded DFGs against fullwn
    def plotSumDFG(self):
        plt.figure()
        plt.plot(self.fullwn,self.dfgSum)
        plt.title('Sum of DFGs')

    #plot the smoothed (blue lines) and pre-smoothed (red points) padded DFGs
    def plotSmoothRawDFGs(self):
        plt.figure()
        for dfg in self.dfgsPreSmoothed:
            plt.plot(self.fullwn,dfg.counts,'ro')
        for dfg in self.dfgsFull:
            plt.plot(self.fullwn,dfg.counts,'b')
        plt.title('Smoothed and Raw DFGs')

    #plot the DFGs that have been truncated according to the gold reference
    def plotTruncatedDFGs(self):
        plt.figure()
        for dfg in self.dfgsFullTruncated:
            plt.plot(self.fullwn,dfg.counts)
        plt.title('Truncated DFGs')

    #plot the sum of the truncated DFGs
    def plotSumTruncatedDFG(self):
        plt.figure()
        plt.plot(self.fullwn,self.dfgTruncatedSum)
        plt.title('Sum of truncated DFGs')

    #WRITING METHODS

    #write each individual sample DFG to a CSV file.
    #The leading np.zeros(444) is a placeholder first column that the
    #wn/counts columns are stacked onto; 444 is the number of points in
    #one unpadded acquisition (911-point full axis minus 467 padding).
    def writeDFGs(self,name):
        data = np.zeros(444)
        for dfg in self.dfgs:
            data = np.vstack((data,dfg.wn))
            data = np.vstack((data,dfg.counts))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write each individual background DFG to a CSV file (same layout as writeDFGs)
    def writeBGs(self,name):
        data = np.zeros(444)
        for bg in self.bgs:
            data = np.vstack((data,bg.wn))
            data = np.vstack((data,bg.counts))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write each padded DFG to file (first column is the full wn axis)
    def writeFullDFGs(self,name):
        data = self.fullwn
        for dfg in self.dfgsFull:
            data = np.vstack((data,dfg.counts))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write each padded BG to file (first column is the full wn axis)
    def writeFullBGs(self,name):
        data = self.fullwn
        for bg in self.bgsFull:
            data = np.vstack((data,bg.counts))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write the sum of the padded DFGs to file
    def writeSumDFG(self,name):
        data = np.vstack((self.fullwn,self.dfgSum))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write the smoothed DFGs to file.
    #NOTE(review): identical to writeFullDFGs -- smoothDFGs() smooths
    #dfgsFull in place, so this is only meaningful after calling it.
    def writeSmoothedDFGs(self,name):
        data = self.fullwn
        for dfg in self.dfgsFull:
            data = np.vstack((data,dfg.counts))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write the truncated DFGs to file
    def writeTruncatedDFGs(self,name):
        data = self.fullwn
        for dfg in self.dfgsFullTruncated:
            data = np.vstack((data,dfg.counts))
        data = data.transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',')

    #write the sum of the truncated DFGs to file (with a CSV header line)
    def writeSumTruncatedDFG(self,name):
        print('Truncated, summed wave written to',name)
        data = np.vstack((self.fullwn,self.dfgTruncatedSum)).transpose()
        fmt = '%.5f'
        np.savetxt(name,data,fmt,delimiter=',',header='wn,counts',comments='')

    #ACTUAL DATA PROCESSING METHODS

    #remove all cosmic rays from each DFG and BG, in place.
    #threshold: counts above the rolling window median beyond which a
    #point is treated as a spike.
    def removeCRs(self,threshold=200):
        print('Removing cosmic rays from spectra...')
        #median-filter-based spike detection and repair for a single DFG
        def removeCRindDFG(dfg,threshold):
            #choose how big of a window for the rolling median
            windowSize = 7
            medians = pandas.Series(dfg.counts).rolling(window = windowSize,center=True).median()
            #number of nan at beginning and end to replace
            numRep = math.floor(windowSize/2)
            #replace beginning and end nan with the first/last computed value
            for i in range(numRep):
                medians[i] = medians[numRep]
                medians[len(medians)-i-1] = medians[len(medians)-numRep-1]
            #find difference of each point with the median of its window
            differences = dfg.counts-medians
            #empty array to hold zero or one if point is a spike
            spike = np.zeros(len(differences),)
            for i in range(len(differences)):
                if differences[i] > threshold:
                    spike[i] = 1
                    print("Spike found at point index",i,"with wavenumber",dfg.wn[i],"cm^-1")
            #if there any spikes found
            if np.sum(spike) > 0:
                #go through and replace the spike with the average on both sides
                for i in range(len(spike)):
                    #if the point needs to be replaced
                    if spike[i] == 1:
                        #check up to five points to the left for the edge or for an ok point
                        #NOTE(review): if no edge and no clean point is found
                        #within 5 steps, `left` stays unassigned (NameError risk).
                        for j in range(5):
                            if (i-1-j) < 0:
                                left = [] #if its edge only take from right point
                                break
                            else:
                                if spike[i-1-j] == 0:
                                    left = dfg.counts[i-1-j] #or get the first acceptable point
                                    break
                        #check up to five points to the right for the edge or for an ok point
                        for j in range(5):
                            if (i+j+1) >= len(spike):
                                right = [] #if its edge only take from the left point
                                break
                            else:
                                if spike[i+1+j] == 0:
                                    right = dfg.counts[i+1+j] #or get the first acceptable point
                                    break
                        #get the average of the two or the value if its only one
                        tempValArray = np.array([])
                        tempValArray = np.append(tempValArray,left)
                        tempValArray = np.append(tempValArray,right)
                        ave = tempValArray.mean()
                        #round down to integer number of counts
                        dfg.counts[i] = math.floor(ave)
            else:
                print("No spikes found in " + dfg.name)
            return
        #go through each dfg and remove CRs
        for dfg in self.dfgs:
            removeCRindDFG(dfg,threshold)
        #go through each bg and remove CRs
        for bg in self.bgs:
            removeCRindDFG(bg,threshold)

    #find and subtract the matching background from each sample DFG, in place.
    #Backgrounds are matched by integer median wavelength; the pre-subtraction
    #data is preserved in self.dfgsRaw.
    def subtractBGs(self):
        print('Subtracting BGs from DFGs...')
        #create list to hold pre-bg subtracted dfgs
        self.dfgsRaw = copy.deepcopy(self.dfgs)
        #go through each dfg
        for dfg in self.dfgs:
            #identify background by finding median wavelength
            dfgMedian = int(np.median(dfg.wl))
            #tracker for seeing if you found background
            foundBG = False
            #go through each background, see if one with matching median is there
            for bg in self.bgs:
                if dfgMedian == int(np.median(bg.wl)):
                    print("For dfg",dfg.name,"found",bg.name)
                    dfg.counts = dfg.counts - bg.counts
                    foundBG = True
            #if one wasn't found, print that
            if not foundBG:
                print("No bg found for dfg",dfg.name)

    #pad each DFG with zeros before and/or after so they align on the
    #911-point full axis and can be summed up; result goes in self.dfgsFull
    def padDFGs(self):
        print('Padding DFGs with Zeros...')
        #dictionary to hold number of zeros to pad on either side,
        #keyed by detector center wavelength (each pair sums to 467 = 911-444)
        padding = dict(det615=[0,467],det620=[58,409],det625=[116,351],det630=[174,293],
                       det635=[232,235],det640=[290,177], det642=[314,153],det645=[349,118],
                       det655=[467,0])
        #length of fullwn is 911
        #for 615 add 467 after
        #for 620 add 58 before and 409 after
        #for 625 add 116 before and 351 after
        #for 630 add 174 before and 293 after
        #for 635 add 232 before and 235 after
        #for 640 add 290 before and 177 after
        #for 645 add 349 before and 118 after
        #for 655 add 467 before
        #copy dfgs into new list
        self.dfgsFull = copy.deepcopy(self.dfgs)
        for dfg in self.dfgsFull:
            key = 'det' + str(int(np.median(dfg.wl)))
            dfg.counts = np.append(np.append(np.zeros(padding[key][0]),dfg.counts),
                                   np.zeros(padding[key][1]))

    #pad each BG with zeros before and/or after so they align and can be summed up
    #(mirror of padDFGs; result goes in self.bgsFull)
    def padBGs(self):
        print('Padding ฮGs with Zeros...')
        #dictionary to hold number of zeros to pad on either side
        padding = dict(det615=[0,467],det620=[58,409],det625=[116,351],det630=[174,293],
                       det635=[232,235],det640=[290,177], det642=[314,153],det645=[349,118],
                       det655=[467,0])
        #length of fullwn is 911
        #for 615 add 467 after
        #for 620 add 58 before and 409 after
        #for 625 add 116 before and 351 after
        #for 630 add 174 before and 293 after
        #for 635 add 232 before and 235 after
        #for 640 add 290 before and 177 after
        #for 645 add 349 before and 118 after
        #for 655 add 467 before
        #copy bgs into new list
        self.bgsFull = copy.deepcopy(self.bgs)
        for bg in self.bgsFull:
            key = 'det' + str(int(np.median(bg.wl)))
            bg.counts = np.append(np.append(np.zeros(padding[key][0]),bg.counts),
                                  np.zeros(padding[key][1]))

    #sum up the padded DFGs into self.dfgSum; padDFGs() must run first
    def sumFullDFGs(self):
        print('Summing full DFGs...')
        self.dfgSum = np.zeros(911)
        for dfg in self.dfgsFull:
            self.dfgSum = self.dfgSum + dfg.counts

    #smooth each padded DFG in place with a Gaussian window of width sigma;
    #pre-smoothed copies are kept in self.dfgsPreSmoothed
    def smoothDFGs(self,sigma=5):
        print('Smoothing DFGs...')
        self.dfgsPreSmoothed = copy.deepcopy(self.dfgsFull)
        for dfg in self.dfgsFull:
            #use gaussian filter imported from scipy.ndimage.filters
            dfg.counts = gaussian_filter1d(dfg.counts,sigma)

    #find, for each padded DFG, the indices where the signal falls below
    #threshold * max; intended to be called on a gold reference spectrum
    def findTruncateIndices(self,threshold=0.05):
        print('Finding truncation thresholds at',threshold,'...')
        #create list to hold [left, right] index pairs
        self.truncateIndices = []
        #go through each dfg
        for dfg in self.dfgsFull:
            #find max
            maxVal = dfg.counts.max()
            #find index of the max
            maxIndex = dfg.counts.argmax()
            #find left and right indexes
            #initialized as empty lists so "not found" is falsy
            #NOTE(review): a legitimately found index of 0 is also falsy and
            #would be overwritten by the fallback below -- confirm intended.
            leftIndex = []
            rightIndex = []
            #go through one half
            for i in np.arange(maxIndex,len(dfg.counts)-1,1):
                #find first point less than threshold and choose
                if dfg.counts[i]-maxVal*threshold < 0:
                    rightIndex = i
                    break
            #if nothing chosen use minimum
            #NOTE(review): argmin here is relative to the slice, not the full
            #array (maxIndex offset is not added back) -- confirm.
            if not rightIndex:
                rightIndex = dfg.counts[maxIndex:].argmin()
            #go through other half
            for i in np.arange(maxIndex,0,-1):
                #find first point less than threshold and choose
                if dfg.counts[i]-maxVal*threshold < 0:
                    leftIndex = i
                    break
            if not leftIndex:
                leftIndex = dfg.counts[:maxIndex].argmin()
            #add the found values to the list
            self.truncateIndices = self.truncateIndices + [[leftIndex,rightIndex]]

    #zero out each padded DFG outside the indices determined on the gold
    #reference spectrum (gold.findTruncateIndices() must have been called;
    #assumes gold has the same number and order of DFGs as this spectrum)
    def truncateFullDFGs(self,gold):
        print('Truncating DFGs...')
        #copy dfgs into new list
        self.dfgsFullTruncated = copy.deepcopy(self.dfgsFull)
        #set all the values equal to zero not within the indices determined by
        #the gold reference spectrum
        for i,dfg in enumerate(self.dfgsFullTruncated):
            dfg.counts[:gold.truncateIndices[i][0]] = 0
            dfg.counts[gold.truncateIndices[i][1]:] = 0

    #sum up the truncated DFGs into self.dfgTruncatedSum
    def sumTruncatedDFGs(self):
        print('Summing truncated DFGs...')
        self.dfgTruncatedSum = np.zeros(len(self.fullwn))
        for dfg in self.dfgsFullTruncated:
            self.dfgTruncatedSum = self.dfgTruncatedSum + dfg.counts
|
MLackner/NUSFSpectra | pythonmodules/pySfgProcess/dfg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 17:04:26 2017
Class to hold data from a single DFG acquisition. A set of DFGs may or may not
make up a full spectrum from spectrum.py.
DFG stands for 'difference frequency generation'. When collecting a broad SFG
vibrational spectrum, the IR wavelength and detector wavelength need to be
shifted across frequency space as they are too narrow to do everything at one
time. The IR wavelength is shifted by changing the position of something called
the DFG crystal, hence the name of a single one of these acquisitions.
The name property holds the name of the file. The wl property is an array of
the wavelength values that are read in. The counts property is the number of
photons that are read in. The wn property is the wl array converted to wavelengths.
@author: pohno
"""
#import winspec
from winspec import SpeFile
#import numpy
import numpy as np
class DFG():
    """Container for a single DFG acquisition read from a WinSpec .SPE file.

    Attributes: name (acquisition label), wl (wavelength axis from the
    file), counts (photon counts as floats), wn (wavelength axis expressed
    in wavenumbers relative to the 795 nm upconversion beam).
    """

    def __init__(self, path, name):
        """Load the .SPE file at *path* and label the acquisition *name*."""
        self.name = name
        spe = SpeFile(path)
        # Wavelength axis and counts straight from the file; counts are
        # flattened to a 1-D float array.
        self.wl = spe.xaxis
        self.counts = spe.data[0].flatten().astype(float)
        # Express the axis in wavenumbers, upconverted at 795 nm.
        self.wn = self.convertWLtoWN(self.wl, 795)

    def convertWLtoWN(self, wlArray, visWL):
        """Convert wavelengths (nm) to wavenumbers (cm^-1), shifted by the
        wavenumber of the visible upconversion beam at *visWL* nm."""
        return 10**7 / wlArray - 10**7 / visWL
|
MLackner/NUSFSpectra | pythonmodules/pySfgProcess/fullwn.py | <reponame>MLackner/NUSFSpectra
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 21:04:16 2017
Object that is just an array that holds all the wavenumber values that the
acquisitions, once they are padded with zeros and/or properly summed up,
are plotted against.
@author: pohno
"""
#import numpy
import numpy as np
class FullWN():
    """Container for the master wavenumber axis.

    ``fullwn`` holds every wavenumber value (cm^-1, in descending order)
    that the individual acquisitions — once padded with zeros and/or
    summed — are plotted against.
    """
    def __init__(self):
        # Hard-coded master wavenumber grid shared by all acquisitions.
        # NOTE(review): presumably derived from the instrument's DFG
        # positions — confirm before regenerating these values.
        self.fullwn = np.array([4206.36,4203.91,4201.43,4198.98,4196.5,4194.05,
                                4191.58,4189.13,4186.66,4184.21,4181.77,4179.3,
                                4176.85,4174.38,4171.94,4169.47,4167.03,4164.59,
                                4162.13,4159.69,4157.25,4154.79,4152.35,4149.89,
                                4147.46,4145.02,4142.56,4140.13,4137.7,4135.27,
                                4132.81,4130.38,4127.95,4125.5,4123.07,4120.64,
                                4118.19,4115.76,4113.34,4110.91,4108.46,4106.04,
                                4103.62,4101.2,4098.78,4096.33,4093.91,4091.5,
                                4089.08,4086.66,4084.22,4081.8,4079.39,4076.97,
                                4074.56,4072.12,4069.71,4067.3,4065.085,4062.675,
                                4060.25,4057.845,4055.435,4053,4050.595,4048.19,
                                4045.785,4043.365,4040.965,4038.56,4036.16,
                                4033.745,4031.345,4028.945,4026.545,4024.13,
                                4021.73,4019.335,4016.94,4014.54,4012.135,
                                4009.74,4007.345,4004.955,4002.565,4000.16,
                                3997.765,3995.37,3992.985,3990.6,3988.21,
                                3985.82,3983.42,3981.035,3978.645,3976.265,
                                3973.88,3971.495,3969.13,3966.745,3964.365,
                                3961.98,3959.6,3957.21,3954.83,3952.455,3950.09,
                                3947.715,3945.335,3942.965,3940.585,3938.215,
                                3935.85,3933.48,3931.11,3928.74,3926.566667,
                                3924.196667,3921.816667,3919.446667,3917.076667,
                                3914.73,3912.366667,3909.996667,3907.636667,
                                3905.27,3902.903333,3900.543333,3898.19,3895.83,
                                3893.463333,3891.103333,3888.763333,3886.406667,
                                3884.046667,3881.696667,3879.343333,3876.986667,
                                3874.636667,3872.296667,3869.936667,3867.586667,
                                3865.233333,3862.886667,3860.536667,3858.196667,
                                3855.853333,3853.516667,3851.173333,3848.836667,
                                3846.486667,3844.15,3841.803333,3839.466667,
                                3837.13,3834.796667,3832.453333,3830.126667,
                                3827.786667,3825.456667,3823.113333,3820.79,
                                3818.456667,3816.126667,3813.8,3811.473333,
                                3809.133333,3806.816667,3804.48,3802.153333,
                                3799.84,3797.513333,3795.183333,3792.866667,
                                3790.61,3788.2975,3785.9725,3783.6425,3781.3275,
                                3778.9975,3776.69,3774.3725,3772.0475,3769.7325,
                                3767.4175,3765.1125,3762.795,3760.48,3758.17,
                                3755.86,3753.54,3751.235,3748.9275,3746.6075,
                                3744.305,3742.0025,3739.695,3737.39,3735.0875,
                                3732.785,3730.4725,3728.18,3725.87,3723.58,
                                3721.27,3718.975,3716.68,3714.38,3712.085,
                                3709.79,3707.495,3705.2025,3702.9075,3700.615,
                                3698.3225,3696.0325,3693.745,3691.4575,3689.17,
                                3686.8775,3684.59,3682.31,3680.025,3677.7375,
                                3675.4575,3673.1775,3670.88,3668.61,3666.3325,
                                3664.045,3661.7775,3659.495,3657.166,3654.89,
                                3652.614,3650.332,3648.062,3645.774,3643.496,
                                3641.236,3638.958,3636.686,3634.406,3632.138,
                                3629.878,3627.6,3625.334,3623.064,3620.802,
                                3618.534,3616.268,3614.008,3611.748,3609.478,
                                3607.216,3604.962,3602.702,3600.438,3598.182,
                                3595.926,3593.66,3591.41,3589.15,3586.894,
                                3584.644,3582.39,3580.14,3577.888,3575.632,
                                3573.378,3571.136,3568.89,3566.642,3564.392,
                                3562.142,3559.902,3557.658,3555.414,3553.168,
                                3550.932,3548.682,3546.442,3544.21,3541.96,
                                3539.726,3537.486,3535.252,3533.016,3530.784,
                                3528.546,3526.141667,3523.906667,3521.665,
                                3519.431667,3517.203333,3514.971667,3512.731667,
                                3510.508333,3508.276667,3506.051667,3503.818333,
                                3501.591667,3499.366667,3497.14,3494.913333,
                                3492.693333,3490.466667,3488.246667,3486.021667,
                                3483.803333,3481.576667,3479.358333,3477.14,
                                3474.918333,3472.706667,3470.491667,3468.271667,
                                3466.048333,3463.843333,3461.623333,3459.413333,
                                3457.201667,3455.001667,3452.781667,3450.576667,
                                3448.365,3446.161667,3443.945,3441.741667,3439.54,
                                3437.331667,3435.121667,3432.921667,3430.716667,
                                3428.518333,3426.31,3424.115,3421.916667,3419.715,
                                3417.515,3415.316667,3413.125,3410.923333,
                                3408.728333,3406.536667,3404.343333,3402.156667,
                                3399.958333,3397.761667,3395.594286,3393.4,
                                3391.205714,3389.017143,3386.83,3384.64,
                                3382.444286,3380.261429,3378.071429,3375.888571,
                                3373.698571,3371.514286,3369.331429,3367.152857,
                                3364.961429,3362.782857,3360.61,3358.421429,
                                3356.24,3354.06,3351.885714,3349.711429,3347.53,
                                3345.355714,3343.175714,3341.007143,3338.831429,
                                3336.655714,3334.485714,3332.318571,3330.144286,
                                3327.977143,3325.802857,3323.638571,3321.472857,
                                3319.304286,3317.14,3314.974286,3312.805714,
                                3310.645714,3308.48,3306.32,3304.154286,3302,
                                3299.834286,3297.675714,3295.514286,3293.362857,
                                3291.195714,3289.044286,3286.887143,3284.735714,
                                3282.584286,3280.42,3278.275714,3276.121429,
                                3273.972857,3271.822857,3269.671429,3267.527143,
                                3265.374286,3263.23,3261.078571,3258.937143,
                                3256.787143,3254.647143,3252.502857,3250.358571,
                                3248.214286,3246.071429,3243.928571,3241.792857,
                                3239.652857,3237.51,3235.38,3233.242857,
                                3231.105714,3228.971429,3226.832857,3224.698571,
                                3222.567143,3220.427143,3218.301429,3216.17,
                                3214.04,3211.908571,3209.774286,3207.651429,
                                3205.521429,3203.394286,3201.268571,3199.14,
                                3197.017143,3194.892857,3192.764286,3190.11,
                                3187.985,3185.86,3183.743333,3181.62,3179.5,
                                3177.378333,3175.265,3173.136667,3171.02,3168.9,
                                3166.788333,3164.665,3162.553333,3160.433333,
                                3158.323333,3156.211667,3154.098333,3151.991667,
                                3149.876667,3147.766667,3145.655,3143.551667,
                                3141.615714,3139.497143,3137.385714,3135.281429,
                                3133.17,3131.064286,3128.958571,3126.848571,
                                3124.74,3122.638571,3120.531429,3118.431429,
                                3116.328571,3114.221429,3112.127143,3110.027143,
                                3107.915714,3105.827143,3103.725714,3101.627143,
                                3099.531429,3097.432857,3095.338571,3093.244286,
                                3091.148571,3089.058571,3086.965714,3084.875714,
                                3082.781429,3080.688571,3078.601429,3076.517143,
                                3074.424286,3072.338571,3070.244286,3067.688333,
                                3065.6,3063.516667,3061.426667,3059.336667,
                                3057.25,3055.166667,3053.088333,3051.003333,
                                3048.921667,3046.838333,3044.758333,3042.678333,
                                3040.603333,3038.521667,3036.44,3034.368333,
                                3032.288333,3030.211667,3028.131667,3026.063333,
                                3023.986667,3021.913333,3019.846667,3017.77,3015.7,
                                3013.623333,3011.556667,3009.496667,3007.423333,
                                3005.355,3003.29,3001.223333,2999.16,2997.09,
                                2995.025,2992.97,2990.901667,2988.841667,
                                2986.771667,2984.725,2982.66,2980.596667,
                                2978.546667,2976.481667,2974.428333,2972.368333,
                                2970.32,2968.253333,2966.208333,2964.156667,
                                2962.103333,2960.043333,2957.995,2955.941667,
                                2953.9,2951.841667,2949.793333,2947.218,2945.168,
                                2943.118,2941.076,2939.024,2936.976,2934.93,
                                2932.882,2930.846,2928.794,2926.752,2924.708,
                                2922.666,2920.622,2918.576,2916.546,2914.5,
                                2912.464,2910.43,2908.396,2906.352,2904.322,
                                2902.28,2900.242,2898.214,2896.18,2894.15,
                                2892.116,2890.088,2888.05,2886.028,2883.996,
                                2881.97,2879.944,2877.912,2875.888,2873.858,
                                2871.84,2869.812,2867.792,2865.768,2863.744,
                                2861.726,2859.704,2857.682,2855.66,2853.646,
                                2851.622,2849.608,2847.592,2845.576,2843.562,
                                2841.55,2839.532,2837.516,2835.5,2833.496,
                                2831.482,2828.9175,2826.905,2824.895,2822.8875,
                                2820.8725,2818.8625,2816.855,2814.845,2812.8425,
                                2810.835,2808.8275,2806.8175,2804.8125,2802.815,
                                2800.805,2798.8075,2796.805,2794.805,2792.7975,
                                2790.8025,2788.795,2786.8075,2784.8025,2782.805,
                                2780.8075,2778.815,2776.8175,2774.8275,2772.8275,
                                2770.8375,2768.84,2766.8475,2764.8675,2762.8725,
                                2760.8725,2758.8825,2756.8925,2754.92,2752.9275,
                                2750.9375,2748.95,2746.965,2744.985,2742.995,
                                2741.015,2739.025,2737.04,2735.0625,2733.0825,
                                2731.1025,2729.1225,2727.1425,2725.155,2723.19,
                                2721.2125,2719.2325,2717.2575,2715.285,2712.826667,
                                2710.853333,2708.876667,2706.91,2704.94,2702.95,
                                2700.983333,2699.01,2697.043333,2695.066667,
                                2693.096667,2691.13,2689.153333,2687.196667,
                                2685.226667,2683.253333,2681.29,2679.33,
                                2677.356667,2675.396667,2673.43,2671.466667,
                                2669.503333,2667.543333,2665.576667,2663.623333,
                                2661.666667,2659.7,2657.746667,2655.783333,
                                2653.826667,2651.87,2649.913333,2647.96,
                                2646.003333,2644.046667,2642.103333,2640.146667,
                                2638.193333,2636.243333,2634.29,2632.336667,
                                2630.396667,2628.446667,2626.493333,2624.546667,
                                2622.603333,2620.653333,2618.706667,2616.766667,
                                2614.813333,2612.873333,2610.926667,2608.99,
                                2607.046667,2605.106667,2603.16,2601.236667,
                                2599.135,2597.2,2595.255,2593.32,2591.375,2589.44,
                                2587.5,2585.57,2583.63,2581.69,2579.76,2577.82,
                                2575.89,2573.95,2572.025,2570.095,2568.155,
                                2566.225,2564.305,2562.365,2560.435,2558.5,
                                2556.575,2554.655,2552.72,2550.795,2548.87,
                                2546.94,2545.02,2543.1,2541.175,2539.24,2537.335,
                                2535.415,2533.485,2531.565,2529.65,2527.735,
                                2525.805,2523.885,2521.985,2520.065,2518.145,
                                2516.23,2514.315,2512.41,2510.5,2508.58,2506.66,
                                2504.75,2502.85,2500.94,2499.03,2497.11,2495.2,
                                2493.305,2491.395,2489.49,2487.585,2485.23,
                                2483.33,2481.4,2479.49,2477.59,2475.69,2473.78,
                                2471.88,2469.98,2468.07,2466.17,2464.27,2462.37,
                                2460.47,2458.57,2456.67,2454.77,2452.88,2450.98,
                                2449.08,2447.18,2445.29,2443.39,2441.5,2439.6,
                                2437.71,2435.81,2433.92,2432.03,2430.13,2428.24,
                                2426.35,2424.46,2422.57,2420.7,2418.81,2416.92,
                                2415.03,2413.14,2411.26,2409.37,2407.48,2405.62,
                                2403.73,2401.85,2399.96,2398.08,2396.19,2394.33,
                                2392.45,2390.57,2388.68,2386.83,2384.94,2383.06,
                                2381.18,2379.3,2377.45,2375.57,2373.69,2371.81,
                                2369.96,2368.08,2366.2,2364.35,2362.47,2360.6,
                                2358.75,2356.87,2355,2353.13,2351.28,2349.4,
                                2347.55,2345.68,2343.81,2341.96,2340.09,2338.22,
                                2336.38,2334.51,2332.66,2330.8,2328.93,2327.08,
                                2325.22,2323.37,2321.51,2319.67,2317.8,2315.94,
                                2314.1,2312.23,2310.39,2308.53,2306.69,2304.83,
                                2302.99,2301.13,2299.3,2297.44,2295.6,2293.74,
                                2291.91,2290.07,2288.21,2286.38,2284.52,2282.69,
                                2280.83,2279,2277.17,2275.32,2273.49,2271.63,
                                2269.8,2267.97,2266.12])
|
CollectivaT-dev/ckan | ckan/tests/logic/action/test_get.py | <reponame>CollectivaT-dev/ckan
# encoding: utf-8
import datetime
import re
import copy
import pytest
from six import text_type
from six.moves import xrange
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.plugins as p
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
from ckan import __version__
from ckan.lib.search.common import SearchError
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageShow(object):
    """Tests for the ``package_show`` action."""
    def test_package_show(self):
        # simple dataset, simple checks
        dataset1 = factories.Dataset()
        dataset2 = helpers.call_action("package_show", id=dataset1["id"])
        assert dataset2["name"] == dataset1["name"]
        missing_keys = set(("title", "groups")) - set(dataset2.keys())
        assert not missing_keys, missing_keys
    def test_package_show_with_full_dataset(self):
        # a full dataset
        org = factories.Organization()
        group = factories.Group()
        dataset1 = factories.Dataset(
            resources=[
                {
                    "url": "http://example.com/image.png",
                    "format": "png",
                    "name": "Image 1",
                }
            ],
            tags=[{u"name": u"science"}],
            extras=[{u"key": u"subject", u"value": u"science"}],
            groups=[{u"id": group["id"]}],
            owner_org=org["id"],
        )
        dataset2 = helpers.call_action("package_show", id=dataset1["id"])
        # checking the whole dataset is a bit brittle as a test, but it
        # documents what the package_dict is clearly and tracks how it changes
        # as CKAN changes over time.
        # fix values which change every time you run this test
        def replace_uuid(dict_, key):
            # Replace a random UUID value with a stable placeholder.
            assert key in dict_
            dict_[key] = u"<SOME-UUID>"
        def replace_datetime(dict_, key):
            # Replace a "now" timestamp with a stable placeholder.
            assert key in dict_
            dict_[key] = u"2019-05-24T15:52:30.123456"
        def replace_number_suffix(dict_, key):
            # e.g. "Test Dataset 23" -> "Test Dataset num"
            assert key in dict_
            dict_[key] = re.sub(r"\d+$", "num", dict_[key])
        replace_uuid(dataset2, "id")
        replace_uuid(dataset2, "creator_user_id")
        replace_uuid(dataset2, "owner_org")
        replace_number_suffix(dataset2, "name")
        replace_datetime(dataset2, "metadata_created")
        replace_datetime(dataset2, "metadata_modified")
        replace_datetime(dataset2['resources'][0], "metadata_modified")
        replace_uuid(dataset2["groups"][0], "id")
        replace_number_suffix(dataset2["groups"][0], "name")
        replace_number_suffix(dataset2["groups"][0], "title")
        replace_number_suffix(dataset2["groups"][0], "display_name")
        replace_uuid(dataset2["organization"], "id")
        replace_number_suffix(dataset2["organization"], "name")
        replace_number_suffix(dataset2["organization"], "title")
        replace_datetime(dataset2["organization"], "created")
        replace_uuid(dataset2["resources"][0], "id")
        replace_uuid(dataset2["resources"][0], "package_id")
        replace_number_suffix(dataset2["resources"][0], "name")
        replace_datetime(dataset2["resources"][0], "created")
        replace_uuid(dataset2["tags"][0], "id")
        assert dataset2 == {
            u"author": None,
            u"author_email": None,
            u"creator_user_id": u"<SOME-UUID>",
            u"extras": [{u"key": u"subject", u"value": u"science"}],
            u"groups": [
                {
                    u"description": u"A test description for this test group.",
                    u"display_name": u"Test Group num",
                    u"id": u"<SOME-UUID>",
                    u"image_display_url": u"",
                    u"name": u"test_group_num",
                    u"title": u"Test Group num",
                }
            ],
            u"id": u"<SOME-UUID>",
            u"isopen": False,
            u"license_id": None,
            u"license_title": None,
            u"maintainer": None,
            u"maintainer_email": None,
            u"metadata_created": u"2019-05-24T15:52:30.123456",
            u"metadata_modified": u"2019-05-24T15:52:30.123456",
            u"name": u"test_dataset_num",
            u"notes": u"Just another test dataset.",
            u"num_resources": 1,
            u"num_tags": 1,
            u"organization": {
                u"approval_status": u"approved",
                u"created": u"2019-05-24T15:52:30.123456",
                u"description": u"Just another test organization.",
                u"id": u"<SOME-UUID>",
                u"image_url": u"http://placekitten.com/g/200/100",
                u"is_organization": True,
                u"name": u"test_org_num",
                u"state": u"active",
                u"title": u"Test Organization",
                u"type": u"organization",
            },
            u"owner_org": u"<SOME-UUID>",
            u"private": False,
            u"relationships_as_object": [],
            u"relationships_as_subject": [],
            u"resources": [
                {
                    u"cache_last_updated": None,
                    u"cache_url": None,
                    u"created": u"2019-05-24T15:52:30.123456",
                    u"description": u"",
                    u"format": u"PNG",
                    u"hash": u"",
                    u"id": u"<SOME-UUID>",
                    u"last_modified": None,
                    u"metadata_modified": u"2019-05-24T15:52:30.123456",
                    u"mimetype": None,
                    u"mimetype_inner": None,
                    u"name": u"Image num",
                    u"package_id": u"<SOME-UUID>",
                    u"position": 0,
                    u"resource_type": None,
                    u"size": None,
                    u"state": u"active",
                    u"url": u"http://example.com/image.png",
                    u"url_type": None,
                }
            ],
            u"state": u"active",
            u"tags": [
                {
                    u"display_name": u"science",
                    u"id": u"<SOME-UUID>",
                    u"name": u"science",
                    u"state": u"active",
                    u"vocabulary_id": None,
                }
            ],
            u"title": u"Test Dataset",
            u"type": u"dataset",
            u"url": None,
            u"version": None,
        }
    def test_package_show_with_custom_schema(self):
        # A schema passed in the context should be applied to the output.
        dataset1 = factories.Dataset()
        from ckan.logic.schema import default_show_package_schema
        custom_schema = default_show_package_schema()
        def foo(key, data, errors, context):
            data[key] = "foo"
        custom_schema["new_field"] = [foo]
        dataset2 = helpers.call_action(
            "package_show",
            id=dataset1["id"],
            context={"schema": custom_schema},
        )
        assert dataset2["new_field"] == "foo"
    def test_package_show_with_custom_schema_return_default_schema(self):
        # use_default_schema=True must override a schema given in the context.
        dataset1 = factories.Dataset()
        from ckan.logic.schema import default_show_package_schema
        custom_schema = default_show_package_schema()
        def foo(key, data, errors, context):
            data[key] = "foo"
        custom_schema["new_field"] = [foo]
        dataset2 = helpers.call_action(
            "package_show",
            id=dataset1["id"],
            use_default_schema=True,
            context={"schema": custom_schema},
        )
        assert "new_field" not in dataset2
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupList(object):
    """Tests for the ``group_list`` action (filtering, sorting, paging,
    field selection and configured limits)."""
    def test_group_list(self):
        group1 = factories.Group()
        group2 = factories.Group()
        group_list = helpers.call_action("group_list")
        assert sorted(group_list) == sorted(
            [g["name"] for g in [group1, group2]]
        )
    def test_group_list_in_presence_of_organizations(self):
        """
        Getting the group_list should only return groups of type 'group' (not
        organizations).
        """
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Organization()
        factories.Organization()
        group_list = helpers.call_action("group_list")
        assert sorted(group_list) == sorted(
            [g["name"] for g in [group1, group2]]
        )
    def test_group_list_in_presence_of_custom_group_types(self):
        """Getting the group_list shouldn't return custom group types."""
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Group(type="custom")
        group_list = helpers.call_action("group_list")
        assert sorted(group_list) == sorted(
            [g["name"] for g in [group1, group2]]
        )
    def test_group_list_return_custom_group(self):
        """
        Getting the group_list with a type defined should only return
        groups of that type.
        """
        group1 = factories.Group(type="custom")
        group2 = factories.Group(type="custom")
        factories.Group()
        factories.Group()
        group_list = helpers.call_action("group_list", type="custom")
        assert sorted(group_list) == sorted(
            [g["name"] for g in [group1, group2]]
        )
    def test_group_list_sort_by_package_count(self):
        factories.Group(name="aa")
        factories.Group(name="bb")
        factories.Dataset(groups=[{"name": "aa"}, {"name": "bb"}])
        factories.Dataset(groups=[{"name": "bb"}])
        group_list = helpers.call_action("group_list", sort="package_count")
        assert sorted(group_list) == sorted(["bb", "aa"])
    def test_group_list_sort_by_package_count_ascending(self):
        factories.Group(name="aa")
        factories.Group(name="bb")
        factories.Dataset(groups=[{"name": "aa"}, {"name": "bb"}])
        factories.Dataset(groups=[{"name": "aa"}])
        group_list = helpers.call_action(
            "group_list", sort="package_count asc"
        )
        assert group_list == ["bb", "aa"]
    def test_group_list_sort_default(self):
        # Default sort is by title, not name.
        factories.Group(name="zz", title="aa")
        factories.Group(name="yy", title="bb")
        group_list = helpers.call_action(
            "group_list"
        )
        assert group_list == ['zz', 'yy']
    @pytest.mark.ckan_config("ckan.default_group_sort", "name")
    def test_group_list_sort_from_config(self):
        factories.Group(name="zz", title="aa")
        factories.Group(name="yy", title="bb")
        group_list = helpers.call_action(
            "group_list"
        )
        assert group_list == ['yy', 'zz']
    def eq_expected(self, expected_dict, result_dict):
        # Helper: assert result_dict has exactly the keys/values of
        # expected_dict, with informative failure messages.
        superfluous_keys = set(result_dict) - set(expected_dict)
        assert not superfluous_keys, "Did not expect key: %s" % " ".join(
            ("%s=%s" % (k, result_dict[k]) for k in superfluous_keys)
        )
        for key in expected_dict:
            assert expected_dict[key] == result_dict[key], (
                "%s=%s should be %s"
                % (key, result_dict[key], expected_dict[key])
            )
    def test_group_list_all_fields(self):
        group = factories.Group()
        group_list = helpers.call_action("group_list", all_fields=True)
        expected_group = dict(group)
        for field in ("users", "tags", "extras", "groups"):
            del expected_group[field]
        assert group_list[0] == expected_group
        assert "extras" not in group_list[0]
        assert "tags" not in group_list[0]
        assert "groups" not in group_list[0]
        assert "users" not in group_list[0]
        assert "datasets" not in group_list[0]
    def _create_bulk_groups(self, name, count):
        # Helper: insert `count` groups directly via the model layer
        # (faster than going through the action API).
        from ckan import model
        groups = [
            model.Group(name="{}_{}".format(name, i)) for i in range(count)
        ]
        model.Session.add_all(groups)
        model.repo.commit_and_remove()
    def test_limit_default(self):
        self._create_bulk_groups("group_default", 1010)
        results = helpers.call_action("group_list")
        assert len(results) == 1000  # i.e. default value
    @pytest.mark.ckan_config("ckan.group_and_organization_list_max", "5")
    def test_limit_configured(self):
        self._create_bulk_groups("group_default", 7)
        results = helpers.call_action("group_list")
        assert len(results) == 5  # i.e. configured limit
    def test_all_fields_limit_default(self):
        self._create_bulk_groups("org_all_fields_default", 30)
        results = helpers.call_action("group_list", all_fields=True)
        assert len(results) == 25  # i.e. default value
    @pytest.mark.ckan_config(
        "ckan.group_and_organization_list_all_fields_max", "5"
    )
    def test_all_fields_limit_configured(self):
        self._create_bulk_groups("org_all_fields_default", 30)
        results = helpers.call_action("group_list", all_fields=True)
        assert len(results) == 5  # i.e. configured limit
    def test_group_list_extras_returned(self):
        group = factories.Group(extras=[{"key": "key1", "value": "val1"}])
        group_list = helpers.call_action(
            "group_list", all_fields=True, include_extras=True
        )
        assert group_list[0]["extras"] == group["extras"]
        assert group_list[0]["extras"][0]["key"] == "key1"
    def test_group_list_users_returned(self):
        user = factories.User()
        group = factories.Group(
            users=[{"name": user["name"], "capacity": "admin"}]
        )
        group_list = helpers.call_action(
            "group_list", all_fields=True, include_users=True
        )
        assert group_list[0]["users"] == group["users"]
        assert group_list[0]["users"][0]["name"] == group["users"][0]["name"]
    # NB there is no test_group_list_tags_returned because tags are not in the
    # group_create schema (yet)
    def test_group_list_groups_returned(self):
        parent_group = factories.Group(tags=[{"name": "river"}])
        child_group = factories.Group(
            groups=[{"name": parent_group["name"]}], tags=[{"name": "river"}]
        )
        group_list = helpers.call_action(
            "group_list", all_fields=True, include_groups=True
        )
        # The list order is not guaranteed, so work out which entry is
        # the child group.
        if group_list[0]["name"] == child_group["name"]:
            child_group_returned, parent_group_returned = group_list
        else:
            child_group_returned, parent_group_returned = group_list[::-1]
        expected_parent_group = dict(parent_group)
        assert [g["name"] for g in child_group_returned["groups"]] == [
            expected_parent_group["name"]
        ]
    def test_group_list_limit(self):
        group1 = factories.Group()
        group2 = factories.Group()
        group3 = factories.Group()
        group_names = [g["name"] for g in [group1, group2, group3]]
        group_list = helpers.call_action("group_list", limit=1)
        assert len(group_list) == 1
        assert group_list[0] == sorted(group_names)[0]
    def test_group_list_offset(self):
        group1 = factories.Group()
        group2 = factories.Group()
        group3 = factories.Group()
        group_names = [g["name"] for g in [group1, group2, group3]]
        group_list = helpers.call_action("group_list", offset=2)
        assert len(group_list) == 1
        # group list returns sorted result. This is not necessarily
        # order of creation
        assert group_list[0] == sorted(group_names)[2]
    def test_group_list_limit_and_offset(self):
        group1 = factories.Group()
        group2 = factories.Group()
        group3 = factories.Group()
        group_list = helpers.call_action("group_list", offset=1, limit=1)
        assert len(group_list) == 1
        assert group_list[0] == group2["name"]
    def test_group_list_wrong_limit(self):
        with pytest.raises(logic.ValidationError):
            helpers.call_action("group_list", limit="a")
    def test_group_list_wrong_offset(self):
        with pytest.raises(logic.ValidationError):
            helpers.call_action("group_list", offset="-2")
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupShow(object):
    """Tests for the ``group_show`` action."""
    def test_group_show(self):
        group = factories.Group(user=factories.User())
        group_dict = helpers.call_action(
            "group_show", id=group["id"], include_datasets=True
        )
        group_dict.pop("packages", None)
        assert group_dict == group
    def test_group_show_error_not_found(self):
        with pytest.raises(logic.NotFound):
            helpers.call_action("group_show", id="does_not_exist")
    def test_group_show_error_for_organization(self):
        # Organizations must not be retrievable via group_show.
        org = factories.Organization()
        with pytest.raises(logic.NotFound):
            helpers.call_action("group_show", id=org["id"])
    def test_group_show_packages_returned(self):
        user_name = helpers.call_action("get_site_user")["name"]
        group = factories.Group(user=factories.User())
        datasets = [
            {"name": "dataset_1", "groups": [{"name": group["name"]}]},
            {"name": "dataset_2", "groups": [{"name": group["name"]}]},
        ]
        for dataset in datasets:
            helpers.call_action(
                "package_create", context={"user": user_name}, **dataset
            )
        group_dict = helpers.call_action(
            "group_show", id=group["id"], include_datasets=True
        )
        assert len(group_dict["packages"]) == 2
        assert group_dict["package_count"] == 2
    def test_group_show_packages_returned_for_view(self):
        # Same as above but through the "for_view" code path.
        user_name = helpers.call_action("get_site_user")["name"]
        group = factories.Group(user=factories.User())
        datasets = [
            {"name": "dataset_1", "groups": [{"name": group["name"]}]},
            {"name": "dataset_2", "groups": [{"name": group["name"]}]},
        ]
        for dataset in datasets:
            helpers.call_action(
                "package_create", context={"user": user_name}, **dataset
            )
        group_dict = helpers.call_action(
            "group_show",
            id=group["id"],
            include_datasets=True,
            context={"for_view": True},
        )
        assert len(group_dict["packages"]) == 2
        assert group_dict["package_count"] == 2
    def test_group_show_no_packages_returned(self):
        # package_count is still reported even when datasets are excluded.
        user_name = helpers.call_action("get_site_user")["name"]
        group = factories.Group(user=factories.User())
        datasets = [
            {"name": "dataset_1", "groups": [{"name": group["name"]}]},
            {"name": "dataset_2", "groups": [{"name": group["name"]}]},
        ]
        for dataset in datasets:
            helpers.call_action(
                "package_create", context={"user": user_name}, **dataset
            )
        group_dict = helpers.call_action(
            "group_show", id=group["id"], include_datasets=False
        )
        assert "packages" not in group_dict
        assert group_dict["package_count"] == 2
    def test_group_show_does_not_show_private_datasets(self):
        """group_show() should never show private datasets.
        If a dataset is a private member of an organization and also happens to
        be a member of a group, group_show() should not return the dataset as
        part of the group dict, even if the user calling group_show() is a
        member or admin of the group or the organization or is a sysadmin.
        """
        org_member = factories.User()
        org = factories.Organization(user=org_member)
        private_dataset = factories.Dataset(
            user=org_member, owner_org=org["name"], private=True
        )
        group = factories.Group()
        # Add the private dataset to the group.
        helpers.call_action(
            "member_create",
            id=group["id"],
            object=private_dataset["id"],
            object_type="package",
            capacity="public",
        )
        # Create a member user and an admin user of the group.
        group_member = factories.User()
        helpers.call_action(
            "member_create",
            id=group["id"],
            object=group_member["id"],
            object_type="user",
            capacity="member",
        )
        group_admin = factories.User()
        helpers.call_action(
            "member_create",
            id=group["id"],
            object=group_admin["id"],
            object_type="user",
            capacity="admin",
        )
        # Create a user who isn't a member of any group or organization.
        non_member = factories.User()
        sysadmin = factories.Sysadmin()
        # None of the users should see the dataset when they call group_show().
        for user in (
            org_member,
            group_member,
            group_admin,
            non_member,
            sysadmin,
            None,
        ):
            if user is None:
                context = None  # No user logged-in.
            else:
                context = {"user": user["name"]}
            group = helpers.call_action(
                "group_show",
                id=group["id"],
                include_datasets=True,
                context=context,
            )
            assert private_dataset["id"] not in [
                dataset["id"] for dataset in group["packages"]
            ], "group_show() should never show private datasets"
    @pytest.mark.ckan_config("ckan.search.rows_max", "5")
    def test_package_limit_configured(self):
        group = factories.Group()
        for i in range(7):
            factories.Dataset(groups=[{"id": group["id"]}])
        id = group["id"]
        results = helpers.call_action("group_show", id=id, include_datasets=1)
        assert len(results["packages"]) == 5  # i.e. ckan.search.rows_max
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestOrganizationList(object):
    """Tests for the ``organization_list`` action."""
    def test_organization_list(self):
        org1 = factories.Organization()
        org2 = factories.Organization()
        org_list = helpers.call_action("organization_list")
        assert sorted(org_list) == sorted([g["name"] for g in [org1, org2]])
    def test_organization_list_in_presence_of_groups(self):
        """
        Getting the organization_list only returns organization group
        types.
        """
        org1 = factories.Organization()
        org2 = factories.Organization()
        factories.Group()
        factories.Group()
        org_list = helpers.call_action("organization_list")
        assert sorted(org_list) == sorted([g["name"] for g in [org1, org2]])
    def test_organization_list_in_presence_of_custom_group_types(self):
        """
        Getting the organization_list only returns organization group
        types.
        """
        org1 = factories.Organization()
        org2 = factories.Organization()
        factories.Group(type="custom")
        factories.Group(type="custom")
        org_list = helpers.call_action("organization_list")
        assert sorted(org_list) == sorted([g["name"] for g in [org1, org2]])
    def test_organization_list_return_custom_organization_type(self):
        """
        Getting the org_list with a type defined should only return
        orgs of that type.
        """
        org1 = factories.Organization()
        org2 = factories.Organization(type="custom_org")
        factories.Group(type="custom")
        factories.Group(type="custom")
        org_list = helpers.call_action("organization_list", type="custom_org")
        assert sorted(org_list) == sorted(
            [g["name"] for g in [org2]]
        ), "{}".format(org_list)
    def _create_bulk_orgs(self, name, count):
        # Helper: insert `count` organizations directly via the model layer
        # (faster than going through the action API).
        from ckan import model
        orgs = [
            model.Group(
                name="{}_{}".format(name, i),
                is_organization=True,
                type="organization",
            )
            for i in range(count)
        ]
        model.Session.add_all(orgs)
        model.repo.commit_and_remove()
    def test_limit_default(self):
        self._create_bulk_orgs("org_default", 1010)
        results = helpers.call_action("organization_list")
        assert len(results) == 1000  # i.e. default value
    @pytest.mark.ckan_config("ckan.group_and_organization_list_max", "5")
    def test_limit_configured(self):
        self._create_bulk_orgs("org_default", 7)
        results = helpers.call_action("organization_list")
        assert len(results) == 5  # i.e. configured limit
    def test_all_fields_limit_default(self):
        self._create_bulk_orgs("org_all_fields_default", 30)
        results = helpers.call_action("organization_list", all_fields=True)
        assert len(results) == 25  # i.e. default value
    @pytest.mark.ckan_config(
        "ckan.group_and_organization_list_all_fields_max", "5"
    )
    def test_all_fields_limit_configured(self):
        self._create_bulk_orgs("org_all_fields_default", 30)
        results = helpers.call_action("organization_list", all_fields=True)
        assert len(results) == 5  # i.e. configured limit
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestOrganizationShow(object):
    """Tests for the ``organization_show`` action."""
    def test_organization_show(self):
        org = factories.Organization()
        org_dict = helpers.call_action(
            "organization_show", id=org["id"], include_datasets=True
        )
        org_dict.pop("packages", None)
        assert org_dict == org
    def test_organization_show_error_not_found(self):
        with pytest.raises(logic.NotFound):
            helpers.call_action("organization_show", id="does_not_exist")
    def test_organization_show_error_for_group(self):
        # Plain groups must not be retrievable via organization_show.
        group = factories.Group()
        with pytest.raises(logic.NotFound):
            helpers.call_action("organization_show", id=group["id"])
    def test_organization_show_packages_returned(self):
        user_name = helpers.call_action("get_site_user")["name"]
        org = factories.Organization()
        datasets = [
            {"name": "dataset_1", "owner_org": org["name"]},
            {"name": "dataset_2", "owner_org": org["name"]},
        ]
        for dataset in datasets:
            helpers.call_action(
                "package_create", context={"user": user_name}, **dataset
            )
        org_dict = helpers.call_action(
            "organization_show", id=org["id"], include_datasets=True
        )
        assert len(org_dict["packages"]) == 2
        assert org_dict["package_count"] == 2
    def test_organization_show_private_packages_not_returned(self):
        user_name = helpers.call_action("get_site_user")["name"]
        org = factories.Organization()
        datasets = [
            {"name": "dataset_1", "owner_org": org["name"]},
            {"name": "dataset_2", "owner_org": org["name"], "private": True},
        ]
        for dataset in datasets:
            helpers.call_action(
                "package_create", context={"user": user_name}, **dataset
            )
        org_dict = helpers.call_action(
            "organization_show", id=org["id"], include_datasets=True
        )
        assert len(org_dict["packages"]) == 1
        assert org_dict["packages"][0]["name"] == "dataset_1"
        assert org_dict["package_count"] == 1
    @pytest.mark.ckan_config("ckan.search.rows_max", "5")
    def test_package_limit_configured(self):
        org = factories.Organization()
        for i in range(7):
            factories.Dataset(owner_org=org["id"])
        id = org["id"]
        results = helpers.call_action(
            "organization_show", id=id, include_datasets=1
        )
        assert len(results["packages"]) == 5  # i.e. ckan.search.rows_max
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestUserList(object):
    """Tests for the ``user_list`` action."""
    def test_user_list_default_values(self):
        user = factories.User()
        got_users = helpers.call_action("user_list")
        # There is one default user
        assert len(got_users) == 2
        got_user = got_users[0]
        assert got_user["id"] == user["id"]
        assert got_user["name"] == user["name"]
        assert got_user["fullname"] == user["fullname"]
        assert got_user["display_name"] == user["display_name"]
        assert got_user["created"] == user["created"]
        assert got_user["about"] == user["about"]
        assert got_user["sysadmin"] == user["sysadmin"]
        assert got_user["number_created_packages"] == 0
        # Sensitive / heavy fields must never be exposed by default.
        assert "password" not in got_user
        assert "reset_key" not in got_user
        assert "apikey" not in got_user
        assert "email" not in got_user
        assert "datasets" not in got_user
    def test_user_list_edits(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset["title"] = "Edited title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        got_users = helpers.call_action("user_list")
        # There is one default user
        assert len(got_users) == 2
        got_user = got_users[0]
        assert got_user["number_created_packages"] == 1
    def test_user_list_excludes_deleted_users(self):
        user = factories.User()
        factories.User(state="deleted")
        got_users = helpers.call_action("user_list")
        # There is one default user
        assert len(got_users) == 2
        assert got_users[0]["name"] == user["name"]
    def test_user_list_not_all_fields(self):
        # all_fields=False returns just the user names.
        user = factories.User()
        got_users = helpers.call_action("user_list", all_fields=False)
        # There is one default user
        assert len(got_users) == 2
        got_user = got_users[0]
        assert got_user == user["name"]
    def test_user_list_filtered_by_email(self):
        user_a = factories.User(email="<EMAIL>")
        factories.User(email="<EMAIL>")
        got_users = helpers.call_action(
            "user_list", email="<EMAIL>", all_fields=False
        )
        assert len(got_users) == 1
        got_user = got_users[0]
        assert got_user == user_a["name"]
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestUserShow(object):
    """Functional tests for the user_show action.

    Focus is on which fields are exposed depending on who is asking:
    anonymous callers, the user themselves, or a sysadmin.
    """

    def test_user_show_default_values(self):
        user = factories.User()
        got_user = helpers.call_action("user_show", id=user["id"])
        assert got_user["id"] == user["id"]
        assert got_user["name"] == user["name"]
        assert got_user["fullname"] == user["fullname"]
        assert got_user["display_name"] == user["display_name"]
        assert got_user["created"] == user["created"]
        assert got_user["about"] == user["about"]
        assert got_user["sysadmin"] == user["sysadmin"]
        assert got_user["number_created_packages"] == 0
        # Sensitive / private fields must be hidden by default.
        assert "password" not in got_user
        assert "reset_key" not in got_user
        assert "apikey" not in got_user
        assert "email" not in got_user
        assert "datasets" not in got_user
        assert "password_hash" not in got_user

    def test_user_show_keep_email(self):
        # The keep_email context flag exposes the email but nothing else.
        user = factories.User()
        got_user = helpers.call_action(
            "user_show", context={"keep_email": True}, id=user["id"]
        )
        assert got_user["email"] == user["email"]
        assert "apikey" not in got_user
        assert "password" not in got_user
        assert "reset_key" not in got_user

    def test_user_show_keep_apikey(self):
        # The keep_apikey context flag exposes the API key but nothing else.
        user = factories.User()
        got_user = helpers.call_action(
            "user_show", context={"keep_apikey": True}, id=user["id"]
        )
        assert "email" not in got_user
        assert got_user["apikey"] == user["apikey"]
        assert "password" not in got_user
        assert "reset_key" not in got_user

    def test_user_show_normal_user_no_password_hash(self):
        # include_password_hash has no effect for a non-sysadmin caller.
        user = factories.User()
        got_user = helpers.call_action(
            "user_show", id=user["id"], include_password_hash=True
        )
        assert "password_hash" not in got_user

    def test_user_show_for_myself(self):
        # A user viewing their own record sees email and apikey.
        user = factories.User()
        got_user = helpers.call_action(
            "user_show", context={"user": user["name"]}, id=user["id"]
        )
        assert got_user["email"] == user["email"]
        assert got_user["apikey"] == user["apikey"]
        assert "password" not in got_user
        assert "reset_key" not in got_user

    def test_user_show_sysadmin_values(self):
        # A sysadmin viewing another user also sees email and apikey.
        user = factories.User()
        sysadmin = factories.User(sysadmin=True)
        got_user = helpers.call_action(
            "user_show", context={"user": sysadmin["name"]}, id=user["id"]
        )
        assert got_user["email"] == user["email"]
        assert got_user["apikey"] == user["apikey"]
        assert "password" not in got_user
        assert "reset_key" not in got_user

    def test_user_show_sysadmin_password_hash(self):
        # Only a sysadmin may request the stored password hash.
        user = factories.User(password="<PASSWORD>")
        sysadmin = factories.User(sysadmin=True)
        got_user = helpers.call_action(
            "user_show",
            context={"user": sysadmin["name"]},
            id=user["id"],
            include_password_hash=True,
        )
        assert got_user["email"] == user["email"]
        assert got_user["apikey"] == user["apikey"]
        assert "password_hash" in got_user
        # The plain-text password is never exposed, even to sysadmins.
        assert "password" not in got_user
        assert "reset_key" not in got_user

    def test_user_show_include_datasets(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        got_user = helpers.call_action(
            "user_show", include_datasets=True, id=user["id"]
        )
        assert len(got_user["datasets"]) == 1
        assert got_user["datasets"][0]["name"] == dataset["name"]

    def test_user_show_include_datasets_excludes_draft_and_private(self):
        # Anonymous view: only the active public dataset is listed.
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state="deleted")
        factories.Dataset(user=user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        got_user = helpers.call_action(
            "user_show", include_datasets=True, id=user["id"]
        )
        assert len(got_user["datasets"]) == 1
        assert got_user["datasets"][0]["name"] == dataset["name"]
        assert got_user["number_created_packages"] == 1

    def test_user_show_include_datasets_includes_draft_myself(self):
        # a user viewing his own user should see the draft and private datasets
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        dataset_deleted = factories.Dataset(user=user, state="deleted")
        factories.Dataset(user=user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        got_user = helpers.call_action(
            "user_show",
            context={"user": user["name"]},
            include_datasets=True,
            id=user["id"],
        )
        # Active + draft + private are listed; the deleted one is not.
        assert len(got_user["datasets"]) == 3
        datasets_got = set([user_["name"] for user_ in got_user["datasets"]])
        assert dataset_deleted["name"] not in datasets_got
        assert got_user["number_created_packages"] == 3

    def test_user_show_include_datasets_includes_draft_sysadmin(self):
        # sysadmin should see the draft and private datasets
        user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        dataset_deleted = factories.Dataset(user=user, state="deleted")
        factories.Dataset(user=user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        got_user = helpers.call_action(
            "user_show",
            context={"user": sysadmin["name"]},
            include_datasets=True,
            id=user["id"],
        )
        # Active + draft + private are listed; the deleted one is not.
        assert len(got_user["datasets"]) == 3
        datasets_got = set([user_["name"] for user_ in got_user["datasets"]])
        assert dataset_deleted["name"] not in datasets_got
        assert got_user["number_created_packages"] == 3
@pytest.mark.usefixtures("clean_db", "clean_index", "with_request_context")
class TestCurrentPackageList(object):
    """Functional tests for the current_package_list_with_resources action."""

    def test_current_package_list(self):
        """
        Test current_package_list_with_resources with no parameters
        """
        user = factories.User()
        # The created datasets are only needed as fixtures; no bindings kept.
        factories.Dataset(user=user)
        factories.Dataset(user=user)
        current_package_list = helpers.call_action(
            "current_package_list_with_resources"
        )
        assert len(current_package_list) == 2

    def test_current_package_list_limit_param(self):
        """
        Test current_package_list_with_resources with limit parameter
        """
        user = factories.User()
        factories.Dataset(user=user)
        dataset2 = factories.Dataset(user=user)
        current_package_list = helpers.call_action(
            "current_package_list_with_resources", limit=1
        )
        assert len(current_package_list) == 1
        # Results are ordered most-recently-modified first, so limiting to
        # one entry returns the later dataset.
        assert current_package_list[0]["name"] == dataset2["name"]

    def test_current_package_list_offset_param(self):
        """
        Test current_package_list_with_resources with offset parameter
        """
        user = factories.User()
        dataset1 = factories.Dataset(user=user)
        factories.Dataset(user=user)
        current_package_list = helpers.call_action(
            "current_package_list_with_resources", offset=1
        )
        assert len(current_package_list) == 1
        # Skipping the newest dataset leaves the older one.
        assert current_package_list[0]["name"] == dataset1["name"]

    def test_current_package_list_private_datasets_anonoymous_user(self):
        """
        Test current_package_list_with_resources with an anonymous user and
        a private dataset
        """
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, owner_org=org["name"], private=True)
        factories.Dataset(user=user)
        current_package_list = helpers.call_action(
            "current_package_list_with_resources", context={}
        )
        # The private dataset must not be visible to an anonymous caller.
        assert len(current_package_list) == 1

    def test_current_package_list_private_datasets_sysadmin_user(self):
        """
        Test current_package_list_with_resources with a sysadmin user and a
        private dataset
        """
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, owner_org=org["name"], private=True)
        factories.Dataset(user=user)
        sysadmin = factories.Sysadmin()
        current_package_list = helpers.call_action(
            "current_package_list_with_resources",
            context={"user": sysadmin["name"]},
        )
        # A sysadmin sees both the public and the private dataset.
        assert len(current_package_list) == 2
@pytest.mark.usefixtures("clean_db", "clean_index", "with_request_context")
class TestPackageAutocomplete(object):
    """Functional tests for the package_autocomplete action."""

    def test_package_autocomplete_does_not_return_private_datasets(self):
        user = factories.User()
        org = factories.Organization(user=user)
        dataset1 = factories.Dataset(
            user=user, owner_org=org["name"], title="Some public stuff"
        )
        dataset2 = factories.Dataset(
            user=user,
            owner_org=org["name"],
            private=True,
            title="Some private stuff",
        )
        # No user in the context -> anonymous call: only the public
        # dataset may match.
        package_list = helpers.call_action(
            "package_autocomplete", context={"ignore_auth": False}, q="some"
        )
        assert len(package_list) == 1

    def test_package_autocomplete_does_return_private_datasets_from_my_org(
        self,
    ):
        user = factories.User()
        org = factories.Organization(
            users=[{"name": user["name"], "capacity": "member"}]
        )
        factories.Dataset(
            user=user, owner_org=org["id"], title="Some public stuff"
        )
        factories.Dataset(
            user=user,
            owner_org=org["id"],
            private=True,
            title="Some private stuff",
        )
        # A member of the owning org sees both public and private datasets.
        package_list = helpers.call_action(
            "package_autocomplete",
            context={"user": user["name"], "ignore_auth": False},
            q="some",
        )
        assert len(package_list) == 2

    def test_package_autocomplete_works_for_the_middle_part_of_title(self):
        factories.Dataset(title="Some public stuff")
        factories.Dataset(title="Some random stuff")
        # "bli" is an infix of "puBLIc", so only the first title matches.
        package_list = helpers.call_action("package_autocomplete", q="bli")
        assert len(package_list) == 1
        # "tuf" is an infix of "sTUFf", which appears in both titles.
        package_list = helpers.call_action("package_autocomplete", q="tuf")
        assert len(package_list) == 2
@pytest.mark.usefixtures("clean_db", "clean_index", "with_request_context")
class TestPackageSearch(object):
    """Functional tests for the package_search action (backed by the
    search index), covering the query/facet/sort parameters and the
    visibility rules for private and draft datasets."""

    def test_search(self):
        factories.Dataset(title="Rivers")
        factories.Dataset(title="Lakes")  # decoy
        search_result = helpers.call_action("package_search", q="rivers")
        assert search_result["results"][0]["title"] == "Rivers"
        assert search_result["count"] == 1

    def test_search_fl(self):
        # The "fl" parameter restricts which fields each result contains;
        # it is accepted both as a list and as a comma-separated string.
        d1 = factories.Dataset(title="Rivers", name="test_ri")
        d2 = factories.Dataset(title="Lakes")
        search_result = helpers.call_action(
            "package_search", q="rivers", fl=["title", "name"]
        )
        assert search_result["results"] == [
            {"title": "Rivers", "name": "test_ri"}
        ]
        search_result = helpers.call_action(
            "package_search", q="rivers", fl="title,name"
        )
        assert search_result["results"] == [
            {"title": "Rivers", "name": "test_ri"}
        ]
        search_result = helpers.call_action(
            "package_search", q="rivers", fl=["id"]
        )
        assert search_result["results"] == [{"id": d1["id"]}]

    def test_search_all(self):
        factories.Dataset(title="Rivers")
        factories.Dataset(title="Lakes")
        search_result = helpers.call_action("package_search")  # no q
        assert search_result["count"] == 2

    def test_bad_action_parameter(self):
        # Unknown action parameters are rejected with a SearchError.
        with pytest.raises(SearchError):
            helpers.call_action("package_search", weird_param=1)

    def test_bad_solr_parameter(self):
        with pytest.raises(SearchError):
            helpers.call_action("package_search", sort="metadata_modified")
        # SOLR doesn't like that we didn't specify 'asc' or 'desc'
        # SOLR error is 'Missing sort order' or 'Missing_sort_order',
        # depending on the solr version.

    def _create_bulk_datasets(self, name, count):
        # Helper: insert `count` bare packages named "<name>_<i>" straight
        # through the model layer, bypassing the action API for speed.
        from ckan import model

        pkgs = [
            model.Package(name="{}_{}".format(name, i)) for i in range(count)
        ]
        model.Session.add_all(pkgs)
        model.repo.commit_and_remove()

    def test_rows_returned_default(self):
        self._create_bulk_datasets("rows_default", 11)
        results = logic.get_action("package_search")({}, {})
        assert len(results["results"]) == 10  # i.e. 'rows' default value

    @pytest.mark.ckan_config("ckan.search.rows_max", "12")
    def test_rows_returned_limited(self):
        # A requested "rows" larger than ckan.search.rows_max is capped.
        self._create_bulk_datasets("rows_limited", 14)
        results = logic.get_action("package_search")({}, {"rows": "15"})
        assert len(results["results"]) == 12  # i.e. ckan.search.rows_max

    def test_facets(self):
        org = factories.Organization(name="test-org-facet", title="Test Org")
        factories.Dataset(owner_org=org["id"])
        factories.Dataset(owner_org=org["id"])
        data_dict = {"facet.field": ["organization"]}
        search_result = helpers.call_action("package_search", **data_dict)
        assert search_result["count"] == 2
        assert search_result["search_facets"] == {
            "organization": {
                "items": [
                    {
                        "count": 2,
                        "display_name": u"Test Org",
                        "name": "test-org-facet",
                    }
                ],
                "title": "organization",
            }
        }

    def test_facet_limit(self):
        group1 = factories.Group(name="test-group-fl1", title="Test Group 1")
        group2 = factories.Group(name="test-group-fl2", title="Test Group 2")
        factories.Dataset(
            groups=[{"name": group1["name"]}, {"name": group2["name"]}]
        )
        factories.Dataset(groups=[{"name": group1["name"]}])
        factories.Dataset()
        # facet.limit of 1 keeps only the most frequent facet value.
        data_dict = {"facet.field": ["groups"], "facet.limit": 1}
        search_result = helpers.call_action("package_search", **data_dict)
        assert len(search_result["search_facets"]["groups"]["items"]) == 1
        assert search_result["search_facets"] == {
            "groups": {
                "items": [
                    {
                        "count": 2,
                        "display_name": u"Test Group 1",
                        "name": "test-group-fl1",
                    }
                ],
                "title": "groups",
            }
        }

    def test_facet_no_limit(self):
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Dataset(
            groups=[{"name": group1["name"]}, {"name": group2["name"]}]
        )
        factories.Dataset(groups=[{"name": group1["name"]}])
        factories.Dataset()
        data_dict = {"facet.field": ["groups"], "facet.limit": -1}  # no limit
        search_result = helpers.call_action("package_search", **data_dict)
        assert len(search_result["search_facets"]["groups"]["items"]) == 2

    def test_sort(self):
        factories.Dataset(name="test0")
        factories.Dataset(name="test1")
        factories.Dataset(name="test2")
        search_result = helpers.call_action(
            "package_search", sort="metadata_created desc"
        )
        result_names = [result["name"] for result in search_result["results"]]
        assert result_names == [u"test2", u"test1", u"test0"]

    @pytest.mark.ckan_config("ckan.search.default_package_sort", "metadata_created asc")
    def test_sort_default_from_config(self):
        # With no explicit sort, the configured default ordering applies.
        factories.Dataset(name="test0")
        factories.Dataset(name="test1")
        factories.Dataset(name="test2")
        search_result = helpers.call_action(
            "package_search"
        )
        result_names = [result["name"] for result in search_result["results"]]
        assert result_names == [u"test0", u"test1", u"test2"]

    def test_package_search_on_resource_name(self):
        """
        package_search() should allow searching on resource name field.
        """
        resource_name = "resource_abc"
        factories.Resource(name=resource_name)
        search_result = helpers.call_action("package_search", q="resource_abc")
        assert (
            search_result["results"][0]["resources"][0]["name"]
            == resource_name
        )

    def test_package_search_excludes_private_and_drafts(self):
        """
        package_search() with no options should not return private and draft
        datasets.
        """
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state="deleted")
        factories.Dataset(user=user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        results = helpers.call_action("package_search")["results"]
        assert len(results) == 1
        assert results[0]["name"] == dataset["name"]

    def test_package_search_with_fq_excludes_private(self):
        """
        package_search() with fq capacity:private should not return private
        and draft datasets.
        """
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state="deleted")
        factories.Dataset(user=user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        fq = "capacity:private"
        results = helpers.call_action("package_search", fq=fq)["results"]
        assert len(results) == 0

    def test_package_search_with_fq_excludes_drafts(self):
        """
        A user can't use fq drafts to get draft datasets. Nothing is
        returned.
        """
        # NOTE(review): this call runs without a user in the context, i.e.
        # anonymously — the original docstring said "sysadmin"; confirm
        # which caller was intended.
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        factories.Dataset(user=user, state="draft", name="draft-dataset")
        factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        fq = "state:draft"
        results = helpers.call_action("package_search", fq=fq)["results"]
        assert len(results) == 0

    def test_package_search_with_include_drafts_option_excludes_drafts_for_anon_user(
        self,
    ):
        """
        An anon user can't use include_drafts to get draft datasets.
        """
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state="deleted")
        draft_dataset = factories.Dataset(user=user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        # Empty user string = anonymous caller.
        results = logic.get_action("package_search")(
            {u"user": u""}, {"include_drafts": True}
        )["results"]
        assert len(results) == 1
        assert results[0]["name"] != draft_dataset["name"]
        assert results[0]["name"] == dataset["name"]

    def test_package_search_with_include_drafts_option_includes_drafts_for_sysadmin(
        self,
    ):
        """
        A sysadmin can use the include_drafts option to get draft datasets for
        all users.
        """
        user = factories.User()
        other_user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state="deleted")
        draft_dataset = factories.Dataset(user=user, state="draft")
        other_draft_dataset = factories.Dataset(user=other_user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        results = logic.get_action("package_search")(
            {"user": sysadmin["name"]}, {"include_drafts": True}
        )["results"]
        assert len(results) == 3
        names = [r["name"] for r in results]
        assert draft_dataset["name"] in names
        assert other_draft_dataset["name"] in names
        assert dataset["name"] in names

    def test_package_search_with_include_drafts_false_option_doesnot_include_drafts_for_sysadmin(
        self,
    ):
        """
        A sysadmin with include_drafts option set to `False` will not get
        drafts returned in results.
        """
        user = factories.User()
        other_user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state="deleted")
        draft_dataset = factories.Dataset(user=user, state="draft")
        other_draft_dataset = factories.Dataset(user=other_user, state="draft")
        factories.Dataset(user=user, private=True, owner_org=org["name"])
        results = logic.get_action("package_search")(
            {"user": sysadmin["name"]}, {"include_drafts": False}
        )["results"]
        assert len(results) == 1
        names = [r["name"] for r in results]
        assert draft_dataset["name"] not in names
        assert other_draft_dataset["name"] not in names
        assert dataset["name"] in names

    def test_package_search_with_include_drafts_option_includes_drafts_for_user(
        self,
    ):
        """
        The include_drafts option will include draft datasets for the
        authorized user, but not drafts for other users.
        """
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user, name="dataset")
        other_dataset = factories.Dataset(
            user=other_user, name="other-dataset"
        )
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        draft_dataset = factories.Dataset(
            user=user, state="draft", name="draft-dataset"
        )
        other_draft_dataset = factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"include_drafts": True}
        )["results"]
        # Own active + own draft + other user's active dataset.
        assert len(results) == 3
        names = [r["name"] for r in results]
        assert draft_dataset["name"] in names
        assert other_draft_dataset["name"] not in names
        assert dataset["name"] in names
        assert other_dataset["name"] in names

    def test_package_search_with_fq_for_create_user_id_will_include_datasets_for_other_users(
        self,
    ):
        """
        A normal user can use the fq creator_user_id to get active datasets
        (but not draft) for another user.
        """
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user, name="dataset")
        other_dataset = factories.Dataset(
            user=other_user, name="other-dataset"
        )
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        draft_dataset = factories.Dataset(
            user=user, state="draft", name="draft-dataset"
        )
        other_draft_dataset = factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        fq = "creator_user_id:{0}".format(other_user["id"])
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"fq": fq}
        )["results"]
        assert len(results) == 1
        names = [r["name"] for r in results]
        assert draft_dataset["name"] not in names
        assert other_draft_dataset["name"] not in names
        assert dataset["name"] not in names
        assert other_dataset["name"] in names

    def test_package_search_with_fq_for_create_user_id_will_not_include_drafts_for_other_users(
        self,
    ):
        """
        A normal user can't use fq creator_user_id and drafts to get draft
        datasets for another user.
        """
        # NOTE(review): this body is identical to the next test's (same fq
        # and include_drafts=True); one of the two was probably meant to
        # omit include_drafts — confirm against git history.
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        factories.Dataset(user=user, state="draft", name="draft-dataset")
        factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        fq = "(creator_user_id:{0} AND +state:draft)".format(other_user["id"])
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"fq": fq, "include_drafts": True}
        )["results"]
        assert len(results) == 0

    def test_package_search_with_fq_for_creator_user_id_and_drafts_and_include_drafts_option_will_not_include_drafts_for_other_user(
        self,
    ):
        """
        A normal user can't use fq creator_user_id and drafts and the
        include_drafts option to get draft datasets for another user.
        """
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        factories.Dataset(user=user, state="draft", name="draft-dataset")
        factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        fq = "(creator_user_id:{0} AND +state:draft)".format(other_user["id"])
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"fq": fq, "include_drafts": True}
        )["results"]
        assert len(results) == 0

    def test_package_search_with_fq_for_creator_user_id_and_include_drafts_option_will_not_include_drafts_for_other_user(
        self,
    ):
        """
        A normal user can't use fq creator_user_id and the include_drafts
        option to get draft datasets for another user.
        """
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        other_dataset = factories.Dataset(
            user=other_user, name="other-dataset"
        )
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        factories.Dataset(user=user, state="draft", name="draft-dataset")
        other_draft_dataset = factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        fq = "creator_user_id:{0}".format(other_user["id"])
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"fq": fq, "include_drafts": True}
        )["results"]
        names = [r["name"] for r in results]
        assert len(results) == 1
        assert other_dataset["name"] in names
        assert other_draft_dataset["name"] not in names

    def test_package_search_with_fq_for_create_user_id_will_include_drafts_for_other_users_for_sysadmin(
        self,
    ):
        """
        Sysadmins can use fq to get draft datasets for another user.
        """
        user = factories.User()
        sysadmin = factories.Sysadmin()
        other_user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state="deleted", name="deleted-dataset")
        draft_dataset = factories.Dataset(
            user=user, state="draft", name="draft-dataset"
        )
        factories.Dataset(
            user=other_user, state="draft", name="other-draft-dataset"
        )
        factories.Dataset(
            user=user,
            private=True,
            owner_org=org["name"],
            name="private-dataset",
        )
        fq = "(creator_user_id:{0} AND +state:draft)".format(user["id"])
        results = logic.get_action("package_search")(
            {"user": sysadmin["name"]}, {"fq": fq}
        )["results"]
        names = [r["name"] for r in results]
        assert len(results) == 1
        assert dataset["name"] not in names
        assert draft_dataset["name"] in names

    def test_package_search_private_with_include_private(self):
        """
        package_search() can return private datasets when
        `include_private=True`
        """
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, state="deleted")
        factories.Dataset(user=user, state="draft")
        private_dataset = factories.Dataset(
            user=user, private=True, owner_org=org["name"]
        )
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"include_private": True}
        )["results"]
        assert [r["name"] for r in results] == [private_dataset["name"]]

    def test_package_search_private_with_include_private_wont_show_other_orgs_private(
        self,
    ):
        # A private dataset belonging to a different org stays hidden,
        # even with include_private=True.
        user = factories.User()
        user2 = factories.User()
        org = factories.Organization(user=user)
        org2 = factories.Organization(user=user2)
        private_dataset = factories.Dataset(
            user=user2, private=True, owner_org=org2["name"]
        )
        results = logic.get_action("package_search")(
            {"user": user["name"]}, {"include_private": True}
        )["results"]
        assert [r["name"] for r in results] == []

    def test_package_search_private_with_include_private_syadmin(self):
        # Sysadmins see private datasets of any org with include_private.
        user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        private_dataset = factories.Dataset(
            user=user, private=True, owner_org=org["name"]
        )
        results = logic.get_action("package_search")(
            {"user": sysadmin["name"]}, {"include_private": True}
        )["results"]
        assert [r["name"] for r in results] == [private_dataset["name"]]

    def test_package_works_without_user_in_context(self):
        """
        package_search() should work even if user isn't in the context (e.g.
        ckanext-showcase tests).
        """
        logic.get_action("package_search")({}, dict(q="anything"))

    def test_local_parameters_not_supported(self):
        # Solr "local parameters" ({!...}) are rejected for safety.
        with pytest.raises(SearchError):
            helpers.call_action(
                "package_search", q='{!child of="content_type:parentDoc"}'
            )
@pytest.mark.ckan_config("ckan.plugins", "example_idatasetform")
@pytest.mark.usefixtures("clean_db", "with_plugins", "with_request_context")
class TestPackageAutocompleteWithDatasetForm(object):
    """package_search behaviour when an IDatasetForm plugin provides a
    custom schema field (custom_text)."""

    def test_custom_schema_returned(self):
        dataset = factories.Dataset(custom_text="foo")
        response = helpers.call_action(
            "package_search", q="id:{0}".format(dataset["id"])
        )
        result = response["results"][0]
        # With the plugin schema, custom_text stays a top-level field.
        assert result["id"] == dataset["id"]
        assert result["custom_text"] == "foo"

    def test_custom_schema_not_returned(self):
        dataset = factories.Dataset(custom_text="foo")
        response = helpers.call_action(
            "package_search",
            q="id:{0}".format(dataset["id"]),
            use_default_schema=True,
        )
        result = response["results"][0]
        assert result["id"] == dataset["id"]
        # Under the default schema the custom field is demoted to an extra.
        assert "custom_text" not in result
        assert result["extras"][0]["key"] == "custom_text"
        assert result["extras"][0]["value"] == "foo"
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestBadLimitQueryParameters(object):
    """test class for #1258 non-int query parameters cause 500 errors
    Test that validation errors are raised when calling actions with
    bad parameters.
    """

    def test_activity_list_actions(self):
        list_actions = (
            "user_activity_list",
            "package_activity_list",
            "group_activity_list",
            "organization_activity_list",
            "recently_changed_packages_activity_list",
            "current_package_list_with_resources",
        )
        bad_paging_params = (
            # non-integer strings must be rejected
            {"limit": "not_an_int", "offset": "not_an_int"},
            # negative values must be rejected as well
            {"limit": -1, "offset": -1},
        )
        for action in list_actions:
            for params in bad_paging_params:
                with pytest.raises(logic.ValidationError):
                    helpers.call_action(action, id="test_user", **params)

    def test_package_search_facet_field_is_json(self):
        # "facet.field" must be a JSON list, not a bare string.
        with pytest.raises(logic.ValidationError):
            helpers.call_action("package_search", **{"facet.field": "notjson"})
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestOrganizationListForUser(object):
"""Functional tests for the organization_list_for_user() action function."""
def test_when_user_is_not_a_member_of_any_organizations(self):
    """
    When the user isn't a member of any organizations (in any capacity)
    organization_list_for_user() should return an empty list.
    """
    user = factories.User()
    context = {"user": user["name"]}
    # Create an organization so we can test that it does not get returned.
    factories.Organization()
    organizations = helpers.call_action(
        "organization_list_for_user", context=context
    )
    # The unrelated organization must not appear for this user.
    assert organizations == []
def test_when_user_is_an_admin_of_one_organization(self):
    """
    When the user is an admin of one organization
    organization_list_for_user() should return a list of just that one
    organization.
    """
    user = factories.User()
    context = {"user": user["name"]}
    organization = factories.Organization()
    # Create a second organization just so we can test that it does not get
    # returned.
    factories.Organization()
    # Grant the user "admin" capacity on the first organization.
    helpers.call_action(
        "member_create",
        id=organization["id"],
        object=user["id"],
        object_type="user",
        capacity="admin",
    )
    organizations = helpers.call_action(
        "organization_list_for_user", context=context
    )
    assert len(organizations) == 1
    assert organizations[0]["id"] == organization["id"]
def test_when_user_is_an_admin_of_three_organizations(self):
    """
    When the user is an admin of three organizations
    organization_list_for_user() should return a list of all three
    organizations.
    """
    user = factories.User()
    context = {"user": user["name"]}
    organization_1 = factories.Organization()
    organization_2 = factories.Organization()
    organization_3 = factories.Organization()
    # Create a fourth organization just so we can test that it does not get
    # returned.
    factories.Organization()
    # Make the user an admin of all three organizations:
    for organization in (organization_1, organization_2, organization_3):
        helpers.call_action(
            "member_create",
            id=organization["id"],
            object=user["id"],
            object_type="user",
            capacity="admin",
        )
    organizations = helpers.call_action(
        "organization_list_for_user", context=context
    )
    assert len(organizations) == 3
    ids = [organization["id"] for organization in organizations]
    for organization in (organization_1, organization_2, organization_3):
        assert organization["id"] in ids
def test_when_permissions_extend_to_sub_organizations(self):
    """
    When the user is an admin of a parent organization,
    organization_list_for_user() should also return the child
    organizations below it in the hierarchy.
    """
    user = factories.User()
    context = {"user": user["name"]}
    user["capacity"] = "admin"
    # Hierarchy: top -> middle -> bottom; user is admin of top and middle.
    top_organization = factories.Organization(users=[user])
    middle_organization = factories.Organization(users=[user])
    bottom_organization = factories.Organization()
    # Create another organization just so we can test that it does not get
    # returned.
    factories.Organization()
    # Link bottom under middle, and middle under top
    # (member_create with object_type="group" / capacity="parent").
    helpers.call_action(
        "member_create",
        id=bottom_organization["id"],
        object=middle_organization["id"],
        object_type="group",
        capacity="parent",
    )
    helpers.call_action(
        "member_create",
        id=middle_organization["id"],
        object=top_organization["id"],
        object_type="group",
        capacity="parent",
    )
    organizations = helpers.call_action(
        "organization_list_for_user", context=context
    )
    # top + middle (direct admin) + bottom (inherited via hierarchy).
    assert len(organizations) == 3
    org_ids = set(org["id"] for org in organizations)
    assert bottom_organization["id"] in org_ids
def test_does_return_members(self):
"""
By default organization_list_for_user() should return organizations
that the user is just a member (not an admin) of.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="member",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_does_return_editors(self):
"""
By default organization_list_for_user() should return organizations
that the user is just an editor (not an admin) of.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_editor_permission(self):
"""
organization_list_for_user() should return organizations that the user
is an editor of if passed a permission that belongs to the editor role.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
organizations = helpers.call_action(
"organization_list_for_user",
permission="create_dataset",
context=context,
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_member_permission(self):
"""
organization_list_for_user() should return organizations that the user
is a member of if passed a permission that belongs to the member role.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="member",
)
organizations = helpers.call_action(
"organization_list_for_user", permission="read", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_invalid_permission(self):
"""
organization_list_for_user() should return an empty list if passed a
non-existent or invalid permission.
Note that we test this with a user who is an editor of one organization.
If the user was an admin of the organization then it would return that
organization - admins have all permissions, including permissions that
don't exist.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
for permission in ("", " ", "foo", 27.3, 5, True, False, None):
organizations = helpers.call_action(
"organization_list_for_user",
permission=permission,
context=context,
)
assert organizations == []
def test_that_it_does_not_return_groups(self):
"""
organization_list_for_user() should not return groups that the user is
a member, editor or admin of.
"""
user = factories.User()
context = {"user": user["name"]}
group_1 = factories.Group()
group_2 = factories.Group()
group_3 = factories.Group()
helpers.call_action(
"member_create",
id=group_1["id"],
object=user["id"],
object_type="user",
capacity="member",
)
helpers.call_action(
"member_create",
id=group_2["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
helpers.call_action(
"member_create",
id=group_3["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert organizations == []
    def test_that_it_does_not_return_previous_memberships(self):
        """
        organization_list_for_user() should not return organizations that the
        user was previously an admin of but has since been removed from.
        """
        user = factories.User()
        context = {"user": user["name"]}
        organization = factories.Organization()
        # Make the user an admin of the organization.
        helpers.call_action(
            "member_create",
            id=organization["id"],
            object=user["id"],
            object_type="user",
            capacity="admin",
        )
        # Remove the user from the organization.
        helpers.call_action(
            "member_delete",
            id=organization["id"],
            object=user["id"],
            object_type="user",
        )
        organizations = helpers.call_action(
            "organization_list_for_user", context=context
        )
        assert organizations == []
def test_when_user_is_sysadmin(self):
"""
When the user is a sysadmin organization_list_for_user() should just
return all organizations, even if the user is not a member of them.
"""
user = factories.Sysadmin()
context = {"user": user["name"]}
organization = factories.Organization()
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_that_it_does_not_return_deleted_organizations(self):
"""
organization_list_for_user() should not return deleted organizations
that the user was an admin of.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
# Make the user an admin of the organization.
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
# Delete the organization.
helpers.call_action(
"organization_delete", id=organization["id"], context=context
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert organizations == []
def test_with_no_authorized_user(self):
"""
organization_list_for_user() should return an empty list if there's no
authorized user. Users who aren't logged-in don't have any permissions.
"""
# Create an organization so we can test that it doesn't get returned.
organization = factories.Organization()
organizations = helpers.call_action("organization_list_for_user")
assert organizations == []
    def test_organization_list_for_user_returns_all_roles(self):
        """
        When called with an explicit user id, organization_list_for_user()
        should return every organization the user belongs to in any capacity
        (admin, editor or member).
        """
        user1 = factories.User()
        user2 = factories.User()
        user3 = factories.User()
        org1 = factories.Organization(
            users=[
                {"name": user1["name"], "capacity": "admin"},
                {"name": user2["name"], "capacity": "editor"},
            ]
        )
        org2 = factories.Organization(
            users=[
                {"name": user1["name"], "capacity": "member"},
                {"name": user2["name"], "capacity": "member"},
            ]
        )
        org3 = factories.Organization(
            users=[{"name": user1["name"], "capacity": "editor"}]
        )
        # user1 belongs to all three organizations (admin/member/editor).
        org_list_for_user1 = helpers.call_action(
            "organization_list_for_user", id=user1["id"]
        )
        assert sorted([org["id"] for org in org_list_for_user1]) == sorted(
            [org1["id"], org2["id"], org3["id"]]
        )
        # user2 belongs only to the first two.
        org_list_for_user2 = helpers.call_action(
            "organization_list_for_user", id=user2["id"]
        )
        assert sorted([org["id"] for org in org_list_for_user2]) == sorted(
            [org1["id"], org2["id"]]
        )
        # user3 belongs to no organization at all.
        org_list_for_user3 = helpers.call_action(
            "organization_list_for_user", id=user3["id"]
        )
        assert org_list_for_user3 == []
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestShowResourceView(object):
    """Tests for the resource_view_show action."""

    def test_resource_view_show(self):
        # A freshly created view should round-trip through resource_view_show.
        resource = factories.Resource()
        view_dict = {
            "resource_id": resource["id"],
            "view_type": u"image_view",
            "title": u"View",
            "description": u"A nice view",
            "image_url": "url",
        }
        created = helpers.call_action("resource_view_create", **view_dict)
        shown = helpers.call_action("resource_view_show", id=created["id"])
        # Drop the server-generated keys before comparing with our input.
        for key in ("id", "package_id"):
            shown.pop(key)
        assert shown == view_dict

    def test_resource_view_show_id_missing(self):
        with pytest.raises(logic.ValidationError):
            helpers.call_action("resource_view_show")

    def test_resource_view_show_id_not_found(self):
        with pytest.raises(logic.NotFound):
            helpers.call_action("resource_view_show", id="does_not_exist")
class TestGetHelpShow(object):
    """Tests for the help_show action."""

    def test_help_show_basic(self):
        function_name = "package_search"
        result = helpers.call_action("help_show", name=function_name)
        function = logic.get_action(function_name)
        assert result == function.__doc__

    def test_help_show_no_docstring(self):
        function_name = "package_search"
        function = logic.get_action(function_name)
        actual_docstring = function.__doc__
        # Temporarily strip the docstring to simulate an undocumented action.
        function.__doc__ = None
        try:
            result = helpers.call_action("help_show", name=function_name)
        finally:
            # Restore even if the action call raises, so a failure here can't
            # leak a None docstring into other tests (the original restored it
            # unconditionally but outside any finally block).
            function.__doc__ = actual_docstring
        assert result is None

    def test_help_show_not_found(self):
        function_name = "unknown_action"
        with pytest.raises(logic.NotFound):
            helpers.call_action("help_show", name=function_name)
@pytest.mark.usefixtures("clean_db")
class TestConfigOptionShow(object):
    """Tests for the config_option_show action."""

    @pytest.mark.ckan_config("ckan.site_title", "My Test CKAN")
    def test_config_option_show_in_config_not_in_db(self):
        """config_option_show returns the value from the config file when
        there is no overriding value in the system_info table."""
        title = helpers.call_action(
            "config_option_show", key="ckan.site_title"
        )
        assert title == "My Test CKAN"

    @pytest.mark.ckan_config("ckan.site_title", "My Test CKAN")
    def test_config_option_show_in_config_and_in_db(self):
        """config_option_show returns the value from the db when the value is
        in both the config file and the system_info table."""
        params = {"ckan.site_title": "Test site title"}
        helpers.call_action("config_option_update", **params)
        title = helpers.call_action(
            "config_option_show", key="ckan.site_title"
        )
        assert title == "Test site title"

    @pytest.mark.ckan_config("ckan.not.editable", "My non editable option")
    def test_config_option_show_not_whitelisted_key(self):
        """config_option_show raises an exception if the key is not a
        whitelisted config option."""
        with pytest.raises(logic.ValidationError):
            helpers.call_action("config_option_show", key="ckan.not.editable")
class TestConfigOptionList(object):
    def test_config_option_list(self):
        """config_option_list returns whitelisted config option keys"""
        actual = helpers.call_action("config_option_list")
        expected = list(schema.update_configuration_schema().keys())
        assert actual == expected
def remove_pseudo_users(user_list):
    """Strip CKAN's built-in pseudo users from *user_list* in place."""
    keep = [
        user
        for user in user_list
        if user["name"] not in ("logged_in", "visitor")
    ]
    user_list[:] = keep
@pytest.mark.usefixtures("clean_db")
class TestTagShow(object):
    """Tests for the tag_show action (free tags, vocab tags, datasets)."""

    def test_tag_show_for_free_tag(self):
        dataset = factories.Dataset(tags=[{"name": "acid-rain"}])
        tag_in_dataset = dataset["tags"][0]
        tag_shown = helpers.call_action("tag_show", id="acid-rain")
        assert tag_shown["name"] == "acid-rain"
        assert tag_shown["display_name"] == "acid-rain"
        assert tag_shown["id"] == tag_in_dataset["id"]
        assert tag_shown["vocabulary_id"] is None
        # Datasets are only included when include_datasets=True is passed.
        assert "packages" not in tag_shown

    @pytest.mark.usefixtures("clean_index")
    def test_tag_show_with_datasets(self):
        dataset = factories.Dataset(tags=[{"name": "acid-rain"}])
        tag_shown = helpers.call_action(
            "tag_show", id="acid-rain", include_datasets=True
        )
        assert [d["name"] for d in tag_shown["packages"]] == [dataset["name"]]

    def test_tag_show_not_found(self):
        with pytest.raises(logic.NotFound):
            helpers.call_action("tag_show", id="does-not-exist")

    def test_tag_show_for_flexible_tag(self):
        # A 'flexible' tag is one with spaces, some punctuation
        # and foreign characters in its name
        dataset = factories.Dataset(tags=[{"name": u"Flexible. \u30a1"}])
        tag_shown = helpers.call_action(
            "tag_show", id=u"Flexible. \u30a1", include_datasets=True
        )
        assert tag_shown["name"] == u"Flexible. \u30a1"
        assert tag_shown["display_name"] == u"Flexible. \u30a1"
        assert [d["name"] for d in tag_shown["packages"]] == [dataset["name"]]

    def test_tag_show_for_vocab_tag(self):
        vocab = factories.Vocabulary(tags=[dict(name="acid-rain")])
        dataset = factories.Dataset(tags=vocab["tags"])
        tag_in_dataset = dataset["tags"][0]
        tag_shown = helpers.call_action(
            "tag_show",
            id="acid-rain",
            vocabulary_id=vocab["id"],
            include_datasets=True,
        )
        assert tag_shown["name"] == "acid-rain"
        assert tag_shown["display_name"] == "acid-rain"
        assert tag_shown["id"] == tag_in_dataset["id"]
        assert tag_shown["vocabulary_id"] == vocab["id"]
        assert [d["name"] for d in tag_shown["packages"]] == [dataset["name"]]
@pytest.mark.usefixtures("clean_db")
class TestTagList(object):
    """Tests for the tag_list action."""

    def test_tag_list(self):
        factories.Dataset(tags=[{"name": "acid-rain"}, {"name": "pollution"}])
        factories.Dataset(tags=[{"name": "pollution"}])
        tag_list = helpers.call_action("tag_list")
        assert set(tag_list) == set(("acid-rain", "pollution"))

    def test_tag_list_all_fields(self):
        factories.Dataset(tags=[{"name": "acid-rain"}])
        tag_list = helpers.call_action("tag_list", all_fields=True)
        assert tag_list[0]["name"] == "acid-rain"
        assert tag_list[0]["display_name"] == "acid-rain"
        # BUG FIX: the original asserted `"packages" not in tag_list`, which
        # is vacuously true — tag_list is a list of dicts, so it can never
        # contain the string "packages". The intent (cf. the analogous
        # tag_show test) is that a tag dict carries no "packages" key unless
        # include_datasets is requested.
        assert "packages" not in tag_list[0]

    def test_tag_list_with_flexible_tag(self):
        # A 'flexible' tag is one with spaces, punctuation (apart from commas)
        # and foreign characters in its name
        flexible_tag = u"Flexible. \u30a1"
        factories.Dataset(tags=[{"name": flexible_tag}])
        tag_list = helpers.call_action("tag_list", all_fields=True)
        assert tag_list[0]["name"] == flexible_tag

    def test_tag_list_with_vocab(self):
        vocab = factories.Vocabulary(
            tags=[dict(name="acid-rain"), dict(name="pollution")]
        )
        tag_list = helpers.call_action("tag_list", vocabulary_id=vocab["id"])
        assert set(tag_list) == set(("acid-rain", "pollution"))

    def test_tag_list_vocab_not_found(self):
        with pytest.raises(logic.NotFound):
            helpers.call_action("tag_list", vocabulary_id="does-not-exist")
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestMembersList(object):
    """Tests that deleting an object also soft-deletes its memberships.

    member_list returns tuples of (object id, object type, capacity); these
    tests only inspect the first two elements.
    """

    def test_dataset_delete_marks_membership_of_group_as_deleted(self):
        sysadmin = factories.Sysadmin()
        group = factories.Group()
        dataset = factories.Dataset(groups=[{"name": group["name"]}])
        context = {"user": sysadmin["name"]}
        group_members = helpers.call_action(
            "member_list", context, id=group["id"], object_type="package"
        )
        assert len(group_members) == 1
        assert group_members[0][0] == dataset["id"]
        assert group_members[0][1] == "package"
        # Deleting the dataset should remove it from the group's member list.
        helpers.call_action("package_delete", context, id=dataset["id"])
        group_members = helpers.call_action(
            "member_list", context, id=group["id"], object_type="package"
        )
        assert len(group_members) == 0

    def test_dataset_delete_marks_membership_of_org_as_deleted(self):
        sysadmin = factories.Sysadmin()
        org = factories.Organization()
        dataset = factories.Dataset(owner_org=org["id"])
        context = {"user": sysadmin["name"]}
        org_members = helpers.call_action(
            "member_list", context, id=org["id"], object_type="package"
        )
        assert len(org_members) == 1
        assert org_members[0][0] == dataset["id"]
        assert org_members[0][1] == "package"
        # Deleting the dataset should remove it from the org's member list.
        helpers.call_action("package_delete", context, id=dataset["id"])
        org_members = helpers.call_action(
            "member_list", context, id=org["id"], object_type="package"
        )
        assert len(org_members) == 0

    def test_user_delete_marks_membership_of_group_as_deleted(self):
        sysadmin = factories.Sysadmin()
        group = factories.Group()
        user = factories.User()
        context = {"user": sysadmin["name"]}
        member_dict = {
            "username": user["id"],
            "id": group["id"],
            "role": "member",
        }
        helpers.call_action("group_member_create", context, **member_dict)
        group_members = helpers.call_action(
            "member_list",
            context,
            id=group["id"],
            object_type="user",
            capacity="member",
        )
        assert len(group_members) == 1
        assert group_members[0][0] == user["id"]
        assert group_members[0][1] == "user"
        # Deleting the user should remove them from the group's member list.
        helpers.call_action("user_delete", context, id=user["id"])
        group_members = helpers.call_action(
            "member_list",
            context,
            id=group["id"],
            object_type="user",
            capacity="member",
        )
        assert len(group_members) == 0

    def test_user_delete_marks_membership_of_org_as_deleted(self):
        sysadmin = factories.Sysadmin()
        org = factories.Organization()
        user = factories.User()
        context = {"user": sysadmin["name"]}
        member_dict = {
            "username": user["id"],
            "id": org["id"],
            "role": "member",
        }
        helpers.call_action(
            "organization_member_create", context, **member_dict
        )
        org_members = helpers.call_action(
            "member_list",
            context,
            id=org["id"],
            object_type="user",
            capacity="member",
        )
        assert len(org_members) == 1
        assert org_members[0][0] == user["id"]
        assert org_members[0][1] == "user"
        # Deleting the user should remove them from the org's member list.
        helpers.call_action("user_delete", context, id=user["id"])
        org_members = helpers.call_action(
            "member_list",
            context,
            id=org["id"],
            object_type="user",
            capacity="member",
        )
        assert len(org_members) == 0
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestFollow(object):
    """Tests for the followee_list action."""

    def test_followee_list(self):
        group1 = factories.Group(title="Finance")
        group2 = factories.Group(title="Environment")
        # A group the user does not follow; it must not appear in the
        # results.  (The original bound it to an unused local `group3`.)
        factories.Group(title="Education")
        user = factories.User()
        context = {"user": user["name"]}
        helpers.call_action("follow_group", context, id=group1["id"])
        helpers.call_action("follow_group", context, id=group2["id"])
        followee_list = helpers.call_action(
            "followee_list", context, id=user["name"]
        )
        assert len(followee_list) == 2
        assert sorted([f["display_name"] for f in followee_list]) == [
            "Environment",
            "Finance",
        ]

    def test_followee_list_with_q(self):
        group1 = factories.Group(title="Finance")
        group2 = factories.Group(title="Environment")
        # An unfollowed group; must not appear even though "Education"
        # matches the q="E" filter below.
        factories.Group(title="Education")
        user = factories.User()
        context = {"user": user["name"]}
        helpers.call_action("follow_group", context, id=group1["id"])
        helpers.call_action("follow_group", context, id=group2["id"])
        followee_list = helpers.call_action(
            "followee_list", context, id=user["name"], q="E"
        )
        assert len(followee_list) == 1
        assert followee_list[0]["display_name"] == "Environment"
class TestStatusShow(object):
    """Tests for the status_show action."""

    @pytest.mark.ckan_config("ckan.plugins", "stats")
    @pytest.mark.usefixtures("clean_db", "with_plugins", "with_request_context")
    def test_status_show(self):
        status = helpers.call_action(u"status_show")
        assert status[u"ckan_version"] == __version__
        assert status[u"site_url"] == u"http://test.ckan.net"
        assert status[u"site_title"] == u"CKAN"
        assert status[u"site_description"] == u""
        assert status[u"locale_default"] == u"en"
        # isinstance is the idiomatic type check (the original used
        # `type(...) == list`, which would also reject list subclasses).
        assert isinstance(status[u"extensions"], list)
        assert status[u"extensions"] == [u"stats"]
class TestJobList(helpers.FunctionalRQTestBase):
    """Tests for the job_list action."""

    def test_all_queues(self):
        """
        Test getting jobs from all queues.
        """
        job1 = self.enqueue()
        job2 = self.enqueue()
        job3 = self.enqueue(queue=u"my_queue")
        jobs = helpers.call_action(u"job_list")
        assert len(jobs) == 3
        assert {job[u"id"] for job in jobs} == {job1.id, job2.id, job3.id}

    def test_specific_queues(self):
        """
        Test getting jobs from specific queues.
        """
        # A job in the default queue: it must be excluded when we filter on
        # the named queues below.  (The original bound it to an unused local
        # `job1`; the enqueue side effect is what matters.)
        self.enqueue()
        job2 = self.enqueue(queue=u"q2")
        job3 = self.enqueue(queue=u"q3")
        job4 = self.enqueue(queue=u"q3")
        jobs = helpers.call_action(u"job_list", queues=[u"q2"])
        assert len(jobs) == 1
        assert jobs[0][u"id"] == job2.id
        jobs = helpers.call_action(u"job_list", queues=[u"q2", u"q3"])
        assert len(jobs) == 3
        assert {job[u"id"] for job in jobs} == {job2.id, job3.id, job4.id}
class TestJobShow(helpers.FunctionalRQTestBase):
    """Tests for the job_show action."""

    def test_existing_job(self):
        """
        Test showing an existing job.
        """
        job = self.enqueue(queue=u"my_queue", title=u"Title")
        shown = helpers.call_action(u"job_show", id=job.id)
        assert shown[u"id"] == job.id
        assert shown[u"title"] == u"Title"
        assert shown[u"queue"] == u"my_queue"
        # The job was enqueued moments ago, so its creation timestamp must
        # be very recent.
        age = _seconds_since_timestamp(
            shown[u"created"], u"%Y-%m-%dT%H:%M:%S"
        )
        assert age < 10

    def test_not_existing_job(self):
        """
        Test showing a not existing job.
        """
        with pytest.raises(logic.NotFound):
            helpers.call_action(u"job_show", id=u"does-not-exist")
def _seconds_since_timestamp(timestamp, format_):
dt = datetime.datetime.strptime(timestamp, format_)
now = datetime.datetime.utcnow()
assert now > dt # we assume timestamp is not in the future
return (now - dt).total_seconds()
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestActivityShow(object):
    """Tests for the activity_show action and its include_data flag."""

    def test_simple_without_data(self):
        # With include_data=False only a minimal "data" dict (just the
        # package title) is returned instead of the full stored payload.
        dataset = factories.Dataset()
        user = factories.User()
        activity = factories.Activity(
            user_id=user["id"],
            object_id=dataset["id"],
            activity_type="new package",
            data={"package": copy.deepcopy(dataset), "actor": "Mr Someone"},
        )
        activity_shown = helpers.call_action(
            "activity_show", id=activity["id"], include_data=False
        )
        assert activity_shown["user_id"] == user["id"]
        assert (
            _seconds_since_timestamp(
                activity_shown["timestamp"], u"%Y-%m-%dT%H:%M:%S.%f"
            )
            < 10
        )
        assert activity_shown["object_id"] == dataset["id"]
        assert activity_shown["data"] == {"package": {"title": "Test Dataset"}}
        assert activity_shown["activity_type"] == u"new package"

    def test_simple_with_data(self):
        # With include_data=True the full stored "data" payload is returned.
        dataset = factories.Dataset()
        user = factories.User()
        activity = factories.Activity(
            user_id=user["id"],
            object_id=dataset["id"],
            activity_type="new package",
            data={"package": copy.deepcopy(dataset), "actor": "Mr Someone"},
        )
        activity_shown = helpers.call_action(
            "activity_show", id=activity["id"], include_data=True
        )
        assert activity_shown["user_id"] == user["id"]
        assert (
            _seconds_since_timestamp(
                activity_shown["timestamp"], u"%Y-%m-%dT%H:%M:%S.%f"
            )
            < 10
        )
        assert activity_shown["object_id"] == dataset["id"]
        assert activity_shown["data"] == {
            "package": dataset,
            "actor": "Mr Someone",
        }
        assert activity_shown["activity_type"] == u"new package"
def _clear_activities():
    """Delete every activity row so a test can start from a clean stream."""
    from ckan import model

    # Details reference activities, so delete them first.
    for table in (model.ActivityDetail, model.Activity):
        model.Session.query(table).delete()
    model.Session.flush()
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageActivityList(object):
    """Tests for the package_activity_list action.

    Covers which activity types appear for create/update/delete operations,
    the limit/limit_max configuration, private datasets, and hidden
    activities.
    """

    def test_create_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" not in activities[0]["data"]["package"]

    def test_change_dataset(self):
        user = factories.User()
        _clear_activities()
        dataset = factories.Dataset(user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        # Newest first: the update, then the creation.
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert (
            activities[0]["data"]["package"]["title"]
            == "Dataset with changed title"
        )
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title

    def test_change_dataset_add_extra(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" not in activities[0]["data"]["package"]

    def test_change_dataset_change_extra(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user, extras=[dict(key="rating", value="great")]
        )
        _clear_activities()
        dataset["extras"][0] = dict(key="rating", value="ok")
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" not in activities[0]["data"]["package"]

    def test_change_dataset_delete_extra(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user, extras=[dict(key="rating", value="great")]
        )
        _clear_activities()
        dataset["extras"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" not in activities[0]["data"]["package"]

    def test_change_dataset_add_resource(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        # NOTE(review): the returned dict is unused; the factory call is
        # made for its side effect of adding a resource to the dataset.
        resource = factories.Resource(package_id=dataset["id"], user=user)
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # NB the detail is not included - that is only added in by
        # activity_list_to_html()

    def test_change_dataset_change_resource(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user,
            resources=[dict(url="https://example.com/foo.csv", format="csv")],
        )
        _clear_activities()
        dataset["resources"][0]["format"] = "pdf"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_delete_resource(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user,
            resources=[dict(url="https://example.com/foo.csv", format="csv")],
        )
        _clear_activities()
        dataset["resources"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_add_tag(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_delete_tag_from_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user, tags=[dict(name="checked")])
        _clear_activities()
        dataset["tags"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_delete_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_private_dataset_has_no_activity(self):
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(
            private=True, owner_org=org["id"], user=user
        )
        dataset["tags"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def test_private_dataset_delete_has_no_activity(self):
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(
            private=True, owner_org=org["id"], user=user
        )
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def _create_bulk_package_activities(self, count):
        # Insert `count` bare Activity rows directly via the model (no
        # user/type/data) so the limit tests can count results cheaply.
        dataset = factories.Dataset()
        from ckan import model

        objs = [
            model.Activity(
                user_id=None,
                object_id=dataset["id"],
                activity_type=None,
                data=None,
            )
            for i in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return dataset["id"]

    def test_limit_default(self):
        id = self._create_bulk_package_activities(35)
        results = helpers.call_action("package_activity_list", id=id)
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        id = self._create_bulk_package_activities(7)
        results = helpers.call_action("package_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        id = self._create_bulk_package_activities(9)
        results = helpers.call_action(
            "package_activity_list", id=id, limit="9"
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max

    def test_normal_user_doesnt_see_hidden_activities(self):
        # activity is 'hidden' because dataset is created by site_user
        dataset = factories.Dataset()
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
        # activity is 'hidden' because dataset is created by site_user
        # NOTE(review): despite the name, no sysadmin context is passed here;
        # the call runs as the default user — confirm whether that is the
        # intended coverage.
        dataset = factories.Dataset()
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def test_sysadmin_user_can_include_hidden_activities(self):
        # activity is 'hidden' because dataset is created by site_user
        dataset = factories.Dataset()
        activities = helpers.call_action(
            "package_activity_list",
            include_hidden_activity=True,
            id=dataset["id"],
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestUserActivityList(object):
    """Tests for the ``user_activity_list`` action.

    Checks which activity types land on a user's own stream for
    dataset/group/organization operations, and how the
    ``ckan.activity_list_limit`` / ``ckan.activity_list_limit_max``
    config options cap the number of returned results.
    """

    def test_create_user(self):
        """Creating a user records a 'new user' activity on their stream."""
        user = factories.User()
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new user"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == user["id"]

    def test_create_dataset(self):
        """Creating a dataset records a 'new package' activity."""
        user = factories.User()
        _clear_activities()
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_dataset_changed_by_another_user(self):
        """Edits by someone else don't appear on the creator's stream."""
        user = factories.User()
        another_user = factories.Sysadmin()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": another_user["name"]}, **dataset
        )
        # the user might have created the dataset, but a change by another
        # user does not show on the user's activity stream
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == []

    def test_change_dataset_add_extra(self):
        """Adding an extra records a 'changed package' activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_add_tag(self):
        """Adding a tag records a 'changed package' activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_create_group(self):
        """Creating a group records a 'new group' activity."""
        user = factories.User()
        _clear_activities()
        group = factories.Group(user=user)
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new group"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert activities[0]["data"]["group"]["title"] == group["title"]

    def test_delete_group_using_group_delete(self):
        """group_delete records a 'deleted group' activity."""
        user = factories.User()
        group = factories.Group(user=user)
        _clear_activities()
        helpers.call_action(
            "group_delete", context={"user": user["name"]}, **group
        )
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted group"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert activities[0]["data"]["group"]["title"] == group["title"]

    def test_delete_group_by_updating_state(self):
        """Setting state='deleted' via group_update also records a delete."""
        user = factories.User()
        group = factories.Group(user=user)
        _clear_activities()
        group["state"] = "deleted"
        helpers.call_action(
            "group_update", context={"user": user["name"]}, **group
        )
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted group"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert activities[0]["data"]["group"]["title"] == group["title"]

    def test_create_organization(self):
        """Creating an organization records a 'new organization' activity."""
        user = factories.User()
        _clear_activities()
        org = factories.Organization(user=user)
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new organization"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert activities[0]["data"]["group"]["title"] == org["title"]

    def test_delete_org_using_organization_delete(self):
        """organization_delete records a 'deleted organization' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        helpers.call_action(
            "organization_delete", context={"user": user["name"]}, **org
        )
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted organization"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert activities[0]["data"]["group"]["title"] == org["title"]

    def test_delete_org_by_updating_state(self):
        """Setting state='deleted' via organization_update records a delete."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        org["state"] = "deleted"
        helpers.call_action(
            "organization_update", context={"user": user["name"]}, **org
        )
        activities = helpers.call_action("user_activity_list", id=user["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted organization"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert activities[0]["data"]["group"]["title"] == org["title"]

    def _create_bulk_user_activities(self, count):
        """Insert ``count`` bare Activity rows for a fresh user.

        Returns the user's id.  Used by the limit tests below.
        """
        user = factories.User()
        from ckan import model

        objs = [
            model.Activity(
                user_id=user["id"],
                object_id=None,
                activity_type=None,
                data=None,
            )
            for i in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return user["id"]

    def test_limit_default(self):
        """Without config or a limit param, the default cap applies."""
        id = self._create_bulk_user_activities(35)
        results = helpers.call_action("user_activity_list", id=id)
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit overrides the default cap."""
        id = self._create_bulk_user_activities(7)
        results = helpers.call_action("user_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A requested limit above the configured max is capped at the max."""
        id = self._create_bulk_user_activities(9)
        results = helpers.call_action("user_activity_list", id=id, limit="9")
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupActivityList(object):
    """Tests for the ``group_activity_list`` action.

    Covers group-level activities, activities of datasets belonging to a
    group (including datasets that leave the group), result-limit config
    options, and visibility of hidden (site-user) activities.
    """

    def test_create_group(self):
        """Creating a group records a 'new group' activity on its stream."""
        user = factories.User()
        group = factories.Group(user=user)
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new group"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert activities[0]["data"]["group"]["title"] == group["title"]

    def test_change_group(self):
        """Updating a group adds 'changed group'; old snapshot is kept."""
        user = factories.User()
        _clear_activities()
        group = factories.Group(user=user)
        original_title = group["title"]
        group["title"] = "Group with changed title"
        helpers.call_action(
            "group_update", context={"user": user["name"]}, **group
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed group",
            "new group",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert (
            activities[0]["data"]["group"]["title"]
            == "Group with changed title"
        )
        # the old group still has the old title
        assert activities[1]["activity_type"] == "new group"
        assert activities[1]["data"]["group"]["title"] == original_title

    def test_create_dataset(self):
        """A dataset created in the group shows on the group's stream."""
        user = factories.User()
        group = factories.Group(user=user)
        _clear_activities()
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset(self):
        """Updating a group's dataset shows both new and changed entries."""
        user = factories.User()
        group = factories.Group(user=user)
        _clear_activities()
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title

    def test_change_dataset_add_extra(self):
        """Adding an extra to a group's dataset records 'changed package'."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_add_tag(self):
        """Adding a tag to a group's dataset records 'changed package'."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_delete_dataset(self):
        """Deleting a group's dataset records 'deleted package'."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_that_used_to_be_in_the_group(self):
        """Edits to a dataset that left the group don't show on its stream."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        # remove the dataset from the group
        dataset["groups"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        # edit the dataset
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        # dataset change should not show up in its former group
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == []

    def test_delete_dataset_that_used_to_be_in_the_group(self):
        """Deletion of a dataset that left the group still shows (known)."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        # remove the dataset from the group
        dataset["groups"] = []
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        # NOTE:
        # ideally the dataset's deletion would not show up in its old group
        # but it can't be helped without _group_activity_query getting very
        # complicated
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def _create_bulk_group_activities(self, count):
        """Insert ``count`` bare Activity rows for a fresh group.

        Returns the group's id.  Used by the limit tests below.
        """
        group = factories.Group()
        from ckan import model

        objs = [
            model.Activity(
                user_id=None,
                object_id=group["id"],
                activity_type=None,
                data=None,
            )
            for i in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return group["id"]

    def test_limit_default(self):
        """Without config or a limit param, the default cap applies."""
        id = self._create_bulk_group_activities(35)
        results = helpers.call_action("group_activity_list", id=id)
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit overrides the default cap."""
        id = self._create_bulk_group_activities(7)
        results = helpers.call_action("group_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A requested limit above the configured max is capped at the max."""
        id = self._create_bulk_group_activities(9)
        results = helpers.call_action("group_activity_list", id=id, limit="9")
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max

    def test_normal_user_doesnt_see_hidden_activities(self):
        """Site-user activity is hidden from a normal caller."""
        # activity is 'hidden' because group is created by site_user
        group = factories.Group()
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == []

    def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
        """Hidden activities are excluded unless explicitly requested."""
        # activity is 'hidden' because group is created by site_user
        group = factories.Group()
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == []

    def test_sysadmin_user_can_include_hidden_activities(self):
        """include_hidden_activity=True exposes site-user activities."""
        # activity is 'hidden' because group is created by site_user
        group = factories.Group()
        activities = helpers.call_action(
            "group_activity_list", include_hidden_activity=True, id=group["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new group"
        ]
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestOrganizationActivityList(object):
    """Tests for the ``organization_activity_list`` action.

    Mirrors the group tests: org-level activities, activities of datasets
    owned by the org (including datasets that move to another org),
    result-limit config options, and hidden-activity visibility.
    """

    def test_create_organization(self):
        """Creating an org records a 'new organization' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new organization"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert activities[0]["data"]["group"]["title"] == org["title"]

    def test_change_organization(self):
        """Updating an org adds 'changed organization'; old snapshot kept."""
        user = factories.User()
        _clear_activities()
        org = factories.Organization(user=user)
        original_title = org["title"]
        org["title"] = "Organization with changed title"
        helpers.call_action(
            "organization_update", context={"user": user["name"]}, **org
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed organization",
            "new organization",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert (
            activities[0]["data"]["group"]["title"]
            == "Organization with changed title"
        )
        # the old org still has the old title
        assert activities[1]["activity_type"] == "new organization"
        assert activities[1]["data"]["group"]["title"] == original_title

    def test_create_dataset(self):
        """A dataset created under the org shows on the org's stream."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset(self):
        """Updating an org's dataset shows both new and changed entries."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title

    def test_change_dataset_add_tag(self):
        """Adding a tag to an org's dataset records 'changed package'."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_delete_dataset(self):
        """Deleting an org's dataset records 'deleted package'."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_that_used_to_be_in_the_org(self):
        """Edits to a dataset that moved orgs don't show on the old org."""
        user = factories.User()
        org = factories.Organization(user=user)
        org2 = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        # remove the dataset from the org
        dataset["owner_org"] = org2["id"]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        # edit the dataset
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        # dataset change should not show up in its former group
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def test_delete_dataset_that_used_to_be_in_the_org(self):
        """Deleting a dataset that moved orgs doesn't show on the old org."""
        user = factories.User()
        org = factories.Organization(user=user)
        org2 = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        # remove the dataset from the group
        dataset["owner_org"] = org2["id"]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        # dataset deletion should not show up in its former org
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def _create_bulk_org_activities(self, count):
        """Insert ``count`` bare Activity rows for a fresh organization.

        Returns the organization's id.  Used by the limit tests below.
        """
        org = factories.Organization()
        from ckan import model

        objs = [
            model.Activity(
                user_id=None,
                object_id=org["id"],
                activity_type=None,
                data=None,
            )
            for i in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return org["id"]

    def test_limit_default(self):
        """Without config or a limit param, the default cap applies."""
        id = self._create_bulk_org_activities(35)
        results = helpers.call_action("organization_activity_list", id=id)
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit overrides the default cap."""
        id = self._create_bulk_org_activities(7)
        results = helpers.call_action("organization_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A requested limit above the configured max is capped at the max."""
        id = self._create_bulk_org_activities(9)
        results = helpers.call_action(
            "organization_activity_list", id=id, limit="9"
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max

    def test_normal_user_doesnt_see_hidden_activities(self):
        """Site-user activity is hidden from a normal caller."""
        # activity is 'hidden' because org is created by site_user
        org = factories.Organization()
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
        """Hidden activities are excluded unless explicitly requested."""
        # activity is 'hidden' because org is created by site_user
        org = factories.Organization()
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []

    def test_sysadmin_user_can_include_hidden_activities(self):
        """include_hidden_activity=True exposes site-user activities."""
        # activity is 'hidden' because org is created by site_user
        org = factories.Organization()
        activities = helpers.call_action(
            "organization_activity_list",
            include_hidden_activity=True,
            id=org["id"],
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new organization"
        ]
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestRecentlyChangedPackagesActivityList(object):
    """Tests for the ``recently_changed_packages_activity_list`` action.

    The action returns recent package activities site-wide (it takes no
    target id), so these tests create/update/delete datasets and assert
    on the resulting activity types and the limit config options.
    """

    def test_create_dataset(self):
        """Creating a dataset records a 'new package' activity."""
        user = factories.User()
        # Fix: this local was previously (misleadingly) named ``org`` even
        # though the factory creates a dataset.
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset(self):
        """Updating a dataset shows both new and changed entries."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title

    def test_change_dataset_add_extra(self):
        """Adding an extra records a 'changed package' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_change_dataset_add_tag(self):
        """Adding a tag records a 'changed package' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def test_delete_dataset(self):
        """Deleting a dataset records a 'deleted package' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        # Fix: this test previously queried "organization_activity_list",
        # which is not the action this class is testing.
        activities = helpers.call_action(
            "recently_changed_packages_activity_list"
        )
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]

    def _create_bulk_package_activities(self, count):
        """Insert ``count`` bare package Activity rows (no return value)."""
        from ckan import model

        objs = [
            model.Activity(
                user_id=None,
                object_id=None,
                activity_type="new_package",
                data=None,
            )
            for i in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()

    def test_limit_default(self):
        """Without config or a limit param, the default cap applies."""
        self._create_bulk_package_activities(35)
        results = helpers.call_action(
            "recently_changed_packages_activity_list"
        )
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit overrides the default cap."""
        self._create_bulk_package_activities(7)
        results = helpers.call_action(
            "recently_changed_packages_activity_list"
        )
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A requested limit above the configured max is capped at the max."""
        self._create_bulk_package_activities(9)
        results = helpers.call_action(
            "recently_changed_packages_activity_list", limit="9"
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDashboardActivityList(object):
    """Tests for the ``dashboard_activity_list`` action.

    The dashboard shows the calling user's own activities (always with
    ``is_new == False``) plus activities of things they follow; the limit
    tests check the standard result-count caps.
    """

    def test_create_user(self):
        """The 'new user' activity appears on the user's own dashboard."""
        user = factories.User()
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new user"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == user["id"]
        # user's own activities are always marked ``'is_new': False``
        assert not activities[0]["is_new"]

    def test_create_dataset(self):
        """A dataset created by the user appears on their dashboard."""
        user = factories.User()
        _clear_activities()
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # user's own activities are always marked ``'is_new': False``
        assert not activities[0]["is_new"]

    def test_create_group(self):
        """A group created by the user appears on their dashboard."""
        user = factories.User()
        _clear_activities()
        group = factories.Group(user=user)
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new group"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert activities[0]["data"]["group"]["title"] == group["title"]
        # user's own activities are always marked ``'is_new': False``
        assert not activities[0]["is_new"]

    def test_create_organization(self):
        """An org created by the user appears on their dashboard."""
        user = factories.User()
        _clear_activities()
        org = factories.Organization(user=user)
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new organization"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert activities[0]["data"]["group"]["title"] == org["title"]
        # user's own activities are always marked ``'is_new': False``
        assert not activities[0]["is_new"]

    def _create_bulk_package_activities(self, count):
        """Insert ``count`` bare Activity rows attributed to a fresh user.

        Returns the user's id.  NOTE(review): despite the name, these are
        user-attributed activities, not package activities — consider
        renaming to ``_create_bulk_user_activities``.
        """
        user = factories.User()
        from ckan import model

        objs = [
            model.Activity(
                user_id=user["id"],
                object_id=None,
                activity_type=None,
                data=None,
            )
            for i in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return user["id"]

    def test_limit_default(self):
        """Without config or a limit param, the default cap applies."""
        id = self._create_bulk_package_activities(35)
        results = helpers.call_action(
            "dashboard_activity_list", context={"user": id}
        )
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit overrides the default cap."""
        id = self._create_bulk_package_activities(7)
        results = helpers.call_action(
            "dashboard_activity_list", context={"user": id}
        )
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A requested limit above the configured max is capped at the max."""
        id = self._create_bulk_package_activities(9)
        results = helpers.call_action(
            "dashboard_activity_list", limit="9", context={"user": id}
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDashboardNewActivities(object):
def test_users_own_activities(self):
    """A user's own activities never count as 'new' on their dashboard."""
    # a user's own activities are not shown as "new"
    user = factories.User()
    dataset = factories.Dataset(user=user)
    dataset["title"] = "Dataset with changed title"
    helpers.call_action(
        "package_update", context={"user": user["name"]}, **dataset
    )
    helpers.call_action(
        "package_delete", context={"user": user["name"]}, **dataset
    )
    group = factories.Group(user=user)
    group["title"] = "Group with changed title"
    helpers.call_action(
        "group_update", context={"user": user["name"]}, **group
    )
    helpers.call_action(
        "group_delete", context={"user": user["name"]}, **group
    )
    new_activities = helpers.call_action(
        "dashboard_activity_list", context={"user": user["id"]}
    )
    # 7 activities: 'new user' plus three dataset and three group
    # operations above — all is_new=False because the user did them.
    assert [activity["is_new"] for activity in new_activities] == [
        False
    ] * 7
    new_activities_count = helpers.call_action(
        "dashboard_new_activities_count", context={"user": user["id"]}
    )
    assert new_activities_count == 0
def test_activities_by_a_followed_user(self):
    """All of a followed user's activities appear and count as 'new'."""
    user = factories.User()
    followed_user = factories.User()
    helpers.call_action(
        "follow_user", context={"user": user["name"]}, **followed_user
    )
    _clear_activities()
    # The followed user performs six operations; each should land on the
    # follower's dashboard.
    dataset = factories.Dataset(user=followed_user)
    dataset["title"] = "Dataset with changed title"
    helpers.call_action(
        "package_update",
        context={"user": followed_user["name"]},
        **dataset
    )
    helpers.call_action(
        "package_delete",
        context={"user": followed_user["name"]},
        **dataset
    )
    group = factories.Group(user=followed_user)
    group["title"] = "Group with changed title"
    helpers.call_action(
        "group_update", context={"user": followed_user["name"]}, **group
    )
    helpers.call_action(
        "group_delete", context={"user": followed_user["name"]}, **group
    )
    activities = helpers.call_action(
        "dashboard_activity_list", context={"user": user["id"]}
    )
    # Reversed so assertions read in chronological order.
    assert [
        activity["activity_type"] for activity in activities[::-1]
    ] == [
        "new package",
        "changed package",
        "deleted package",
        "new group",
        "changed group",
        "deleted group",
    ]
    assert [activity["is_new"] for activity in activities] == [True] * 6
    assert (
        helpers.call_action(
            "dashboard_new_activities_count", context={"user": user["id"]}
        )
        == 6
    )
def test_activities_on_a_followed_dataset(self):
    """Activities on a followed dataset appear and count as 'new'."""
    user = factories.User()
    another_user = factories.Sysadmin()
    _clear_activities()
    dataset = factories.Dataset(user=another_user)
    helpers.call_action(
        "follow_dataset", context={"user": user["name"]}, **dataset
    )
    dataset["title"] = "Dataset with changed title"
    helpers.call_action(
        "package_update", context={"user": another_user["name"]}, **dataset
    )
    activities = helpers.call_action(
        "dashboard_activity_list", context={"user": user["id"]}
    )
    # Reversed so assertions read in chronological order.
    assert [
        (activity["activity_type"], activity["is_new"])
        for activity in activities[::-1]
    ] == [
        ("new package", True),
        # NB The 'new package' activity is in our activity stream and shows
        # as "new" even though it occurred before we followed it. This is
        # known & intended design.
        ("changed package", True),
    ]
    assert (
        helpers.call_action(
            "dashboard_new_activities_count", context={"user": user["id"]}
        )
        == 2
    )
    def test_activities_on_a_followed_group(self):
        """Edits to a followed group appear on the follower's dashboard;
        the follower's own 'new group' activity is not counted as new."""
        user = factories.User()
        another_user = factories.Sysadmin()
        _clear_activities()
        group = factories.Group(user=user)
        helpers.call_action(
            "follow_group", context={"user": user["name"]}, **group
        )
        group["title"] = "Group with changed title"
        helpers.call_action(
            "group_update", context={"user": another_user["name"]}, **group
        )
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        # activities are returned newest-first; reverse for chronological order
        assert [
            (activity["activity_type"], activity["is_new"])
            for activity in activities[::-1]
        ] == [
            ("new group", False),  # False because user did this one herself
            ("changed group", True),
        ]
        assert (
            helpers.call_action(
                "dashboard_new_activities_count", context={"user": user["id"]}
            )
            == 1
        )
    def test_activities_on_a_dataset_in_a_followed_group(self):
        """Activity on a followed dataset that belongs to the user's group
        shows up on the dashboard and counts as new."""
        user = factories.User()
        another_user = factories.Sysadmin()
        group = factories.Group(user=user)
        _clear_activities()
        dataset = factories.Dataset(
            groups=[{"name": group["name"]}], user=another_user
        )
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "follow_dataset", context={"user": user["name"]}, **dataset
        )
        helpers.call_action(
            "package_update", context={"user": another_user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        # activities are returned newest-first; reverse for chronological order
        assert [
            (activity["activity_type"], activity["is_new"])
            for activity in activities[::-1]
        ] == [("new package", True), ("changed package", True)]
        assert (
            helpers.call_action(
                "dashboard_new_activities_count", context={"user": user["id"]}
            )
            == 2
        )
    def test_activities_that_should_not_show(self):
        """Activity by an unrelated user must not appear on this user's
        dashboard nor be counted as new."""
        user = factories.User()
        _clear_activities()
        # another_user does some activity unconnected with user
        another_user = factories.Sysadmin()
        group = factories.Group(user=another_user)
        dataset = factories.Dataset(
            groups=[{"name": group["name"]}], user=another_user
        )
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": another_user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [
            (activity["activity_type"], activity["is_new"])
            for activity in activities[::-1]
        ] == []
        assert (
            helpers.call_action(
                "dashboard_new_activities_count", context={"user": user["id"]}
            )
            == 0
        )
    @pytest.mark.ckan_config("ckan.activity_list_limit", "15")
    def test_maximum_number_of_new_activities(self):
        """Test that the new activities count does not go higher than 15, even
        if there are more than 15 new activities from the user's followers."""
        user = factories.User()
        another_user = factories.Sysadmin()
        dataset = factories.Dataset()
        helpers.call_action(
            "follow_dataset", context={"user": user["name"]}, **dataset
        )
        # Generate 20 update activities on the followed dataset; the count
        # must be capped at the configured ckan.activity_list_limit (15).
        for n in range(0, 20):
            dataset["notes"] = "Updated {n} times".format(n=n)
            helpers.call_action(
                "package_update",
                context={"user": another_user["name"]},
                **dataset
            )
        assert (
            helpers.call_action(
                "dashboard_new_activities_count", context={"user": user["id"]}
            )
            == 15
        )
@pytest.mark.usefixtures('clean_db', 'with_request_context')
class TestResourceSearch(object):
    """Integration tests for the `resource_search` action."""

    def test_required_fields(self):
        # The query argument is mandatory; with it the call succeeds.
        with pytest.raises(logic.ValidationError):
            helpers.call_action('resource_search')
        helpers.call_action('resource_search', query='name:*')

    def test_base_search(self):
        # Search on the resource name field.
        factories.Resource(name='one')
        factories.Resource(name='two')

        no_hits = helpers.call_action('resource_search', query="name:three")
        assert not no_hits['count']

        one_hit = helpers.call_action('resource_search', query="name:one")
        assert one_hit['count'] == 1

        all_hits = helpers.call_action('resource_search', query="name:")
        assert all_hits['count'] == 2

    def test_date_search(self):
        # The creation timestamp of a resource is searchable verbatim.
        created = factories.Resource()
        hits = helpers.call_action(
            'resource_search', query="created:" + created['created'])
        assert hits['count'] == 1

    def test_number_search(self):
        # Numeric fields such as size are searchable too.
        factories.Resource(size=10)
        hits = helpers.call_action('resource_search', query="size:10")
        assert hits['count'] == 1
|
davidrelke/CARLA-2DBBox | collectData.py | # Example program to save several sensor data including bounding box
# Sensors: RGB Camera (+BoundingBox), De[th Camera, Segmentation Camera, Lidar Camera
# By <NAME>
# 2020
# Last tested on CARLA 0.9.10.1
# CARLA Simulator is licensed under the terms of the MIT license
# For a copy, see <https://opensource.org/licenses/MIT>
# For more information about CARLA Simulator, visit https://carla.org/
import sys
import time
import argparse
import logging
import random
import queue
import math
import psutil
from queue import Queue
from datetime import datetime
from subprocess import Popen
from typing import List
# pylint: disable= no-name-in-module
from win32process import DETACHED_PROCESS
try:
# sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
# sys.version_info.major,
# sys.version_info.minor,
# 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
sys.path.append("C:/Studium/DLVR/CARLA/PythonAPI/carla/dist/carla-0.9.10-py3.7-win-amd64.egg")
except IndexError:
print('carla not found')
# pylint: disable= import-error, wrong-import-position
# noinspection PyUnresolvedReferences
import carla
import carla_vehicle_annotator as cva
def retrieve_data(sensor_queue, frame, timeout=5):
    """Read items from *sensor_queue* until one whose ``.frame`` attribute
    equals *frame* is found, discarding items with other frame numbers.

    Returns the matching item, or None if the queue stays empty for
    *timeout* seconds.
    """
    result = None
    while result is None:
        try:
            candidate = sensor_queue.get(True, timeout)
        except queue.Empty:
            # No sensor data arrived within the timeout window.
            return None
        if candidate.frame == frame:
            result = candidate
        # else: stale frame — drop it and keep waiting.
    return result
# Toggles selecting which sensor outputs are written to disk each capture.
SAVE_RGB = True
SAVE_DEPTH = False
SAVE_SEGM = False
SAVE_LIDAR = False
# Seconds of simulated time between sensor captures (used as 'sensor_tick').
TICK_SENSOR = 1
# Populated inside main(): walker controller/actor ids and walker records.
# Kept at module level so cleanup code can reach them.
all_id = []
walkers_list = []
def main():
    """Spawn an ego vehicle with sensors plus NPC traffic and pedestrians,
    run the simulation in synchronous mode, and save annotated sensor data
    every TICK_SENSOR simulated seconds until --max_images images have been
    captured or --max_time minutes have elapsed.

    Fixes vs. the previous revision:
    - LIDAR frames are saved under their own frame number (previously used
      ``segm_img.frame``, which raised NameError whenever SAVE_LIDAR was on
      but SAVE_SEGM was off).
    - sensor-cleanup uses ``except Exception`` instead of a bare ``except``
      so KeyboardInterrupt/SystemExit are not swallowed.
    """
    argparser = argparse.ArgumentParser(
        description=__doc__)
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-n', '--number-of-vehicles',
        metavar='N',
        default=50,
        type=int,
        help='number of vehicles (default: 50)')
    argparser.add_argument(
        '-tm_p', '--tm_port',
        metavar='P',
        default=8000,
        type=int,
        help='port to communicate with TM (default: 8000)')
    # NOTE(review): type=bool makes any non-empty string truthy (argparse
    # quirk); only the default (False) or `-ec 1` style usage behaves as
    # expected. Left unchanged to keep the CLI backward-compatible.
    argparser.add_argument(
        '-ec', '--start_carla',
        default=False,
        type=bool,
        help='Start and end CARLA automatically')
    argparser.add_argument(
        '-mi', '--max_images',
        default=2_000,
        type=int,
        help='Number of images captured before the script ends')
    argparser.add_argument(
        '-mt', '--max_time',
        default=30,
        type=int,
        help='Minutes before the script ends')
    args = argparser.parse_args()

    if args.start_carla:
        _ = Popen([r'C:\Studium\DLVR\\CARLA\\CarlaUE4.exe'], creationflags=DETACHED_PROCESS).pid
        time.sleep(5)

    vehicles_list = []
    nonvehicles_list = []
    client = carla.Client(args.host, args.port)
    client.set_timeout(10.0)
    start_time = datetime.now()

    try:
        # region setup
        traffic_manager = client.get_trafficmanager(args.tm_port)
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        world = client.get_world()
        print('\nRUNNING in synchronous mode\n')
        settings = world.get_settings()
        traffic_manager.set_synchronous_mode(True)
        if not settings.synchronous_mode:
            # We are the one enabling sync mode, so we are responsible for
            # ticking the world (synchronous_master).
            synchronous_master = True
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = 0.05
            world.apply_settings(settings)
        else:
            synchronous_master = False

        blueprints: carla.BlueprintLibrary = world.get_blueprint_library().filter('vehicle.*')
        spawn_points: List[carla.Transform] = world.get_map().get_spawn_points()
        number_of_spawn_points = len(spawn_points)
        if args.number_of_vehicles < number_of_spawn_points:
            random.shuffle(spawn_points)
        elif args.number_of_vehicles > number_of_spawn_points:
            msg = 'Requested %d vehicles, but could only find %d spawn points'
            logging.warning(msg, args.number_of_vehicles, number_of_spawn_points)
            args.number_of_vehicles = number_of_spawn_points

        SpawnActor = carla.command.SpawnActor
        SetAutopilot = carla.command.SetAutopilot
        FutureActor = carla.command.FutureActor
        images_path, labels_path = cva.setup_data_directory()
        # endregion

        # region Spawn ego vehicle and sensors
        # q_list collects one queue per data source; indices (tick_idx,
        # cam_idx, ...) map into the per-frame `data` list below.
        q_list: List[Queue] = []
        idx: int = 0
        tick_queue: Queue = Queue()
        world.on_tick(tick_queue.put)
        q_list.append(tick_queue)
        tick_idx: int = idx
        idx = idx + 1

        # Spawn ego vehicle
        ego_bp = random.choice(blueprints)
        ego_transform = random.choice(spawn_points)
        ego_vehicle = world.spawn_actor(ego_bp, ego_transform)
        vehicles_list.append(ego_vehicle)
        ego_vehicle.set_autopilot(True)
        print('Ego-vehicle ready')

        # Spawn RGB camera
        cam_transform = carla.Transform(carla.Location(x=1.5, z=2.4))
        cam_bp = world.get_blueprint_library().find('sensor.camera.rgb')
        cam_bp.set_attribute('sensor_tick', str(TICK_SENSOR))
        cam = world.spawn_actor(cam_bp, cam_transform, attach_to=ego_vehicle)
        nonvehicles_list.append(cam)
        cam_queue = queue.Queue()
        cam.listen(cam_queue.put)
        q_list.append(cam_queue)
        cam_idx = idx
        idx = idx + 1
        print('RGB camera ready')

        # Spawn depth camera (always needed for the occlusion filter)
        depth_bp = world.get_blueprint_library().find('sensor.camera.depth')
        depth_bp.set_attribute('sensor_tick', str(TICK_SENSOR))
        depth = world.spawn_actor(depth_bp, cam_transform, attach_to=ego_vehicle)
        cc_depth_log = carla.ColorConverter.LogarithmicDepth
        nonvehicles_list.append(depth)
        depth_queue = queue.Queue()
        depth.listen(depth_queue.put)
        q_list.append(depth_queue)
        depth_idx = idx
        idx = idx + 1
        print('Depth camera ready')

        # Spawn segmentation camera
        if SAVE_SEGM:
            segm_bp = world.get_blueprint_library().find('sensor.camera.semantic_segmentation')
            segm_bp.set_attribute('sensor_tick', str(TICK_SENSOR))
            segm_transform = carla.Transform(carla.Location(x=1.5, z=2.4))
            segm = world.spawn_actor(segm_bp, segm_transform, attach_to=ego_vehicle)
            cc_segm = carla.ColorConverter.CityScapesPalette
            nonvehicles_list.append(segm)
            segm_queue = queue.Queue()
            segm.listen(segm_queue.put)
            q_list.append(segm_queue)
            segm_idx = idx
            idx = idx + 1
            print('Segmentation camera ready')

        # Spawn LIDAR sensor
        if SAVE_LIDAR:
            lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast')
            lidar_bp.set_attribute('sensor_tick', str(TICK_SENSOR))
            lidar_bp.set_attribute('channels', '64')
            lidar_bp.set_attribute('points_per_second', '1120000')
            lidar_bp.set_attribute('upper_fov', '30')
            lidar_bp.set_attribute('range', '100')
            lidar_bp.set_attribute('rotation_frequency', '20')
            lidar_transform = carla.Transform(carla.Location(x=0, z=4.0))
            lidar = world.spawn_actor(lidar_bp, lidar_transform, attach_to=ego_vehicle)
            nonvehicles_list.append(lidar)
            lidar_queue = queue.Queue()
            lidar.listen(lidar_queue.put)
            q_list.append(lidar_queue)
            lidar_idx = idx
            idx = idx + 1
            print('LIDAR ready')
        # endregion

        # region Spawn vehicles
        # Fleet mix: 25% bikes, 25% cars, 25% motorbikes, remainder trucks.
        batch = []
        number_of_bikes = number_of_cars = number_of_motorbikes = int(math.ceil(args.number_of_vehicles * 0.25))
        number_of_trucks = args.number_of_vehicles - number_of_bikes - number_of_cars - number_of_motorbikes

        truck_blueprints = [blueprints.find("vehicle.tesla.cybertruck"), blueprints.find("vehicle.carlamotors.carlacola")]
        car_blueprints_ids = [
            "vehicle.citroen.c3",
            "vehicle.chevrolet.impala",
            "vehicle.audi.a2",
            "vehicle.nissan.micra",
            "vehicle.audi.tt",
            "vehicle.bmw.grandtourer",
            "vehicle.bmw.isetta",
            "vehicle.dodge_charger.police",
            "vehicle.jeep.wrangler_rubicon",
            "vehicle.mercedes-benz.coupe",
            "vehicle.mini.cooperst",
            "vehicle.nissan.patrol",
            "vehicle.seat.leon",
            "vehicle.toyota.prius",
            "vehicle.tesla.model3",
            "vehicle.audi.etron",
            "vehicle.volkswagen.t2",
            "vehicle.lincoln.mkz2017",
            "vehicle.mustang.mustang"
        ]
        car_blueprints = [blueprints.find(i) for i in car_blueprints_ids]
        bike_blueprints_ids = [
            "vehicle.bh.crossbike",
            "vehicle.gazelle.omafiets",
            "vehicle.diamondback.century"
        ]
        bike_blueprints = [blueprints.find(i) for i in bike_blueprints_ids]
        motorbike_blueprints_ids = [
            "vehicle.harley-davidson.low_rider",
            "vehicle.yamaha.yzf",
            "vehicle.kawasaki.ninja"
        ]
        motorbike_blueprints = [blueprints.find(i) for i in motorbike_blueprints_ids]

        def _queue_spawn(blueprint_pool, spawn_index):
            # Randomize blueprint cosmetics and queue a spawn command at the
            # given spawn point; returns the next spawn index.
            blueprint = random.choice(blueprint_pool)
            if blueprint.has_attribute('color'):
                color = random.choice(blueprint.get_attribute('color').recommended_values)
                blueprint.set_attribute('color', color)
            if blueprint.has_attribute('driver_id'):
                driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
                blueprint.set_attribute('driver_id', driver_id)
            blueprint.set_attribute('role_name', 'autopilot')
            batch.append(SpawnActor(blueprint, spawn_points[spawn_index]).then(SetAutopilot(FutureActor, True)))
            return spawn_index + 1

        i = 0
        for n in range(number_of_trucks):
            i = _queue_spawn(truck_blueprints, i)
        for n in range(number_of_cars):
            i = _queue_spawn(car_blueprints, i)
        for n in range(number_of_motorbikes):
            i = _queue_spawn(motorbike_blueprints, i)
        for n in range(number_of_bikes):
            i = _queue_spawn(bike_blueprints, i)

        for response in client.apply_batch_sync(batch, synchronous_master):
            if response.error:
                logging.error(response.error)
            else:
                vehicles_list.append(response.actor_id)
        print('Created %d npc vehicles \n' % len(vehicles_list))
        # endregion

        # region Spawn Walkers
        # some settings
        percentagePedestriansRunning = 0.0      # how many pedestrians will run
        percentagePedestriansCrossing = 0.0     # how many pedestrians will walk through the road
        blueprintsWalkers = world.get_blueprint_library().filter("walker.pedestrian.*")
        # 1. take all the random locations to spawn
        spawn_points = []
        for i in range(20):
            spawn_point = carla.Transform()
            loc = world.get_random_location_from_navigation()
            if loc is not None:
                spawn_point.location = loc
                spawn_points.append(spawn_point)
        # 2. we spawn the walker object
        batch = []
        walker_speed = []
        for spawn_point in spawn_points:
            walker_bp = random.choice(blueprintsWalkers)
            # set as not invincible
            if walker_bp.has_attribute('is_invincible'):
                walker_bp.set_attribute('is_invincible', 'false')
            # set the max speed
            if walker_bp.has_attribute('speed'):
                if (random.random() > percentagePedestriansRunning):
                    # walking
                    walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
                else:
                    # running
                    walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
            else:
                print("Walker has no speed")
                walker_speed.append(0.0)
            batch.append(SpawnActor(walker_bp, spawn_point))
        results = client.apply_batch_sync(batch, True)
        # Keep the speed list aligned with the walkers that actually spawned.
        walker_speed2 = []
        for i in range(len(results)):
            if results[i].error:
                logging.error(results[i].error)
            else:
                walkers_list.append({"id": results[i].actor_id})
                walker_speed2.append(walker_speed[i])
        walker_speed = walker_speed2
        # 3. we spawn the walker controller
        batch = []
        walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker')
        for i in range(len(walkers_list)):
            batch.append(SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"]))
        results = client.apply_batch_sync(batch, True)
        for i in range(len(results)):
            if results[i].error:
                logging.error(results[i].error)
            else:
                walkers_list[i]["con"] = results[i].actor_id
        # 4. we put altogether the walkers and controllers id to get the objects from their id
        for i in range(len(walkers_list)):
            all_id.append(walkers_list[i]["con"])
            all_id.append(walkers_list[i]["id"])
        all_actors = world.get_actors(all_id)
        # 5. initialize each controller and set target to walk to
        # (list is [controller, actor, controller, actor ...])
        # set how many pedestrians can cross the road
        world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
        for i in range(0, len(all_id), 2):
            # start walker
            all_actors[i].start()
            # set walk to random point
            all_actors[i].go_to_location(world.get_random_location_from_navigation())
            # max speed
            all_actors[i].set_max_speed(float(walker_speed[int(i / 2)]))
        # endregion

        # Begin the capture loop
        time_sim = 0
        images_captured: int = 0
        time_limit_reached: bool = False
        while images_captured < args.max_images and not time_limit_reached:
            # Advance the simulation one fixed step.
            now_frame = world.tick()
            # Check whether it's time to capture data
            if time_sim >= TICK_SENSOR:
                minutes_passed = (datetime.now() - start_time).total_seconds() / 60
                if minutes_passed >= args.max_time:
                    time_limit_reached = True
                data = [retrieve_data(q, now_frame) for q in q_list]
                assert all(x.frame == now_frame for x in data if x is not None)
                # Skip if any sensor data is not available
                if None in data:
                    continue

                # Collect every vehicle and pedestrian actor for annotation.
                all_actors = world.get_actors()
                vehicles_raw = []
                for actor in all_actors:
                    if actor.type_id.startswith('walker.pedestrian') or actor.type_id.startswith('vehicle'):
                        vehicles_raw.append(actor)

                snap = data[tick_idx]
                rgb_img = data[cam_idx]
                depth_img = data[depth_idx]

                # Attach additional information to the snapshot
                vehicles = cva.snap_processing(vehicles_raw, snap)

                # Save depth image, RGB image, and Bounding Boxes data
                if SAVE_DEPTH:
                    depth_img.save_to_disk('out_depth/%06d.png' % depth_img.frame, cc_depth_log)
                depth_meter = cva.extract_depth(depth_img)
                filtered, removed = cva.auto_annotate(vehicles, cam, depth_meter, max_dist=50,
                                                      json_path='vehicle_class_carla.json')

                # Drop boxes whose pixel area is too small to be useful.
                # bbox format: [[min_x, min_y], [max_x, max_y]]
                bboxes = filtered['bbox']
                classes = filtered['class']
                big_enough_bboxes = []
                filtered_classes = []
                for b, c in zip(bboxes, classes):
                    x1 = b[0][0]
                    x2 = b[1][0]
                    y1 = b[0][1]
                    y2 = b[1][1]
                    delta_x = x1 - x2 if x1 > x2 else x2 - x1
                    delta_y = y1 - y2 if y1 > y2 else y2 - y1
                    area: float = delta_x * delta_y
                    if area > 350:
                        big_enough_bboxes.append(b)
                        filtered_classes.append(c)

                if len(big_enough_bboxes) > 0:
                    images_captured += 1
                    if images_captured % 10 == 0:
                        print(f"Captured {images_captured}/{args.max_images} in {math.ceil(minutes_passed)} minutes")
                    cva.save_output(rgb_img, big_enough_bboxes, filtered_classes, removed['bbox'], removed['class'], save_patched=True, out_format='json')
                    cva.save2darknet(bboxes=big_enough_bboxes, vehicle_class=filtered_classes, carla_img=rgb_img, save_train=True, images_path=images_path, labels_path=labels_path)

                # Save segmentation image
                if SAVE_SEGM:
                    segm_img = data[segm_idx]
                    segm_img.save_to_disk('out_segm/%06d.png' % segm_img.frame, cc_segm)

                # Save LIDAR data
                if SAVE_LIDAR:
                    lidar_data = data[lidar_idx]
                    # BUG FIX: was '%06d.ply' % segm_img.frame, which raised
                    # NameError whenever SAVE_SEGM was disabled.
                    lidar_data.save_to_disk('out_lidar/%06d.ply' % lidar_data.frame)

                time_sim = 0
            time_sim = time_sim + settings.fixed_delta_seconds
    finally:
        # cva.save2darknet(None, None, None, save_train=True)
        try:
            cam.stop()
            depth.stop()
            if SAVE_SEGM:
                segm.stop()
            if SAVE_LIDAR:
                lidar.stop()
        except Exception:
            # Sensors may not exist if setup failed part-way through.
            print("Simulation ended before sensors have been created")

        # Restore asynchronous mode so the server is usable afterwards.
        settings = world.get_settings()
        settings.synchronous_mode = False
        settings.fixed_delta_seconds = None
        world.apply_settings(settings)

        print('\ndestroying %d vehicles' % len(vehicles_list))
        client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_list])
        print('destroying %d nonvehicles' % len(nonvehicles_list))
        client.apply_batch([carla.command.DestroyActor(x) for x in nonvehicles_list])
        # stop walker controllers (list is [controller, actor, controller, actor ...])
        # for i in range(0, len(all_id), 2):
        #     all_actors[i].stop()
        print('\ndestroying %d walkers' % len(walkers_list))
        client.apply_batch([carla.command.DestroyActor(x) for x in all_id])
        time.sleep(0.5)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        # Best-effort cleanup: kill the CARLA server process if it is still
        # running (it may have been launched by this script via --start_carla).
        for proc in psutil.process_iter():
            # check whether the process to kill name matches
            if proc.name() == "CarlaUE4-Win64-Shipping.exe":
                proc.kill()
                break
        print('\ndone.')
|
davidrelke/CARLA-2DBBox | carla_vehicle_annotator.py | <gh_stars>0
### Functions to extract 2D vehicle Bounding Box from CARLA
### By <NAME>
### Based example from CARLA Github client_bounding_boxes.py
### 2020
### Last tested on CARLA 0.9.10.1
### All of functions in PART 1 and PART 2 are copied from client_bounding_boxes.py example
### Except functions that convert 3D bounding boxes to 2D bounding boxes
### CARLA Simulator and client_bounding_boxes.py are licensed under the terms of the MIT license
### For a copy, see <https://opensource.org/licenses/MIT>
### For more information about CARLA Simulator, visit https://carla.org/
import numpy as np
import random
import PIL
from PIL import Image
from PIL import ImageDraw
import json
import pickle
import os
import glob
import sys
import cv2
import carla
### PART 0
### Calculate bounding boxes and apply the filter ###
#####################################################
### Use this function to get 2D bounding boxes of visible vehicles to camera using semantic LIDAR
def auto_annotate_lidar(vehicles, camera, lidar_data, max_dist=100, min_detect=5, show_img=None, json_path=None):
    """Return 2D bounding boxes of vehicles visible to *camera*, using a
    semantic LIDAR point cloud to decide visibility.

    A vehicle counts as visible when at least *min_detect* LIDAR points
    within *max_dist* carry its actor id.

    Returns (filtered_out, filtered_data) where filtered_out has keys
    'vehicles', 'bbox' and — if *json_path* is given — 'class'.
    """
    filtered_data = filter_lidar(lidar_data, camera, max_dist)
    if show_img is not None:
        show_lidar(filtered_data, camera, show_img)
    ### Delete this section if object_idx issue has been fixed in CARLA
    filtered_data = np.array([p for p in filtered_data if p.object_idx != 0])
    filtered_data = get_points_id(filtered_data, vehicles, camera, max_dist)
    ###
    visible_id, idx_counts = np.unique([p.object_idx for p in filtered_data], return_counts=True)
    # BUG FIX: the original computed the id-membership filter and then
    # overwrote it with a second list comprehension over the full `vehicles`
    # list; vehicles with no LIDAR hits then indexed idx_counts with an
    # empty array. Apply both conditions in a single pass instead.
    visible_vehicles = [
        v for v in vehicles
        if v.id in visible_id
        and idx_counts[(visible_id == v.id).nonzero()[0][0]] >= min_detect
    ]
    bounding_boxes_2d = [get_2d_bb(vehicle, camera) for vehicle in visible_vehicles]
    filtered_out = {}
    filtered_out['vehicles'] = visible_vehicles
    filtered_out['bbox'] = bounding_boxes_2d
    if json_path is not None:
        filtered_out['class'] = get_vehicle_class(visible_vehicles, json_path)
    return filtered_out, filtered_data
### Use this function to get 2D bounding boxes of visible vehicle to camera
def auto_annotate(vehicles, camera, depth_img, max_dist=100, depth_margin=-1, patch_ratio=0.5, resize_ratio=0.5, json_path=None):
    """Annotate vehicles visible to *camera* with 2D bounding boxes, using
    *depth_img* to reject occluded vehicles.

    Returns (filtered_out, removed_out): dicts of kept / rejected vehicles
    and their boxes (plus classes when *json_path* is given).
    """
    candidates = filter_angle_distance(vehicles, camera, max_dist)
    boxes = [get_2d_bb(candidate, camera) for candidate in candidates]
    classes = get_vehicle_class(candidates, json_path) if json_path is not None else []
    kept, rejected, _, _ = filter_occlusion_bbox(
        boxes, candidates, camera, depth_img, classes,
        False, depth_margin, patch_ratio, resize_ratio)
    return kept, rejected
### Same with auto_annotate(), but with debugging function for the occlusion filter
def auto_annotate_debug(vehicles, camera, depth_img, depth_show=False, max_dist=100, depth_margin=-1, patch_ratio=0.5, resize_ratio=0.5, json_path=None):
    """Like auto_annotate(), but also returns the depth patches inspected by
    the occlusion filter (and optional debug visualisation via *depth_show*)."""
    candidates = filter_angle_distance(vehicles, camera, max_dist)
    boxes = [get_2d_bb(candidate, camera) for candidate in candidates]
    classes = get_vehicle_class(candidates, json_path) if json_path is not None else []
    kept, rejected, depth_area, depth_show = filter_occlusion_bbox(
        boxes, candidates, camera, depth_img, classes,
        depth_show, depth_margin, patch_ratio, resize_ratio)
    return kept, rejected, depth_area, depth_show
#####################################################
#####################################################
### PART 1
### Use this function to get camera k matrix ########
#####################################################
### Get camera intrinsic matrix 'k'
def get_camera_intrinsic(sensor):
    """Build the 3x3 pinhole intrinsic matrix from a camera sensor's
    'image_size_x', 'image_size_y' and 'fov' attributes (strings)."""
    width = int(sensor.attributes['image_size_x'])
    height = int(sensor.attributes['image_size_y'])
    fov_deg = int(float(sensor.attributes['fov']))
    # Focal length in pixels derived from the horizontal field of view.
    focal = width / (2.0 * np.tan(fov_deg * np.pi / 360.0))
    k = np.identity(3)
    k[0, 2] = width / 2.0
    k[1, 2] = height / 2.0
    k[0, 0] = focal
    k[1, 1] = focal
    return k
#######################################################
#######################################################
### PART 2
### Use these functions to find 2D BB in the image ####
#######################################################
### Extract bounding box vertices of vehicle
def create_bb_points(vehicle):
    """Return the 8 corners of the vehicle's 3D bounding box as homogeneous
    coordinates (8x4 array), in the vehicle's local frame."""
    ext = vehicle.bounding_box.extent
    # Corner sign pattern: bottom face first (z negative), then top face,
    # in the same winding order as the projection code expects.
    signs = [
        (+1, +1, -1), (-1, +1, -1), (-1, -1, -1), (+1, -1, -1),
        (+1, +1, +1), (-1, +1, +1), (-1, -1, +1), (+1, -1, +1),
    ]
    cords = np.zeros((8, 4))
    for row, (sx, sy, sz) in enumerate(signs):
        cords[row, :] = np.array([sx * ext.x, sy * ext.y, sz * ext.z, 1])
    return cords
### Get transformation matrix from carla.Transform object
def get_matrix(transform):
    """Return the 4x4 homogeneous transform (np.matrix) encoded by a
    carla.Transform (roll/pitch/yaw in degrees, translation in meters)."""
    rot = transform.rotation
    loc = transform.location
    cy, sy = np.cos(np.radians(rot.yaw)), np.sin(np.radians(rot.yaw))
    cr, sr = np.cos(np.radians(rot.roll)), np.sin(np.radians(rot.roll))
    cp, sp = np.cos(np.radians(rot.pitch)), np.sin(np.radians(rot.pitch))
    matrix = np.matrix(np.identity(4))
    # Translation column.
    matrix[0, 3], matrix[1, 3], matrix[2, 3] = loc.x, loc.y, loc.z
    # Rotation block.
    matrix[0, 0] = cp * cy
    matrix[0, 1] = cy * sp * sr - sy * cr
    matrix[0, 2] = -cy * sp * cr - sy * sr
    matrix[1, 0] = sy * cp
    matrix[1, 1] = sy * sp * sr + cy * cr
    matrix[1, 2] = -sy * sp * cr + cy * sr
    matrix[2, 0] = sp
    matrix[2, 1] = -cp * sr
    matrix[2, 2] = cp * cr
    return matrix
### Transform coordinate from vehicle reference to world reference
def vehicle_to_world(cords, vehicle):
    """Map homogeneous points from the vehicle's bounding-box frame to the
    world frame (returns points as columns)."""
    bb_offset = get_matrix(carla.Transform(vehicle.bounding_box.location))
    to_world = np.dot(get_matrix(vehicle.get_transform()), bb_offset)
    return np.dot(to_world, np.transpose(cords))
### Transform coordinate from world reference to sensor reference
def world_to_sensor(cords, sensor):
    """Map homogeneous world-frame points (as columns) into the sensor's
    reference frame."""
    sensor_to_world = get_matrix(sensor.get_transform())
    return np.dot(np.linalg.inv(sensor_to_world), cords)
### Transform coordinate from vehicle reference to sensor reference
def vehicle_to_sensor(cords, vehicle, sensor):
    """Compose vehicle->world and world->sensor transforms on *cords*."""
    return world_to_sensor(vehicle_to_world(cords, vehicle), sensor)
### Summarize bounding box creation and project the poins in sensor image
def get_bounding_box(vehicle, sensor):
    """Project the vehicle's 3D bounding-box corners into the sensor image;
    returns an 8x3 matrix of (u, v, depth) rows."""
    k = get_camera_intrinsic(sensor)
    cords = vehicle_to_sensor(create_bb_points(vehicle), vehicle, sensor)[:3, :]
    # Reorder axes to (y, -z, x) so the third row is depth along the view axis.
    cam_cords = np.concatenate([cords[1, :], -cords[2, :], cords[0, :]])
    bbox = np.transpose(np.dot(k, cam_cords))
    # Perspective divide by depth for pixel coordinates; keep depth column.
    return np.concatenate(
        [bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)
### Draw 2D bounding box (4 vertices) from 3D bounding box (8 vertices) in image
### 2D bounding box is represented by two corner points
def p3d_to_p2d_bb(p3d_bb):
    """Collapse projected 3D box vertices into an axis-aligned 2D box
    [[min_x, min_y], [max_x, max_y]]."""
    xs = p3d_bb[:, 0]
    ys = p3d_bb[:, 1]
    return np.array([[xs.min(), ys.min()], [xs.max(), ys.max()]])
### Summarize 2D bounding box creation
def get_2d_bb(vehicle, sensor):
    """Project the vehicle into the sensor image and collapse the result
    to a 2-corner 2D bounding box."""
    return p3d_to_p2d_bb(get_bounding_box(vehicle, sensor))
#######################################################
#######################################################
### PART 3
### Use these functions to remove invisible vehicles ##
#######################################################
### Get numpy 2D array of vehicles' location and rotation from world reference, also locations from sensor reference
def get_list_transform(vehicles_list, sensor):
    # Build an (N, 6) array of [x, y, z, roll, pitch, yaw] per vehicle in
    # world coordinates, plus each vehicle's homogeneous location expressed
    # in the sensor's reference frame (rows of transform_s).
    t_list = []
    for vehicle in vehicles_list:
        v = vehicle.get_transform()
        transform = [v.location.x , v.location.y , v.location.z , v.rotation.roll , v.rotation.pitch , v.rotation.yaw]
        t_list.append(transform)
    t_list = np.array(t_list).reshape((len(t_list),6))
    # Homogeneous world locations: xyz columns plus a column of ones.
    transform_h = np.concatenate((t_list[:,:3],np.ones((len(t_list),1))),axis=1)
    sensor_world_matrix = get_matrix(sensor.get_transform())
    # Inverting the sensor's world transform maps world -> sensor frame.
    world_sensor_matrix = np.linalg.inv(sensor_world_matrix)
    transform_s = np.dot(world_sensor_matrix, transform_h.T).T
    return t_list , transform_s
### Remove vehicles that are not in the FOV of the sensor
def filter_angle(vehicles_list, v_transform, v_transform_s, sensor):
    """Keep only vehicles whose bearing (in the sensor frame) lies inside
    the sensor's horizontal field of view; the two transform arrays are
    filtered in lockstep and returned alongside the vehicle list."""
    half_fov = int(float(sensor.attributes['fov'])) / 2
    # Bearing of each vehicle in the sensor frame, degrees.
    bearings = np.arctan2(v_transform_s[:, 1], v_transform_s[:, 0]) * 180 / np.pi
    keep = np.array(np.absolute(bearings) < half_fov)
    kept_vehicles = [v for v, flag in zip(vehicles_list, keep) if flag]
    mask = keep[:, 0]
    return kept_vehicles, v_transform[mask, :], v_transform_s[mask, :]
### Remove vehicles that have distance > max_dist from the sensor
def filter_distance(vehicles_list, v_transform, v_transform_s, sensor, max_dist=100):
    """Drop vehicles farther than *max_dist* meters from the sensor; the two
    transform arrays are filtered in lockstep and returned alongside the
    vehicle list."""
    s_loc = sensor.get_transform().location
    sensor_pos = np.array([s_loc.x, s_loc.y, s_loc.z])
    # Compare squared distances to avoid the sqrt.
    sq_dist = np.sum(np.square(v_transform[:, :3] - sensor_pos), axis=1)
    keep = sq_dist < (max_dist ** 2)
    kept_vehicles = [v for v, flag in zip(vehicles_list, keep) if flag]
    return kept_vehicles, v_transform[keep, :], v_transform_s[keep, :]
### Remove vehicles that are occluded from the sensor view based on one point depth measurement
### NOT USED by default because of the unstable result
def filter_occlusion_1p(vehicles_list, v_transform, v_transform_s, sensor, depth_img, depth_margin=2.0):
    # Project each vehicle's sensor-frame location into the image, read the
    # depth map at that single pixel, and keep the vehicle when its range is
    # within depth_margin of the measured depth (i.e. not hidden behind
    # something closer). Also returns the small pixel patches inspected,
    # for debugging.
    camera_k_matrix = get_camera_intrinsic(sensor)
    CAM_W = int(sensor.attributes['image_size_x'])
    CAM_H = int(sensor.attributes['image_size_y'])
    # Reorder sensor-frame axes to camera convention before projecting.
    pos_x_y_z = v_transform_s.T
    pos_y_minus_z_x = np.concatenate([pos_x_y_z[1, :], -pos_x_y_z[2, :]-0.0, pos_x_y_z[0, :]])
    img_pos = np.transpose(np.dot(camera_k_matrix, pos_y_minus_z_x))
    # Perspective divide: pixel coordinates plus depth per vehicle.
    camera_pos = np.concatenate([img_pos[:, 0] / img_pos[:, 2], img_pos[:, 1] / img_pos[:, 2], img_pos[:, 2]], axis=1)
    u_arr = np.array(camera_pos[:,0]).flatten()
    v_arr = np.array(camera_pos[:,1]).flatten()
    # Vehicle range along the sensor's forward (x) axis.
    dist = np.array(v_transform_s[:,0]).flatten()
    depth_patches = []
    v_depth = []
    for u, v in zip(list(u_arr),list(v_arr)):
        if u<=CAM_W and v<=CAM_H:
            # Single-pixel depth read; the 7x7 patch corners are only kept
            # for visual debugging.
            v_depth.append(depth_img[int(v),int(u)])
            depth_a = np.array([[int(u)-3,int(v)-3] , [int(u)+3,int(v)+3]])
            depth_patches.append(depth_a)
        else:
            # Projection fell outside the image: treat measured depth as 0,
            # which rejects the vehicle unless it is within depth_margin.
            v_depth.append(0)
    v_depth = np.array(v_depth)
    selector = (dist-v_depth) < depth_margin
    vehicles_list_f = [v for v, s in zip(vehicles_list, selector) if s]
    v_transform_f = v_transform[selector,:]
    v_transform_s_f = v_transform_s[selector,:]
    return vehicles_list_f , v_transform_f , v_transform_s_f, depth_patches
### Apply angle and distance filters in one function
def filter_angle_distance(vehicles_list, sensor, max_dist=100):
    """Run the distance filter followed by the angle filter.

    Convenience wrapper: computes the vehicle transforms once, chains
    filter_distance and filter_angle, and returns only the surviving
    vehicle list (the transform arrays are discarded).
    """
    transforms, transforms_s = get_list_transform(vehicles_list, sensor)
    vehicles_list, transforms, transforms_s = filter_distance(
        vehicles_list, transforms, transforms_s, sensor, max_dist)
    vehicles_list, transforms, transforms_s = filter_angle(
        vehicles_list, transforms, transforms_s, sensor)
    return vehicles_list
### Apply occlusion filter based on resized bounding box depth values
def filter_occlusion_bbox(bounding_boxes, vehicles, sensor, depth_img, v_class=None, depth_capture=False, depth_margin=-1, patch_ratio=0.5, resize_ratio=0.5):
    """Split detections into visible vs occluded using depth inside each bbox.

    For every vehicle, a bbox shrunk by resize_ratio is sampled from
    depth_img; the vehicle is considered visible when more than
    patch_ratio of the sampled rows-worth of pixels are within the margin
    of its measured range.

    Parameters:
        bounding_boxes  list of 2x2 arrays [[u1, v1], [u2, v2]] per vehicle
        vehicles        carla actors/snapshots matching bounding_boxes
        sensor          camera sensor used for the projection
        depth_img       2-D depth map aligned with the camera image
        v_class         optional per-vehicle class labels, split alongside
        depth_capture   when True, show the sampled patches and print deltas
        depth_margin    occlusion tolerance in metres; < 0 means derive it
                        per vehicle from its bounding-box extent
        patch_ratio     fraction of pixels that must pass the margin test
        resize_ratio    how much the bbox is shrunk before sampling depth
    Returns:
        (filtered_out, removed_out, patches, depth_capture) where the two
        dicts carry 'bbox', 'vehicles' and optionally 'class' lists.
    """
    filtered_bboxes = []
    filtered_vehicles = []
    filtered_v_class = []
    filtered_out = {}
    removed_bboxes = []
    removed_vehicles = []
    removed_v_class = []
    removed_out = {}
    selector = []
    patches = []
    patch_delta = []
    _, v_transform_s = get_list_transform(vehicles, sensor)
    for v, vs, bbox in zip(vehicles, v_transform_s, bounding_boxes):
        dist = vs[:, 0]
        # BUG FIX: the default margin used to be written back into
        # depth_margin, so it was computed for the first vehicle only and
        # then silently reused for every other vehicle. Use a per-vehicle
        # local instead.
        if depth_margin < 0:
            margin = (v.bounding_box.extent.x ** 2 + v.bounding_box.extent.y ** 2) ** 0.5 + 0.25
        else:
            margin = depth_margin
        # Shrunken bbox centred on the original bbox centre.
        uc = int((bbox[0, 0] + bbox[1, 0]) / 2)
        vc = int((bbox[0, 1] + bbox[1, 1]) / 2)
        wp = int((bbox[1, 0] - bbox[0, 0]) * resize_ratio / 2)
        hp = int((bbox[1, 1] - bbox[0, 1]) * resize_ratio / 2)
        u1, u2 = uc - wp, uc + wp
        v1, v2 = vc - hp, vc + hp
        depth_patch = np.array(depth_img[v1:v2, u1:u2])
        dist_delta = dist - depth_patch
        s_patch = np.array(dist_delta < margin)
        # NOTE(review): the threshold uses s_patch.shape[0] (row count), not
        # s_patch.size; possibly intended to be the full patch size — confirm
        # before changing, behaviour kept as-is.
        s = np.sum(s_patch) > s_patch.shape[0] * patch_ratio
        selector.append(s)
        patches.append(np.array([[u1, v1], [u2, v2]]))
        patch_delta.append(dist_delta)
    for bbox, v, s in zip(bounding_boxes, vehicles, selector):
        if s:
            filtered_bboxes.append(bbox)
            filtered_vehicles.append(v)
        else:
            removed_bboxes.append(bbox)
            removed_vehicles.append(v)
    filtered_out['bbox'] = filtered_bboxes
    filtered_out['vehicles'] = filtered_vehicles
    removed_out['bbox'] = removed_bboxes
    removed_out['vehicles'] = removed_vehicles
    if v_class is not None:
        for cls, s in zip(v_class, selector):
            if s:
                filtered_v_class.append(cls)
            else:
                removed_v_class.append(cls)
        filtered_out['class'] = filtered_v_class
        removed_out['class'] = removed_v_class
    if depth_capture:
        # One-shot debug dump: visualise patches and print per-vehicle deltas.
        depth_debug(patches, depth_img, sensor)
        for i, matrix in enumerate(patch_delta):
            print("\nvehicle " + str(i) + ": \n" + str(matrix))
        depth_capture = False
    return filtered_out, removed_out, patches, depth_capture
### Display area in depth image where measurement values are taken
def depth_debug(depth_patches, depth_img, sensor):
    """Show the depth image with the sampled patch rectangles outlined.

    Log-compresses the depth map into [0, 255] grayscale, draws each patch
    (given as [[u1, v1], [u2, v2]] corner pairs) as a red rectangle, and
    opens the result in the default image viewer (blocking side effect).
    """
    CAM_W = int(sensor.attributes['image_size_x'])
    CAM_H = int(sensor.attributes['image_size_y'])
    # Log-compress for visibility: log10(depth) scaled so 10^4 maps to 255.
    depth_img = np.log10(depth_img)
    depth_img = depth_img * 255 / 4
    # (Removed a leftover no-op bare `depth_img` expression statement here.)
    depth_3ch = np.zeros((CAM_H, CAM_W, 3))
    depth_3ch[:, :, 0] = depth_3ch[:, :, 1] = depth_3ch[:, :, 2] = depth_img
    depth_3ch = np.uint8(depth_3ch)
    image = Image.fromarray(depth_3ch, 'RGB')
    img_draw = ImageDraw.Draw(image)
    for crop in depth_patches:
        u1 = int(crop[0, 0])
        v1 = int(crop[0, 1])
        u2 = int(crop[1, 0])
        v2 = int(crop[1, 1])
        crop_bbox = [(u1, v1), (u2, v2)]
        img_draw.rectangle(crop_bbox, outline="red")
    image.show()
### Filter out lidar points that are outside camera FOV
def filter_lidar(lidar_data, camera, max_dist):
    """Return only the lidar points inside the camera's view frustum.

    A point survives when it is closer than max_dist and its horizontal and
    vertical angles off the camera axis are within half the horizontal FOV
    and the derived vertical FOV respectively.
    """
    width = int(camera.attributes['image_size_x'])
    height = int(camera.attributes['image_size_y'])
    hfov = float(camera.attributes['fov'])
    # Vertical FOV follows from the horizontal FOV and the aspect ratio.
    vfov = np.rad2deg(2 * np.arctan(np.tan(np.deg2rad(hfov / 2)) * height / width))
    # Camera-frame ordering: (y, -z, x) with x forward.
    pts = np.array([[p.point.y, -p.point.z, p.point.x] for p in lidar_data])
    sq_range = np.sum(np.square(pts), axis=1).reshape((-1))
    ang_h = np.absolute(np.arctan2(pts[:, 0], pts[:, 2]) * 180 / np.pi).reshape((-1))
    ang_v = np.absolute(np.arctan2(pts[:, 1], pts[:, 2]) * 180 / np.pi).reshape((-1))
    in_view = np.logical_and(
        sq_range < (max_dist ** 2),
        np.logical_and(ang_h < (hfov / 2), ang_v < (vfov / 2)))
    return [pt for pt, flag in zip(lidar_data, in_view) if flag]
### Save camera image with projected lidar points for debugging purpose
def show_lidar(lidar_data, camera, carla_img):
    """Save the camera frame with projected lidar points, for debugging.

    Projects each lidar point into the image plane with the camera
    intrinsics, draws it as a 1-px circle (white when object_idx == 0,
    black otherwise), and writes the composite to out_lidar_img/<frame>.jpg.

    NOTE(review): the array is assembled in RGB order, then converted with
    COLOR_BGR2RGB before cv2.imwrite (which expects BGR) — the two channel
    swaps appear to cancel; confirm the saved colours are correct.
    """
    # Camera-frame ordering (y, -z, x), same convention as filter_lidar.
    lidar_np = np.array([[p.point.y,-p.point.z,p.point.x] for p in lidar_data])
    cam_k = get_camera_intrinsic(camera)
    # Project LIDAR 3D to Camera 2D
    lidar_2d = np.transpose(np.dot(cam_k,np.transpose(lidar_np)))
    # Perspective divide; truncate to integer pixel coordinates.
    lidar_2d = (lidar_2d/lidar_2d[:,2].reshape((-1,1))).astype(int)
    # Visualize the result
    # Grayscale per point: 255 when the hit actor id is 0, else 0.
    c_scale = []
    for pts in lidar_data:
        if pts.object_idx == 0: c_scale.append(255)
        else: c_scale.append(0)
    carla_img.convert(carla.ColorConverter.Raw)
    # raw_data is BGRA; reorder channels into an RGB uint8 array.
    img_bgra = np.array(carla_img.raw_data).reshape((carla_img.height,carla_img.width,4))
    img_rgb = np.zeros((carla_img.height,carla_img.width,3))
    img_rgb[:,:,0] = img_bgra[:,:,2]
    img_rgb[:,:,1] = img_bgra[:,:,1]
    img_rgb[:,:,2] = img_bgra[:,:,0]
    img_rgb = np.uint8(img_rgb)
    for p,c in zip(lidar_2d,c_scale):
        c = int(c)
        cv2.circle(img_rgb,tuple(p[:2]),1,(c,c,c),-1)
    filename = 'out_lidar_img/%06d.jpg' % carla_img.frame
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
    cv2.imwrite(filename, img_rgb)
### Add actor ID of the vehicle hit by the lidar points
### Only used before the object_id issue of semantic lidar solved
def get_points_id(lidar_points, vehicles, camera, max_dist):
    """Assign the hit vehicle's actor id to each lidar point's object_idx.

    Workaround used while the semantic lidar's own object_id was broken:
    each point is transformed into world coordinates and tested against the
    bounding boxes of the vehicles inside the camera frustum; the first
    containing vehicle's id is written into the point (in place).

    Returns the list of (mutated) lidar points.
    """
    vehicles_f = filter_angle_distance(vehicles, camera, max_dist)
    # PERF: the sensor->world matrix is loop-invariant; the original
    # recomputed it once per point. Hoist it out of the loop.
    sensor_world_matrix = get_matrix(camera.get_transform())
    fixed_lidar_points = []
    for p in lidar_points:
        pw = np.dot(sensor_world_matrix, [[p.point.x], [p.point.y], [p.point.z], [1]])
        pw = carla.Location(pw[0, 0], pw[1, 0], pw[2, 0])
        for v in vehicles_f:
            if v.bounding_box.contains(pw, v.get_transform()):
                p.object_idx = v.id
                break
        fixed_lidar_points.append(p)
    return fixed_lidar_points
#######################################################
#######################################################
### PART 4
### Function to return vehicle's label ################
#######################################################
def get_vehicle_class(vehicles, json_path=None):
    """Map each actor to an integer class label read from a JSON config.

    The JSON file must contain a 'classification' mapping from type_id to
    class index and a 'reference' section with an 'others' fallback class.
    Pedestrian actors (type_id like 'walker.pedestrian.0011') are collapsed
    onto the single 'walker.pedestrian' key.

    Returns a list of int class labels, one per actor in `vehicles`.
    """
    # BUG FIX: the file handle was previously opened and never closed.
    with open(json_path) as f:
        json_data = json.load(f)
    vehicles_data = json_data['classification']
    other_class = json_data["reference"].get('others')
    class_list = []
    for v in vehicles:
        # Pedestrians have a type_id like 'walker.pedestrian.0011'
        type_id: str = v.type_id
        if type_id.startswith("walker.pedestrian."):
            type_id = "walker.pedestrian"
        # Unknown types fall back to the 'others' class.
        class_list.append(int(vehicles_data.get(type_id, other_class)))
    return class_list
#######################################################
#######################################################
### PART 5
### Function to save output ###########################
#######################################################
### Use this function to save the rgb image (with and without bounding box) and bounding boxes data
def save_output(carla_img, bboxes, vehicle_class=None, old_bboxes=None, old_vehicle_class=None, cc_rgb=carla.ColorConverter.Raw, path='', save_patched=False, add_data=None, out_format='pickle'):
    """Save the RGB frame plus bounding-box annotations (and optionally an
    image with the boxes drawn on).

    Writes:
        <path>out_rgb/<frame>.png        the converted camera frame
        <path>out_bbox/<frame>.json|.pkl annotation dict (per out_format)
        <path>out_rgb_bbox/<frame>.png   frame with red boxes (save_patched)

    The annotation dict has keys 'bboxes', and optionally 'vehicle_class',
    'removed_bboxes', 'removed_vehicle_class' and 'others' (add_data).
    """
    carla_img.convert(cc_rgb)
    # raw_data is BGRA; reorder channels into an RGB uint8 array.
    img_bgra = np.array(carla_img.raw_data).reshape((carla_img.height, carla_img.width, 4))
    img_rgb = np.zeros((carla_img.height, carla_img.width, 3))
    img_rgb[:, :, 0] = img_bgra[:, :, 2]
    img_rgb[:, :, 1] = img_bgra[:, :, 1]
    img_rgb[:, :, 2] = img_bgra[:, :, 0]
    img_rgb = np.uint8(img_rgb)
    image = Image.fromarray(img_rgb, 'RGB')
    img_draw = ImageDraw.Draw(image)
    filename = path + 'out_rgb/%06d.png' % carla_img.frame
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    image.save(filename)
    out_dict = {}
    out_dict['bboxes'] = [bbox.tolist() for bbox in bboxes]  # Get bboxes here
    if vehicle_class is not None:
        out_dict['vehicle_class'] = vehicle_class  # Get Class here
    if old_bboxes is not None:
        out_dict['removed_bboxes'] = [bbox.tolist() for bbox in old_bboxes]
    if old_vehicle_class is not None:
        out_dict['removed_vehicle_class'] = old_vehicle_class
    if add_data is not None:
        out_dict['others'] = add_data
    if out_format == 'json':
        filename = path + 'out_bbox/%06d.json' % carla_img.frame
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w') as outfile:
            json.dump(out_dict, outfile, indent=4)
    else:
        # BUG FIX: this branch previously wrote JSON text into the .pkl
        # file via json.dump in text mode. Write an actual pickle so the
        # extension matches the content (out_format default is 'pickle').
        import pickle
        filename = path + 'out_bbox/%06d.pkl' % carla_img.frame
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'wb') as outfile:
            pickle.dump(out_dict, outfile)
    if save_patched:
        for crop in bboxes:
            u1 = int(crop[0, 0])
            v1 = int(crop[0, 1])
            u2 = int(crop[1, 0])
            v2 = int(crop[1, 1])
            img_draw.rectangle([(u1, v1), (u2, v2)], outline="red")
        filename = path + 'out_rgb_bbox/%06d.png' % carla_img.frame
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        image.save(filename)
def setup_data_directory(json_path: str = 'vehicle_class.json', data_path: str = 'data/') -> "tuple[str, str]":
    """Create (if missing) the darknet images/ and labels/ dirs under data_path.

    json_path is kept for backward compatibility but is currently unused
    (the classes.txt generation that used it was disabled).

    Returns:
        (images_path, labels_path)
    """
    # BUG FIX: the annotation was `-> [str, str]`, a list literal that is not
    # a valid type; a string annotation keeps it lazy and version-safe.
    images_path: str = data_path + "images"
    labels_path: str = data_path + "labels"
    if not os.path.exists(labels_path):
        os.makedirs(labels_path)
        print(labels_path + ' directory did not exists, new directory created')
    if not os.path.exists(images_path):
        os.makedirs(images_path)
        print(images_path + ' directory did not exists, new directory created')
    return images_path, labels_path
### Use this function to save bounding box result in darknet training format
def save2darknet(bboxes, vehicle_class, carla_img, images_path: str, labels_path: str,
                 cc_rgb = carla.ColorConverter.Raw, val_split: float = 0.75, save_train: bool = False):
    """Save one frame and its labels in darknet (YOLO) training format.

    Writes <images_path>/<frame>.jpg and <labels_path>/<frame>.txt with one
    "class xc yc w h" line per box (coordinates normalised to [0, 1]).
    When save_train is True, the image path is appended to train.txt or
    val.txt next to images_path.

    NOTE(review): despite its name, val_split is used as the TRAIN
    fraction (val_split=0.75 sends ~75% of frames to train.txt) — confirm
    intent before renaming.
    """
    # BUG FIX: the guard used `or`, so a call with e.g. only bboxes set
    # crashed on carla_img.convert(...). All three inputs are required.
    if bboxes is not None and vehicle_class is not None and carla_img is not None:
        # save image
        carla_img.convert(cc_rgb)
        # raw_data is BGRA; reorder channels into an RGB uint8 array.
        img_bgra = np.array(carla_img.raw_data).reshape((carla_img.height, carla_img.width, 4))
        img_rgb = np.zeros((carla_img.height, carla_img.width, 3))
        img_rgb[:, :, 0] = img_bgra[:, :, 2]
        img_rgb[:, :, 1] = img_bgra[:, :, 1]
        img_rgb[:, :, 2] = img_bgra[:, :, 0]
        img_rgb = np.uint8(img_rgb)
        image = Image.fromarray(img_rgb, 'RGB')
        image_file_path = images_path + "/" + '%06d.jpg' % carla_img.frame
        image.save(image_file_path)
        # save bounding box data (normalised centre/size per YOLO convention)
        datastr = ''
        for box, v_class in zip(bboxes, vehicle_class):
            uc = ((box[0, 0] + box[1, 0]) / 2) / carla_img.width
            vc = ((box[0, 1] + box[1, 1]) / 2) / carla_img.height
            w = (box[1, 0] - box[0, 0]) / carla_img.width
            h = (box[1, 1] - box[0, 1]) / carla_img.height
            datastr = datastr + f"{v_class} {uc} {vc} {w} {h} \n"
        with open(labels_path + '/' + '%06d.txt' % carla_img.frame, 'w') as filetxt:
            filetxt.write(datastr)
        if save_train:
            goes_to_train = random.randrange(100) < val_split * 100
            if goes_to_train:
                with open(images_path + '/../train.txt', 'a+') as train_file:
                    train_file.write(image_file_path + "\n")
            else:
                with open(images_path + '/../val.txt', 'a+') as val_file:
                    val_file.write(image_file_path + "\n")
### Use this function to convert depth image (carla.Image) to a depth map in meter
def extract_depth(depth_img):
depth_img.convert(carla.ColorConverter.Depth)
depth_meter = np.array(depth_img.raw_data).reshape((depth_img.height,depth_img.width,4))[:,:,0] * 1000 / 255
return depth_meter
### Use this function to get vehicles' snapshots that can be processed by auto_annotate() function.
def snap_processing(vehiclesActor, worldSnap):
    """Build annotatable vehicle snapshots from live actors.

    Looks each actor up in the world snapshot by id, copies over the
    bounding_box and type_id attributes (which snapshots lack), and
    returns the list of enriched snapshots; actors missing from the
    snapshot are skipped.
    """
    enriched = []
    for actor in vehiclesActor:
        snap = worldSnap.find(actor.id)
        if snap is None:
            continue
        # Snapshots do not carry these attributes; copy from the live actor.
        snap.bounding_box = actor.bounding_box
        snap.type_id = actor.type_id
        enriched.append(snap)
    return enriched
#######################################################
#######################################################
|
davidrelke/CARLA-2DBBox | count_types.py | import os
from os import listdir
from os.path import isfile, join
from typing import List
def count_labels(labels_path: str) -> dict:
    """Count darknet label lines per class id under labels_path.

    Returns a dict mapping class-id string ('0'..'4') to the number of
    label lines of that class across all files in the directory.
    """
    counts = {str(i): 0 for i in range(5)}
    label_files = [join(labels_path, f) for f in listdir(labels_path)
                   if isfile(join(labels_path, f))]
    for file_path in label_files:
        with open(file_path, "r") as label_file:
            for line in label_file:
                # BUG FIX: compare the whole first token, not a prefix —
                # startswith("1") would also match a class id like "10".
                tokens = line.split(maxsplit=1)
                if tokens and tokens[0] in counts:
                    counts[tokens[0]] += 1
    return counts


def main() -> None:
    """Print per-class counts for the default data/labels directory."""
    c = count_labels(os.path.abspath("data/labels"))
    # Class mapping: 0=car, 1=truck, 2=motorbike, 3=bike, 4=person.
    print(f"cars: {c['0']} trucks: {c['1']} bikes: {c['3']} motorbikes: {c['2']} person: {c['4']}")


if __name__ == "__main__":
    main()
julianmak/pydra | native/sta2dfft.py | <gh_stars>0
#!/usr/bin/env python3
#
# JM: 12 Apr 2018
#
# the sta2dfft.f90 adapted for python
# contains 2d spectral commands which uses stafft
from stafft import *
# This module performs FFTs in two directions on two dimensional arrays using
# the stafft library module to actually compute the FFTs. If FFTs in one
# direction only are required use the stafft module directly. The module can
# compute any combination of sine, cosine and full FFTs in each direction.
# Along with the usual forwards (physical -> Fourier space) and reverse
# (Fourier space -> physical) routines there are also routines for computing
# the first derivatives in either direction.
#
# The convention is that for each direction the array is dimensioned 1:nx or
# 1:ny for either the sine or full transforms. While the cosine transforms
# require the additional endpoint so 0:nx or 0:ny.
#
# The routines contained in this module are:
#
# init2dfft(nx,ny,lx,ly,xfactors,yfactors,xtrig,ytrig,kx,ky)
# This routine initialises all the arrays needed for further
# transforms. The integers nx and ny are the array dimensions. Then
# lx and ly are the domain lengths - these are needed for the correct
# scaling when computing derivatives. The arrays xfactors, yfactors,
# xtrig and ytrig are needed to perform the various FFTs by the stafft
# module (see there for further details. kx and ky are arrays to hold
# the wavenumbers associated with each mode in the domain, and are
# used in computing derivatives.
#
# **If it is known at initialisation that no derivatives are required
# it is possible just to pass 1.d0 for each of lx and ly, along with
# dummy arrays for kx and ky since these are only needed for
# computing the derviatives.**
from numpy import pi, arange
#=====================================================================
def init2dfft(nx, ny, lx, ly):
    """
    Initialise the 1d FFTs in both directions and build the wavenumber arrays.

    Calls initfft() from the supporting 1d FFT module (stafft) for the x and
    y directions, then defines one wavenumber array per direction, scaled by
    the domain lengths (needed for computing derivatives).

    Input:
        nx, ny : number of grid points in x and y
        lx, ly : domain lengths in x and y (must be non-zero)
    Returns:
        (xfactors, yfactors, xtrig, ytrig, kx, ky)
    Raises:
        ValueError : if lx or ly is zero. (BUG FIX: the original only
        printed a 'STOPPING...' banner and then crashed at the return with
        an UnboundLocalError because kx/ky were never defined.)
    """
    xfactors, xtrig = initfft(nx)
    yfactors, ytrig = initfft(ny)
    if lx == 0.0:
        raise ValueError('Wavenumber array definition not possible: '
                         'domain length in x equal to zero not allowed.')
    if ly == 0.0:
        raise ValueError('Wavenumber array definition not possible: '
                         'domain length in y equal to zero not allowed.')
    # Wavenumbers k_j = j * pi / L for j = 1..n in each direction.
    kx = (pi / lx) * arange(1, nx + 1)
    ky = (pi / ly) * arange(1, ny + 1)
    return (xfactors, yfactors, xtrig, ytrig, kx, ky)
|
julianmak/pydra | wrapper/pydra_analysis.py | #!/usr/bin/env python3
#
# JM, 29 Oct 2018
#
# some subfunctions to do analysis of pydra data
from pydra_misc import * # numpy is loaded here as np
from casl import parameters, spectral, constants
#-------------------------------------------------------------------------------
# generate the eddy momentum quantities in layers and modes
def calc_eddy_mom(data_dir, parameters, constants, kt):
    """
    Generate the eddy momentum quantities in layers and in vertical modes
    (using zonal means).

    Input:
      data_dir     data directory
      parameters   parameter module from load
      constants    constants module from load
      kt           time stamp
    Output:
      K_L1L2  2d field of EKE in layers
      K_btbc  2d field of EKE in modes
      M_L1L2  2d field of M in layers
      M_btbc  2d field of M in modes
      N_L1L2  2d field of N in layers
      N_btbc  2d field of N in modes
    """
    _, qq = read_qq(data_dir, parameters.nx, parameters.ny, kt)
    # Match the index convention of the Fortran main_invert routine.
    qq = np.swapaxes(qq, 0, 1)
    # Flat bottom: no topography.
    fhb = np.zeros((parameters.ny + 1, parameters.nx))
    uu, vv, _ = spectral.main_invert(qq, fhb)
    uu_btbc = layers_to_modes(uu, constants)
    vv_btbc = layers_to_modes(vv, constants)
    K_L1L2 = np.zeros(uu.shape)
    K_btbc = np.zeros(uu_btbc.shape)
    M_L1L2 = np.zeros(uu.shape)
    N_L1L2 = np.zeros(uu.shape)
    M_btbc = np.zeros(uu_btbc.shape)
    N_btbc = np.zeros(uu_btbc.shape)
    # Same formulas apply in layer space and in mode space; loop over both
    # representations and over the two layers/modes.
    reps = ((uu, vv, K_L1L2, M_L1L2, N_L1L2),
            (uu_btbc, vv_btbc, K_btbc, M_btbc, N_btbc))
    for u2d, v2d, K, M, N in reps:
        for k in (0, 1):
            K[:, :, k] = zonal_eke(u2d[:, :, k], v2d[:, :, k])
            M[:, :, k] = (zonal_corr(v2d[:, :, k], v2d[:, :, k])
                          - zonal_corr(u2d[:, :, k], u2d[:, :, k])) / 2.0
            N[:, :, k] = zonal_corr(v2d[:, :, k], u2d[:, :, k])
    return (K_L1L2, K_btbc, M_L1L2, N_L1L2, M_btbc, N_btbc)
#-------------------------------------------------------------------------------
# generate the eddy buoyancy quantities in layers (there is no modal EPE so to speak)
def calc_eddy_buoy(data_dir, parameters, constants, kt):
    """
    Generate the eddy buoyancy quantities in layers, using zonal means
    (there is no modal EPE so to speak).

    Input:
      data_dir     data directory
      parameters   parameter module from load
      constants    constants module from load
      kt           time stamp
    Output:
      P  2d field of EPE in layers
      R  2d field of R in layers
      S  2d field of S in layers
    """
    _, qq = read_qq(data_dir, parameters.nx, parameters.ny, kt)
    # Match the index convention of the Fortran main_invert routine.
    qq = np.swapaxes(qq, 0, 1)
    # Flat bottom: no topography.
    fhb = np.zeros((parameters.ny + 1, parameters.nx))
    uu, vv, pp = spectral.main_invert(qq, fhb)
    h1 = parameters.h1
    kd2 = parameters.kdbar ** 2
    # Zonal anomaly of the interlayer pressure jump (EPE signal).
    dp_anom = zonal_demean(pp[:, :, 0] - pp[:, :, 1])
    P = np.zeros(qq.shape)
    P[:, :, 1] = 0.25 * (1.0 - h1) * kd2 * dp_anom ** 2
    P[:, :, 0] = 0.25 * h1 * kd2 * dp_anom ** 2
    # Buoyancy-flux correlations share a common prefactor and pressure term.
    coeff = 0.25 * (h1 * (1.0 - h1) * kd2)
    p_diff = zonal_demean(pp[:, :, 1]) - zonal_demean(pp[:, :, 0])
    R = coeff * (zonal_demean(uu[:, :, 0]) + zonal_demean(uu[:, :, 1])) * p_diff
    S = coeff * (zonal_demean(vv[:, :, 0]) + zonal_demean(vv[:, :, 1])) * p_diff
    return (P, R, S)
#-------------------------------------------------------------------------------
# generate the geometric parameters
class geom_data:
    """
    Container for zonally averaged geometric parameters, where M, N, R, S,
    P and K have been zonally averaged:
        gamma_m_L1L2  1d field of gamma_m in layers
        phi_m_L1L2    1d field of phi_m in layers
        gamma_m_btbc  1d field of gamma_m in modes
        phi_m_btbc    1d field of phi_m in modes
        gamma_b       1d field of gamma_b in layers
        phi_b         1d field of phi_b in layers
        gamma_t       1d field of gamma_t in layers
        phi_t         1d field of phi_t in layers
        lam           1d field of lambda in layers
        K_L1L2        1d field of EKE in layers
        P             1d field of EPE in layers
        E_L1L2        1d field of E in layers
    """
    # Human-readable tag describing the container's contents.
    name = "zonal averaged geometric data"
def calc_geom_param(data_dir, parameters, constants, kt):
    """
    Subfunction to generate the geometric factors as defined in Marshall et al.
    (2012) (though mostly from Youngs et al., 2017)
    Input:
      data_dir       data directory
      parameters     parameter module from load
      constants      constants module from load
      kt             time stamp
    Output:
      data_geom      a structure, containing
        gamma_m_L1L2   1d field of gamma_m in layers
        phi_m_L1L2     1d field of phi_m in layers
        gamma_m_btbc   1d field of gamma_m in modes
        phi_m_btbc     1d field of phi_m in modes
        gamma_b        1d field of gamma_b in layers
        phi_b          1d field of phi_b in layers
        gamma_t        1d field of gamma_t in layers
        phi_t          1d field of phi_t in layers
        lam            1d field of lambda in layers
        K_L1L2         1d field of EKE in layers
        P              1d field of EPE in layers
        E_L1L2         1d field of E in layers
    """
    data_geom = geom_data()
    # Momentum anisotropy magnitude: |(M, N)| / K, floored at 1e-16 to
    # avoid division by zero in quiescent regions.
    K_L1L2, K_btbc, M_L1L2, N_L1L2, M_btbc, N_btbc = calc_eddy_mom(data_dir, parameters, constants, kt)
    data_geom.gamma_m_L1L2 = (
        np.sqrt(zonal_ave(M_L1L2) ** 2 + zonal_ave(N_L1L2) ** 2)
        / np.maximum(zonal_ave(K_L1L2), 1e-16)
    )
    data_geom.gamma_m_btbc = (
        np.sqrt(zonal_ave(M_btbc) ** 2 + zonal_ave(N_btbc) ** 2)
        / np.maximum(zonal_ave(K_btbc), 1e-16)
    )
    # 0 =< phi_m =< pi
    # make sure to use the atan2 to get the correct quadrant otherwise shifting
    # by pi is required depending on the sign of the argument
    # the minus sign on the M bit is important
    data_geom.phi_m_L1L2 = 0.5 * np.arctan2(zonal_ave(N_L1L2), -zonal_ave(M_L1L2))
    data_geom.phi_m_btbc = 0.5 * np.arctan2(zonal_ave(N_btbc), -zonal_ave(M_btbc))
    # buoyancy anisotropy and angle
    P, R, S = calc_eddy_buoy(data_dir, parameters, constants, kt)
    data_geom.gamma_b = np.zeros(data_geom.gamma_m_L1L2.shape)
    data_geom.gamma_b[:, 0] = np.sqrt(
        (zonal_ave(R) ** 2 + zonal_ave(S) ** 2)
        / np.maximum(zonal_ave(K_L1L2[:, :, 0]) * zonal_ave(P[:, :, 0]), 1e-16)
        / (2.0 * parameters.h1 * (1.0 - parameters.h1) * parameters.kdbar ** 2)
    )
    data_geom.gamma_b[:, 1] = np.sqrt(
        (zonal_ave(R) ** 2 + zonal_ave(S) ** 2)
        / np.maximum(zonal_ave(K_L1L2[:, :, 1]) * zonal_ave(P[:, :, 1]), 1e-16)
        / (2.0 * parameters.h1 * (1.0 - parameters.h1) * parameters.kdbar ** 2)
    )
    # -pi =< phi_b =< pi
    # make sure to use the atan2 to get the correct quadrant otherwise shifting
    # by pi is required depending on the sign of the argument
    # NOTE: be careful of the angle on the boundary with atan2!
    data_geom.phi_b = np.arctan2(zonal_ave(S), zonal_ave(R))
    # total eddy energy and energy partition angle
    data_geom.K_L1L2 = zonal_ave(K_L1L2)
    data_geom.P = zonal_ave(P)
    data_geom.E_L1L2 = zonal_ave(K_L1L2 + P)
    # 0 =< lam =< pi / 2
    # this one doesn't matter too much but be careful with having the things
    # the right way up!
    data_geom.lam = np.arctan2(np.sqrt(zonal_ave(P)), np.sqrt(zonal_ave(K_L1L2)))
    # project onto x-z plane
    data_geom.phi_t = 0.5 * np.arctan(data_geom.gamma_b * np.tan(2.0 * data_geom.lam))
    data_geom.gamma_t = np.cos(2.0 * data_geom.lam) / np.maximum(np.cos(2.0 * data_geom.phi_t), 1e-16)
    return data_geom
#TODO: write a function does the processed data to generate e.g. alpha etc
# but also plot the e.g. sin(phi_b) and gamma and so forth
|
julianmak/pydra | native/parameters.py | #!/usr/bin/env python3
#
# JM: 11 Apr 2018
#
# sample parameters.py for testing purposes, should write a script
# to grab the relevant things from the folder parameters.f90 file
# Initial time loop from which to begin the simulation:
# --- Run control ------------------------------------------------------
# Initial time loop from which to begin the simulation:
loopinit = 0
# loopinit > 0 may be used for continuing a simulation (see replace below)
# Logical to indicate replacing existing data with the output of a new
# simulation or to append to existing data:
replace = False
# Number of fluid layers (here always 2):
nz = 2
# Number of grid boxes in the x & y directions (inversion grid):
nx = 128
ny = 64
# Number of contours used for representing PV in either layer:
ncontq = 80
# ncontq : used to compute the PV contour interval from
#          dq = (qq_max-qq_min)/ncontq (in each layer)
# Simulation time length etc..
tsim = 750.0
tgsave = 1.0
tcsave = 50.0
# tsim   : total duration of the simulation
# tgsave : grid data save time increment
# tcsave : contour data save time increment (approximate)
# ***NOTE*** tcsave should always be an integer multiple of tgsave
# ***Physical parameters:*** (meanings documented in the block below)
alpha = 1.0
h1 = 0.50
kdbar = 20.0
# Domain geometry: 2*pi x pi box centred on y = 0.
ellx = 6.28318530717958647692
elly = 3.14159265358979323846
ymin = -1.57079632679489661923
beta = 2.51327412287183459076
u1bot = 0.0
u1top = 0.0
u2bot = 0.0
u2top = 0.0
rtherm1 = 0.0
rtherm2 = 0.0
rekman = 0.0
eirate = 0.0
vorvor = 1.0
rheton = 0.1
thdmax = 0.0
sponlen = 3.14159265358979323846
iseed = 2583211
mod1bc = 0
topogr = False
# h1     : fractional thickness of the lower layer
# kdbar  : f*sqrt{(H1+H2)/(g*H1*H2*(1-alpha))} where H1 & H2 are the
#          layer depths, f is the Coriolis frequency and g is gravity
# ellx   : domain width in x (periodic, centred at 0)
# elly : domain width in y
# ymin : minimum value of y (ymax = ymin + elly; see constants.f90)
# beta : planetary vorticity gradient
# u1bot : zonal mean mode 1 (smaller kd) velocity at y = ymin
# u1top : zonal mean mode 1 (smaller kd) velocity at y = ymax
# *** NOTE: these are not used if kd = kd1 = 0 ***
# u2bot : zonal mean mode 2 (larger kd) velocity at y = ymin
# u2top : zonal mean mode 2 (larger kd) velocity at y = ymax
# rtherm1: thermal damping rate (1/tau_1) of lower layer thickness
# rtherm2: thermal damping rate (1/tau_2) of upper layer thickness
# rekman : Ekman damping rate (1/tau_E)
# eirate : enstrophy input rate (via heton pairs of point vortices
# which are converted to gridded vorticity and added to qd)
# vorvor : the mean vorticity magnitude associated with the point
# vortex upon placement on the grid
# rheton : the radius of each of the pair of vortices added as a heton
# iseed : seed for initialising point vortex forcing
# topogr : logical variable to indicate presence of topography
# mod1bc : boundary condition used on mode 1 when kd1 > 0;
# use 1 to implement fixed zonal mean velocities u1bot & u1top;
# use 2 to implement zero average streamfunction and velocity;
# (not used if kd1 = 0)
# thdmax : max rate of thermal damping at the domain edges
# sponlen: sponge length of vorticity damping; the function used
# is defined in casl.f90 (see "sponge")
|
julianmak/pydra | native/spectral.py | #/usr/bin/env python3
#
# JM: 12 Apr 2018
#
# the spectral.f90 module adapted for python
# contains inversion commands etc.
from constants import *
# from generic import *
from sta2dfft import *
from numpy import arange, zeros, pi, exp, sqrt, around, log10
# define some parameters
# Number of distinct x wavenumbers: half the x grid size (nx assumed even).
nwx = int(nx / 2)
# NOTE(review): `mwxp1` looks like a typo for `nwxp1` (= nwx + 1); the name
# is kept unchanged in case other modules reference it.
nwxm1, mwxp1 = nwx - 1, nwx + 1
#========================================================================!
# From main code: call init_invert to initialise !
# then call main_invert(qq,fhb,uu,vv,pp) to perform inversion!
#========================================================================!
def init_spectral():
    """Initialise the spectral inversion machinery (partial Fortran port).

    Sets up the 2d FFTs, the filtered x/y wavenumber arrays, and the
    shell-averaging arrays used for computing spectra.

    NOTE(review): this is work in progress — every quantity computed here
    is a local variable and the function returns None, so the results are
    currently discarded. The inversion-operator and boundary-correction
    sections at the bottom are still the untranslated Fortran (commented
    out). Decide on a return value / module-level storage before use.
    """
    # set up FFTs:
    xfactors, yfactors, xtrig, ytrig, hrkx, rky = init2dfft(nx, ny, ellx, elly)
    # Fractional y grid values:
    fac = 1.0 / ny
    yh1 = fac * arange(ny + 1)
    yh0 = 1.0 - yh1
    # Define y & beta*y:
    yg = ymin + gly * arange(ny + 1)
    bety = beta * yg
    # Define x wavenumbers:
    rkx = zeros(nx) # define the zero wavenumber here
    # NOTE(review): rkx[kx] and rkx[kxc] are assigned the SAME hrkx entry
    # (2*kx - 1); in the Fortran original the conjugate index normally takes
    # the other element of the pair — confirm against sta2dfft.f90.
    for kx in range(1, nwx):
        kxc = nx - kx
        rkx[kx ] = hrkx[2 * kx - 1]
        rkx[kxc] = hrkx[2 * kx - 1]
    rkx[nwx] = hrkx[nx - 1]
    scx = 2.0 * pi / ellx
    rkxmax = scx * nwx
    # High-wavenumber de-aliasing filter, exp(-36 (k/kmax)^36).
    frkx = zeros(nx) # define the zero here
    wratx = rkx / rkxmax
    frkx[1:nx] = rkx[1:nx] * exp(-36.0 * wratx[1:nx] ** 36.0)
    # Define y wavenumbers:
    scy = pi / elly
    rkymax = scy * ny
    frky = zeros(ny)
    wraty = rky / rkymax
    frky[1:ny] = rky[1:ny] * exp(-36.0 * wraty[1:ny] ** 36.0)
    #----------------------------------------------------------------------
    # Initialise arrays for computing the spectrum of any field:
    delk = sqrt(scx ** 2 + scy ** 2)
    delki = 1.0 / delk
    kmax = around( sqrt(rkxmax ** 2 + rkymax ** 2) * delki )
    spmf = zeros(max(nx + 1, ny + 1))
    kmag = zeros((nx, ny))
    alk = zeros(max(nx, ny))
    # NOTE(review): the second loop below starts at ky = 0 and so revisits
    # the kmag[:, 0] entries set by the first loop (and counts them into
    # spmf a second time); in the Fortran the ky loop runs 1..ny — confirm.
    for kx in range(nx):
        k = int( around(rkx[kx] * delki) )
        kmag[kx, 0] = k
        spmf[k] += 1
    for ky in range(ny):
        for kx in range(nx):
            k = int( around( sqrt(rkx[kx] ** 2 + rky[ky] ** 2) * delki ) )
            kmag[kx, ky] = k
            spmf[k] += 1
    # for i in range(len(spmf)):
    #     print("%.10f" % spmf[i])
    #     print("%13.2f" % spmf[i])
    # Compute spectrum multiplication factor (spmf) to account for unevenly
    # sampled shells and normalise spectra by 8/(nx*ny) so that the sum
    # of the spectrum is equal to the L2 norm of the original field:
    snorm = 4.0 * pi / (nx * ny)
    spmf[0] = 0.0
    for k in range(1, int(kmax + 1)):
        spmf[k] = snorm * k / spmf[k]
        alk[k] = log10(delk * k)
    # Only output shells which are fully occupied (k <= kmaxred):
    kmaxred = around( sqrt( (rkxmax ** 2 + rkymax ** 2) / 2.0 ) *delki )
    #!----------------------------------------------------------------------
    # !Define inverse spectral inversion operators:
    #if (barot) then
    #  !The first mode is barotropic (kd1 = 0):
    #  qgop1(0,0)=zero
    #else
    #  qgop1(0,0)=-one/(kd1sq+small)
    #endif
    #qgop2(0,0)=-one/kd2sq
    #laplace(0,0)=zero
    #do kx=1,nxm1
    #  rksq=rkx(kx)**2
    #  qgop1(kx,0)=-one/(rksq+kd1sq)
    #  qgop2(kx,0)=-one/(rksq+kd2sq)
    #  laplace(kx,0)=-rksq
    #enddo
    #do ky=1,ny
    #  rksq=rky(ky)**2
    #  qgop1(0,ky)=-one/(rksq+kd1sq)
    #  qgop2(0,ky)=-one/(rksq+kd2sq)
    #  laplace(0,ky)=-rksq
    #enddo
    #do ky=1,ny
    #  do kx=1,nxm1
    #    rksq=rkx(kx)**2+rky(ky)**2
    #    qgop1(kx,ky)=-one/(rksq+kd1sq)
    #    qgop2(kx,ky)=-one/(rksq+kd2sq)
    #    laplace(kx,ky)=-rksq
    #  enddo
    #enddo
    #!----------------------------------------------------------------------
    # !Hyperbolic functions used to correct boundary conditions in inversion:
    # !First mode:
    #do kx=1,nxm1
    #  fac=sqrt(rkx(kx)**2+kd1sq)*elly
    #  div=one/(one-exp(-two*fac))
    #  do iy=1,nym1
    #    argm=fac*(one-yh1(iy))
    #    argp=fac*(one+yh1(iy))
    #    decy1(iy,kx)=(exp(-argm)-exp(-argp))*div
    #  enddo
    #enddo
    # !Second mode (kd2 > kd1 is assumed):
    #do kx=1,nxm1
    #  fac=sqrt(rkx(kx)**2+kd2sq)*elly
    #  div=one/(one-exp(-two*fac))
    #  do iy=1,nym1
    #    argm=fac*(one-yh1(iy))
    #    argp=fac*(one+yh1(iy))
    #    decy2(iy,kx)=(exp(-argm)-exp(-argp))*div
    #  enddo
    #enddo
|
julianmak/pydra | native/stafft.py | #/usr/bin/env python3
#
# JM: 11 Apr 2018
#
# the stafft.f90 adapted for python
# standalone and it shouldn't depend on anything
#
#-------------------------------------------------------------------------------
# Fourier transform module.
# This is not a general purpose transform package but is designed to be
# quick for arrays of length 2^n. It will work if the array length is of
# the form 2^i * 3^j * 4^k * 5^l * 6^m (integer powers obviously).
#
# Minimal error-checking is performed by the code below. The only check is that
# the initial factorisation can be performed.
# Therefore if the transforms are called with an array of length <2, or a trig array
# not matching the length of the array to be transformed the code will fail in a
# spectacular way (eg. Seg. fault or nonsense returned).
# It is up to the calling code to ensure everything is called sensibly.
# The reason for stripping error checking is to speed up the backend by performing
# less if() evaluations - as errors in practice seem to occur very rarely.
# So the good news is this should be a fast library - the bad is that you may have to pick
# around in it if there are failures.
#
# To initialise the routines call init(n,factors,trig,ierr).
# This fills a factorisation array (factors), and a sin/cos array (trig).
# These must be kept in memory by the calling program.
# The init routine can be called multiple times with different arrays if more than
# one length of array is to be transformed.
# If a factorisation of the array length n cannot be found (as specified above)
# then the init routine will exit immediately and the integer ierr will be set to 1.
# If the init returns with ierr=0 then the call was successful.
#
# Top-level subroutines contained in this module are:
# 1) initfft(n,factors,trig) :
# Performs intialisation of the module, by working out the factors of n (the FFT length).
# This will fail if n is not factorised completely by 2,3,4,5,6.
# The trig array contains the necessary cosine and sine values.
# Both arrays passed to init **must** be kept between calls to routines in this module.
# 2) forfft(m,n,x,trig,factors) :
# This performs a FFT of an array x containing m vectors of length n.
# The transform length is n.
# This inverse of this transform is obtained by revfft.
# 3) revfft(m,n,x,trig,factors) :
# This performs an inverse FFT of an array x containing m vectors of length n.
# The transform length is n.
# This inverse of this transform is forfft.
# 4) dct(m,n,x,trig,factors) :
# This performs a discrete cosine transform of an array x containing m vectors of length n.
# The transform length is n.
# This routine calls forfft and performs pre- and post- processing to obtain the transform.
# This transform is its own inverse.
# 5) dst(m,n,x,trig,factors) :
# This performs a discrete sine transform of an array x containing m vectors of length n.
# The transform length is n.
# This routine calls forfft and performs pre- and post- processing to obtain the transform.
# This transform is its own inverse.
#
# The storage of the transformed array is in 'Hermitian form'. This means that, for the jth vector
# the values x(j,1:nw) contain the cosine modes of the transform, while the values x(j,nw+1:n) contain
# the sine modes (in reverse order ie. wave number increasing from n back to nw+1).
# [Here, for even n, nw=n/2, and for odd n, nw=(n-1)/2].
from sys import exit
from numpy import array, zeros, pi, sin, cos, mod, sqrt
#----------------------------
def initfft(n):
    """
    Subroutine performs initialisation work for all the transforms.
    It calls routines to factorise the array length n and then sets up
    a trig array full of sin/cos values used in the transform backend.
    Input:
      n = an integer (the FFT length; must factorise into 2,3,4,5,6)
    Returns:
      factors = factors of n in a length 5 array, ordered (6, 4, 2, 3, 5)
      trig    = length-2n array; trig[0:n-1] holds cosines and
                trig[n:2n-1] holds negated sines of the accumulated
                angles (entries n-1 and 2n-1 are left at zero)
    Aborts the run (sys.exit) if the factorisation fails.
    """
    # First factorise n:
    factors, ierr = factorisen(n)
    # Return if factorisation unsuccessful:
    if (ierr == 1):
        # Catastrophic end to run if factorisation fails:
        print('****************************')
        print(' Factorisation not possible.')
        print(' Only factors from 2-6 allowed.')
        print(' STOPPING...')
        print('****************************')
        exit("breaking in stafft/initfft")
    # Define list of factors array (same order factorisen records them):
    fac = array((6, 4, 2, 3, 5))
    # Define constants needed in trig array definition:
    ftwopin = 2.0 * pi / n
    rem = n
    ## TO FIX: there is a bug here, the outputs don't agree with the fortran
    ## one -- NOTE(review): still unresolved; compare against stafft.f90
    ## before trusting the trig table.
    m = 0  # running write index into trig (the fortran original starts at 1)
    trig = zeros(2 * n)
    for i in range(5):
        for j in range(int(factors[i])):
            # Integer division keeps rem exact (the original used /, which
            # yields a float in python 3):
            rem //= fac[i]
            for k in range(1, fac[i]):
                for l in range(rem):
                    trig[m] = ftwopin * (k * l)
                    m += 1
            ftwopin *= fac[i]
    # Convert accumulated angles into cosines (first half) and negated
    # sines (second half); exactly n-1 entries were filled above:
    for i in range(n-1):
        trig[n+i] = -sin(trig[i])
        trig[i  ] =  cos(trig[i])
    return (factors, trig)
#============================================
def factorisen(n):
    """
    Subroutine to factorise factors of n.
    The allowed radices 6, 4, 2, 3, 5 are divided out in that order, so
    e.g. 12 is recorded as 6 * 2, never as 4 * 3.
    Input:
      n = an integer
    Returns:
      factors = factors of n in a length 5 array, ordered (6, 4, 2, 3, 5)
      ierr    = 0 if ok, otherwise 1 if n has factors other than 2,3,4,5,6
    """
    # Initialise factors array:
    factors = zeros(5)
    # Guard against n < 1: the original loop never terminates for n = 0,
    # and negative lengths are meaningless -- report failure instead:
    if n < 1:
        return (factors, 1)
    rem = n
    for i, f in enumerate((6, 4, 2, 3, 5)):
        # Divide out this radix as many times as possible:
        while rem % f == 0:
            factors[i] += 1
            rem //= f  # integer division keeps rem exact
        if rem == 1:
            # Fully factorised:
            return (factors, 0)
    # If code reaches this point factorisation has failed --
    # n contains a prime factor other than 2, 3 or 5:
    return (factors, 1)
#============================================
def forfft(m, n, x, trig, factors):
    """
    Main physical to spectral (forward) FFT routine.
    Performs m transforms of length n in the array x which is dimensioned x(m,n).
    The arrays trig and factors are filled by the init routine and
    should be kept from call to call.
    Backend consists of mixed-radix routines, with 'decimation in time'.
    Transform is stored in Hermitian form.

    NOTE(review): this port is incomplete.  Only the radix-5 stage has
    been translated (radix-3/2/4/6 stages remain as commented-out
    Fortran below); debug prints are still in the loop; and forrdx5 is
    called without the data in x, and with scalar trig entries where the
    Fortran passes array sections.  The output therefore cannot yet
    agree with the Fortran original -- to be confirmed/finished.

    Input:
      m       = number of vectors to transform
      n       = transform length
      x       = input array
      trig    = trig table produced by initfft
      factors = factorisation of n produced by initfft
    Returns:
      xhat = transformed array, normalised by 1/sqrt(n)
    """
    # #Arguments declarations:
    #double precision:: x(0:m*n-1),trig(0:2*n-1)
    #integer:: m,n,factors(5)
    # #Local declarations:
    #double precision:: wk(0:m*n-1),normfac
    #integer:: i,rem,cum,iloc
    #logical:: orig
    # Initialise flip/flop logical and counters.  `orig` tracks which of
    # x / wk currently holds the partial transform (ping-pong buffers):
    orig = True
    rem = n
    cum = 1
    # Use factors of 5:
    for i in range(int(factors[4])):
        rem /= 5
        iloc = int((rem - 1) * 5 * cum)
        if orig:
            # debug output left in place (doc-only review):
            print("orig")
            print(trig[iloc])
            print(trig[n + iloc])
            # NOTE(review): x is discarded here -- forrdx5 allocates its
            # own (zero) input; the Fortran passes x in and wk out:
            x, wk = forrdx5(int(m * rem), cum, trig[iloc], trig[n + iloc])
        else:
            print("not orig")
            print(trig[iloc])
            print(trig[n + iloc])
            wk, x = forrdx5(int(m * rem), cum, trig[iloc], trig[n + iloc])
        orig = not orig
        cum *= 5
    #do i=1,factors(5)
    # rem=rem/5
    # iloc=(rem-1)*5*cum
    # if (orig) then
    #   call forrdx5(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
    # else
    #   call forrdx5(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
    # endif
    # orig=.not. orig
    # cum=cum*5
    #enddo
    # #Use factors of 3:
    #do i=1,factors(4)
    # rem=rem/3
    # iloc=(rem-1)*3*cum
    # if (orig) then
    #   call forrdx3(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
    # else
    #   call forrdx3(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
    # endif
    # orig=.not. orig
    # cum=cum*3
    #enddo
    # #Use factors of 2:
    #do i=1,factors(3)
    # rem=rem/2
    # iloc=(rem-1)*2*cum
    # if (orig) then
    #   call forrdx2(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
    # else
    #   call forrdx2(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
    # endif
    # orig=.not. orig
    # cum=cum*2
    #enddo
    # #Use factors of 4:
    #do i=1,factors(2)
    # rem=rem/4
    # iloc=(rem-1)*4*cum
    # if (orig) then
    #   call forrdx4(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
    # else
    #   call forrdx4(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
    # endif
    # orig=.not. orig
    # cum=cum*4
    #enddo
    # #Use factors of 6:
    #do i=1,factors(1)
    # rem=rem/6
    # iloc=(rem-1)*6*cum
    # if (orig) then
    #   call forrdx6(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
    # else
    #   call forrdx6(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
    # endif
    # orig=.not. orig
    # cum=cum*6
    #enddo
    # Multiply by the normalisation constant and put
    # transformed array in the right location:
    # (note: wk is unbound if factors[4] == 0, but then orig is still
    # True so the else branch is never taken)
    normfac = 1.0 / sqrt(n)
    if (orig):
        xhat = x * normfac
    else:
        xhat = wk * normfac
    return xhat
##=====================================================
#subroutine revfft(m,n,x,trig,factors)
## Main spectral to physical (reverse) FFT routine.
## Performs m reverse transforms of length n in the array x which is dimensioned x(m,n).
## The arrays trig and factors are filled by the init routine and
## should be kept from call to call.
## Backend consists of mixed-radix routines, with 'decimation in frequency'.
## Reverse transform starts in Hermitian form.
#implicit none
# #Arguments declarations:
#double precision:: x(0:m*n-1),trig(0:2*n-1)
#integer:: m,n,factors(5)
# #Local declarations:
#double precision:: wk(0:m*n-1),normfac
#integer:: i,k,cum,rem,iloc
#logical:: orig
##----------------------------------------
# #Flip the sign of the sine coefficients:
#do i=(n/2+1)*m,n*m-1
# x(i)=-x(i)
#enddo
# #Scale 0 and Nyquist frequencies:
#do i=0,m-1
# x(i)=0.5d0*x(i)
#enddo
#if (mod(n,2) .eq. 0) then
# k=m*n/2
# do i=0,m-1
# x(k+i)=0.5d0*x(k+i)
# enddo
#endif
# #Initialise flip/flop logical and counters
#orig=.true.
#cum=1
#rem=n
# #Use factors of 6:
#do i=1,factors(1)
# rem=rem/6
# iloc=(cum-1)*6*rem
# if (orig) then
# call revrdx6(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx6(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*6
#enddo
# #Use factors of 4:
#do i=1,factors(2)
# rem=rem/4
# iloc=(cum-1)*4*rem
# if (orig) then
# call revrdx4(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx4(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*4
#enddo
# #Use factors of 2:
#do i=1,factors(3)
# rem=rem/2
# iloc=(cum-1)*2*rem
# if (orig) then
# call revrdx2(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx2(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*2
#enddo
# #Use factors of 3:
#do i=1,factors(4)
# rem=rem/3
# iloc=(cum-1)*3*rem
# if (orig) then
# call revrdx3(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx3(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*3
#enddo
# #Use factors of 5:
#do i=1,factors(5)
# rem=rem/5
# iloc=(cum-1)*5*rem
# if (orig) then
# call revrdx5(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx5(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*5
#enddo
# #Multiply by the normalisation constant and put
# #transformed array in the right location:
#normfac=2.0d0/sqrt(dble(n))
#if (orig) then
# do i=0,m*n-1
# x(i)=x(i)*normfac
# enddo
#else
# do i=0,m*n-1
# x(i)=wk(i)*normfac
# enddo
#endif
#return
#end subroutine
##============================================
#subroutine dct(m,n,x,trig,factors)
## This routine computes multiple fourier cosine transforms of sequences
## of doubles using the forfft routine to compute the FFT,
## along with pre- and post-processing steps to extract the dst.
#implicit none
##Argument declarations:
#integer:: m,n,factors(5)
#double precision:: x(m,0:n),trig(2*n)
##Local declarations:
#double precision,parameter:: pi=3.141592653589793238462643383279502884197169399375105820974944592307816d0
#double precision,parameter:: rt2=1.414213562373095048801688724209698078569671875376948073176679737990732d0
#double precision:: wk(1:m,0:n-1),fpin,rtn,rowsum
#integer:: i,j,nd2
##--------------------------------------------------
#fpin=pi/dble(n)
#rtn=sqrt(dble(n))
# #Pre-process the array and store it in wk:
#do i=1,m
# wk(i,0)=0.5d0*(x(i,0)+x(i,n))
#enddo
#do j=1,n-1
# do i=1,m
# wk(i,j)=0.5d0*(x(i,j)+x(i,n-j))-sin(dble(j)*fpin)*(x(i,j)-x(i,n-j))
# enddo
#enddo
# #Get the first element of the transform x(i,1) and store
# #in x(i,n), as this is not overwritten when x is used
# #as a work array in the forfft routine called next:
#do i=1,m
# rowsum=0.0d0
# rowsum=rowsum+0.5d0*x(i,0)
# do j=1,n-1
# rowsum=rowsum+x(i,j)*cos(dble(j)*fpin)
# enddo
# rowsum=rowsum-0.5d0*x(i,n)
# x(i,n)=rt2*rowsum/rtn
#enddo
# #Transform the wk array by use of the general FFT routine:
#call forfft(m,n,wk,trig,factors)
# #Post-process the result of the FFT to get the dst of x and
# #put the result back into the x array:
#do i=1,m
# x(i,0)=rt2*wk(i,0)
#enddo
#do i=1,m
# x(i,1)=x(i,n)
#enddo
#if (mod(n,2) .eq. 0) then
# nd2=n/2
# do j=1,nd2-1
# do i=1,m
# x(i,2*j)=rt2*wk(i,j)
# x(i,2*j+1)=x(i,2*j-1)-rt2*wk(i,n-j)
# enddo
# enddo
# do i=1,m
# x(i,n)=rt2*wk(i,nd2)
# enddo
#else if (mod(n,2) .eq. 1) then
# do j=1,(n-1)/2
# do i=1,m
# x(i,2*j)=rt2*wk(i,j)
# x(i,2*j+1)=x(i,2*j-1)-rt2*wk(i,n-j)
# enddo
# enddo
#endif
#return
#end subroutine
##=============================================================
#subroutine dst(m,n,x,trig,factors)
## This routine computes multiple fourier sine transforms of sequences
## of doubles using the forfft routine to compute the FFT,
## along with pre- and post-processing steps to extract the dst.
#implicit none
##Argument declarations:
#integer:: m,n,factors(5)
#double precision:: x(m,n),trig(2*n)
##Local declarations:
#double precision,parameter:: pi=3.141592653589793238462643383279502884197169399375105820974944592307816d0
#double precision,parameter:: rt2=1.414213562373095048801688724209698078569671875376948073176679737990732d0
#double precision:: wk(1:m,0:n-1),fpin
#integer:: i,j
##------------------------------------------
#fpin=pi/dble(n)
# #Pre-process the array and store it in wk:
# #First set 0 frequency element to zero:
#do i=1,m
# wk(i,0)=0.0d0
#enddo
# #Next set up the rest of the array:
#do j=1,n-1
# do i=1,m
# wk(i,j)=0.5d0*(x(i,j)-x(i,n-j))+sin(dble(j)*fpin)*(x(i,j)+x(i,n-j))
# enddo
#enddo
# #Transform the wk array by use of the general FFT routine:
#call forfft(m,n,wk,trig,factors)
# #Post-process the result of the FFT to get the dst of x and
# #put the result back into the x array:
#do i=1,m
# x(i,1)=wk(i,0)/rt2
#enddo
#if (mod(n,2) .eq. 0) then
# do j=1,n/2-1
# do i=1,m
# x(i,2*j)=-rt2*wk(i,n-j)
# enddo
# do i=1,m
# x(i,2*j+1)=rt2*wk(i,j)+x(i,2*j-1)
# enddo
# enddo
#else if (mod(n,2) .eq. 1) then
# do j=1,(n-1)/2-1
# do i=1,m
# x(i,2*j)=-rt2*wk(i,n-j)
# x(i,2*j+1)=rt2*wk(i,j)+x(i,2*j-1)
# enddo
# enddo
# do i=1,m
# x(i,n-1)=-rt2*wk(i,(n+1)/2)
# enddo
#endif
# # Set the Nyquist frequency element to zero:
#do i=1,m
# x(i,n)=0.0d0
#enddo
#return
#end subroutine
##==================================================
##====================================================
## Internal radix routines only beyond this point...
## Abandon hope all ye who enter in#
##====================================================
## Physical to spectral (forward) routines:
##====================================================
#subroutine forrdx6(a,b,nv,lv,cosine,sine)
## Radix six physical to Hermitian FFT with 'decimation in time'.
#implicit none
#
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:5,0:lv-1),b(0:nv-1,0:lv-1,0:5),cosine(0:lv-1,5),sine(0:lv-1,5)
# #Local declarations:
#double precision,parameter:: sinfpi3=0.8660254037844386467637231707529361834714026269051903140279034897259665d0
#double precision:: x1p,x2p,x3p,x4p,x5p
#double precision:: y1p,y2p,y3p,y4p,y5p
#double precision:: s1k,s2k,s3k,s4k,s5k
#double precision:: c1k,c2k,c3k,c4k,c5k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r
#double precision:: u0i,u0r,u1i,u1r,u2i,u2r
#double precision:: v0i,v0r,v1i,v1r,v2i,v2r
#double precision:: q1,q2,q3,q4,q5,q6
#integer:: i,k,kc,lvd2
##-----------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t1r=a(i,2,0)+a(i,4,0)
# t2r=a(i,0,0)-0.5d0*t1r
# t3r=sinfpi3*(a(i,4,0)-a(i,2,0))
# u0r=a(i,0,0)+t1r
# t1i=a(i,5,0)+a(i,1,0)
# t2i=a(i,3,0)-0.5d0*t1i
# t3i=sinfpi3*(a(i,5,0)-a(i,1,0))
# v0r=a(i,3,0)+t1i
# b(i,0,0)=u0r+v0r
# b(i,0,1)=t2r-t2i
# b(i,0,2)=t2r+t2i
# b(i,0,3)=u0r-v0r
# b(i,0,4)=t3i-t3r
# b(i,0,5)=t3r+t3i
#enddo
# #Next do remaining k:
#if (nv .le. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# x1p=cosine(k,1)*a(i,1, k)-sine(k,1)*a(i,1,kc)
# y1p=cosine(k,1)*a(i,1,kc)+sine(k,1)*a(i,1, k)
# x2p=cosine(k,2)*a(i,2, k)-sine(k,2)*a(i,2,kc)
# y2p=cosine(k,2)*a(i,2,kc)+sine(k,2)*a(i,2, k)
# x3p=cosine(k,3)*a(i,3, k)-sine(k,3)*a(i,3,kc)
# y3p=cosine(k,3)*a(i,3,kc)+sine(k,3)*a(i,3, k)
# x4p=cosine(k,4)*a(i,4, k)-sine(k,4)*a(i,4,kc)
# y4p=cosine(k,4)*a(i,4,kc)+sine(k,4)*a(i,4, k)
# x5p=cosine(k,5)*a(i,5, k)-sine(k,5)*a(i,5,kc)
# y5p=cosine(k,5)*a(i,5,kc)+sine(k,5)*a(i,5, k)
# t1r=x2p+x4p
# t1i=y2p+y4p
# t2r=a(i,0,k)-0.5d0*t1r
# t2i=a(i,0,kc)-0.5d0*t1i
# t3r=sinfpi3*(x2p-x4p)
# t3i=sinfpi3*(y2p-y4p)
# u0r=a(i,0,k)+t1r
# u0i=a(i,0,kc)+t1i
# u1r=t2r+t3i
# u1i=t2i-t3r
# u2r=t2r-t3i
# u2i=t2i+t3r
# t1r=x5p+x1p
# t1i=y5p+y1p
# t2r=x3p-0.5d0*t1r
# t2i=y3p-0.5d0*t1i
# t3r=sinfpi3*(x5p-x1p)
# t3i=sinfpi3*(y5p-y1p)
# v0r=x3p+t1r
# v0i=y3p+t1i
# v1r=t2r+t3i
# v1i=t3r-t2i
# v2r=t2r-t3i
# v2i=t2i+t3r
# b(i, k,0)=u0r+v0r
# b(i,kc,0)=u2r-v2r
# b(i, k,1)=u1r-v1r
# b(i,kc,1)=u1r+v1r
# b(i, k,2)=u2r+v2r
# b(i,kc,2)=u0r-v0r
# b(i, k,3)=v0i-u0i
# b(i,kc,3)=u2i+v2i
# b(i, k,4)=v1i-u1i
# b(i,kc,4)=u1i+v1i
# b(i, k,5)=v2i-u2i
# b(i,kc,5)=u0i+v0i
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# c3k=cosine(k,3)
# s3k=sine(k,3)
# c4k=cosine(k,4)
# s4k=sine(k,4)
# c5k=cosine(k,5)
# s5k=sine(k,5)
# do i=0,nv-1
# x1p=c1k*a(i,1, k)-s1k*a(i,1,kc)
# y1p=c1k*a(i,1,kc)+s1k*a(i,1, k)
# x2p=c2k*a(i,2, k)-s2k*a(i,2,kc)
# y2p=c2k*a(i,2,kc)+s2k*a(i,2, k)
# x3p=c3k*a(i,3, k)-s3k*a(i,3,kc)
# y3p=c3k*a(i,3,kc)+s3k*a(i,3, k)
# x4p=c4k*a(i,4, k)-s4k*a(i,4,kc)
# y4p=c4k*a(i,4,kc)+s4k*a(i,4, k)
# x5p=c5k*a(i,5, k)-s5k*a(i,5,kc)
# y5p=c5k*a(i,5,kc)+s5k*a(i,5, k)
# t1r=x2p+x4p
# t1i=y2p+y4p
# t2r=a(i,0,k)-0.5d0*t1r
# t2i=a(i,0,kc)-0.5d0*t1i
# t3r=sinfpi3*(x2p-x4p)
# t3i=sinfpi3*(y2p-y4p)
# u0r=a(i,0,k)+t1r
# u0i=a(i,0,kc)+t1i
# u1r=t2r+t3i
# u1i=t2i-t3r
# u2r=t2r-t3i
# u2i=t2i+t3r
# t1r=x5p+x1p
# t1i=y5p+y1p
# t2r=x3p-0.5d0*t1r
# t2i=y3p-0.5d0*t1i
# t3r=sinfpi3*(x5p-x1p)
# t3i=sinfpi3*(y5p-y1p)
# v0r=x3p+t1r
# v0i=y3p+t1i
# v1r=t2r+t3i
# v1i=t3r-t2i
# v2r=t2r-t3i
# v2i=t2i+t3r
# b(i, k,0)=u0r+v0r
# b(i,kc,0)=u2r-v2r
# b(i, k,1)=u1r-v1r
# b(i,kc,1)=u1r+v1r
# b(i, k,2)=u2r+v2r
# b(i,kc,2)=u0r-v0r
# b(i, k,3)=v0i-u0i
# b(i,kc,3)=u2i+v2i
# b(i, k,4)=v1i-u1i
# b(i,kc,4)=u1i+v1i
# b(i, k,5)=v2i-u2i
# b(i,kc,5)=u0i+v0i
# enddo
# enddo
#endif
# #Catch the case k=lv/2 when lv even:
#if (mod(lv,2) .eq. 0) then
# lvd2=lv/2
# do i=0,nv-1
# q1=a(i,2,lvd2)-a(i,4,lvd2)
# q2=a(i,0,lvd2)+0.5d0*q1
# q3=sinfpi3*(a(i,2,lvd2)+a(i,4,lvd2))
# q4=a(i,1,lvd2)+a(i,5,lvd2)
# q5=-a(i,3,lvd2)-0.5d0*q4
# q6=sinfpi3*(a(i,1,lvd2)-a(i,5,lvd2))
# b(i,lvd2,0)=q2+q6
# b(i,lvd2,1)=a(i,0,lvd2)-q1
# b(i,lvd2,2)=q2-q6
# b(i,lvd2,3)=q5+q3
# b(i,lvd2,4)=a(i,3,lvd2)-q4
# b(i,lvd2,5)=q5-q3
# enddo
#endif
#
#return
#end subroutine
##================================================
def forrdx5(nv, lv, cosine, sine, a=None):
    """
    Radix five physical to Hermitian FFT with 'decimation in time'.
    Python port of the Fortran routine forrdx5(a,b,nv,lv,cosine,sine).
    Input:
      nv     = number of vectors
      lv     = length of each sub-transform
      cosine = twiddle-factor cosines, indexed cosine[k, 0:4]
      sine   = twiddle-factor sines, indexed sine[k, 0:4]
      a      = optional input data array of shape (nv, 5, lv); if omitted,
               a freshly zeroed array is used, which reproduces the
               behaviour of the original port (NOTE(review): the caller in
               forfft should pass the data to be transformed here -- in
               the Fortran original `a` is an input argument)
    Returns:
      a = the input array (newly allocated if not supplied)
      b = output array of shape (nv, lv, 5)
    """
    # Trigonometric constants for the radix-5 butterfly
    # (rtf516 = sqrt(5)/16-related factor, sinf2pi5 = sin(2*pi/5),
    #  sinrat = sin(pi/5)/sin(2*pi/5); values copied from stafft.f90):
    rtf516 = 0.5590169943749474241022934171828190588601545899028814310677243113526302
    sinf2pi5 = 0.9510565162951535721164393333793821434056986341257502224473056444301532
    sinrat = 0.6180339887498948482045868343656381177203091798057628621354486227052605
    if a is None:
        a = zeros((nv, 5, lv))
    b = zeros((nv, lv, 5))
    # Do k=0 first (the zero-frequency butterfly needs no twiddles):
    for i in range(nv):
        t1r = a[i, 1, 0] + a[i, 4, 0]
        t2r = a[i, 2, 0] + a[i, 3, 0]
        t3r = sinf2pi5 * (a[i, 4, 0] - a[i, 1, 0])
        t4r = sinf2pi5 * (a[i, 2, 0] - a[i, 3, 0])
        t5r = t1r + t2r
        t6r = rtf516 * (t1r - t2r)
        t7r = a[i, 0, 0] - 0.25 * t5r
        b[i, 0, 0] = a[i, 0, 0] + t5r
        b[i, 0, 1] = t7r + t6r
        b[i, 0, 2] = t7r - t6r
        b[i, 0, 3] = t4r + sinrat * t3r
        b[i, 0, 4] = t3r - sinrat * t4r
    # Next do remaining k.  The two branches compute identical results;
    # they differ only in loop nesting (the else branch hoists the
    # twiddle factors out of the inner loop when nv is large):
    if (nv < (lv - 1) / 2):
        for i in range(nv):
            for k in range(1, int((lv - 1) / 2) + 1):
                kc = lv - k
                x1p = cosine[k, 0] * a[i, 1, k ] - sine[k, 0] * a[i, 1, kc]
                y1p = cosine[k, 0] * a[i, 1, kc] + sine[k, 0] * a[i, 1, k ]
                x2p = cosine[k, 1] * a[i, 2, k ] - sine[k, 1] * a[i, 2, kc]
                y2p = cosine[k, 1] * a[i, 2, kc] + sine[k, 1] * a[i, 2, k ]
                x3p = cosine[k, 2] * a[i, 3, k ] - sine[k, 2] * a[i, 3, kc]
                y3p = cosine[k, 2] * a[i, 3, kc] + sine[k, 2] * a[i, 3, k ]
                x4p = cosine[k, 3] * a[i, 4, k ] - sine[k, 3] * a[i, 4, kc]
                y4p = cosine[k, 3] * a[i, 4, kc] + sine[k, 3] * a[i, 4, k ]
                t1r = x1p + x4p
                t1i = y1p + y4p
                t2r = x2p + x3p
                t2i = y2p + y3p
                t3r = sinf2pi5 * (x1p - x4p)
                t3i = sinf2pi5 * (y1p - y4p)
                t4r = sinf2pi5 * (x2p - x3p)
                t4i = sinf2pi5 * (y2p - y3p)
                t5r = t1r + t2r
                t5i = t1i + t2i
                t6r = rtf516 * (t1r - t2r)
                t6i = rtf516 * (t1i - t2i)
                t7r = a[i, 0, k ] - 0.25 * t5r
                t7i = a[i, 0, kc] - 0.25 * t5i
                t8r = t7r + t6r
                t8i = t7i + t6i
                t9r = t7r - t6r
                t9i = t7i - t6i
                t10r = t3r + sinrat * t4r
                t10i = t3i + sinrat * t4i
                t11r = t4r - sinrat * t3r
                t11i = sinrat * t3i - t4i
                b[i, k, 0] = a[i, 0, k ] + t5r
                b[i, kc, 0] = t8r - t10i
                b[i, k, 1] = t8r + t10i
                b[i, kc, 1] = t9r - t11i
                b[i, k, 2] = t9r + t11i
                b[i, kc, 2] = t9i + t11r
                b[i, k, 3] = t11r - t9i
                b[i, kc, 3] = t8i - t10r
                b[i, k, 4] = -t8i - t10r
                b[i, kc, 4] = a[i, 0, kc] + t5i
    else:
        for k in range(1, int((lv - 1) / 2) + 1):
            kc = lv - k
            # Hoist twiddle factors for this k:
            c1k = cosine[k, 0]
            s1k = sine[k, 0]
            c2k = cosine[k, 1]
            s2k = sine[k, 1]
            c3k = cosine[k, 2]
            s3k = sine[k, 2]
            c4k = cosine[k, 3]
            s4k = sine[k, 3]
            for i in range(nv):
                x1p = c1k * a[i, 1, k] - s1k * a[i, 1, kc]
                y1p = c1k * a[i, 1, kc] + s1k * a[i, 1, k]
                x2p = c2k * a[i, 2, k] - s2k * a[i, 2, kc]
                y2p = c2k * a[i, 2, kc] + s2k * a[i, 2, k]
                x3p = c3k * a[i, 3, k] - s3k * a[i, 3, kc]
                y3p = c3k * a[i, 3, kc] + s3k * a[i, 3, k]
                x4p = c4k * a[i, 4, k] - s4k * a[i, 4, kc]
                y4p = c4k * a[i, 4, kc] + s4k * a[i, 4, k]
                t1r = x1p + x4p
                t1i = y1p + y4p
                t2r = x2p + x3p
                t2i = y2p + y3p
                t3r = sinf2pi5 * (x1p - x4p)
                t3i = sinf2pi5 * (y1p - y4p)
                t4r = sinf2pi5 * (x2p - x3p)
                t4i = sinf2pi5 * (y2p - y3p)
                t5r = t1r + t2r
                t5i = t1i + t2i
                t6r = rtf516 * (t1r - t2r)
                t6i = rtf516 * (t1i - t2i)
                t7r = a[i, 0, k ] - 0.25 * t5r
                t7i = a[i, 0, kc] - 0.25 * t5i
                t8r = t7r + t6r
                t8i = t7i + t6i
                t9r = t7r - t6r
                t9i = t7i - t6i
                t10r = t3r + sinrat * t4r
                t10i = t3i + sinrat * t4i
                t11r = t4r - sinrat * t3r
                t11i = sinrat * t3i - t4i
                b[i, k, 0] = a[i, 0, k ] + t5r
                b[i, kc, 0] = t8r - t10i
                b[i, k, 1] = t8r + t10i
                b[i, kc, 1] = t9r - t11i
                b[i, k, 2] = t9r + t11i
                b[i, kc, 2] = t9i + t11r
                b[i, k, 3] = t11r - t9i
                b[i, kc, 3] = t8i - t10r
                b[i, k, 4] = -t8i - t10r
                b[i, kc, 4] = a[i, 0, kc] + t5i
    return (a, b)
##===========================================
#subroutine forrdx4(a,b,nv,lv,cosine,sine)
## Radix four physical to Hermitian FFT with 'decimation in time'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:3,0:lv-1),b(0:nv-1,0:lv-1,0:3),cosine(0:lv-1,1:3),sine(0:lv-1,1:3)
# #Local declarations:
#double precision,parameter:: rtf12=0.7071067811865475244008443621048490392848359376884740365883398689953662d0
#double precision:: x1p,x2p,x3p,y1p,y2p,y3p
#double precision:: s1k,s2k,s3k,c1k,c2k,c3k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r,t4i,t4r
#double precision:: q1,q2
#integer:: i,k,kc,lvd2
##-----------------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t1r=a(i,0,0)+a(i,2,0)
# t2r=a(i,1,0)+a(i,3,0)
# b(i,0,0)=t1r+t2r
# b(i,0,1)=a(i,0,0)-a(i,2,0)
# b(i,0,2)=t1r-t2r
# b(i,0,3)=a(i,3,0)-a(i,1,0)
#enddo
# #Next do remaining k:
#if (nv .lt. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# x1p=cosine(k,1)*a(i,1, k)-sine(k,1)*a(i,1,kc)
# y1p=cosine(k,1)*a(i,1,kc)+sine(k,1)*a(i,1, k)
# x2p=cosine(k,2)*a(i,2, k)-sine(k,2)*a(i,2,kc)
# y2p=cosine(k,2)*a(i,2,kc)+sine(k,2)*a(i,2, k)
# x3p=cosine(k,3)*a(i,3, k)-sine(k,3)*a(i,3,kc)
# y3p=cosine(k,3)*a(i,3,kc)+sine(k,3)*a(i,3, k)
# t1r=a(i,0,k)+x2p
# t1i=a(i,0,kc)+y2p
# t2r=x1p+x3p
# t2i=y1p+y3p
# t3r=a(i,0,k)-x2p
# t3i=a(i,0,kc)-y2p
# t4r=x3p-x1p
# t4i=y1p-y3p
# b(i, k,0)=t1r+t2r
# b(i,kc,0)=t3r-t4i
# b(i, k,1)=t3r+t4i
# b(i,kc,1)=t1r-t2r
# b(i, k,2)=t2i-t1i
# b(i,kc,2)=t3i+t4r
# b(i, k,3)=t4r-t3i
# b(i,kc,3)=t1i+t2i
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# c3k=cosine(k,3)
# s3k=sine(k,3)
# do i=0,nv-1
# x1p=c1k*a(i,1, k)-s1k*a(i,1,kc)
# y1p=c1k*a(i,1,kc)+s1k*a(i,1, k)
# x2p=c2k*a(i,2, k)-s2k*a(i,2,kc)
# y2p=c2k*a(i,2,kc)+s2k*a(i,2, k)
# x3p=c3k*a(i,3, k)-s3k*a(i,3,kc)
# y3p=c3k*a(i,3,kc)+s3k*a(i,3, k)
# t1r=a(i,0,k)+x2p
# t1i=a(i,0,kc)+y2p
# t2r=x1p+x3p
# t2i=y1p+y3p
# t3r=a(i,0,k)-x2p
# t3i=a(i,0,kc)-y2p
# t4r=x3p-x1p
# t4i=y1p-y3p
# b(i, k,0)=t1r+t2r
# b(i,kc,0)=t3r-t4i
# b(i, k,1)=t3r+t4i
# b(i,kc,1)=t1r-t2r
# b(i, k,2)=t2i-t1i
# b(i,kc,2)=t3i+t4r
# b(i, k,3)=t4r-t3i
# b(i,kc,3)=t1i+t2i
# enddo
# enddo
#endif
# #Catch the case k=lv/2 when lv even:
#if (mod(lv,2) .eq. 0) then
# lvd2=lv/2
# do i=0,nv-1
# q1=rtf12*(a(i,1,lvd2)-a(i,3,lvd2))
# q2=rtf12*(a(i,1,lvd2)+a(i,3,lvd2))
# b(i,lvd2,0)=a(i,0,lvd2)+q1
# b(i,lvd2,1)=a(i,0,lvd2)-q1
# b(i,lvd2,2)=a(i,2,lvd2)-q2
# b(i,lvd2,3)=-a(i,2,lvd2)-q2
# enddo
#endif
#return
#end subroutine
##================================================
#subroutine forrdx3(a,b,nv,lv,cosine,sine)
## Radix three physical to Hermitian FFT with 'decimation in time'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:2,0:lv-1),b(0:nv-1,0:lv-1,0:2),cosine(0:lv-1,1:2),sine(0:lv-1,1:2)
# #Local declarations:
#double precision,parameter:: sinfpi3=0.8660254037844386467637231707529361834714026269051903140279034897259665d0
#double precision:: x1p,x2p,y1p,y2p
#double precision:: s1k,s2k,c1k,c2k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r
#integer:: i,k,kc
##---------------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t1r=a(i,1,0)+a(i,2,0)
# b(i,0,0)=a(i,0,0)+t1r
# b(i,0,1)=a(i,0,0)-0.5d0*t1r
# b(i,0,2)=sinfpi3*(a(i,2,0)-a(i,1,0))
#enddo
# #Next do remaining k:
#if (nv .le. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# x1p=cosine(k,1)*a(i,1, k)-sine(k,1)*a(i,1,kc)
# y1p=cosine(k,1)*a(i,1,kc)+sine(k,1)*a(i,1, k)
# x2p=cosine(k,2)*a(i,2, k)-sine(k,2)*a(i,2,kc)
# y2p=cosine(k,2)*a(i,2,kc)+sine(k,2)*a(i,2, k)
# t1r=x1p+x2p
# t1i=y1p+y2p
# t2r=a(i,0, k)-0.5d0*t1r
# t2i=0.5d0*t1i-a(i,0,kc)
# t3r=sinfpi3*(x2p-x1p)
# t3i=sinfpi3*(y1p-y2p)
# b(i, k,0)=a(i,0, k)+t1r
# b(i,kc,0)=t2r-t3i
# b(i, k,1)=t2r+t3i
# b(i,kc,1)=t3r-t2i
# b(i, k,2)=t2i+t3r
# b(i,kc,2)=a(i,0,kc)+t1i
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# do i=0,nv-1
# x1p=c1k*a(i,1, k)-s1k*a(i,1,kc)
# y1p=c1k*a(i,1,kc)+s1k*a(i,1, k)
# x2p=c2k*a(i,2, k)-s2k*a(i,2,kc)
# y2p=c2k*a(i,2,kc)+s2k*a(i,2, k)
# t1r=x1p+x2p
# t1i=y1p+y2p
# t2r=a(i,0, k)-0.5d0*t1r
# t2i=0.5d0*t1i-a(i,0,kc)
# t3r=sinfpi3*(x2p-x1p)
# t3i=sinfpi3*(y1p-y2p)
# b(i, k,0)=a(i,0, k)+t1r
# b(i,kc,0)=t2r-t3i
# b(i, k,1)=t2r+t3i
# b(i,kc,1)=t3r-t2i
# b(i, k,2)=t2i+t3r
# b(i,kc,2)=a(i,0,kc)+t1i
# enddo
# enddo
#endif
#return
#end subroutine
##========================================
#subroutine forrdx2(a,b,nv,lv,cosine,sine)
## Radix two physical to Hermitian FFT with 'decimation in time'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:1,0:lv-1),b(0:nv-1,0:lv-1,0:1),cosine(0:lv-1),sine(0:lv-1)
# #Local declarations:
#double precision:: x1,y1,c1k,s1k
#integer:: i,k,kc
##-----------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# b(i,0,0)=a(i,0,0)+a(i,1,0)
# b(i,0,1)=a(i,0,0)-a(i,1,0)
#enddo
# #Next do remaining k:
#if (nv .lt. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# x1=cosine(k)*a(i,1, k)-sine(k)*a(i,1,kc)
# y1=cosine(k)*a(i,1,kc)+sine(k)*a(i,1, k)
# b(i, k,0)=a(i,0, k)+x1
# b(i,kc,0)=a(i,0, k)-x1
# b(i, k,1)=y1-a(i,0,kc)
# b(i,kc,1)=a(i,0,kc)+y1
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k)
# s1k=sine(k)
# do i=0,nv-1
# x1=c1k*a(i,1, k)-s1k*a(i,1,kc)
# y1=c1k*a(i,1,kc)+s1k*a(i,1, k)
# b(i, k,0)=a(i,0, k)+x1
# b(i,kc,0)=a(i,0, k)-x1
# b(i, k,1)=y1-a(i,0,kc)
# b(i,kc,1)=a(i,0,kc)+y1
# enddo
# enddo
#endif
#return
#end subroutine
##======================================
##====================================================
## Spectral to physical (reverse) routines:
##====================================================
#subroutine revrdx6(a,b,nv,lv,cosine,sine)
##Radix six Hermitian to physical FFT with 'decimation in frequency'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:lv-1,0:5),b(0:nv-1,0:5,0:lv-1),cosine(0:lv-1,1:5),sine(0:lv-1,1:5)
# #Local declarations:
#double precision,parameter:: sinfpi3=0.8660254037844386467637231707529361834714026269051903140279034897259665d0
#double precision:: x1p,x2p,x3p,x4p,x5p
#double precision:: y1p,y2p,y3p,y4p,y5p
#double precision:: s1k,s2k,s3k,s4k,s5k
#double precision:: c1k,c2k,c3k,c4k,c5k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r
#double precision:: u0i,u0r,u1i,u1r,u2i,u2r
#double precision:: v0i,v0r,v1i,v1r,v2i,v2r
#double precision:: q1,q2,q3,q4,q5,q6
#integer:: i,k,kc,lvd2
##-----------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t2r=a(i,0,0)-0.5d0*a(i,0,2)
# t3r=sinfpi3*a(i,0,4)
# u0r=a(i,0,0)+a(i,0,2)
# u1r=t2r+t3r
# u2r=t2r-t3r
# t2i=a(i,0,3)-0.5d0*a(i,0,1)
# t3i=-sinfpi3*a(i,0,5)
# v0r=a(i,0,3)+a(i,0,1)
# v1r=t2i+t3i
# v2r=t2i-t3i
# b(i,0,0)=u0r+v0r
# b(i,1,0)=u1r-v1r
# b(i,2,0)=u2r+v2r
# b(i,3,0)=u0r-v0r
# b(i,4,0)=u1r+v1r
# b(i,5,0)=u2r-v2r
#enddo
# #Next do remaining k:
#if (nv .le. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# t1r=a(i, k,2)+a(i,kc,1)
# t1i=a(i,kc,3)-a(i, k,4)
# t2r=a(i, k,0)-0.5d0*t1r
# t2i=a(i,kc,5)-0.5d0*t1i
# t3r=sinfpi3*(a(i, k,2)-a(i,kc,1))
# t3i=sinfpi3*(a(i,kc,3)+a(i, k,4))
# u0r=a(i, k,0)+t1r
# u0i=a(i,kc,5)+t1i
# u1r=t2r+t3i
# u1i=t2i-t3r
# u2r=t2r-t3i
# u2i=t2i+t3r
# t1r=a(i,kc,0)+a(i,k,1)
# t1i=a(i,kc,4)-a(i,k,5)
# t2r=a(i,kc,2)-0.5d0*t1r
# t2i=-a(i,k,3)-0.5d0*t1i
# t3r=sinfpi3*(a(i,kc,0)-a(i, k,1))
# t3i=sinfpi3*(-a(i,k,5)-a(i,kc,4))
# v0r=a(i,kc,2)+t1r
# v0i=t1i-a(i,k,3)
# v1r=t2r+t3i
# v1i=t2i-t3r
# v2r=t2r-t3i
# v2i=t2i+t3r
# x1p=u1r-v1r
# y1p=u1i-v1i
# x2p=u2r+v2r
# y2p=u2i+v2i
# x3p=u0r-v0r
# y3p=u0i-v0i
# x4p=u1r+v1r
# y4p=u1i+v1i
# x5p=u2r-v2r
# y5p=u2i-v2i
# b(i,0, k)=u0r+v0r
# b(i,0,kc)=u0i+v0i
# b(i,1, k)=cosine(k,1)*x1p-sine(k,1)*y1p
# b(i,1,kc)=cosine(k,1)*y1p+sine(k,1)*x1p
# b(i,2, k)=cosine(k,2)*x2p-sine(k,2)*y2p
# b(i,2,kc)=cosine(k,2)*y2p+sine(k,2)*x2p
# b(i,3, k)=cosine(k,3)*x3p-sine(k,3)*y3p
# b(i,3,kc)=cosine(k,3)*y3p+sine(k,3)*x3p
# b(i,4, k)=cosine(k,4)*x4p-sine(k,4)*y4p
# b(i,4,kc)=cosine(k,4)*y4p+sine(k,4)*x4p
# b(i,5, k)=cosine(k,5)*x5p-sine(k,5)*y5p
# b(i,5,kc)=cosine(k,5)*y5p+sine(k,5)*x5p
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# c3k=cosine(k,3)
# s3k=sine(k,3)
# c4k=cosine(k,4)
# s4k=sine(k,4)
# c5k=cosine(k,5)
# s5k=sine(k,5)
# do i=0,nv-1
# t1r=a(i, k,2)+a(i,kc,1)
# t1i=a(i,kc,3)-a(i, k,4)
# t2r=a(i, k,0)-0.5d0*t1r
# t2i=a(i,kc,5)-0.5d0*t1i
# t3r=sinfpi3*(a(i, k,2)-a(i,kc,1))
# t3i=sinfpi3*(a(i,kc,3)+a(i, k,4))
# u0r=a(i, k,0)+t1r
# u0i=a(i,kc,5)+t1i
# u1r=t2r+t3i
# u1i=t2i-t3r
# u2r=t2r-t3i
# u2i=t2i+t3r
# t1r=a(i,kc,0)+a(i,k,1)
# t1i=a(i,kc,4)-a(i,k,5)
# t2r=a(i,kc,2)-0.5d0*t1r
# t2i=-a(i,k,3)-0.5d0*t1i
# t3r=sinfpi3*(a(i,kc,0)-a(i, k,1))
# t3i=sinfpi3*(-a(i,k,5)-a(i,kc,4))
# v0r=a(i,kc,2)+t1r
# v0i=t1i-a(i,k,3)
# v1r=t2r+t3i
# v1i=t2i-t3r
# v2r=t2r-t3i
# v2i=t2i+t3r
# x1p=u1r-v1r
# y1p=u1i-v1i
# x2p=u2r+v2r
# y2p=u2i+v2i
# x3p=u0r-v0r
# y3p=u0i-v0i
# x4p=u1r+v1r
# y4p=u1i+v1i
# x5p=u2r-v2r
# y5p=u2i-v2i
# b(i,0, k)=u0r+v0r
# b(i,0,kc)=u0i+v0i
# b(i,1, k)=c1k*x1p-s1k*y1p
# b(i,1,kc)=c1k*y1p+s1k*x1p
# b(i,2, k)=c2k*x2p-s2k*y2p
# b(i,2,kc)=c2k*y2p+s2k*x2p
# b(i,3, k)=c3k*x3p-s3k*y3p
# b(i,3,kc)=c3k*y3p+s3k*x3p
# b(i,4, k)=c4k*x4p-s4k*y4p
# b(i,4,kc)=c4k*y4p+s4k*x4p
# b(i,5, k)=c5k*x5p-s5k*y5p
# b(i,5,kc)=c5k*y5p+s5k*x5p
# enddo
# enddo
#endif
# #Catch the case k=lv/2 when lv even:
#if (mod(lv,2) .eq. 0) then
# lvd2=lv/2
# do i=0,nv-1
# q1=a(i,lvd2,0)+a(i,lvd2,2)
# q2=a(i,lvd2,5)+a(i,lvd2,3)
# q3=a(i,lvd2,1)-0.5d0*q1
# q4=a(i,lvd2,4)+0.5d0*q2
# q5=sinfpi3*(a(i,lvd2,0)-a(i,lvd2,2))
# q6=sinfpi3*(a(i,lvd2,5)-a(i,lvd2,3))
# b(i,0,lvd2)=a(i,lvd2,1)+q1
# b(i,1,lvd2)=q4+q5
# b(i,2,lvd2)=q6-q3
# b(i,3,lvd2)=q2-a(i,lvd2,4)
# b(i,4,lvd2)=q3+q6
# b(i,5,lvd2)=q4-q5
# enddo
#endif
#return
#end subroutine
##=======================================
#subroutine revrdx5(a,b,nv,lv,cosine,sine)
## Radix five Hermitian to physical FFT with 'decimation in frequency'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:lv-1,0:4),b(0:nv-1,0:4,0:lv-1),cosine(0:lv-1,1:4),sine(0:lv-1,1:4)
# #Local declarations:
#double precision,parameter:: rtf516=0.5590169943749474241022934171828190588601545899028814310677243113526302d0
#double precision,parameter:: sinf2pi5=0.9510565162951535721164393333793821434056986341257502224473056444301532d0
#double precision,parameter:: sinfpi5=0.5877852522924731291687059546390727685976524376431459910722724807572785d0
#double precision,parameter:: sinrat=0.6180339887498948482045868343656381177203091798057628621354486227052605d0
#double precision:: x1p,x2p,x3p,x4p,y1p,y2p,y3p,y4p
#double precision:: s1k,s2k,s3k,s4k,c1k,c2k,c3k,c4k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r,t4i,t4r,t5i,t5r,t6i,t6r
#double precision:: t7i,t7r,t8i,t8r,t9i,t9r,t10i,t10r,t11i,t11r
#integer:: i,k,kc
##----------------------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t3r=sinf2pi5*a(i,0,4)
# t4r=sinf2pi5*a(i,0,3)
# t5r=a(i,0,1)+a(i,0,2)
# t6r=rtf516*(a(i,0,1)-a(i,0,2))
# t7r=a(i,0,0)-0.25d0*t5r
# t8r=t7r+t6r
# t9r=t7r-t6r
# t10r=t3r+sinrat*t4r
# t11r=sinrat*t3r-t4r
# b(i,0,0)=a(i,0,0)+t5r
# b(i,1,0)=t8r+t10r
# b(i,2,0)=t9r+t11r
# b(i,3,0)=t9r-t11r
# b(i,4,0)=t8r-t10r
#enddo
# #Next do remaining k:
#if (nv .le. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# t1r=a(i, k,1)+a(i,kc,0)
# t1i=a(i,kc,3)-a(i, k,4)
# t2r=a(i, k,2)+a(i,kc,1)
# t2i=a(i,kc,2)-a(i, k,3)
# t3r=sinf2pi5*(a(i, k,1)-a(i,kc,0))
# t3i=sinf2pi5*(a(i,kc,3)+a(i, k,4))
# t4r=sinf2pi5*(a(i, k,2)-a(i,kc,1))
# t4i=sinf2pi5*(a(i,kc,2)+a(i, k,3))
# t5r=t1r+t2r
# t5i=t1i+t2i
# t6r=rtf516*(t1r-t2r)
# t6i=rtf516*(t1i-t2i)
# t7r=a(i,k,0)-0.25d0*t5r
# t7i=a(i,kc,4)-0.25d0*t5i
# t8r=t7r+t6r
# t8i=t7i+t6i
# t9r=t7r-t6r
# t9i=t7i-t6i
# t10r=t3r+sinrat*t4r
# t10i=t3i+sinrat*t4i
# t11r=sinrat*t3r-t4r
# t11i=sinrat*t3i-t4i
# x1p=t8r+t10i
# y1p=t8i-t10r
# x2p=t9r+t11i
# y2p=t9i-t11r
# x3p=t9r-t11i
# y3p=t9i+t11r
# x4p=t8r-t10i
# y4p=t8i+t10r
# b(i,0, k)=a(i, k,0)+t5r
# b(i,0,kc)=a(i,kc,4)+t5i
# b(i,1, k)=cosine(k,1)*x1p-sine(k,1)*y1p
# b(i,1,kc)=cosine(k,1)*y1p+sine(k,1)*x1p
# b(i,2, k)=cosine(k,2)*x2p-sine(k,2)*y2p
# b(i,2,kc)=cosine(k,2)*y2p+sine(k,2)*x2p
# b(i,3, k)=cosine(k,3)*x3p-sine(k,3)*y3p
# b(i,3,kc)=cosine(k,3)*y3p+sine(k,3)*x3p
# b(i,4, k)=cosine(k,4)*x4p-sine(k,4)*y4p
# b(i,4,kc)=cosine(k,4)*y4p+sine(k,4)*x4p
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# c3k=cosine(k,3)
# s3k=sine(k,3)
# c4k=cosine(k,4)
# s4k=sine(k,4)
# do i=0,nv-1
# t1r=a(i, k,1)+a(i,kc,0)
# t1i=a(i,kc,3)-a(i, k,4)
# t2r=a(i, k,2)+a(i,kc,1)
# t2i=a(i,kc,2)-a(i, k,3)
# t3r=sinf2pi5*(a(i, k,1)-a(i,kc,0))
# t3i=sinf2pi5*(a(i,kc,3)+a(i, k,4))
# t4r=sinf2pi5*(a(i, k,2)-a(i,kc,1))
# t4i=sinf2pi5*(a(i,kc,2)+a(i, k,3))
# t5r=t1r+t2r
# t5i=t1i+t2i
# t6r=rtf516*(t1r-t2r)
# t6i=rtf516*(t1i-t2i)
# t7r=a(i,k,0)-0.25d0*t5r
# t7i=a(i,kc,4)-0.25d0*t5i
# t8r=t7r+t6r
# t8i=t7i+t6i
# t9r=t7r-t6r
# t9i=t7i-t6i
# t10r=t3r+sinrat*t4r
# t10i=t3i+sinrat*t4i
# t11r=sinrat*t3r-t4r
# t11i=sinrat*t3i-t4i
# x1p=t8r+t10i
# y1p=t8i-t10r
# x2p=t9r+t11i
# y2p=t9i-t11r
# x3p=t9r-t11i
# y3p=t9i+t11r
# x4p=t8r-t10i
# y4p=t8i+t10r
# b(i,0, k)=a(i, k,0)+t5r
# b(i,0,kc)=a(i,kc,4)+t5i
# b(i,1, k)=c1k*x1p-s1k*y1p
# b(i,1,kc)=c1k*y1p+s1k*x1p
# b(i,2, k)=c2k*x2p-s2k*y2p
# b(i,2,kc)=c2k*y2p+s2k*x2p
# b(i,3, k)=c3k*x3p-s3k*y3p
# b(i,3,kc)=c3k*y3p+s3k*x3p
# b(i,4, k)=c4k*x4p-s4k*y4p
# b(i,4,kc)=c4k*y4p+s4k*x4p
# enddo
# enddo
#endif
#return
#end subroutine
##=================================================
#subroutine revrdx4(a,b,nv,lv,cosine,sine)
##Radix four Hermitian to physical FFT with 'decimation in frequency'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:lv-1,0:3),b(0:nv-1,0:3,0:lv-1),cosine(0:lv-1,1:3),sine(0:lv-1,1:3)
# #Local declarations:
#double precision,parameter:: rtf12=0.7071067811865475244008443621048490392848359376884740365883398689953662d0
#double precision:: x1p,x2p,x3p,y1p,y2p,y3p
#double precision:: s1k,s2k,s3k,c1k,c2k,c3k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r,t4i,t4r
#integer:: i,k,kc,lvd2
##--------------------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t1r=a(i,0,0)+a(i,0,2)
# t2r=a(i,0,1)
# t3r=a(i,0,0)-a(i,0,2)
# t4r=a(i,0,3)
# b(i,0,0)=t1r+t2r
# b(i,1,0)=t3r+t4r
# b(i,2,0)=t1r-t2r
# b(i,3,0)=t3r-t4r
#enddo
# #Next do remaining k:
#if (nv .lt. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# t1r=a(i, k,0)+a(i,kc,1)
# t1i=a(i,kc,3)-a(i, k,2)
# t2r=a(i, k,1)+a(i,kc,0)
# t2i=a(i,kc,2)-a(i, k,3)
# t3r=a(i, k,0)-a(i,kc,1)
# t3i=a(i,kc,3)+a(i, k,2)
# t4r=a(i, k,1)-a(i,kc,0)
# t4i=a(i,kc,2)+a(i, k,3)
# x1p=t3r+t4i
# y1p=t3i-t4r
# x2p=t1r-t2r
# y2p=t1i-t2i
# x3p=t3r-t4i
# y3p=t3i+t4r
# b(i,0, k)=t1r+t2r
# b(i,0,kc)=t1i+t2i
# b(i,1, k)=cosine(k,1)*x1p-sine(k,1)*y1p
# b(i,1,kc)=cosine(k,1)*y1p+sine(k,1)*x1p
# b(i,2, k)=cosine(k,2)*x2p-sine(k,2)*y2p
# b(i,2,kc)=cosine(k,2)*y2p+sine(k,2)*x2p
# b(i,3, k)=cosine(k,3)*x3p-sine(k,3)*y3p
# b(i,3,kc)=cosine(k,3)*y3p+sine(k,3)*x3p
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# c3k=cosine(k,3)
# s3k=sine(k,3)
# do i=0,nv-1
# t1r=a(i,k,0)+a(i,kc,1)
# t1i=a(i,kc,3)-a(i,k,2)
# t2r=a(i,k,1)+a(i,kc,0)
# t2i=a(i,kc,2)-a(i,k,3)
# t3r=a(i,k,0)-a(i,kc,1)
# t3i=a(i,kc,3)+a(i,k,2)
# t4r=a(i,k,1)-a(i,kc,0)
# t4i=a(i,kc,2)+a(i,k,3)
# x1p=t3r+t4i
# y1p=t3i-t4r
# x2p=t1r-t2r
# y2p=t1i-t2i
# x3p=t3r-t4i
# y3p=t3i+t4r
# b(i,0, k)=t1r+t2r
# b(i,0,kc)=t1i+t2i
# b(i,1, k)=c1k*x1p-s1k*y1p
# b(i,1,kc)=c1k*y1p+s1k*x1p
# b(i,2, k)=c2k*x2p-s2k*y2p
# b(i,2,kc)=c2k*y2p+s2k*x2p
# b(i,3, k)=c3k*x3p-s3k*y3p
# b(i,3,kc)=c3k*y3p+s3k*x3p
# enddo
# enddo
#endif
# #Catch the case k=lv/2 when lv even:
#if (mod(lv,2) .eq. 0) then
# lvd2=lv/2
# do i=0,nv-1
# b(i,0,lvd2)=a(i,lvd2,0)+a(i,lvd2,1)
# b(i,2,lvd2)=a(i,lvd2,3)-a(i,lvd2,2)
# t3r=a(i,lvd2,0)-a(i,lvd2,1)
# t4r=a(i,lvd2,3)+a(i,lvd2,2)
# b(i,1,lvd2)=rtf12*(t3r+t4r)
# b(i,3,lvd2)=rtf12*(t4r-t3r)
# enddo
#endif
#return
#end subroutine
##=================================================
#subroutine revrdx3(a,b,nv,lv,cosine,sine)
##Radix three Hermitian to physical FFT with 'decimation in frequency'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:lv-1,0:2),b(0:nv-1,0:2,0:lv-1),cosine(0:lv-1,1:2),sine(0:lv-1,1:2)
#
# #Local declarations:
#double precision,parameter:: sinfpi3=0.8660254037844386467637231707529361834714026269051903140279034897259665d0
#double precision:: x1p,x2p,y1p,y2p
#double precision:: c2k,c1k,s2k,s1k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r
#integer:: i,k,kc
##-------------------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t1r=a(i,0,1)
# t2r=a(i,0,0)-0.5d0*t1r
# t3r=sinfpi3*a(i,0,2)
# b(i,0,0)=a(i,0,0)+t1r
# b(i,1,0)=t2r+t3r
# b(i,2,0)=t2r-t3r
#enddo
# #Next do remaining k:
#if (nv .le. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# t1r=a(i, k,1)+a(i,kc,0)
# t1i=a(i,kc,1)-a(i, k,2)
# t2r=a(i, k,0)-0.5d0*t1r
# t2i=a(i,kc,2)-0.5d0*t1i
# t3r=sinfpi3*(a(i, k,1)-a(i,kc,0))
# t3i=sinfpi3*(a(i,kc,1)+a(i, k,2))
# x1p=t2r+t3i
# y1p=t2i-t3r
# x2p=t2r-t3i
# y2p=t2i+t3r
# b(i,0, k)=a(i, k,0)+t1r
# b(i,0,kc)=a(i,kc,2)+t1i
# b(i,1, k)=cosine(k,1)*x1p-sine(k,1)*y1p
# b(i,1,kc)=sine(k,1)*x1p+cosine(k,1)*y1p
# b(i,2, k)=cosine(k,2)*x2p-sine(k,2)*y2p
# b(i,2,kc)=sine(k,2)*x2p+cosine(k,2)*y2p
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# do i=0,nv-1
# t1r=a(i, k,1)+a(i,kc,0)
# t1i=a(i,kc,1)-a(i, k,2)
# t2r=a(i, k,0)-0.5d0*t1r
# t2i=a(i,kc,2)-0.5d0*t1i
# t3r=sinfpi3*(a(i, k,1)-a(i,kc,0))
# t3i=sinfpi3*(a(i,kc,1)+a(i, k,2))
# x1p=t2r+t3i
# y1p=t2i-t3r
# x2p=t2r-t3i
# y2p=t2i+t3r
# b(i,0, k)=a(i, k,0)+t1r
# b(i,0,kc)=a(i,kc,2)+t1i
# b(i,1, k)=c1k*x1p-s1k*y1p
# b(i,1,kc)=s1k*x1p+c1k*y1p
# b(i,2, k)=c2k*x2p-s2k*y2p
# b(i,2,kc)=s2k*x2p+c2k*y2p
# enddo
# enddo
#endif
#return
#end subroutine
##================================================
#subroutine revrdx2(a,b,nv,lv,cosine,sine)
##Radix two Hermitian to physical FFT with 'decimation in frequency'.
#implicit none
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:lv-1,0:1),b(0:nv-1,0:1,0:lv-1),cosine(0:lv-1),sine(0:lv-1)
# #Local declarations:
#double precision:: x1p,y1p,c1k,s1k
#integer:: i,k,kc
##-----------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# b(i,0,0)=a(i,0,0)+a(i,0,1)
# b(i,1,0)=a(i,0,0)-a(i,0,1)
#enddo
# #Next do remaining k:
#if (nv .lt. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# x1p=a(i, k,0)-a(i,kc,0)
# y1p=a(i,kc,1)+a(i, k,1)
# b(i,0, k)=a(i, k,0)+a(i,kc,0)
# b(i,0,kc)=a(i,kc,1)-a(i, k,1)
# b(i,1, k)=cosine(k)*x1p-sine(k)*y1p
# b(i,1,kc)=cosine(k)*y1p+sine(k)*x1p
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k)
# s1k=sine(k)
# do i=0,nv-1
# x1p=a(i, k,0)-a(i,kc,0)
# y1p=a(i,kc,1)+a(i, k,1)
# b(i,0, k)=a(i, k,0)+a(i,kc,0)
# b(i,0,kc)=a(i,kc,1)-a(i, k,1)
# b(i,1, k)=c1k*x1p-s1k*y1p
# b(i,1,kc)=c1k*y1p+s1k*x1p
# enddo
# enddo
#endif
#return
#end subroutine
##=================================================
|
julianmak/pydra | native/constants.py | ##/usr/bin/env python3
#
# JM: 11 Apr 2018
#
# constants that are unchanged, depending on the customisable parameters.py
from parameters import *
from numpy import sqrt, pi
# Contains all the non-modifiable parameters as well as all
# quantities which never change throughout a simulation
# for the suite of casl f90 codes.

# Grid dimensions +/- 1 & 2:
nxp1, nxm1, nxm2 = nx + 1, nx - 1, nx - 2
nyp1, nym1 = ny + 1, ny - 1

# Fine grid used normally in contour -> grid conversion:
mgf = 4
nxf, nyf = mgf * nx, mgf * ny
# mgf: fine grid/coarse grid ratio (4 is required by subroutine
# coarsen in contours.f90)

# Fine grid dimensions +/- 1 & 2:
nxfm1, nxfm2 = nxf - 1, nxf - 2
nyfp1, nyfm1 = nyf + 1, nyf - 1

# Ultra-fine grid used in contouring:
mgu = 16
nxu, nyu = mgu * nx, mgu * ny
# mgu: ultra-fine grid/coarse grid ratio (16 is the default)

# Ultra-fine grid dimensions +/- 1 & 2:
nxum1, nxum2 = nxu - 1, nxu - 2
nyup1, nyum1 = nyu + 1, nyu - 1

# For reading & writing direct access data (4 bytes per float32 value):
ngridp = nx * nyp1
nbytes = 4 * (ngridp + 1)

# Maximum number of contour levels (used in surgery and congen):
nlevm = 2000
# nlevm: up to 2*nlevm contour levels are allowed

# Maximum number of contour nodes:
npm = 625 * nx * ny
# Maximum number of contours
# (integer division "//": these are counts, and the f90 originals use
# integer arithmetic; plain "/" would make them floats in Python 3):
nm = npm // 20 + npm // 200
# Maximum number of nodes on any single contour:
nprm = npm // 10
# Maximum number of nodes in any contour level:
nplm = npm // 2

# Generic double precision numerical constants:
# JM: got rid of this

# Domain lengths and inverses:
hlx = 0.5 * ellx
hlxi = 1.0 / hlx
xmin, xmax = -hlx, hlx
ymax = ymin + elly
ycen = (ymin + ymax) / 2.0
# hly is shaved by a tiny factor to keep points strictly interior:
hly = (1.0 - 1.0e-12) * 0.5 * elly
ybeg = ycen - hly

# Fractional thickness of the upper layer (note: h1 + h2 = 1):
h2 = 1.0 - h1
h1h2 = h1 * h2
h1inv, h2inv = 1.0 / h1, 1.0 / h2

# Constants used to define relative vorticity and interface displacements:
alphac = 1.0 - alpha
kdbarsq = kdbar * kdbar
h1kdbarsq = h1 * kdbarsq
h1h2kdbarsq = h1h2 * kdbarsq
h1h2ackdbarsq = h1h2 * alphac * kdbarsq
massfac = 1.0 / (h1 + alpha * h2)

# Define Rossby deformation wavenumbers (for each mode):
gamma1 = 0.50 - sqrt(0.25 - alphac * h1 * h2)
gamma2 = 0.50 + sqrt(0.25 - alphac * h1 * h2)
kd1sq = gamma1 * kdbarsq
kd1 = sqrt(kd1sq)
kd2sq = gamma2 * kdbarsq
kd2 = sqrt(kd2sq)

# Define layer -> mode transformation coefficients:
vec11 = h1 / (1.0 - gamma1)             #mode 1, layer 1
vec12 = (h2 - gamma1) / (1.0 - gamma1)  #mode 1, layer 2
vec21 = h1 / (h2 - gamma2)              #mode 2, layer 1
vec22 = 1.0                             #mode 2, layer 2
vec2sum = vec21 + vec22

# Define mode -> layer transformation coefficients (inverse of the above):
determinant = vec11 * vec22 - vec12 * vec21
vect11 = vec22 / determinant   #layer 1, mode 1
vect12 =-vec12 / determinant   #layer 1, mode 2
vect21 =-vec21 / determinant   #layer 2, mode 1
vect22 = vec11 / determinant   #layer 2, mode 2

# Initialise coefficients for decomposing energy & enstrophy into modes:
coeff1 = h1 * (vect11 ** 2) + alpha * h2 * (vect21 ** 2)
coeff2 = h1 * (vect12 ** 2) + alpha * h2 * (vect22 ** 2)

# Maximum time step (a fraction of the smaller save interval):
dtmax = 0.1 * min(tgsave,tcsave)

# Set maximum Rossby wave frequency (used in adapt in evolution.f90):
srwfm = beta / ( (2.0 * pi / ellx)**2 + kd1sq )

# Basic constants:
domarea = ellx * elly
aspect = ellx / elly
glx, glxi = ellx / nx, nx / ellx
gly, glyi = elly / ny, ny / elly
garea, dsumi = glx * gly, 1.0 / (nx * ny)

# Logical control variables:
sponge = (thdmax > 0.0)
heating = ( (rtherm1 > 0.0) or (rtherm2 > 0.0) or sponge)
friction = (rekman > 0.0)
damping = (heating or friction)
barot = (alpha == 1.0)
stoch = (eirate > 0.0)
|
julianmak/pydra | native/__init__.py | <gh_stars>0
#!/usr/bin/env python3
#
# JM: 11 Apr 2018
#
# to initialise pydra analysis and load various things
from parameters import *
from constants import *
|
julianmak/pydra | wrapper/sample_f2py/test_driver.py | #!/usr/bin/env python3
#
from numpy import f2py
sourcefile = open("test_mod.f90", "rb") # open as binary
sourcecode = sourcefile.read()
print(sourcecode)
f2py.compile(sourcecode, modulename = "murp", extension = ".f90")
from murp import test_mod
test_mod.print_murp(6.0)
p = test_mod.add_murp(3.0, 4.0)
print(p)
j = test_mod.self_murp(5.0)
print(j)
p, q = test_mod.multiple_murp(6.0, 7.0)
print(p)
print(q)
|
julianmak/pydra | wrapper/pydra_misc.py | <reponame>julianmak/pydra
#!/usr/bin/env python3
#
# JM, 20 Apr 2018
#
# some misc functions relating to pydra
import os
import numpy as np
from numpy.fft import rfft, irfft
#-------------------------------------------------------------------------------
# read the PV data
def read_qq(data_dir, nx, ny, kt, num_frame = False):
    """
    Subfunction to read the PV data (assumes they are called qq1 and qq2).

    On disk each frame is one float32 header (the dump time) followed by
    nx * (ny + 1) float32 grid values, so a frame is N = nx*(ny+1)+1 floats.

    Input:
      data_dir     string for where data is located
      nx           number of x points
      ny           number of y points
      kt           index of frame to pull out (set num_frame = True to see
                   how many frames are found)
      num_frame    if True, print the number of frames found
    Output:
      t_now        actual dump time from header
      qq[x, y, layer]  PV data (NOTE: indexing may change)
    """
    N = nx * (ny + 1) + 1             # float32 values per frame (header + grid)
    qq = np.zeros((nx, ny + 1, 2))

    qq1_filename = data_dir + "qq1.r4"
    # fix: the files are raw binary, so open in "rb" rather than text "r",
    # and use "with" so the handle is closed even if a read fails
    with open(qq1_filename, "rb") as qq1_file:
        file_bytes = os.path.getsize(qq1_filename)
        nframes = int(file_bytes / (4 * N))   # 4 bytes per float32
        if num_frame:
            print("number of frames found = %i " % nframes)
        raw_array = np.fromfile(qq1_file, dtype = np.float32)

    time_eles = [i * N for i in range(nframes)]
    # locate the slice of grid data belonging to frame kt
    if kt == nframes - 1:
        index_start, index_end = time_eles[kt] + 1, len(raw_array)
    else:
        index_start, index_end = time_eles[kt] + 1, time_eles[kt + 1]
    # pull out the header time
    t_now = raw_array[time_eles[kt]]
    # pull out the data relevant to time and reshape
    qq[:, :, 0] = raw_array[index_start:index_end].reshape((nx, ny + 1))

    qq2_filename = data_dir + "qq2.r4"
    with open(qq2_filename, "rb") as qq2_file:
        raw_array = np.fromfile(qq2_file, dtype = np.float32)
    # same frame offsets apply: qq2.r4 has the same layout as qq1.r4
    qq[:, :, 1] = raw_array[index_start:index_end].reshape((nx, ny + 1))

    return (t_now, qq)
#-------------------------------------------------------------------------------
# transform between layer fields to modal fields
def layers_to_modes(field_L1L2, constants):
    """
    Transform a two-layer field into its modal representation using the
    layer -> mode coefficients carried on ``constants``.

    Input:
      field_L1L2   field with the two layers stacked on the last axis
      constants    object providing vec11, vec12, vec21, vec22
    Output:
      field_btbc   field of the same shape with modes on the last axis
    """
    layer1 = field_L1L2[:, :, 0]
    layer2 = field_L1L2[:, :, 1]
    field_btbc = np.zeros(field_L1L2.shape)
    field_btbc[:, :, 0] = constants.vec11 * layer1 + constants.vec12 * layer2
    field_btbc[:, :, 1] = constants.vec21 * layer1 + constants.vec22 * layer2
    return field_btbc
#-------------------------------------------------------------------------------
def zonal_ave(infield):
    """
    Zonally average a 2d input field.  Assumes uniform spacing in x
    (axis = 1).

    Input:
      infield    2d input field for data
    Output:
      1d zonally averaged data
    """
    return np.mean(infield, axis = 1)
#-------------------------------------------------------------------------------
def zonal_demean(infield):
    """
    Subtract the zonal (axis = 1) mean from a 2d input field.

    Input:
      infield    2d input field for data
    Output:
      outfield   2d output field with its zonal mean removed
    """
    if infield.shape[1] < 10:
        print("WARNING: x-axis should be longer than this, check you are throwing in arrays of the right shape!")
    # broadcast the 1d zonal mean across the x-axis; this replaces the
    # original explicit loop over columns (the author's comment already
    # suggested broadcasting) with the same numerical result
    return infield - np.mean(infield, axis = 1)[:, np.newaxis]
#-------------------------------------------------------------------------------
def zonal_corr(f1_in, f2_in):
    """
    Correlation of two fields as defined by the zonal average: the
    pointwise product of their zonal-mean-removed parts.

    e.g.  (u, v)            -> Reynolds stress
          (u, u)            -> part of M in the geometric decomposition
          (u, u) and (v, v) -> the two pieces of eke

    Input:
      f1_in, f2_in   2d input fields
    Output:
      f1f2_out       2d product of the fluctuation fields
    """
    # de-mean both fields even if one of them has no zonal mean
    # (e.g. v = d(psi)/dx)
    return zonal_demean(f1_in) * zonal_demean(f2_in)
#-------------------------------------------------------------------------------
def zonal_eke(u_in, v_in):
    """
    Eddy kinetic energy defined via the zonal average:
    0.5 * (u'u' + v'v').

    Input:
      u_in, v_in   2d input velocity fields
    Output:
      eke          2d output eke field
    """
    return 0.5 * (zonal_corr(u_in, u_in) + zonal_corr(v_in, v_in))
#-------------------------------------------------------------------------------
def zonal_eke_int(uu_in, vv_in, parameters):
    """
    Domain-averaged eke, where eke is defined by the zonal average
    (give it the layer velocities).

    NOTE(review): the original author flagged this routine with
    "don't think this is right, redo this" -- in particular the layer
    weights look flipped (upper layer weighted by 1 - h1).  Behaviour is
    deliberately kept identical here.

    Input:
      uu_in, vv_in   2d layer input velocity fields
      parameters     collection of system parameters (provides h1)
    Output:
      eke_domavg     domain integrated value
    """
    h1 = parameters.h1
    eke_upper = zonal_eke(uu_in[:, :, 0], vv_in[:, :, 0])
    eke_lower = zonal_eke(uu_in[:, :, 1], vv_in[:, :, 1])
    # depth-weight the layer ekes (note the flipped weights, as original)
    eke = eke_upper * (1 - h1) + eke_lower * h1
    # average over the remaining dimensions (total depth = 1 not written in)
    return np.mean(np.mean(eke, axis = 0), axis = 0)
#-------------------------------------------------------------------------------
def zonal_ens(q_in):
    """
    Enstrophy field defined via the zonal average: 0.5 * q'q'.

    Input:
      q_in   2d input PV field
    Output:
      ens    2d output enstrophy field
    """
    return 0.5 * zonal_corr(q_in, q_in)
#-------------------------------------------------------------------------------
def zonal_ens_int(qq_in, parameters):
    """
    Domain-averaged enstrophy, where the enstrophy is defined by the
    zonal average (give it the layer PV).

    NOTE(review): the original author flagged this routine with
    "don't think this is right, redo this"; behaviour is kept identical.

    Input:
      qq_in        2d layer input PV field
      parameters   collection of system parameters (provides h1)
    Output:
      ens_domavg   domain integrated value
    """
    h1 = parameters.h1
    # depth-weight the per-layer enstrophies (weights as in the original)
    ens = zonal_ens(qq_in[:, :, 0]) * (1 - h1) + zonal_ens(qq_in[:, :, 1]) * h1
    # average over the remaining dimensions (divide by depth = 1 implicit)
    return np.mean(np.mean(ens, axis = 0), axis = 0)
#-------------------------------------------------------------------------------
def zonal_mode_extract(infield, mode_keep, low_pass = False):
    """
    Extract zonal Fourier modes from (y, x) data.  Assumes the data is
    periodic in axis = 1 (the x-direction) with the end point missing.

    With low_pass = False only mode `mode_keep' survives (mode_keep = 0
    gives the zonally averaged field); with low_pass = True all modes up
    to and including `mode_keep' are retained.

    Input:
      infield     2d layer input field
      mode_keep   the zonal mode of the data to be extracted
    Opt input:
      low_pass    keep modes 0..mode_keep instead of mode_keep alone
    Output:
      the filtered field, transformed back to physical space
    """
    spec = rfft(infield, axis = 1)
    # zero everything above the requested mode
    spec[:, mode_keep + 1:] = 0
    if not low_pass:
        # band-pass: also zero everything below it
        spec[:, :mode_keep] = 0
    return irfft(spec, axis = 1)
|
julianmak/pydra | wrapper/pydra_plot.py | #!/usr/bin/env python3
#
# JM, 30 Oct 2018
#
# some plotting functions relating to pydra
from pydra_analysis import *
import matplotlib.pyplot as plt
import numpy as np
# global matplotlib styling applied when this module is imported:
# serif text, Computer Modern mathtext, diverging red-blue colour map,
# and scientific notation for tick labels outside [1e-3, 1e3]
plt.rcParams["font.family"] = "DejaVu Serif"
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams["mathtext.rm"] = "serif"
plt.rcParams["image.cmap"] = "RdBu_r" # "*_r" is reverse of standard colour
plt.rcParams["axes.formatter.limits"] = [-3, 3]
#-------------------------------------------------------------------------------
# plot gamma_m and phi_m
def _panel(pos, data, y_vec, title, xlim = None, hide_y = False):
    """Draw one profile-vs-y panel at position `pos` of the 4x4
    subplot2grid layout; optionally fix x-limits and hide y tick labels."""
    ax = plt.subplot2grid((4, 4), pos, colspan = 1)
    ax.plot(data, y_vec)
    if xlim is not None:
        ax.set_xlim(xlim)
    if hide_y:
        ax.set_yticklabels([])
    ax.set_title(title)
    ax.grid()

def plot_geom_param(data_geom, y_vec, t_now):
    """
    Plot the geometric-decomposition diagnostics as 16 panels on a 4x4
    grid: rows 0-1 hold the momentum quantities (gamma_m, phi_m) in layer
    and modal (bt/bc) form; rows 2-3 hold the buoyancy and tilt
    quantities (gamma_b, phi_b, gamma_t, phi_t).

    The repeated subplot boilerplate of the original is factored into the
    _panel helper; panel positions, data, limits and titles are unchanged.

    Input:
      data_geom   object with fields gamma_m_L1L2, phi_m_L1L2,
                  gamma_m_btbc, phi_m_btbc, gamma_b, phi_b, gamma_t, phi_t
      y_vec       1d meridional coordinate to plot against
      t_now       time stamp shown in each panel title
    """
    # momentum things (layer index 1 is plotted in the top row)
    _panel((0, 0), data_geom.gamma_m_L1L2[:, 1], y_vec,
           r"$\gamma_{m, 1} (t = %g)$" % t_now, xlim = [0, 1.1])
    _panel((1, 0), data_geom.gamma_m_L1L2[:, 0], y_vec,
           r"$\gamma_{m, 2} (t = %g)$" % t_now, xlim = [0, 1.1])
    _panel((0, 2), data_geom.phi_m_L1L2[:, 1] / np.pi, y_vec,
           r"$\phi_{m, 1} / \pi (t = %g)$" % t_now, xlim = [-0.6, 0.6])
    _panel((1, 2), data_geom.phi_m_L1L2[:, 0] / np.pi, y_vec,
           r"$\phi_{m, 2} / \pi (t = %g)$" % t_now, xlim = [-0.6, 0.6])
    _panel((0, 1), data_geom.gamma_m_btbc[:, 0], y_vec,
           r"$\gamma_{m, \rm{bt}} (t = %g)$" % t_now,
           xlim = [0, 1.1], hide_y = True)
    _panel((1, 1), data_geom.gamma_m_btbc[:, 1], y_vec,
           r"$\gamma_{m, \rm{bc}} (t = %g)$" % t_now,
           xlim = [0, 1.1], hide_y = True)
    _panel((0, 3), data_geom.phi_m_btbc[:, 0] / np.pi, y_vec,
           r"$\phi_{m, \rm{bt}} / \pi (t = %g)$" % t_now, xlim = [-0.6, 0.6])
    _panel((1, 3), data_geom.phi_m_btbc[:, 1] / np.pi, y_vec,
           r"$\phi_{m, \rm{bc}} / \pi (t = %g)$" % t_now, xlim = [-0.6, 0.6])
    # buoyancy things
    _panel((2, 0), data_geom.gamma_b[:, 1], y_vec,
           r"$\gamma_{b, 1} (t = %g)$" % t_now)
    _panel((3, 0), data_geom.gamma_b[:, 0], y_vec,
           r"$\gamma_{b, 2} (t = %g)$" % t_now)
    _panel((2, 1), data_geom.phi_b / np.pi, y_vec,
           r"$\phi_{b} / \pi (t = %g)$" % t_now, xlim = [-1.1, 1.1])
    _panel((2, 2), data_geom.gamma_t[:, 1], y_vec,
           r"$\gamma_{t, 1} (t = %g)$" % t_now, xlim = [0, 1.1])
    _panel((3, 2), data_geom.gamma_t[:, 0], y_vec,
           r"$\gamma_{t, 2} (t = %g)$" % t_now, xlim = [0, 1.1])
    _panel((2, 3), data_geom.phi_t[:, 1] / np.pi, y_vec,
           r"$\phi_{t, 1} / \pi (t = %g)$" % t_now, xlim = [-0.1, 0.6])
    _panel((3, 3), data_geom.phi_t[:, 0] / np.pi, y_vec,
           r"$\phi_{t, 2} / \pi (t = %g)$" % t_now, xlim = [-0.1, 0.6])
#TODO: write a subfunction to plot the processed (e.g. alpha, sin(phi_b)) data
|
julianmak/pydra | native/testing_run.py | <gh_stars>0
#/usr/bin/env python3
#
# JM: 11 Apr 2018
#
# just a python script to test the running of stuff
from spectral import *
# grid dimensions for the test run
nx = 128
ny = 64
# domain lengths: 2*pi by pi
ellx = 6.28318530717958647692
elly = 3.14159265358979323846
# NOTE(review): init_spectral comes from "from spectral import *"; it is not
# visible from here whether it reads the module-level names assigned above
# or spectral's own parameters -- confirm these assignments take effect
init_spectral()
#factors, trig = initfft(n)
#print(trig)
#x = forfft(m, n, trig, factors)
#print(x)
|
MichaelLiut/UTM_PyGame | Zelle Graphics/MovingBox.py | # Course: Course ID
# Name: XXXXXXXX
# Student number: 0000000
# File: MovingBox.py
# This program creates a graphical window, displays a message and a square.
# After a click, the square starts rolling to the right. When it rolls all
# the way to the right, a message is displayed, and after a click, the
# program terminates.
from graphics import *
from time import sleep
from math import *
# class Ground -----------------------------------------------------------
class Ground:
    """The ground: a thin black bar along the bottom edge of the window."""

    def __init__(self, win, length, height):
        '''Create the ground in black colour as a thin long black rectangle
        drawn at the bottom of the window `win'.  `length' and `height'
        are the window's dimensions.'''
        top_left = Point(6, height - 6)
        bottom_right = Point(length - 6, height - 10)
        self.ground = Rectangle(top_left, bottom_right)
        self.ground.setFill('black')   # make it black
        self.ground.draw(win)          # display it in window win
#end of class Ground -----------------------------------------------------
# function rotate --------------------------------------------------------
def rotate(p1,p2,angle):
    ''' Return a new position of point p1 after rotating the segment
    <p1,p2> about p2 by `angle'.

    NOTE: `angle' is in DEGREES, not radians as the original docstring
    claimed -- the conversion alpha = angle*(pi/180) below (and the
    callers, which pass 5-degree steps) show this.

    Preconditions (from the geometry below):
      the x-coordinate of p1 must be <= the x-coordinate of p2
      (and strictly less: atan(t1/t2) divides by the horizontal leg),
      the y-coordinate of p2 must be <= the y-coordinate of p1,
      and the resulting inclination must stay <= pi/2. '''
    t1 = abs(p1.getY()-p2.getY())           # vertical leg of <p1,p2>
    t2 = abs(p1.getX()-p2.getX())           # horizontal leg of <p1,p2>
    beta = atan(t1/t2)                      # current inclination, in radians
    alpha = angle*(pi/180)                  # rotation step: degrees -> radians
    gamma = alpha+beta                      # new inclination, in radians
    r = sqrt(t1*t1+t2*t2)                   # segment length (unchanged by rotation)
    a = r*sin(gamma)                        # new vertical leg
    b = r*cos(gamma)                        # new horizontal leg
    dx = t2-b                               # shift of p1 in x
    dy = a-t1                               # shift of p1 in y (screen y grows downward)
    p3 = Point(p1.getX()+dx,p1.getY()-dy)   # rotated position of p1
    return p3
# end of function rotate -------------------------------------------------
# function complete ------------------------------------------------------
def complete(p1,p2,r,angle):
    ''' Given the bottom edge <p1,p2> of a square with side r, inclined
    at `angle' radians to the horizontal, compute and return the two
    remaining corners: p3 above p1 and p4 above p2, so that
    p1,p3,p4,p2 forms the square. '''
    shift_x = r*sin(angle)   # horizontal offset of the upper corners
    shift_y = r*cos(angle)   # vertical offset (screen y grows downward)
    p3 = Point(p1.getX()+shift_x,p1.getY()-shift_y)
    p4 = Point(p2.getX()+shift_x,p2.getY()-shift_y)
    return (p3,p4)
# end of function complete ----------------------------------------------
# function one_rotation -------------------------------------------------
def one_rotation(p1,p2,p3,p4,win):
    ''' Roll the square p1,p3,p4,p2 one quarter-turn to the right about
    its bottom-right corner p2, redrawing it every 5 degrees.  The side
    of the square is hard-coded to 100 (see the complete() call below).

    NOTE(review): the original docstring says "5 degrees every 0.1
    second", but the loop sleeps 0.01 s per step -- confirm which speed
    is intended. '''
    s = Polygon(p1,p3,p4,p2,p1)   # create the square
    s.setFill('red')              # make it red
    s.setOutline('red')
    s.draw(win)                   # display it in `win'
    totangle = angle = 5          # angle will be moving 5 degrees
    # the animation loop until the vertical line is reached
    while totangle <= 90:         # until we reach vertical
        p1 = rotate(p1,p2,angle)  # rotate <p1,p2> around p2 by 5 degrees
        # after rotating <p1,p2>, complete the whole square (side 100)
        p3,p4 = complete(p1,p2,100,totangle*(pi/180))
        s.undraw()                # remove from display the old square
        s = Polygon(p1,p3,p4,p2,p1)   # create the new (rotated) square
        s.setFill('red')          # make it red
        s.setOutline('red')
        s.draw(win)               # display it
        sleep(0.01)               # pause between 5-degree steps
        totangle += 5             # cumulate the angles
    # end of animation loop
    # after one full rotation, remove the square from display
    s.undraw()
# end of function one_rotation ------------------------------------------
# function main ---------------------------------------------------------
def main():
    ''' Create the window and the ground, show the square at the left,
    then on a mouse click roll it to the right one full rotation at a
    time; a final click closes the window. '''
    height = 400                  # required dimensions of the window
    length = 1000
    win = GraphWin("Rolling square",length,height)   # create the window
    ground = Ground(win,length,height)   # create and display the ground
    p1 = Point(20, height-10)            # create initial p1 and p2
    p2 = Point(p1.getX()+100,p1.getY())
    p3,p4 = complete(p1,p2,100,0)        # complete p1, p2 to square
    s = Polygon(p1,p3,p4,p2,p1)          # create the square
    s.setFill('red')                     # make it red
    s.setOutline('red')
    s.draw(win)                          # display it in `win'
    # create initial message in blue
    message = Text(Point(450,70), "A mouse click will start the square rolling")
    message.setTextColor('blue')
    message.draw(win)                    # display initial message
    win.getMouse()                       # wait for click
    s.undraw()                           # remove the square from display
    message.undraw()                     # remove the message from display
    # the animation loop until the square is in the right position
    while p2.getX() < 900:
        # BUG FIX: the original passed p3 twice -- one_rotation(p1,p2,p3,p3,win)
        # -- drawing a degenerate square; pass the fourth corner p4 instead
        one_rotation(p1,p2,p3,p4,win)    # rotate the square 1 rotation
        p1 = Point(p1.getX()+100,p1.getY())   # shift all corners 100 right
        p2 = Point(p2.getX()+100,p2.getY())
        p3 = Point(p3.getX()+100,p3.getY())
        p4 = Point(p4.getX()+100,p4.getY())
    # end of animation loop
    s = Polygon(p1,p3,p4,p2,p1)          # create the final square
    s.setFill('red')                     # make it red
    s.setOutline('red')
    s.draw(win)                          # display it in 'win'
    # create final message in blue
    message = Text(Point(450,70), "A mouse click will terminate the program")
    message.setTextColor('blue')
    message.draw(win)                    # display final message
    win.getMouse()                       # wait for click
    win.close()                          # close the window
# end of function main --------------------------------------------------

# executable program
main()
|
MichaelLiut/UTM_PyGame | Zelle Graphics/Train.py | <gh_stars>0
# Course: Course ID
# Name: XXXXXXXX
# Student number: 0000000
# File: Train.py
# This program creates a train in the right end of a graphics window.
# After a click, the train moves left as far as it can and then stops.
# After a click, the train backs back to its original position.
# After a click, the graphics windows closes and the program terminates
# import all names from module graphics
from graphics import *
# import name sleep from module time
from time import sleep
# Wheels are common to BoxCar, PassengerCar, Caboose, and Locomotive
class Wheels:
    # constructor of class Wheels
    def __init__(self, win, b, length, height, r, colour):
        """ Creates wheels for a car whose box left-top
        corner is located at the point b, the box dimensions
        are length and height. r is the wheels radius. The
        wheels are drawn in the graphics window win in the
        colour given in the variable colour """
        x = length - 4*r   # the length of the part not taken by the wheels
        if x <= 0 :        # the wheels are too big for the box
            message = Text(Point(300,100),"wrong wheel radius")
            message.draw(win)
            # NOTE(review): returning here leaves self.w1/self.w2 undefined,
            # so a later call to move() raises AttributeError -- confirm
            # this error path is acceptable or guard move() accordingly
            return
        # so the wheel size is OK
        # the wheels are placed evenly |--O--O--|
        x = int(x) / 3                 # x  x  x
        # NOTE(review): in Python 3 this "/" is true division, so x (and the
        # wheel centres) can be non-integral -- harmless for drawing, but
        # the int() truncation suggests "//" may have been intended
        c1 = Point(b.getX()+x+r,b.getY()+height)         #center of the 1st wheel
        c2 = Point(b.getX()+length-x-r,b.getY()+height)  #center of the 2nd wheel
        self.w1 = Circle(c1,r)      # create the first wheel
        self.w1.setFill(colour)     # give it required colour
        self.w1.draw(win)           # draw it
        self.w2 = Circle(c2,r)      # create the second wheel
        self.w2.setFill(colour)     # give it required colour
        self.w2.draw(win)           # draw it
    #end of constructor

    # method move -- move the wheels dx along x axis and dy along y axis
    def move(self, dx, dy):
        self.w1.move(dx, dy)        # move the first wheel (it is a Circle)
        self.w2.move(dx, dy)        # move the second wheel (it is a Circle)
#end of class Wheels
# Box is common to BoxCar, PassengerCar, Caboose, and Locomotive
class Box:
    """Rectangular car body drawn in a given colour."""

    # the constructor
    def __init__ (self, win, b, length, height, colour):
        """ Creates the box for a car whose left-top corner is located
        at the point b; the box dimensions are length and height.
        The box is drawn in the graphics window win in the
        colour given in the variable colour """
        # bottom-right corner of the body
        bottom_right = Point(b.getX()+length,b.getY()+height)
        self.box = Rectangle(b, bottom_right)
        self.box.setFill(colour)
        self.box.draw(win)

    # method move -- moves the box dx along x axis and dy along y axis
    def move(self, dx, dy):
        self.box.move(dx, dy)   # the body is a single Rectangle
#end of class Box
# BoxCar uses Wheels and Box
class BoxCar:
    """A freight car: wheels, a coloured box, and a sliding door."""

    def __init__ (self, win, b, length, height, radius, box_colour,
                  wheel_colour):
        """Create and draw a box car at point b with the given box
        dimensions, wheel radius and colours, in the window win."""
        # Wheels go first so the box overlaps their upper halves.
        self.wheels = Wheels(win, b, length, height, radius, wheel_colour)
        self.box = Box(win, b, length, height, box_colour)
        # The door occupies the middle third of the box, from a third of
        # the way down to the bottom edge.
        top_left = Point(b.getX() + length/3, b.getY() + height/3)
        bottom_right = Point(b.getX() + (2*length)/3, b.getY() + height)
        self.door = Rectangle(top_left, bottom_right)
        self.door.draw(win)

    def move(self, dx, dy):
        """Move the whole car dx along the x axis and dy along the y axis."""
        self.wheels.move(dx, dy)
        self.box.move(dx, dy)
        self.door.move(dx, dy)
# PassengerCar uses Wheels and Box
class PassengerCar:
    """A passenger car: wheels, a box, two black doors, three blue windows."""

    def __init__ (self, win, b, length, height, radius, box_colour,
                  wheel_colour):
        """ Creates a passenger car with a box given by b, length, height
            and wheels given by b, length, height, and radius.
            The box is to have colour box_colour, the wheels are
            to have colour wheel_colour. The car is drawn in
            the graphics window win """
        # Wheels go first so the box overlaps their upper halves.
        self.wheels = Wheels(win, b, length, height, radius, wheel_colour)
        self.box = Box(win, b, length, height, box_colour)
        # The facade is laid out on an 11-unit grid: gap, door, gap,
        # window, gap, window, gap, window, gap, door, gap -- each
        # feature one unit (d) wide.  The original built all five
        # rectangles by copy-paste; build them with one helper instead.
        d = length / 11
        top_y = b.getY() + height/4            # all features start here
        window_bottom = top_y + height/4       # windows span a quarter
        door_bottom = top_y + (2*height)/4     # doors span half the box
        def _feature(x1, y2, colour):
            # Build, fill and draw one rectangle of width d starting at x1.
            r = Rectangle(Point(x1, top_y), Point(x1 + d, y2))
            r.setFill(colour)
            r.draw(win)
            return r
        # Attribute names kept for backward compatibility.
        self.door1 = _feature(b.getX() + d, door_bottom, 'black')
        self.window1 = _feature(b.getX() + 3*d, window_bottom, 'blue')
        self.window2 = _feature(b.getX() + 5*d, window_bottom, 'blue')
        self.window3 = _feature(b.getX() + 7*d, window_bottom, 'blue')
        self.door2 = _feature(b.getX() + 9*d, door_bottom, 'black')

    def move(self, dx, dy):
        """Move the whole car dx along the x axis and dy along the y axis."""
        self.wheels.move(dx, dy)
        self.box.move(dx, dy)
        for part in (self.door1, self.window1, self.window2,
                     self.window3, self.door2):
            part.move(dx, dy)
# Caboose uses Wheels and Box
class Caboose:
    """A caboose: wheels, an observation bubble on the roof, and a box."""

    def __init__(self, win, b, length, height, radius, box_colour,
                 wheel_colour, bubble_colour):
        """Create and draw a caboose whose box is given by b, length and
        height (colour box_colour), wheels of the given radius (colour
        wheel_colour), and a roof bubble (colour bubble_colour) whose
        size is derived from the box dimensions."""
        # Draw order matters: wheels, then bubble, then box on top.
        self.wheels = Wheels(win, b, length, height, radius, wheel_colour)
        # The bubble straddles the roof line, centred on the box.
        upper = Point(b.getX() + length/4, b.getY() - height/4)
        lower = Point(b.getX() + (3*length)/4, b.getY() + height/4)
        self.bubble = Oval(upper, lower)
        self.bubble.setFill(bubble_colour)
        self.bubble.draw(win)
        self.box = Box(win, b, length, height, box_colour)

    def move(self, dx, dy):
        """Move the whole caboose dx along the x axis and dy along y."""
        self.wheels.move(dx, dy)
        self.bubble.move(dx, dy)
        self.box.move(dx, dy)
# uses Box and Wheels
class Locomotive:
    """The engine: wheels, box body, smokestack, cabin and cowcatcher."""

    def __init__(self, win, b, length, height, radius, box_colour,
                 cowcatcher_colour, wheel_colour, smokestack_colour,
                 cabin_colour):
        """Create and draw a locomotive at point b in win.  All part
        sizes are derived from the box dimensions (length, height);
        each part's colour comes from the matching *_colour argument."""
        # Wheels first so the box overlaps their upper halves.
        self.wheels = Wheels(win, b, length, height, radius, wheel_colour)
        self.box = Box(win, b, length, height, box_colour)
        # Smokestack: a small chimney near the front of the roof.
        top_left = Point(b.getX() + length/8, b.getY() - height/3)
        bottom_right = Point(b.getX() + length/4, b.getY())
        self.smokestack = Rectangle(top_left, bottom_right)
        self.smokestack.setFill(smokestack_colour)
        self.smokestack.draw(win)
        # Cabin: a taller structure over the back third of the box.
        top_left = Point(b.getX() + (2*length)/3, b.getY() - height/2)
        bottom_right = Point(b.getX() + length, b.getY())
        self.cabin = Rectangle(top_left, bottom_right)
        self.cabin.setFill(cabin_colour)
        self.cabin.draw(win)
        # Cowcatcher: a triangle whose tip sticks out length/6 ahead
        # (to the left) of the box.
        lower = Point(b.getX(), b.getY() + height)
        tip = Point(b.getX() - length/6, b.getY() + height)
        self.cowcatcher = Polygon(b, lower, tip)
        self.cowcatcher.setFill(cowcatcher_colour)
        self.cowcatcher.draw(win)

    def move(self, dx, dy):
        """Move every part dx along the x axis and dy along the y axis."""
        for part in (self.wheels, self.box, self.smokestack,
                     self.cabin, self.cowcatcher):
            part.move(dx, dy)
class Tracks:
    """The rails: a thin black strip along the bottom of the window."""

    def __init__(self, win, height):
        """Create and draw black tracks of the given thickness in win.
        The strip spans the 1200-pixel-wide window, ending at y = 196."""
        self.tracks = Rectangle(Point(4, 196 - height), Point(1196, 196))
        self.tracks.setFill('black')
        self.tracks.draw(win)
class Train:
    """A seven-car train assembled right-to-left, from the trailing
    caboose back at x=1136 to the locomotive at the front (left)."""

    def __init__(self, win):
        """ assemble a train and draw it in the graphics window win """
        length = 60   # length of every car's box
        height = 30   # height of every car's box
        radius = 8    # wheel radius used by every car
        spacing = 64  # horizontal distance between consecutive cars
        p = Point(1136, 155)  # position of the last (rightmost) car
        self.trail_caboose = Caboose(win, p, length, height, radius, 'red', 'black', 'blue')
        p = Point(p.getX() - spacing, p.getY())
        # Fix: the two box cars previously hard-coded 8 instead of using
        # the shared `radius` constant like every other car.
        self.BoxCar1 = BoxCar(win, p, length, height, radius, 'blue', 'black')
        p = Point(p.getX() - spacing, p.getY())
        self.BoxCar2 = BoxCar(win, p, length, height, radius, 'yellow', 'black')
        p = Point(p.getX() - spacing, p.getY())
        self.PassengerCar1 = PassengerCar(win, p, length, height, radius, 'green', 'black')
        p = Point(p.getX() - spacing, p.getY())
        self.PassengerCar2 = PassengerCar(win, p, length, height, radius, 'red', 'black')
        p = Point(p.getX() - spacing, p.getY())
        self.lead_caboose = Caboose(win, p, length, height, radius, 'blue', 'black', 'red')
        p = Point(p.getX() - spacing, p.getY())
        self.locomotive = Locomotive(win, p, length, height, radius, 'black', 'brown', 'black', 'gray', 'gray')
        # The cowcatcher sticks out length/6 ahead of the locomotive box.
        self.start = p.getX() - length/6

    def move(self, dx, dy):
        """Move every car dx along x and dy along y; track the front."""
        for car in (self.trail_caboose, self.BoxCar1, self.BoxCar2,
                    self.PassengerCar1, self.PassengerCar2,
                    self.lead_caboose, self.locomotive):
            car.move(dx, dy)
        self.start = self.start + dx

    def getStart(self):
        """Return the x coordinate of the front of the train."""
        return self.start
# function main()
def main():
    """Run the show: draw the scene, then animate the train on clicks."""
    win = GraphWin("Train",1200,200)   # the graphics window
    tracks = Tracks(win,2)             # the rails along the bottom
    train = Train(win)                 # the train, parked at the right
    original = train.getStart()        # remember the starting position
    message = Text(Point(600,30),"click to start")
    message.draw(win)
    win.getMouse()                     # wait for a click to begin
    message.undraw()
    # Roll left one pixel at a time until the nose reaches the left edge.
    while train.getStart() >= 4:
        train.move(-1,0)
        sleep(0.000001)
    message.setText("click to reverse")
    message.draw(win)
    win.getMouse()                     # wait for a click to reverse
    message.undraw()
    # Back up two pixels at a time until the train regains its old spot.
    while train.getStart() < original:
        train.move(2,0)
        sleep(0.000001)
    message.setText("click to terminate")
    message.draw(win)
    win.getMouse()                     # wait for the final click
    win.close()                        # close the graphics window
# Script entry point: runs the whole animation on import/execution.
main() # execute main()
# program terminates
|
MichaelLiut/UTM_PyGame | PyGame Graphics/Particle Flocking System - The Beginning/main.py | <reponame>MichaelLiut/UTM_PyGame
################################################################################
############################ UTM PyGame Workshop ###############################
############################## June 5, 2018 ##################################
################################################################################
############################## <NAME> ##################################
########################## <EMAIL> ############################
################################################################################
############################### main.py ####################################
################################################################################
"""
To install PyGame: "python3 -m pip install -U pygame --user"
PyGame Documentation: https://www.pygame.org/docs/
"""
# Package Imports
import pygame
from pygame.locals import *
import sys # not required for PyGame, but useful for exiting windows
import particle # this is a class that student's must build
import random # for acquiring a random integer N
"""
GLOBAL VARIABLES
"""
# Total number of particles to be created
global totalParticles
totalParticles = 500
# Particle List -- referring to Class Particle
global particleList
particleList = []
# PyGame Window
global screen
# Create Particle Function
def createParticle(screenSizeX, screenSizeY):
    """Build one random Particle sized to the screen, draw it on the
    global `screen`, and return it."""
    new_particle = particle.Particle(screenSizeX, screenSizeY)
    # pygame.draw.circle args: (surface, colour, centre, radius, thickness)
    pygame.draw.circle(screen, new_particle.getColour(),
                       new_particle.getPosition(),
                       new_particle.getSize(), new_particle.getSize())
    return new_particle
# Update Particle Function
def updateParticle(newParticle):
    """Redraw an existing particle at its current position on the
    global `screen`."""
    # pygame.draw.circle args: (surface, colour, centre, radius, thickness)
    pygame.draw.circle(screen, newParticle.getColour(),
                       newParticle.getPosition(),
                       newParticle.getSize(), newParticle.getSize())
# Action occurs on mouse event on Left Click
def leftClick(coordinate):
    """Handle a left mouse click at `coordinate` (x, y): clear the screen,
    pick a random "flock" radius, nudge nearby particles, and redraw."""
    print("LEFT CLICK WAS PRESSED")
    screen.fill(pygame.Color("black")) # need to clear the screen
    # Generate random range to "flock" from
    particleRangeFromClick = (random.randint(0, 99) % (screenSizeX/3) + 50)
    for i in range (0, len(particleList)):
        position = particleList[i].getPosition() # Particle's Tuple Position
        particleX = position[0] # Particle's X-coordinate
        particleY = position[1] # Particle's Y-coordinate
        particleX -= coordinate[0] # remove the distance between click
        particleY -= coordinate[1] # remove the distance between click
        # in the event the distance between the click and the particle is
        # within the random range, we will alter the particle's location
        # NOTE(review): the deltas are signed (no abs()), so every particle
        # left of / above the click also passes this test -- confirm intent.
        if ((particleX <= particleRangeFromClick) and (particleY <= particleRangeFromClick)):
            particleX -= 10
            particleY -= 10
            # NOTE(review): this stores the click-relative offset, not an
            # absolute screen position -- looks like a bug; verify.
            particleList[i].setPosition((particleX,particleY))
        updateParticle(particleList[i]) # update all particles
    pygame.display.update() # refresh the PyGame Window
# Action occurs on mouse event on Right Click
def rightClick(coordinate):
    """Handle a right mouse click at `coordinate` (x, y).

    Currently only logs the event; no particle behaviour is attached.
    """
    print("RIGHT CLICK WAS PRESSED")
"""
Let's call these 'global values'
"""
pygame.init() # initializes the PyGame modules
# Set a PyGame Window to a size of 640 x 480
screenSizeX = 640
screenSizeY = 480
screen = pygame.display.set_mode((screenSizeX,screenSizeY))
# Give the PyGame Window a name
pygame.display.set_caption('Particle Flocking System')
# Particle Group -- a PyGame Sprite Group
# particleGroup = pygame.sprite.Group()
"""
Some Notes:
- The point (0,0) is on the top left corner of the plane/screen.
- X-axis increases from left-to-right, Y-axis increases from top-to-bottom
- All changes to the screen must be updated: pygame.display.update()
"""
colour = (0,0,0) # operates with RGB, max value of 255.
screen.fill(colour) # sets the screen colour to black
pygame.display.update() # refresh the PyGame Window
for i in range (0, totalParticles):
particleList.append(createParticle(screenSizeX,screenSizeY))
# newParticle = createParticle(screen,screenSizeX,screenSizeY)
# particleList.append(newParticle[0])
# particleGroup.add(newParticle[1])
while (True):
pygame.display.update() # refresh the PyGame Window
# pygame.event.wait() # waits on an action event before looping again
# get all user 'events'
for event in pygame.event.get():
# for a button click event (i.e. key is pressed down)
if event.type == pygame.KEYDOWN:
if event.key == K_q: # on 'q' quit application
pygame.quit(); sys.exit();
if event.key == K_ESCAPE: # on 'escape; quit application
pygame.quit(); sys.exit();
# for a mouse click event
if event.type == pygame.MOUSEBUTTONDOWN:
occurance = pygame.mouse.get_pressed() # returns a triple type
# (left, centre, right)
coordinate = pygame.mouse.get_pos() # get (X,Y) coordinate
print(coordinate)
# Left Click
if (occurance[0] == True):
leftClick(coordinate)
# Right Click
elif (occurance[2] == True):
rightClick(coordinate)
# to quit the application
if event.type == pygame.QUIT:
# and the game close the window
pygame.quit(); sys.exit();
################################################################################
################################## END #####################################
################################################################################
|
MichaelLiut/UTM_PyGame | Zelle Graphics/SmallTrain.py | # Course: Course ID
# Name: XXXXXXXX
# Student number: 0000000
# File: SmallTrain.py
# This program creates a graphical window, displays a message. After a click,
# a tractor-trailer emerges from behind the right edge of the window and moves
# from right to left. As it moves, it slowly gets smaller. It will disappear
# behind the left edge of the window. After a click, the program terminates.
from graphics import *
from time import sleep
# class Road -----------------------------------------------------------
class Road:
    """The road: a thin black strip across the bottom of the window."""

    def __init__(self, win, height):
        """Create and draw a black road of the given thickness at the
        bottom of the 1200x200 graphics window `win'."""
        self.road = Rectangle(Point(4, 196 - height), Point(1196, 196))
        self.road.setFill('black')
        self.road.draw(win)
# class TrailerWheels --------------------------------------------------
class TrailerWheels:
    """The four wheels of a trailer, drawn as filled black circles."""

    # Wheel centres, as multiples of the scale factor d along the x axis.
    _OFFSETS = (7, 14, 26, 33)

    def __init__(self, win, p, d):
        """ `win' is the window, `p' is the point on the ground where the
            beginning of the trailer is, `d' is the scaling factor. """
        # Each wheel has radius 3d and its centre sits 3d above ground.
        # (The original repeated the same four-line block per wheel.)
        self._wheels = []
        for off in self._OFFSETS:
            centre = Point(p.getX() + off*d, p.getY() - 3*d)
            wheel = Circle(centre, 3*d)
            wheel.setFill('black')
            wheel.draw(win)
            self._wheels.append(wheel)
        # Keep the historical attribute names for backward compatibility.
        self.w1, self.w2, self.w3, self.w4 = self._wheels

    def undraw(self):
        """Remove all four wheels from the display."""
        for wheel in self._wheels:
            wheel.undraw()
#class TrailerBox ------------------------------------------------------
class TrailerBox:
    """The rectangular body of a trailer."""

    def __init__(self,win,p,d,colour):
        """Create and draw the trailer body in `win'.  `p' is the ground
        point at the front of the trailer, `d' the scale factor and
        `colour' the fill (and outline) colour."""
        top_left = Point(p.getX(), p.getY() - 23*d)
        bottom_right = Point(p.getX() + 40*d, p.getY() - 3*d)
        self.box = Rectangle(top_left, bottom_right)
        self.box.setFill(colour)
        self.box.setOutline(colour)
        self.box.draw(win)

    def undraw(self):
        """Remove the trailer body from the display."""
        self.box.undraw()
#class Trailer ---------------------------------------------------------
class Trailer:
    """A complete trailer: a coloured body on top of four wheels."""

    def __init__(self,win,p,d,colour):
        """Create and draw a trailer at ground point `p' with scale `d'
        and body colour `colour' in the window `win'."""
        # Body first, then wheels, so the wheels stay fully visible.
        self.tbox = TrailerBox(win,p,d,colour)
        self.wheels = TrailerWheels(win,p,d)

    def undraw(self):
        """Remove the whole trailer from the display."""
        self.tbox.undraw()
        self.wheels.undraw()
#class TractorWheels ---------------------------------------------------
class TractorWheels:
    """The three wheels of the tractor, drawn as filled black circles."""

    # Wheel centres, as multiples of the scale factor d along the x axis.
    _OFFSETS = (6, 25, 34)

    def __init__(self,win,p,d):
        """ `win' is the window, `p' is the point on the ground where the
            beginning of the tractor is, `d' is the scaling factor."""
        # Each wheel has radius 4d and its centre sits 4d above ground.
        # (The original repeated the same four-line block per wheel.)
        self._wheels = []
        for off in self._OFFSETS:
            centre = Point(p.getX() + off*d, p.getY() - 4*d)
            wheel = Circle(centre, 4*d)
            wheel.setFill('black')
            wheel.draw(win)
            self._wheels.append(wheel)
        # Keep the historical attribute names for backward compatibility.
        self.w1, self.w2, self.w3 = self._wheels

    def undraw(self):
        """Remove all three wheels from the display."""
        for wheel in self._wheels:
            wheel.undraw()
#class Tractor ---------------------------------------------------------
class Tractor:
    """The tractor: a two-box body, a blue cab window with a sloped
    windshield, and three wheels."""

    def __init__(self,win,p,d,colour):
        """Create and draw a tractor at ground point `p' with scale `d'
        and body colour `colour' in the window `win'."""
        # Lower body: a long box sitting just above the wheels.
        tl = Point(p.getX(), p.getY() - 11*d)
        br = Point(p.getX() + 40*d, p.getY() - 3*d)
        self.box1 = Rectangle(tl, br)
        self.box1.setFill(colour)
        self.box1.setOutline(colour)
        self.box1.draw(win)
        # Upper body: the cab on top of the lower box.
        tl = Point(p.getX() + 7*d, p.getY() - 20*d)
        br = Point(p.getX() + 16*d, p.getY() - 11*d)
        self.box2 = Rectangle(tl, br)
        self.box2.setFill(colour)
        self.box2.setOutline(colour)
        self.box2.draw(win)
        # Cab window: a blue rectangle inside the cab ...
        tl = Point(p.getX() + 7*d, p.getY() - 19*d)
        br = Point(p.getX() + 15*d, p.getY() - 11*d)
        self.w1 = Rectangle(tl, br)
        self.w1.setFill('blue')
        self.w1.setOutline('blue')
        self.w1.draw(win)
        # ... plus a blue triangle forming the sloped windshield.
        corner = Point(p.getX() + 7*d, p.getY() - 11*d)
        nose = Point(p.getX() + 3*d, p.getY() - 11*d)
        self.w2 = Polygon(tl, corner, nose, tl)
        self.w2.setFill('blue')
        self.w2.setOutline('blue')
        self.w2.draw(win)
        # Wheels last so nothing covers them.
        self.wheels = TractorWheels(win,p,d)

    def undraw(self):
        """Remove every part of the tractor from the display."""
        for part in (self.box1, self.box2, self.w1, self.w2, self.wheels):
            part.undraw()
#class TractorTrailer --------------------------------------------------
class TractorTrailer:
    """A rig made of one tractor pulling two trailers."""

    def __init__(self,win,p,d,colour1,colour2,colour3):
        """Create and draw the rig in `win': a tractor of colour1 at
        ground point `p', followed by trailers of colour2 and colour3,
        all scaled by `d'."""
        self.tractor = Tractor(win,p,d,colour1)
        # Each following unit starts 42*d further along the road.
        p = Point(p.getX() + 42*d, p.getY())
        self.trailer1 = Trailer(win,p,d,colour2)
        p = Point(p.getX() + 42*d, p.getY())
        self.trailer2 = Trailer(win,p,d,colour3)

    def undraw(self):
        """Remove the whole rig from the display."""
        self.tractor.undraw()
        self.trailer1.undraw()
        self.trailer2.undraw()
# function main --------------------------------------------------------
def main():
    """Animate a shrinking tractor-trailer crossing the window."""
    win = GraphWin("Tractor trailer",1200,200)   # the graphics window
    road = Road(win,2)                           # the road at the bottom
    message = Text(Point(580,70),"A mouse click will start the tractor-trailer")
    message.setTextColor('blue')
    message.draw(win)
    win.getMouse()                 # wait for the starting click
    message.undraw()
    p = Point(1200,193)            # rig starts just beyond the right edge
    d = 4                          # initial scale factor
    t = TractorTrailer(win,p,d,'orange','green','grey')
    # Animation loop: keep stepping while any part of the rig
    # (124*d long) is still visible.
    while p.getX() + 124*d > 0:
        sleep(0.1)
        t.undraw()                          # erase the old drawing
        d -= 0.02                           # shrink the scale a little
        p = Point(p.getX() - 10, p.getY())  # slide 10 pixels left
        t = TractorTrailer(win,p,d,'orange','green','grey')
    message = Text(Point(600,70),"A mouse click will terminate the program")
    message.setTextColor('blue')
    message.draw(win)
    win.getMouse()                 # wait for the final click
    win.close()                    # close the window
# Script entry point: runs the whole animation on import/execution.
main()
# program terminates
|
MichaelLiut/UTM_PyGame | Zelle Graphics/UnBlockMe.py | <reponame>MichaelLiut/UTM_PyGame
# Course: Course ID
# Name: XXXXXXXX
# Student number: 0000000
# File: UnBlockMe.py
# import names from module graphics
from graphics import *
from time import sleep
import sys
# draw the frame of the grid in the window win
# x,y are coordinates of the top left corner of the first field
def frame(x,y,win):
    """Draw the gray frame around the 6x6 grid in the window win.

    x, y are the coordinates of the top-left corner of the first field.
    The right side is drawn in two pieces, leaving a gap (the exit)
    in front of the second row of fields.
    """
    # Each entry is the (top-left, bottom-right) corner pair of one strip.
    # (The original repeated the same four-line draw block five times.)
    strips = [
        ((x-8, y-8),    (x+248, y)),      # top edge
        ((x-8, y-8),    (x, y+248)),      # left edge
        ((x-8, y+240),  (x+248, y+248)),  # bottom edge
        ((x+240, y-8),  (x+248, y+80)),   # right edge above the exit gap
        ((x+240, y+120), (x+248, y+248)), # right edge below the exit gap
    ]
    for (x1, y1), (x2, y2) in strips:
        p = Rectangle(Point(x1, y1), Point(x2, y2))
        p.setFill('gray')
        p.setOutline('gray')
        p.draw(win)
#draw the grid
def grid(win):
    """Draw the gray interior grid lines of the 6x6 board in win."""
    x, y = 20, 20  # top-left corner of the first field
    for d in (40, 80, 120, 160, 200):
        horizontal = Line(Point(x, y+d), Point(x+240, y+d))
        horizontal.setFill('gray')
        horizontal.draw(win)
        vertical = Line(Point(x+d, y), Point(x+d, y+240))
        vertical.setFill('gray')
        vertical.draw(win)
# returns coordinates of the top left and bottom right corners of the field with if fid
# x and y are coordinates of the left top corner of the first field
def fieldCoord(x,y,fid):
    """Return the (top-left, bottom-right) Points of the 40x40 field `fid'.

    Fields are numbered 1..36 row by row on a 6x6 grid whose first
    field's top-left corner is at (x, y).  Replaces the original
    six-branch if/elif chain with direct row/column arithmetic.
    """
    row, col = divmod(fid - 1, 6)   # zero-based row and column of fid
    tl = Point(x + col*40, y + row*40)
    br = Point(x + (col+1)*40, y + (row+1)*40)
    return (tl, br)
# set field with id fid to color and green border. The type of the border is
# determined by btype
# btype '[' indicates border type for left end field of a horizontal block
# btype ']' indicates border type for right end field of a horizontal block
# btype '=' indicates border type for middle field of a horizontal block
# btype '^' indicates border type for top field of a vertical block
# btype 'v' indicates border type for bottom field of a vertical block
# btype '"' indicates border type for a middle field of a vertical block
# setField returns a list of rectangles drawn in that field
# x and y are coordinates of the top left corner of the first field
def setField(x,y,win,fid,color,border,btype):
    """Paint field `fid' with `color' inside a `border'-coloured rim.

    btype selects which sides of the inner rectangle are inset by 4px
    (i.e. where the border shows through):
      '[' left end, ']' right end, '=' middle of a horizontal block;
      '^' top end,  'v' bottom end, '"' middle of a vertical block.
    x and y are the coordinates of the top-left corner of the first
    field.  Returns the two Rectangles drawn (outer border, inner fill).
    """
    tl, br = fieldCoord(x,y,fid)
    r1 = Rectangle(tl, br)
    r1.setFill(border)
    r1.draw(win)
    # Insets of the inner rectangle per btype: (left, top, right, bottom).
    insets = {
        '[': (4, 4, 0, 4),
        ']': (0, 4, 4, 4),
        '^': (4, 4, 4, 0),
        'v': (4, 0, 4, 4),
        '=': (0, 4, 0, 4),
    }
    # Any other value (normally '"') gets the vertical-middle insets.
    left, top, right, bottom = insets.get(btype, (4, 0, 4, 0))
    r2 = Rectangle(Point(tl.getX() + left, tl.getY() + top),
                   Point(br.getX() - right, br.getY() - bottom))
    r2.setFill(color)
    r2.draw(win)
    return r1, r2
# returns id of the field where the mouse click was made or 0 if the click was
# not on any field
# x and y are coordinates of the top left corner of the first field
def whichFieldClicked(x,y,click):
    """Return the id (1..36) of the field containing `click', or 0 if
    the click landed outside every field.

    x and y are the coordinates of the top-left corner of the first field.
    """
    cx, cy = click.getX(), click.getY()
    for fid in range(1, 37):
        tl, br = fieldCoord(x, y, fid)
        inside_x = tl.getX() < cx < br.getX()
        inside_y = tl.getY() < cy < br.getY()
        if inside_x and inside_y:
            return fid
    return 0
# Field ids of the four edges of the 6x6 grid (ids run 1..36 row by row).
Left = [1,7,13,19,25,31] # list of id's on leftmost column of the grid
Top = [1,2,3,4,5,6] # list of id's on the topmost row of the grid
Right = [6,12,18,24,30,36] # list of id's on the rightmost column of the grid
Bottom = [31,32,33,34,35,36] # list of id's on the bottom row of the grid
# returns the id of the neighbouring field to the right of fid or 0
def rightNeighbour(fid):
    """Return the id of the field to the right of `fid', or 0 at the edge."""
    return 0 if fid in Right else fid + 1
# returna the id of the neighbouring field to the left of fid or 0
def leftNeighbour(fid):
    """Return the id of the field to the left of `fid', or 0 at the edge."""
    return 0 if fid in Left else fid - 1
# returns the id of the neighbouring field above fid or 0
def upNeighbour(fid):
    """Return the id of the field above `fid', or 0 at the top edge.

    Bug fix: the parameter was named `field' while the body used `fid',
    so every call raised NameError.
    """
    if fid in Top:
        return 0
    else:
        return fid-6
# returna the id of the neighbouring field below fid or 0
def downNeighbour(fid):
    """Return the id of the field below `fid', or 0 at the bottom edge."""
    return 0 if fid in Bottom else fid + 6
# returns True if fields with id's fid1 and fid2 are in the same row
def sameRow(fid1,fid2):
    """Return True if fields fid1 and fid2 lie in the same grid row.

    Rows are 6 fields wide and ids run 1..36 row by row, so two ids
    share a row exactly when they map to the same zero-based row index.
    Bug fix: the old test `abs(fid1-fid2) < 6' wrongly reported ids in
    adjacent rows (e.g. 6 and 7) as being in the same row.
    """
    return (fid1 - 1) // 6 == (fid2 - 1) // 6
# returns True if fields with id's fid1 and fid2 are in the same column
def sameColumn(fid1,fid2):
    """Return True if fields fid1 and fid2 lie in the same grid column."""
    # Columns repeat every 6 ids, so two ids share a column exactly when
    # their gap is a multiple of 6.
    return abs(fid2 - fid1) % 6 == 0
# class Block --------------------------------------------------------
class Block:
    """A 2- or 3-field sliding block on the 6x6 puzzle grid.

    Attributes:
        x, y:        pixel coordinates of the top-left corner of field 1
        win:         the GraphWin the block is drawn in
        length:      number of fields occupied (2 or 3)
        orien:       'h' (horizontal) or 'v' (vertical)
        color:       fill colour of the block's inner rectangles
        fids:        occupied field ids, ordered left-to-right / top-to-bottom
        fields:      one [outer, inner] Rectangle pair per occupied field
        hilite_flag: True while the block is drawn highlighted (green)
    """
    def __init__(self, x, y, win, fid, length, orien, color):
        """Create and draw a block whose head occupies field *fid*.

        On invalid arguments (bad length/orientation, or a block that
        would stick out of the grid) the window is closed, a message is
        printed and the whole program exits.
        """
        self.x = x
        self.y = y
        self.win = win
        self.length = length
        self.color = color
        self.fids = [fid]
        self.hilite_flag = False
        # head field: '[' opens a horizontal block, '^' a vertical one
        if orien == 'h':
            r1, r2 = setField(x,y,win,fid,color,'black','[')
        else:
            r1, r2 = setField(x,y,win,fid,color,'black','^')
        self.fields = []
        self.fields.append([r1,r2])
        self.orien = orien
        # validate the arguments; abort the program on error
        if not (length == 2 or length == 3):
            win.close()
            print("wrong length of a block -- must be 2 or 3")
            sys.exit()
        if not (orien == 'h' or orien == 'v'):
            win.close()
            print("wrong orientation -- must be 'h' or 'v'")
            sys.exit()
        # second field -- a 0 neighbour means the block would leave the grid
        if orien == 'h':
            f = rightNeighbour(fid)
        else:
            f = downNeighbour(fid)
        if f == 0:
            win.close()
            print("block would stick out")
            sys.exit()
        self.fids.append(f)
        if length == 2:
            # tail field: ']' closes a horizontal block, 'v' a vertical one
            if orien == 'h':
                r1, r2 = setField(x,y,win,f,color,'black',']')
            else:
                r1, r2 = setField(x,y,win,f,color,'black','v')
            self.fields.append([r1,r2])
        else: # length == 3
            # middle field: '=' for horizontal, '"' for vertical
            if orien == 'h':
                r1, r2 = setField(x,y,win,f,color,'black','=')
            else:
                r1, r2 = setField(x,y,win,f,color,'black','"')
            self.fields.append([r1,r2])
            # third (tail) field of a length-3 block
            if orien == 'h':
                f = rightNeighbour(self.fids[1])
            else:
                f = downNeighbour(self.fids[1])
            if f == 0:
                win.close()
                print("block would stick out")
                sys.exit()
            self.fids.append(f)
            if orien == 'h':
                r1, r2 = setField(x,y,win,f,color,'black',']')
            else:
                r1, r2 = setField(x,y,win,f,color,'black','v')
            self.fields.append([r1,r2])
    #end __init__
    def draw(self):
        """Draw every rectangle of the block, then redraw the grid lines."""
        self.fields[0][0].draw(self.win)
        self.fields[0][1].draw(self.win)
        self.fields[1][0].draw(self.win)
        self.fields[1][1].draw(self.win)
        if self.length == 3:
            self.fields[2][0].draw(self.win)
            self.fields[2][1].draw(self.win)
        grid(self.win)
    #end draw
    def undraw(self):
        """Remove every rectangle of the block, then redraw the grid lines."""
        self.fields[0][0].undraw()
        self.fields[0][1].undraw()
        self.fields[1][0].undraw()
        self.fields[1][1].undraw()
        if self.length == 3:
            self.fields[2][0].undraw()
            self.fields[2][1].undraw()
        grid(self.win)
    #end undraw
    def hilite(self):
        """Highlight the block (green outer rectangles); no-op if already lit."""
        if self.hilite_flag:
            return
        # recolouring requires an undraw/redraw cycle
        self.undraw()
        self.fields[0][0].setFill('green')
        self.fields[1][0].setFill('green')
        if self.length == 3:
            self.fields[2][0].setFill('green')
        self.draw()
        self.hilite_flag = True
    #end hilite
    def unhilite(self):
        """Restore the normal (black) outline; no-op if not highlighted."""
        if self.hilite_flag == False:
            return
        self.undraw()
        self.fields[0][0].setFill('black')
        self.fields[1][0].setFill('black')
        if self.length == 3:
            self.fields[2][0].setFill('black')
        self.draw()
        self.hilite_flag = False
    #end unhilite
    def switchHilite(self):
        """Toggle between highlighted and normal state."""
        if self.hilite_flag:
            self.unhilite()
        else:
            self.hilite()
    #end switchHilite
    # move the block to new target, the target is a list of 2 or 3 fields that the
    # block should occupy after the move
    # after the move, the block is in unhighlighted state
    def move(self,target):
        """Redraw the block on the fields listed in *target* (ordered
        left-to-right / top-to-bottom) and clear any highlighting."""
        self.undraw()
        self.fids = target
        self.fields = []
        # head field
        if self.orien == 'h':
            r1, r2 = setField(self.x,self.y,self.win,self.fids[0],self.color,'black','[')
        else:
            r1, r2 = setField(self.x,self.y,self.win,self.fids[0],self.color,'black','^')
        self.fields.append([r1,r2])
        # second field: middle glyph for length 3, tail glyph for length 2
        if self.length == 3:
            if self.orien == 'h':
                r1, r2 = setField(self.x,self.y,self.win,self.fids[1],self.color,'black','=')
            else:
                r1, r2 = setField(self.x,self.y,self.win,self.fids[1],self.color,'black','"')
        else:
            if self.orien == 'h':
                r1, r2 = setField(self.x,self.y,self.win,self.fids[1],self.color,'black',']')
            else:
                r1, r2 = setField(self.x,self.y,self.win,self.fids[1],self.color,'black','v')
        self.fields.append([r1,r2])
        # tail field of a length-3 block
        if self.length == 3:
            if self.orien == 'h':
                r1, r2 = setField(self.x,self.y,self.win,self.fids[2],self.color,'black',']')
            else:
                r1, r2 = setField(self.x,self.y,self.win,self.fids[2],self.color,'black','v')
            self.fields.append([r1,r2])
        grid(self.win)
        self.hilite_flag = False
    #endmove
#end class Block ------------------------------------------------------
# creates the initial configuration of blocks
# x and y are the coordinates of the left top corner of the first field
def initConfig(x,y,win):
    """Create and draw the initial block layout; return the list of blocks.

    x and y are the pixel coordinates of the top-left corner of field 1.
    Each entry of the layout table is (head field id, length, orientation,
    colour); the single red block is the one the player must free.
    """
    layout = [
        (1,  3, 'h', 'brown'),
        (9,  3, 'v', 'brown'),
        (13, 2, 'h', 'red'),
        (19, 2, 'v', 'brown'),
        (31, 3, 'h', 'brown'),
        (6,  3, 'v', 'brown'),
        (23, 2, 'h', 'brown'),
        (29, 2, 'v', 'brown'),
    ]
    blocks = [Block(x, y, win, fid, length, orien, colour)
              for fid, length, orien, colour in layout]
    grid(win)
    return blocks
# returns a block that was clicked or 0 if the click was not on a block
# x and y are the coordinates of the left top corner of the first field
# blocks is a list of all blocks that are on the grid
def whichBlockClicked(x,y,click,blocks):
    """Return the block occupying the clicked field, or 0 if none.

    x and y locate the top-left corner of field 1; blocks is the list of
    all blocks currently on the grid.  A block's ``fids`` list contains
    exactly one id per occupied field, so a simple membership test covers
    both lengths.
    """
    fid = whichFieldClicked(x, y, click)
    if fid == 0:
        return 0
    for candidate in blocks:
        if fid in candidate.fids:
            return candidate
    return 0
#end whichBlockClicked
# check whether block b if moved to the target would intersect any other block
# blocks is a list of all blocks that are on the grid
# returns True or False
def inter(b,blocks,target):
    """Return True if moving block *b* onto the fields in *target* would
    overlap any other block in *blocks*; *b* itself is ignored."""
    wanted = set(target)
    for other in blocks:
        if other == b:
            continue
        if wanted.intersection(other.fids):
            return True
    return False
#end inter
# b is a block to be moved, blocks is a list of all blocks that are on the grid
# destin is id of the field the block should be moved to
# destinOK checks if it would be a legal move, i.e. destin must be in the same
# row if it b is a horizontal block, or in the same column if b is a vertical block
# it determines if the move should be left or right, or up or down
# it checks whether moving the block would intersect with any other block using inter()
# if all is OK, destinOk returns target, i.e. the list of field id's the block should
# occupy after the move
# if the move cannot be made, destinOK returns an empty list
def destinOK(b,blocks,destin):
    """Validate moving block *b* so that it covers field *destin*.

    Returns the list of field ids the block would occupy after the move
    (ordered left-to-right / top-to-bottom), or [] when the move is
    illegal: destin is one of b's own fields, destin is not in b's row
    (horizontal block) / column (vertical block), or the moved block
    would overlap another block in *blocks*.

    NOTE(review): only the final position is overlap-checked via inter();
    fields the block would slide across are not -- confirm jumping over
    other blocks is intended.
    """
    # a click on one of the block's own fields is not a move
    if destin == b.fids[0] or destin == b.fids[1]:
        return []
    if b.length == 3:
        if destin == b.fids[2]:
            return []
    if b.orien == 'h':
        if not sameRow(b.fids[0],destin):
            return []
        # moving left: destin becomes the block's head field
        if destin < b.fids[0]:
            target = [destin,destin+1]
            if b.length == 3:
                target.append(destin+2)
        else:
            # moving right: destin becomes the block's tail field
            if b.length == 3:
                target = [destin-2,destin-1,destin]
            else:
                target = [destin-1,destin]
        if inter(b,blocks,target):
            return []
        else:
            return target
    if b.orien == 'v':
        if not sameColumn(b.fids[0],destin):
            return []
        # moving up: destin becomes the head field (row step is 6 ids)
        if destin < b.fids[0]:
            target = [destin,destin+6]
            if b.length == 3:
                target.append(destin+12)
        else:
            # moving down: destin becomes the tail field
            if b.length == 3:
                target = [destin-12,destin-6,destin]
            else:
                target = [destin-6,destin]
        if inter(b,blocks,target):
            return []
        else:
            return target
    return []
#end destinOK
# undraw all blocks on the grid
def undrawBlocks(blocks):
    """Erase every block in *blocks* from the window."""
    for blk in blocks:
        blk.undraw()
# function main()
def main():
    """Run the 'Unblock me' puzzle until the red block reaches the exit.

    Interaction is two-click: the first click selects (highlights) a
    block, the second click names the destination field.  The game ends
    when the red block occupies fields 17-18 (the exit row).
    """
    win = GraphWin("Unblock me",280,280)
    # pixel coordinates of the top-left corner of field 1
    x = 20
    y = 20
    frame(x,y,win)
    grid(win)
    blocks = initConfig(x,y,win)
    # 0 means no block is currently selected
    hilited_block = 0
    # main loop -- until the red block is successfully moved out of grid
    while True:
        click = win.getMouse()
        if hilited_block == 0:
            # first click: select a block, if one was hit
            b = whichBlockClicked(x,y,click,blocks)
            if b == 0:
                continue
            else:
                hilited_block = b
                hilited_block.hilite()
                continue
        else:
            # second click: try to move the selected block there
            fid = whichFieldClicked(x,y,click)
            if fid == 0:
                continue
            target = destinOK(hilited_block,blocks,fid)
            if target == []:
                # illegal move -- just deselect the block
                hilited_block.unhilite()
                hilited_block = 0
                continue
            else:
                # so the move is fine, check if it is a final move
                hilited_block.move(target)
                a = hilited_block.color == 'red'
                a = a and hilited_block.fids[0] == 17
                a = a and hilited_block.fids[1] == 18
                if a:
                    # red block reached the exit -- flash it and quit
                    for i in range(3):
                        sleep(0.3)
                        hilited_block.switchHilite()
                    undrawBlocks(blocks)
                    win.close()
                    return
                else:
                    hilited_block = 0
                    continue
            #endif
    #endwhile
#end main
# the executable part of the program starts here
main() # execute function main()
|
MichaelLiut/UTM_PyGame | PyGame Graphics/Particle Flocking System - The Beginning/particle.py | <gh_stars>0
################################################################################
############################ UTM PyGame Workshop ###############################
############################## June 5, 2018 ##################################
################################################################################
############################## <NAME> ##################################
########################## <EMAIL> ############################
################################################################################
############################# particle.py ##################################
################################################################################
# Imports
import random # random.randint(a, b) returns a random integer N,
# such that a <= N <= b.
# import twoDMath # import our custom 2D Math Library
class Particle:
    """A single particle with a randomized position, colour, size, and range.

    An instance is created from the window dimensions; all random draws
    use the module-level ``random`` generator.
    """

    def __init__(self, screenSizeX, screenSizeY):
        """Place a new particle randomly inside a screenSizeX x screenSizeY window."""
        # random position inside the window
        self.position = (random.randint(0, screenSizeX),
                         random.randint(0, screenSizeY))
        # random RGB colour
        red = random.randint(0, 255)
        green = random.randint(0, 255)
        blue = random.randint(0, 255)
        self.colour = (red, green, blue)
        # random radius between 1 and 8
        self.size = random.randint(1, 8)
        # influence range, scaled to a third of the window width
        self.particleRange = random.randint(0, 99) % (screenSizeX/3) + 50
        # constant movement speed
        self.speed = 3

    def getColour(self):
        """Return the particle's (r, g, b) colour."""
        return self.colour

    def setColour(self, colour):
        """Replace the particle's colour."""
        self.colour = colour

    def getPosition(self):
        """Return the particle's (x, y) position."""
        return self.position

    def setPosition(self, position):
        """Replace the particle's position."""
        self.position = position

    def getSize(self):
        """Return the particle's radius."""
        return self.size

    def setSize(self, size):
        """Replace the particle's radius."""
        self.size = size

    def getRange(self):
        """Return the particle's influence range."""
        return self.particleRange
################################################################################
################################## END #####################################
################################################################################ |
MichaelLiut/UTM_PyGame | PyGame Graphics/Another Game/main.py | import sys
import pygame
from pygame.locals import *
import random
SCREEN_SIZE = (640, 480)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# The world class keeps a list of all active entities
class World(object):
    """Container for all active game entities plus the player.

    Attributes:
        entities: list of every live entity (player included)
        player:   the player entity, set by add_player()
    """
    def __init__(self):
        self.entities = []
    def add_player(self, entity):
        """Add *entity* to the world and remember it as the player."""
        self.entities.append (entity)
        self.player = entity
    def add_entity(self, entity):
        """Add *entity* to the world."""
        self.entities.append (entity)
    def remove_entity (self, entity):
        """Remove *entity* from the world."""
        self.entities.remove (entity)
    def process(self, time_passed):
        """Advance every entity by *time_passed* milliseconds.

        Bug fix: iterate over a snapshot of the entity list.  Entities
        remove themselves (via remove_entity) from inside their own
        process() on collision/arrival; mutating the list during direct
        iteration silently skipped the entity following each removal.
        """
        seconds = time_passed / 1000.0
        for entity in list(self.entities): # snapshot: entities may self-remove
            entity.process (seconds)
    def render(self, surface):
        """Blank *surface* and draw every entity onto it."""
        surface.fill (BLACK) # blank the screen
        for entity in self.entities: # and render all entities
            entity.render(surface)
# Base class for game entities
# good objects, bad objects, and the player descend from this class
class GameEntity(object):
    """Base class for on-screen objects (Good, Bad, Player descend from it).

    Entities travel leftwards towards x == 0 at their spawn height and are
    removed when they either reach the destination or collide with the
    player.

    NOTE(review): process() and render() reference ``self.step`` and
    ``self.step_chan``, which are created only by the subclasses'
    loop_sound() / __init__ -- the base class relies on that; confirm no
    subclass reaches these paths without setting them.
    """
    def __init__(self, world, image, location, speed):
        self.world = world
        self.image = image
        self.location = location
        # entities move to the left edge, keeping their spawn height
        self.destination = [0, location[1]]
        self.speed = speed
        self.loop_sound()
    def loop_sound(self):
        # subclasses start their looping movement sound here
        pass
    def render(self, surface):
        # render the entity (i.e. copy it to the screen buffer)
        x, y = self.location
        surface.blit(self.image, (x, y))
        if self != self.world.player:
            # position the entity's sound to match its position on-screen
            rvol = float(x) / SCREEN_SIZE[0]
            lvol = 1 - rvol
            if self.step_chan: self.step_chan.set_volume (lvol, rvol)
    def process(self, time_passed):
        # movement and collision detection
        x, y = self.location
        w, h = self.image.get_width(), self.image.get_height()
        player = self.world.player
        p_x, p_y = player.location
        p_w, p_h = player.image.get_width(), player.image.get_height()
        rect1 = pygame.Rect (x, y, w, h)
        rect2 = pygame.Rect (p_x, p_y, p_w, p_h)
        if rect1.colliderect (rect2):
            # touched the player: score it, silence it, and despawn
            self.hit_player()
            self.step.stop()
            self.world.remove_entity (self)
            return
        if self.speed > 0 and self.location != self.destination:
            # move left, but never past the destination (min() clamps)
            distance = abs(self.destination[0] - self.location[0])
            moved = min(distance, time_passed * self.speed)
            self.location[0] -= moved
            if self.location == self.destination:
                self.step.stop()
                self.world.remove_entity (self)
class Good(GameEntity):
    """A collectible entity that awards the player points on contact."""
    def __init__(self, world, image, location):
        # random horizontal speed
        speed = random.randint (50, 500)
        GameEntity.__init__ (self, world, image, location, speed)
    def hit_player (self):
        # award points on contact with the player
        player = self.world.player
        player.hit_good()
    def loop_sound(self):
        # looping movement sound, initially panned fully right
        # (entities spawn at the right edge of the screen)
        self.step = pygame.mixer.Sound ('g_move.wav')
        self.step_chan = self.step.play(-1)
        if self.step_chan: self.step_chan.set_volume (0, 1)
class Bad(GameEntity):
    """A hazard entity that costs the player points on contact."""
    def __init__(self, world, image, location):
        # random horizontal speed
        speed = random.randint (50, 500)
        GameEntity.__init__ (self, world, image, location, speed)
    def hit_player (self):
        # penalize the player on contact
        player = self.world.player
        player.hit_bad()
    def loop_sound(self):
        # looping movement sound, initially panned fully right
        # (entities spawn at the right edge of the screen)
        self.step = pygame.mixer.Sound ('b_move.wav')
        self.step_chan = self.step.play(-1)
        if self.step_chan: self.step_chan.set_volume (0, 1)
class Player(GameEntity):
    """The user-controlled entity: moves vertically, collects objects.

    ``dir`` is -1 (up), 1 (down) or 0 (idle) and is set by the key
    handlers in run().  ``step`` must be created before the base-class
    __init__ runs, because GameEntity.__init__ calls loop_sound().
    """
    def __init__(self, world, image, location):
        self.speed = 300
        self.step = pygame.mixer.Sound ('step.wav')
        self.step_playing = False
        self.dir = 0
        self.points = 0
        self.good_hits = 0 # counter for collecting good objects
        self.bad_hits = 0 # counter for collecting bad objects
        GameEntity.__init__ (self, world, image, location, self.speed)
    def hit_good(self):
        # +10 points per good object
        self.points += 10
        self.good_hits += 1
    def hit_bad (self):
        # -5 points per bad object, but never below zero
        self.points = max(self.points - 5, 0)
        self.bad_hits += 1
    def process(self, time_passed):
        # vertical movement only, clamped to the screen
        # NOTE(review): x, y below are unpacked but never used
        x, y = self.location
        moved = time_passed * self.speed
        moved = self.dir * moved
        start_location = self.location[1]
        self.location[1] += moved
        if self.location[1] < 0:
            self.location[1] = 0
        if self.location[1] > SCREEN_SIZE[1] - self.image.get_height():
            self.location[1] = SCREEN_SIZE[1] - self.image.get_height()
        # start/stop the footstep loop when movement starts/stops
        if self.location[1] != start_location:
            if not self.step_playing:
                self.step_playing = True
                self.step.play (-1)
        elif self.step_playing:
            self.step_playing = False
            self.step.stop()
    def loop_sound(self):
        # unlike good/bad objects, the player has no constant looping sound
        pass
##### Mainline #####
def rand_height(h):
    """Return a random spawn height in [0, h-1]."""
    return random.randrange(h)
# top-level function to run/play the game
def run():
    """Initialize pygame and run the game loop until the window is closed."""
    pygame.init()
    # NOTE(review): mixer.pre_init is normally called *before* pygame.init()
    # to take effect -- confirm the intended mixer settings apply here.
    pygame.mixer.pre_init (44100, 16, 2, 2048)
    pygame.mixer.init()
    screen = pygame.display.set_mode(SCREEN_SIZE)
    pygame.display.set_caption ("Scrolling Game")
    world = World()
    w, h = SCREEN_SIZE
    clock = pygame.time.Clock()
    u_image = pygame.image.load ('player.png')
    g_image = pygame.image.load ('good.png')
    b_image = pygame.image.load ('bad.png')
    player = Player(world, u_image, [0, 0])
    world.add_player (player)
    while True:
        # input: window close quits; up/down keys steer the player
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            if event.type == KEYDOWN:
                if event.key == K_UP:
                    player.dir = -1
                if event.key == K_DOWN:
                    player.dir = 1
            if event.type == KEYUP:
                if event.key == K_UP or event.key == K_DOWN:
                    player.dir = 0
        # cap the frame rate at 30 fps; time_passed is in milliseconds
        time_passed = clock.tick(30)
        # randomly spawn bad (~1/125 per frame) and good (~1/200) objects
        # at the right edge, at a random height
        if random.randint(1, 125) == 1:
            bad = Bad(world, b_image, [w - 1, rand_height(h)])
            world.add_entity (bad)
        if random.randint(1, 200) == 1:
            good = Good(world, g_image, [w - 1, rand_height(h)])
            world.add_entity (good)
        world.process (time_passed)
        world.render (screen)
        if pygame.font: # assuming font object is available...
            gamefont = pygame.font.Font(None, 28)
            # status bar text
            ps = "Score: " + str(player.points)
            go = " Good Objects: " + str(player.good_hits)
            bo = " Bad Objects: " + str(player.bad_hits)
            totals = [ps, go, bo]
            text = gamefont.render(' '.join(totals), True, WHITE)
            textpos = text.get_rect(centerx = SCREEN_SIZE[0] / 2, centery =
                SCREEN_SIZE[1] - gamefont.get_height())
            screen.blit(text, textpos)
        pygame.display.update()
|
MichaelLiut/UTM_PyGame | PyGame Graphics/PyGame_Example.py | <gh_stars>0
# Minimal PyGame demo: draw a circle and a line of text, then wait for quit.
import sys
import pygame
from pygame.locals import *
pygame.init() # initializes the PyGame modules
# Set a PyGame Window to a size of 640 x 480
screenSizeX = 640
screenSizeY = 480
screen = pygame.display.set_mode((screenSizeX,screenSizeY))
# Circle Creation
centre = (100, 100)
radius = 20
colourBlue = (0, 0, 255)
pygame.draw.circle (screen, colourBlue, centre, radius)
pygame.display.update()
# Text Output
font = pygame.font.SysFont("times", 46)
mytext = font.render ("Thanks for playing!", False, (255, 255, 255))
screen.blit (mytext, (250, 250))
pygame.display.update()
# Event loop: block until the window is closed
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit()
|
MichaelLiut/UTM_PyGame | Zelle Graphics/Train_Skeleton.py | # Course: Course Code
# Name: FirstName LastName
# Student Number: ##########
# File: Train_YourName.py
# Description: What does this file do?
# import all names from module graphics
from graphics import *
# import name sleep from module time
import time
# Wheels are common to BoxCar, PassengerCar, Caboose, and Locomotive
class Wheels:
# constructor of class Wheels
def __init__(self, win, b, length, height, r, colour):
""" Creates wheels for a car whose box left-top
corner is located at the point 'b', the box dimensions
are 'length' and 'height'. r is the wheels radius. The
wheels are drawn in the graphics window 'win' in the
colour given in the variable 'colour' """
""" Your code goes here """
#end of constructor
# method move -- moves the wheels dx along x axis and dy along y axis
def move(self, dx, dy):
""" Your code goes here """
#end of class Wheels
# Box is common to BoxCar, PassengerCar, Caboose, and Locomotive
class Box:
# the constructor
def __init__ (self, win, b, length, height, colour):
""" Creates box for a car whose left-top corner is located
at the point 'b', the box dimensions are 'length' and 'height'.
The box is drawn in the graphics window 'win' in the
colour given in the variable 'colour' """
""" Your code goes here """
#end of constructor
# method move -- moves the box dx along x axis and dy along y axis
def move(self, dx, dy):
""" Your code goes here """
#end of move
#end of class Box
# BoxCar uses Wheels and Box
class BoxCar:
# the constructor
def __init__ (self, win, b, length, height, radius, box_colour,
wheel_colour):
""" Creates a boxcar with a box given by 'b', 'length', 'height'
and wheels given by 'b', 'length', 'height', and 'radius'.
The box is to have colour 'box_colour', the wheels are
to have colour 'wheel_colour'. The box car is drawn in
the graphics window 'win' """
""" Your code goes here """
#end of constructor
# method move -- moves box car dx along the x axis and dy along the y axis
def move(self, dx, dy):
""" Your code goes here """
#end of move
#end of class BoxCar
# PassengerCar uses Wheels and Box
class PassengerCar:
# the constructor
def __init__ (self, win, b, length, height, radius, box_colour,
wheel_colour):
""" Creates a passenger car with a box given by 'b', 'length',
'height' and wheels given by 'b', 'length', 'height', and
'radius'. The box is to have colour 'box_colour', the wheels
are to have colour 'wheel_colour'. The box car is drawn in
the graphics window 'win' """
""" Your code goes here """
#end of constructor
# method move -- moves the passenger car dx along x axis and dy along y axis
def move(self, dx, dy):
""" Your code goes here """
#end of move
#end of class PassengerCar
# Caboose uses Wheels and Box
class Caboose:
def __init__(self, win, b, length, height, radius, box_colour,
wheel_colour, bubble_colour):
""" Creates a caboose with a box given by 'b', 'length', 'height' and
of colour 'box_colour'. The wheels are given by 'b', 'length',
'height', 'radius' and are of colour 'wheel_colour'. The bubble's
dimensions are determined from the dimensions of the box, its
colour is given by 'bubble_colour'. The caboose is drawn in the
graphics window 'win' """
""" Your code goes here """
#end of constructor
#method move -- moves caboose dx along x axis and dy along y axis
def move(self, dx, dy):
""" Your code goes here """
#end of move
#end of class Caboose
# uses Box and Wheels
class Locomotive:
# the constructor
def __init__(self, win, b, length, height, radius, box_colour,
cowcatcher_colour, wheel_colour, smokestack_colour,
cabin_colour):
""" Creates a locomotive with a box given by 'b', 'length', 'height'
and of colour 'box_colour'. The wheels are given by 'b', 'length',
'height', 'radius' and are of colour 'wheel_colour'. The cowcatcher
colour is given by 'cowcatcher_colour'. The smokestack's colour
is given by 'smokestack_colour'. The cabin colour is given by
'cabin_colour'. The dimensions of the cowcatcher are determined
from the dimension of the box. The dimensions of the smokestack
are determined from the dimensions of the box. The dimensions of
the cabin are determined from the dimensions of the box. The
locomotive is drawn in the graphics window 'win' """
""" Your code goes here """
#end of constructor
# method move -- moves locomotive dx along x axis and dy along y axis
def move(self, dx, dy):
""" Your code goes here """
#end of move
#end of class Locomotive
class Tracks:
# the constructor
def __init__(self, win, height):
""" create tracks in black colour """
# create tracks as a thin long rectangle
self.tracks = Rectangle(Point(4,196-height),Point(1196,196))
self.tracks.setFill('black') # make it black
self.tracks.draw(win) # draw it
#end of class Tracks
class Train:
# the constructor
def __init__(self, win):
""" assemble a train and draw it in the graphics window win """
length = 60
height = 30
radius = 8
""" Your code goes here """
#end of constructor
# method move -- moves train dx along x axis and dy along y-axis
def move(self, dx, dy):
""" Your code goes here """
#end of move
#method getStart -- returns the position of the front of the train
def getStart(self):
""" Your code goes here """
#end of getStart()
#end of class Train
# function main()
def main():
win = GraphWin("Train",1200,200) # create the required graphics window
tracks = Tracks(win,2) # create and display the tracks
train = Train(win) # create and display train in the starting position
original = train.getStart() # remember starting position
""" Your code goes here """
#end of main()
# the executable part of the program
main() # execute main()
# program terminates
|
theoofman/encoder | src/toastencoder/encoder.py | <reponame>theoofman/encoder
def encode(msg, key):
    """Shift every character of *msg* forward by *key* code points."""
    return "".join(chr(ord(ch) + key) for ch in msg)
def decode(msg, key):
    """Shift every character of *msg* back by *key* code points (inverse of encode)."""
    return "".join(chr(ord(ch) - key) for ch in msg)
|
Neckster/conformance-checking-augur-smart-contract | petri_extractor.py | import os
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.algo.discovery.inductive import algorithm as inductive_miner
from pm4py.objects.petri.exporter import exporter as pnml_exporter
from pm4py.visualization.petrinet import visualizer as pn_visualizer
# Discover a Petri net from the preprocessed Augur event log with the
# Inductive Miner, export it as PNML, and render it to an SVG.
log = xes_importer.apply(os.path.join("logs", "log_augur_preprocessed.xes"))
# the miner returns the net plus its initial and final markings
net, initial_marking, final_marking = inductive_miner.apply(log)
# persist the discovered net for later conformance checking
pnml_exporter.apply(
    net,
    initial_marking,
    os.path.join("petri", "augur_preprocessed_discovered_petri.pnml"),
    final_marking=final_marking
)
# render the net (no decoration) as an SVG image for inspection
parameters = {pn_visualizer.Variants.WO_DECORATION.value.Parameters.FORMAT: "svg"}
gviz = pn_visualizer.apply(net, initial_marking, final_marking, parameters=parameters)
pn_visualizer.save(gviz, os.path.join("img", "augur_preprocessed_discovered_petri.svg"),)
|
Neckster/conformance-checking-augur-smart-contract | conformance_checker.py | import os
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.objects.petri.importer import importer as pnml_importer
from pm4py.algo.conformance.tokenreplay import algorithm as token_replay
from pm4py.algo.conformance.log_skeleton import algorithm as lsk_conformance
from pm4py.algo.conformance.alignments import algorithm as alignments
INF = 9999
log = xes_importer.apply(
os.path.join("logs", "log_augur_preprocessed.xes")
)
traces_len = len(log)
net, initial_marking, final_marking = pnml_importer.apply(
os.path.join("petri", "augur_final.pnml")
)
# TOKEN BASE REPLAY
replayed_traces = token_replay.apply(log, net, initial_marking, final_marking)
trace_fitness_sum = 0
for replayed_trace in replayed_traces:
if not replayed_trace['trace_is_fit']:
print(f"{replayed_trace}\n")
trace_fitness_sum += replayed_trace['trace_fitness']
avg_trace_fitness = trace_fitness_sum / len(replayed_traces)
print("\nTOKEN BASE REPLAY -> %f" % avg_trace_fitness)
# LOG SKELETON
skeleton = {
# ================================================================================================
# Contains the couples of activities that happen ALWAYS with the same frequency inside a trace.
'equivalence': {
('create market', 'finalize market'),
('create market', 'submit initial report'),
},
# ================================================================================================
# Contains the couples of activities (A,B) such that an occurrence of A is ALWAYS followed,
# somewhen in the future of the trace, by an occurrence of B.
'always_after': {
('create market', 'submit initial report'),
('fork market', 'finalize market'),
('create dispute', 'contribute to dispute')
},
# ================================================================================================
# Contains the couples of activities (B,A) such that an occurrence of B is ALWAYS preceded,
# somewhen in the past of the trace, by an occurrence of A.
'always_before': {
# Conditional activities in the loop
('purchase complete sets', 'create market'),
('claim trading proceeds', 'finalize market'),
('redeem dispute crowdsourcer', 'create dispute'),
('redeem dispute crowdsourcer', 'contribute to dispute'),
('redeem as initial reporter', 'submit initial report'),
('contribute to dispute', 'create dispute'),
('complete dispute', 'contribute to dispute'),
('fork market', 'complete dispute')
},
# ================================================================================================
# Contains the couples of activities (A,B) that NEVER happens together in the history of the trace.
'never_together': {
},
# ================================================================================================
# Contains the list of directly-follows relations of the log.
'directly_follows': {
},
# ================================================================================================
# Number of possible occurrences per trace.
'activ_freq': {
'create market': {1},
'submit initial report': {1},
'fork market': {0, 1},
'finalize market': {1},
'redeem as initial reporter': {0, 1},
# Activities in the loop
'purchase complete sets': range(INF),
'create dispute': range(INF),
'contribute to dispute': range(INF),
'complete dispute': range(INF),
'claim trading proceeds': range(INF),
'redeem dispute crowdsourcer': range(INF),
'transfer market': range(INF),
}
}
conf_result = lsk_conformance.apply(log, skeleton)
fit_traces = 0
for trace in conf_result:
fit_traces += trace['is_fit'] * 1
if not trace['is_fit']:
print(trace)
fraction_fitness = fit_traces / traces_len
print(
"\nLOG SKELETON (%d/%d | %d traces incorrect) -> %f" % (
fit_traces, traces_len, traces_len - fit_traces, fraction_fitness
)
)
alignment_traces_fitness = 0
aligned_traces = alignments.apply_log(log, net, initial_marking, final_marking)
for aligned_trace in aligned_traces:
alignment_traces_fitness += aligned_trace['fitness']
print(
"\nALIGNMENTS -> %f" % (alignment_traces_fitness / traces_len)
)
|
Neckster/conformance-checking-augur-smart-contract | log_preprocessor.py | <reponame>Neckster/conformance-checking-augur-smart-contract<filename>log_preprocessor.py<gh_stars>0
import os
from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.objects.log.exporter.xes import exporter as xes_exporter
from pm4py.objects.log.log import Trace, EventLog
# Load the raw Augur event log and keep only clean traces of finalized
# markets: traces with negative (overflowed) stake/redeem amounts are
# dropped, zero-fee redeem events are filtered out, and traces whose
# market never finalized are discarded.
log = xes_importer.apply(os.path.join('logs', 'log_augur.xes'))
preprocessed_log = EventLog()
# carry the log-level attributes over unchanged
for name in log.attributes:
    preprocessed_log.attributes[name] = log.attributes[name]
for trace in log:
    t = Trace()
    for name in trace.attributes:
        t.attributes[name] = trace.attributes[name]
    skip_trace = False
    market_finalized = False
    for event in trace:
        # Prevents traces with overflowed integers
        if event['concept:name'] == 'contribute to dispute':
            if event['amountStaked'] < 0:
                skip_trace = True
                break
        if event['concept:name'] == 'redeem dispute crowdsourcer':
            if event['amountRedeemed'] < 0:
                skip_trace = True
                break
            # drop redeem events that paid no reporting fees
            if event['reportingFeesReceived'] <= 0:
                continue
        if event['concept:name'] == 'submit initial report':
            if event['amountStaked'] < 0:
                skip_trace = True
                break
        if event['concept:name'] == 'redeem as initial reporter':
            if event['amountRedeemed'] < 0:
                skip_trace = True
                break
            # drop redeem events that paid no reporting fees
            if event['reportingFeesReceived'] <= 0:
                continue
        if event['concept:name'] == 'finalize market':
            market_finalized = True
        t.append(event)
    # keep only complete, uncorrupted traces of finalized markets
    if not market_finalized or skip_trace:
        continue
    preprocessed_log.append(t)
xes_exporter.apply(preprocessed_log, os.path.join('logs', 'log_augur_preprocessed.xes'))
|
lubianat/topictagger | term2subject/term2subject.py | <gh_stars>0
import pandas as pd
from pandas import json_normalize
from gooey import Gooey
import requests
from wikidata2df import wikidata2df
import argparse
# Simple GUI based on Gooey
#
# https://github.com/chriskiehl/Gooey
@Gooey
def main():
parser = argparse.ArgumentParser(description='Add main subject statements based on Wikidata searches.')
parser.add_argument('-a','--term', required=False, nargs='+')
parser.add_argument('-m','--term_id', required=False, nargs='+')
args = vars(parser.parse_args())
def run_all_to_file(term, term_id):
url = prepare_url_for_search(term)
ids = pull_related_ids(url)
articles_dataframe = filter_for_instances_of_article(ids)
print_qs_to_file(articles_dataframe, term, term_id)
def run_all_to_prompt(term, term_id):
url = prepare_url_for_search(term)
ids = pull_related_ids(url)
articles_dataframe = filter_for_instances_of_article(ids)
print_qs_to_prompt(articles_dataframe, term, term_id)
def prepare_url_for_search(term):
term_for_url = term.replace(" ","%20")
term_for_url = "%22" +term_for_url + "%22"
url = f"https://www.wikidata.org/w/api.php?action=query&list=search&srsearch={term_for_url}&srlimit=500&srprop=size&formatversion=2&format=json"
return(url)
def pull_related_ids(url):
r = requests.get(url)
df = json_normalize(r.json()["query"]["search"])
ids = ["wd:"+a for a in df["title"]]
return(ids)
def filter_for_instances_of_article(ids):
items = "{"
for i in ids:
items = items + " " + i
items = items + " }"
articles = """
SELECT ?item ?itemLabel
WHERE
{
VALUES ?item """ + items + """.
?item wdt:P31 wd:Q13442814.
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
"""
articles_dataframe = wikidata2df(articles)
return(articles_dataframe)
def get_info(ids):
items = "{"
for i in ids[:3]:
items = items + " " + i
items = items + " }"
articles = """
SELECT ?item ?itemLabel ?itemDescription ?typeLabel
WHERE
{
VALUES ?item """ + items + """.
?item wdt:P31 ?type.
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
"""
articles_dataframe = wikidata2df(articles)
return(articles_dataframe)
def print_qs_to_file(articles_dataframe, term, term_id):
with open(term + ".qs", "w+") as f:
for i, row in articles_dataframe.iterrows():
s = row["item"]
p = "|P921|"
o = term_id
r = "|S887|"
ro = "Q69652283"
f.write(s + p + o + r + ro + "\n")
def print_qs_to_prompt(articles_dataframe, term, term_id):
with open(term + ".qs", "w+") as f:
for i, row in articles_dataframe.iterrows():
s = row["item"]
p = "|P921|"
o = term_id
r = "|S887|"
ro = "Q69652283"
print(s + p + o + r + ro + "\n")
if args["term"]:
url = prepare_url_for_search(args["term"][0])
ids = pull_related_ids(url)
df = get_info(ids)
print("The top 10 ids for your term:")
print(df.head(5))
if args["term"] and args["term_id"]:
run_all_to_prompt(args["term"][0], args["term_id"][0])
if __name__ == "__main__":
main()
|
blaulan/alfred-beancount | beancount.py | # -*- coding: utf-8 -*-
# @Author: <NAME> <<EMAIL>>
# @Date: 2020-02-28 16:48:17
# @Last Modified By: <NAME> <<EMAIL>>
# @Last Modified Time: 2020-03-02 22:47:36
import os
import re
import sys
import math
import glob
import json
from datetime import datetime
from pypinyin import lazy_pinyin
from rapidfuzz import fuzz, process
class Beancount:
    """
    APPEND OR MODIFY BEANCOUNT ENTRY VIA ALFRED:
    bean_add: add new entry to beancount file;
    bean_clear: clear an entry in beancount file by adding #clear tag;
    bean_cache: create a cache file with all accounts and payees with frequency.
    """

    def __init__(self, config_path='beancount.json'):
        # read settings from config file
        with open(config_path, 'r') as setting_file:
            self.settings = json.load(setting_file)
        # environment variables override the config file
        for v in ['default_currency', 'ledger_folder', 'default_ledger']:
            if v in os.environ:
                self.settings[v] = os.environ[v]
        # fall back to bundled icons when a configured icon path is missing
        for k,v in self.settings['icons'].items():
            if not os.path.isfile(v):
                self.settings['icons'][k] = './icons/{}.png'.format(k)
        # account/payee cache; populated lazily by bean_add or bean_cache
        self.accounts = []

    def bean_add(self, inputs):
        """Build Alfred result items for a new ledger entry.

        *inputs* is the space-split query in slot order:
        [from, to, payee, amount, tags, comment].  While the last typed slot
        is an account/payee, returns fuzzy-match suggestions for it; once
        the query is complete, returns a single item whose 'arg' is the
        rendered double-entry transaction text.
        """
        # lazily load the cached accounts, rebuilding the cache if absent
        if not self.accounts:
            try:
                with open(self.settings['temp_path'], 'r') as tempfile:
                    self.accounts = json.loads(tempfile.read())
            except IOError:
                self.accounts = self.bean_cache()
        params = ['from', 'to', 'payee', 'amount', 'tags', 'comment']
        values = {p: '' for p in params}
        # zip() pairs each typed token with its slot; extra slots stay ''
        for p,v in zip(params, inputs):
            # handle matches for accounts (from/to) and payee
            if p in params[:3]:
                matches = self.rank(v, self.accounts[p])
                # slot currently being typed (the last token): return the
                # full suggestion list instead of continuing
                if p==params[len(inputs)-1]:
                    entries = []
                    for m in matches:
                        account = m
                        icon = './icon.png'
                        if p!='payee':
                            # icon keyed by top-level account type
                            account_type = m.split(':')[0]
                            if account_type in self.settings['icons']:
                                icon = self.settings['icons'][account_type]
                        else:
                            # payees are cached romanized; map back to the
                            # original display name
                            if m in self.accounts['mapping']:
                                account = self.accounts['mapping'][m]
                        values[p] = account
                        entries.append({
                            'title': account,
                            'subtitle': self.format_desc(values),
                            'autocomplete': account,
                            'valid': False,
                            'icon': icon
                        })
                    return entries
                else:
                    # already-completed slot: take the best match
                    account = matches[0]
                    if p=='payee' and account in self.accounts['mapping']:
                        account = self.accounts['mapping'][account]
                    values[p] = account
            # handle transaction amount
            elif p=='amount':
                values[p] = float(v)
            # handle tags: '+'-separated input becomes '#tag1 #tag2'
            elif p=='tags':
                values[p] = '#'+' #'.join(v.split('+'))
            # handle comment
            else:
                values[p] = v
        values['date'] = datetime.now().strftime('%Y-%m-%d')
        # render the double-entry transaction: title line plus one posting
        # per account, with opposite flows
        entry = '\n'.join([
            self.settings['title_format'].format(**values).strip(),
            self.settings['body_format'].format(
                account=values['from'], flow=-values['amount'],
                currency=self.settings['default_currency']
            ),
            self.settings['body_format'].format(
                account=values['to'], flow=values['amount'],
                currency=self.settings['default_currency']
            )
        ])
        return [{
            'title': 'New ${amount:.2f} Entry {tags}'.format(**values),
            'subtitle': self.format_desc(values),
            'valid': True,
            'arg': entry,
            'text': entry
        }]

    def bean_clear(self, inputs=None):
        """Yield Alfred items for entries matching the 'clear' regex.

        Each item's 'arg' is the character offset of the match in the
        ledger so a downstream action can tag that entry.
        """
        with open(self.settings['default_ledger'], 'r') as beanfile:
            bean = beanfile.read()
        for m in re.finditer(self.settings['regexes']['clear'], bean):
            # group(2) carries the quoted payee/comment strings
            tail = [i.strip() for i in m.group(2).split('"') if i.strip()!='']
            # NOTE(review): group meanings depend on the configured regex —
            # presumably 1=date, 3=from posting, 4=to posting; confirm
            # against the beancount.json shipped with the workflow.
            values = {
                'date': m.group(1),
                'from': m.group(3).split()[0],
                'to': m.group(4).split()[0],
                'amount': abs(float(m.group(3).split()[-2])),
                'comment': tail[0].upper() if tail else 'NULL'
            }
            yield {
                'title': '${amount:.2f} with {comment}'.format(**values),
                # NOTE(review): 'โ' looks mojibake-damaged (an arrow is
                # presumably intended) — confirm the source encoding.
                'subtitle': u'{date} {from} โ {to}'.format(**values),
                'valid': True,
                'icon': self.settings['icons'][values['from'].split(':')[0]],
                'arg': str(m.start())
            }

    def bean_cache(self, ledger_folder=None):
        """Scan all *.beancount files and cache account/payee frequencies.

        Returns (and writes to settings['temp_path'] as JSON) a dict with
        'from'/'to' account usage counts, romanized 'payee' counts, and a
        romanized->original payee 'mapping'.
        """
        # default to folder in config file
        if not ledger_folder:
            ledger_folder = self.settings['ledger_folder']
        # read and join all ledger files
        records = []
        for f in glob.glob(os.path.join(ledger_folder, '*.beancount')):
            with open(f, 'r') as beanfile:
                records.append(beanfile.read())
        content = '\n'.join(records)
        # collect raw matches per configured regex
        matches = {}
        for key in ['open', 'close', 'payee', 'from', 'to']:
            matches[key] = re.findall(self.settings['regexes'][key], content)
        accounts = {
            # usage counts, restricted to accounts opened and never closed
            'from': {
                x: matches['from'].count(x)
                for x in matches['open']
                if x not in matches['close']
            },
            'to': {
                x: matches['to'].count(x)
                for x in matches['open']
                if x not in matches['close']
            },
            # payees keyed by pinyin romanization so CJK names match ASCII
            'payee': {
                self.decode(x): matches['payee'].count(x)
                for x in set(matches['payee'])
            },
            'mapping': {
                self.decode(x): x
                for x in set(matches['payee'])
            }
        }
        with open(self.settings['temp_path'], 'w') as tempfile:
            json.dump(accounts, tempfile)
        return accounts

    def rank(self, target, searches, limit=10):
        """Fuzzy-rank names in *searches* (name -> usage count) for *target*.

        Scores are fuzz.partial_ratio weighted by log(count + 1); returns
        [target] itself when nothing scores above zero.
        """
        matches = process.extract(
            target, searches.keys(), limit=limit, scorer=fuzz.partial_ratio)
        matches = [(m[0], m[1]*math.log(searches[m[0]]+1)) for m in matches if m[1]>0]
        if matches:
            return [m[0] for m in sorted(matches, key=lambda d: -d[1])]
        return [target]

    def decode(self, text):
        """Romanize *text* to pinyin so CJK payees can be typed in ASCII."""
        return ''.join(lazy_pinyin(text))

    def format_desc(self, value):
        """One-line human-readable summary of a (partially) filled entry."""
        desc = []
        if value['from']:
            desc += [value['from']]
        if value['to']:
            # NOTE(review): 'โ' looks mojibake-damaged (arrow intended?)
            desc += ['โ', value['to']]
        if value['payee']:
            desc += ['by', value['payee']]
        if value['amount']:
            # NOTE(review): 'ยฅ' looks mojibake-damaged (currency sign?)
            desc += ['ยฅ{:.2f}'.format(value['amount'])]
        return ' '.join(desc)

    def format_alfred(self, results):
        """Print *results* as Alfred Script Filter JSON."""
        print(json.dumps({'items': list(results)}))
# CLI dispatch: beancount.py <add|cache|clear> [args...]
if __name__=='__main__':
    # exit if no action argument is provided
    if len(sys.argv)==1:
        sys.exit()
    bean = Beancount()
    action = sys.argv[1]
    inputs = sys.argv[2:]
    if action == 'add':
        bean.format_alfred(bean.bean_add(inputs))
    elif action == 'cache':
        # bean_cache expects a single folder path (or None to use the
        # configured default); the original passed the whole argv list,
        # which breaks os.path.join when a folder argument is supplied.
        bean.bean_cache(inputs[0] if inputs else None)
    elif action == 'clear':
        bean.format_alfred(bean.bean_clear(inputs))
|
blaulan/alfred-beancount | commandline.py | # -*- coding: utf-8 -*-
# @Author: <NAME> <<EMAIL>>
# @Date: 2020-02-29 17:49:05
# @Last Modified By: <NAME> <<EMAIL>>
# @Last Modified Time: 2020-03-02 22:06:26
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.validation import Validator, ValidationError
from beancount import Beancount
class BeancountCompleter(Completer):
    """prompt_toolkit completer backed by Beancount.bean_add suggestions."""

    def set_bean(self, bean):
        # Inject the Beancount helper that produces candidate entries.
        self.bean = bean

    def get_completions(self, document, complete_event):
        """Yield account/payee completions for the current prompt text."""
        inputs = document.text.strip().split()
        # No suggestions once all three account/payee slots are filled, or
        # when the cursor sits just after a completed word.  endswith() is
        # safe on an empty buffer, unlike the original document.text[-1]
        # which raised IndexError on "".
        if len(inputs) > 3 or document.text.endswith(' '):
            results = []
        else:
            results = self.bean.bean_add(inputs)
        for r in results:
            yield Completion(r['title'], start_position=0)
class BeancountValidator(Validator):
    """Validate prompt input and surface a live entry preview in the toolbar."""

    def set_bean(self, bean):
        # Inject the Beancount helper used to build the preview.
        self.bean = bean

    def set_toolbar(self, toolbar):
        # Toolbar that displays the running description of the entry.
        self.toolbar = toolbar

    def validate(self, document):
        """Reject a non-numeric amount token; otherwise refresh the preview."""
        tokens = document.text.strip().split()
        if not tokens:
            return
        # The fourth token is the amount; allow at most one decimal point.
        if len(tokens) > 3 and not tokens[3].replace('.', '', 1).isdigit():
            raise ValidationError(
                message='amount value error',
                cursor_position=len(document.text) - 1
            )
        preview = self.bean.bean_add(tokens)
        self.toolbar.set_text(preview[0]['subtitle'])
class BeancountToolbar:
    """Mutable one-line status text shown at the bottom of the prompt."""

    def __init__(self):
        # Start with an empty toolbar.
        self.text = ''

    def clear_text(self):
        """Blank out the toolbar."""
        self.text = ''

    def set_text(self, text):
        """Replace the toolbar contents with *text*."""
        self.text = text

    def get_text(self):
        """Return the current toolbar contents (used as a prompt callback)."""
        return self.text
# Interactive entry mode: a prompt session with live completion and
# validation; each accepted line is appended to the default ledger.
if __name__=='__main__':
    bean = Beancount()
    toolbar = BeancountToolbar()
    completer = BeancountCompleter()
    completer.set_bean(bean)
    validator = BeancountValidator()
    validator.set_bean(bean)
    validator.set_toolbar(toolbar)
    session = PromptSession(
        completer=completer,
        complete_while_typing=True,
        validator=validator,
        validate_while_typing=True,
        bottom_toolbar=toolbar.get_text
    )
    while True:
        try:
            text = session.prompt('> ')
        except KeyboardInterrupt:
            # Ctrl-C discards the current line but keeps the session alive
            toolbar.clear_text()
            continue
        except EOFError:
            # Ctrl-D ends the session
            break
        # for a complete query bean_add returns one item whose 'arg' is the
        # rendered transaction text
        output = bean.bean_add(text.strip().split())[0]
        with open(bean.settings['default_ledger'], 'a') as ledger_file:
            ledger_file.write(output['arg']+'\n\n')
        print(output['arg'])
        toolbar.clear_text()
    print('session ended!')
|
varjas/siprefix | siprefix/siprefix.py | import math
# Return prefix or scale based on one input
# Only handles prefixes separated by 3 orders of magnitude
def siConvert(order=None, prefix=None):
    """Translate between an SI prefix and its order of magnitude.

    Pass exactly one of the two keyword arguments:
      - prefix: returns the matching order, an int (e.g. 'k' -> 3)
      - order:  returns the matching prefix, a str (e.g. -3 -> 'm')
    Only prefixes separated by 3 orders of magnitude are handled; if both
    arguments are given, *prefix* wins.

    :raises TypeError: if neither argument is given
    :raises KeyError: if the prefix/order is not in the table
    """
    # Require at least one argument
    if prefix is None and order is None:
        raise TypeError("siConvert() missing at least 1 positional argument: 'order' or 'prefix'")
    # Prefix -> exponent table (yotta .. yocto)
    data = {
        'Y': 24,
        'Z': 21,
        'E': 18,
        'P': 15,
        'T': 12,
        'G': 9,
        'M': 6,
        'k': 3,
        '': 0,
        'm': -3,
        # NOTE(review): this key looks mojibake-damaged; the micro sign
        # 'µ' is presumably intended — confirm the source file's encoding.
        'ยต': -6,
        'n': -9,
        'p': -12,
        'f': -15,
        'a': -18,
        'z': -21,
        'y': -24
    }
    if prefix is not None:
        try:
            return data[prefix]
        except KeyError:
            raise KeyError("invalid 'prefix' defined: " + str(prefix))
    # Reverse lookup: first key whose exponent equals `order`
    try:
        return next(k for k, v in data.items() if v == order)
    except StopIteration:
        raise KeyError("invalid 'order' defined: " + str(order))


# Returns scaled value with SI prefix
def scale(value, combined=True):
    """Scale *value* into engineering notation with an SI prefix.

    Strings are first expanded via expand(), so scale('1.5k') and
    scale(1500) are equivalent.

    :param combined: True -> single string "mantissa prefix";
                     False -> (mantissa, prefix) tuple
    """
    if type(value) == str:
        # Expand a possibly-prefixed string to its base-unit value
        value = expand(value)
    # Convert to float for the scaling calculation
    value = float(value)
    if value == 0:
        # log10 is undefined at zero (the original raised ValueError here);
        # zero needs no scaling and no prefix.
        order = 0
    else:
        # Exponent of the most significant digit...
        order = math.floor(math.log10(abs(value)))
        # ...floored to a multiple of 3 (engineering notation)
        order = order // 3 * 3
    # Clamp to the range covered by the prefix table
    if order > 24:
        order = 24
    elif order < -24:
        order = -24
    # Scale number by the chosen order of magnitude
    value = value / 10 ** order
    prefix = siConvert(order=order)
    if combined is True:
        # strip() drops the trailing space when there is no prefix
        return (str(value) + ' ' + prefix).strip()
    elif combined is False:
        return (value, prefix)


# Returns expanded value in base scale
def expand(value):
    """Expand a value with an optional SI prefix to its base-unit value.

    Strings like '1.5k' or '2 m' become floats; non-string inputs pass
    through apart from the (no-op) multiplication by 10**0.
    """
    order = 0
    if type(value) == str:
        # A trailing letter, if any, is the SI prefix
        if value[-1].isalpha():
            order = siConvert(prefix=value[-1])
            # Remove prefix (and any separating whitespace) from the string
            value = value[:-1].strip()
        # Convert to float for the expansion calculation
        value = float(value)
    return value * 10 ** order
|
varjas/siprefix | siprefix/__init__.py | from siprefix import *
|
jumper2014/web-test-framework-python-unittest-selenium | libs/pages/home_page.py | <filename>libs/pages/home_page.py
#!/usr/bin/env python
# author: zengyuetian
# page object and function for search page
from libs.pages.base_page_object import BasePage
from libs.pagefactory.pagefactory_support import callable_find_by as find_by
class HomePage(BasePage):
    """Page object for the Baidu home page."""

    # Locators resolved lazily through the page-factory helper; calling the
    # attribute performs the element lookup against self._driver.
    search_box = find_by(id_="kw")
    search_button = find_by(id_='su')

    def search(self, keywords):
        """Type *keywords* into the search box and submit the query."""
        self.search_box().clear()
        self.search_box().send_keys(keywords)
        self.search_button().click()
|
jumper2014/web-test-framework-python-unittest-selenium | suites/search/__init__.py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
# Package marker only; nothing to run when executed directly.
if __name__ == '__main__':
    pass
jumper2014/web-test-framework-python-unittest-selenium | libs/pages/result_page.py | <filename>libs/pages/result_page.py<gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
from libs.pages.base_page_object import BasePage
from libs.pagefactory.pagefactory_support import callable_find_by as find_by
import unittest
import time
class ResultPage(BasePage):
    """Page object for the Baidu search-results page."""

    # The results page reuses the same search input (#kw) as the home page.
    search_box = find_by(id_="kw")

    def verify_keyword(self, keyword):
        """Check that the search box still holds *keyword* after a search.

        NOTE(review): fixed sleep plus a bare assert; an explicit
        WebDriverWait and a unittest assertion would be more robust —
        confirm before changing, since callers may rely on AssertionError.
        """
        time.sleep(2)
        assert self.search_box().get_attribute('value') == keyword
|
jumper2014/web-test-framework-python-unittest-selenium | main.py | <reponame>jumper2014/web-test-framework-python-unittest-selenium<filename>main.py
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
import unittest
from suites.search.test_search import *
# Build and run the search suite explicitly (rather than via discovery).
if __name__ == "__main__":
    suite = unittest.TestSuite()
    suite.addTest(TestSearch('testSearchOk'))
    runner = unittest.TextTestRunner()
    runner.run(suite)
jumper2014/web-test-framework-python-unittest-selenium | suites/search/test_search.py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
import unittest
from libs.pages.home_page import *
from libs.pages.result_page import *
from selenium import webdriver
class TestSearch(unittest.TestCase):
    """End-to-end check that a Baidu search keeps the query in the box."""

    def setUp(self):
        # Fresh browser per test.
        self.driver = webdriver.Chrome()

    def testSearchOk(self):
        self.driver.get("https://www.baidu.com")
        HomePage(self.driver).search("selenium")
        # Verify the same keyword that was searched; the original checked
        # "selenium1", which made this happy-path test always fail.
        ResultPage(self.driver).verify_keyword("selenium")

    def tearDown(self):
        # quit() ends the whole WebDriver session and the chromedriver
        # process; close() only closes the window and leaks the driver.
        self.driver.quit()
|
jumper2014/web-test-framework-python-unittest-selenium | libs/pages/base_page_object.py | #!/usr/bin/env python
# author: zengyuetian
# base page class
from selenium.webdriver.common.action_chains import ActionChains
import time
class BasePage(object):
    """Common behaviour shared by all page objects."""

    def __init__(self, driver, base_url='http://www.baidu.com'):
        # WebDriver instance the page-factory locators resolve against.
        self._driver = driver
        self.base_url = base_url
        self.timeout = 30

    def visit(self, url):
        """Navigate the underlying driver to *url*."""
        self._driver.get(url)

    def hover(self, element):
        """Move the mouse over *element*, then pause for the UI to react."""
        ActionChains(self._driver).move_to_element(element).perform()
        # Hover-triggered menus are flaky without a settle delay.
        time.sleep(5)

    @staticmethod
    def method_missing(what):
        """Fallback stub for unimplemented page actions."""
        print("No %s here!" % what)
|
jumper2014/web-test-framework-python-unittest-selenium | libs/pagefactory/pagefactory_support.py | #!/usr/bin/env python
# https://jeremykao.wordpress.com/2015/06/10/pagefactory-pattern-in-python/
# author: Jeremy, github: https://gist.github.com/imsardine/6185d76377e2c8d06d07#file-pageobject_support-py
# updated to support python2 and python3
import sys
__all__ = ['visible', 'cacheable', 'callable_find_by', 'property_find_by']
def cacheable_decorator(lookup):
    """Memoise *lookup* per page-object instance.

    Results are stored on the instance in ``_elements_cache``, keyed by the
    id() of the wrapped callable, so each decorated finder resolves at most
    once per instance.
    """
    def wrapper(self):
        cache = getattr(self, '_elements_cache', None)
        if cache is None:
            # {callable_id: element(s)}
            cache = self._elements_cache = {}
        key = id(lookup)
        if key not in cache:
            cache[key] = lookup(self)
        return cache[key]
    return wrapper


# Short public alias used by page-factory call sites.
cacheable = cacheable_decorator
_strategy_kwargs = ['id_', 'xpath', 'link_text', 'partial_link_text',
'name', 'tag_name', 'class_name', 'css_selector']
def _callable_find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs):
def func(self):
# context - driver or a certain element
if context:
ctx = context() if callable(context) else context.__get__(self) # or property
else:
ctx = getattr(self, driver_attr)
# 'how' AND 'using' take precedence over keyword arguments
if how and using:
lookup = ctx.find_elements if multiple else ctx.find_element
return lookup(how, using)
if sys.version_info < (3, 0): # ๅฆๆๅฐไบPython3
key = kwargs.keys()[0]
else:
key = list(kwargs.keys())[0]
if len(kwargs) != 1 or key not in _strategy_kwargs:
raise ValueError(
"If 'how' AND 'using' are not specified, one and only one of the following "
"valid keyword arguments should be provided: %s." % _strategy_kwargs)
value = kwargs[key]
suffix = key[:-1] if key.endswith('_') else key # find_element(s)_by_xxx
prefix = 'find_elements_by' if multiple else 'find_element_by'
lookup = getattr(ctx, '%s_%s' % (prefix, suffix))
return lookup(value)
return cacheable_decorator(func) if cacheable else func
def callable_find_by(how=None, using=None, multiple=False, cacheable=False, context=None, driver_attr='_driver',
                     **kwargs):
    """Return a finder callable; see _callable_find_by for the semantics."""
    return _callable_find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs)
def property_find_by(how=None, using=None, multiple=False, cacheable=False, context=None, driver_attr='_driver',
                     **kwargs):
    """Like callable_find_by, but exposed as a read-only property."""
    return property(_callable_find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs))
def visible(element):
    """Expected-condition factory: yields the element once displayed, else None.

    *element* may be a WebElement or a zero-argument callable returning one;
    falsy candidates are treated as not visible.
    """
    def expected_condition(ignored):
        target = element() if callable(element) else element
        if target and target.is_displayed():
            return target
        return None
    return expected_condition
|
madhuvenkidusamy/sql-challenge | employees.py | <gh_stars>0
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
import pandas as pd
# Connect to postgres
# SECURITY NOTE(review): credentials are hard-coded in the connection URL;
# move them to an environment variable or a config file kept out of VCS.
engine = create_engine("postgres://madhuvenkidusamy:blend62@localhost:5432/employees")
# Load CSVs into Beekeeper. ONLY DO THIS ONCE!! DO NOT KEEP REPLACING TABLES IN BEEKEEPER.
tables = ['departments','dept_emp','dept_manager','employees','salaries','titles']
for table in tables:
    # Absolute, machine-specific path — assumes this exact workstation layout.
    path = '/Users/madhuvenkidusamy/Documents/Data Science Bootcamp/Homeworks/sql-challenge/EmployeeSQL/' +table+ '.csv'
    with open(path, 'r') as file:
        data_df = pd.read_csv(file)
        # if_exists='replace' drops and recreates the table on every run
        # (hence the warning above).
        data_df.to_sql(table, con=engine, index=True, index_label='id', if_exists='replace')
# Verify all tables have been loaded to postgres
print(inspect(engine).get_table_names())
|
nbeguier/coverage2css | coverage2css.py | <gh_stars>0
#!/usr/bin/env python3
"""
Chrome Coverage to CSS file
Copyright (c) 2020-2021 <NAME>
Licensed under the MIT License
Written by <NAME> (<EMAIL>)
"""
# Standard library imports
import sys
import json
# Debug
# from pdb import set_trace as st
# CLI arguments: path to the Chrome coverage JSON export, and the basename
# of the CSS file to extract from it.
JSON_COVERAGE = sys.argv[1]
CSS_NAME = sys.argv[2]
VERSION = '1.1.0'
def main():
    """Extract the used CSS for CSS_NAME from a Chrome coverage report.

    Reads JSON_COVERAGE, finds every entry whose URL basename equals
    CSS_NAME, and writes only the covered byte ranges of its text to
    '<CSS_NAME>.new'.
    """
    with open(JSON_COVERAGE, 'r') as json_coverage_file:
        json_coverage = json.loads(json_coverage_file.read())
    for coverage in json_coverage:
        css_name = coverage['url'].split('/')[-1]
        print(f"> Detect '{css_name}' in report")
        if CSS_NAME != css_name:
            print(f"{CSS_NAME} != {css_name}")
            continue
        print(">> Match input file")
        # Context manager guarantees the output file is closed even on a
        # malformed range (the original used a bare open()/close() pair);
        # the pointless enumerate()/rng[1] indexing is gone too.
        with open(css_name + '.new', 'w+') as css_output_file:
            for rng in coverage['ranges']:
                css_output_file.write(coverage['text'][rng['start']:rng['end']])
        print(f">> Write {css_name}.new")


if __name__ == "__main__":
    main()
|
4BH1J337/Volume-Calculator | main.py | <filename>main.py
from tkinter import * # all
# --- Main window setup -------------------------------------------------
root=Tk() # create the Tk root window
root.geometry("320x480") # Width x Height (320,480)
root.resizable(False,False) # keep the window a fixed size
root.title("Volume Calculator") # title-bar text
root.iconbitmap('Images/volcon.ico') # window icon (.ico file)
# Tk variables backing the entry fields
heightValue=DoubleVar()
radiusValue=DoubleVar()
lengthValue=DoubleVar()
widthValue=DoubleVar()
unit_List= ["mm","cm","in"] # unit options
unitVarible=StringVar() # holds the selected unit
unitVarible.set("Select") # placeholder until the user picks one
shape_List= ["Cylinder","Cone","Cuboid","Sphere"] # shape options
shapeVarible=StringVar() # holds the selected shape
shapeVarible.set("Select") # placeholder until the user picks one
# Control structure / logic ---------------------------------------------
def show_selected_unit(*args):
    """OptionMenu callback: publish the chosen unit via a module global.

    NOTE(review): while the dropdown still shows "Select", Selected_unit
    holds that placeholder and Calculate_Vol cannot pick a litre divisor —
    confirm whether input validation is intended elsewhere.
    """
    global Selected_unit
    Selected_unit = unitVarible.get()
show_selected_unit()
def show_selected_shape(*args):
    """OptionMenu callback: show/hide/relabel entry fields per shape.

    The radius entry doubles as the Length field for cuboids; only its
    label text changes.  Widget positions differ per shape because the
    cuboid needs an extra (width) row.
    """
    global Selected_shape
    Selected_shape = shapeVarible.get()
    if Selected_shape =='Cuboid':
        print('Shape Selected : ',Selected_shape) # console trace
        # cuboid needs length, height and width fields
        radiusEntry.place(x=80,y=110) # reused as the Length entry here
        heightEntry.place(x=80,y=160)
        widthEntry.place(x=80,y=205)
        L3.place(x=20,y=110) # length label
        L3.config(text="Length",font=20) # relabel (may have said Radius)
        L2.place(x=20,y=160) # height label
        L4.place(x=20,y=205) # width label
        L5.place(x=20,y=240) # unit label
        L6.place(x=105,y=270) # results label, same spot for every shape
        dropDownMenu.place(x=80,y=240) # unit option menu
    elif (Selected_shape =='Cylinder' or Selected_shape =='Cone'):
        # both shapes take radius + height
        print('Shape Selected : ',Selected_shape)
        widthEntry.place_forget() # hide cuboid-only widgets
        L4.place_forget()
        L3.config(text="Radius",font=20) # relabel Length -> Radius
        L3.place(x=20,y=110)
        L2.place(x=20,y=160)
        L5.place(x=20,y=210)
        L6.place(x=105,y=270)
        radiusEntry.place(x=80,y=110)
        heightEntry.place(x=80,y=160)
        dropDownMenu.place(x=80,y=210)
    elif Selected_shape =='Sphere':
        # sphere needs only a radius
        print('Shape Selected : ',Selected_shape)
        L2.place_forget() # height label not needed
        L4.place_forget() # width label not needed
        widthEntry.place_forget()
        heightEntry.place_forget()
        L4.place_forget() # (duplicate call in original; harmless no-op)
        L5.place_forget()
        L3.config(text="Radius",font=20)
        L3.place(x=20,y=140)
        L5.place(x=20,y=210)
        L6.place(x=105,y=270)
        radiusEntry.place(x=80,y=140)
        dropDownMenu.place(x=80,y=210)
show_selected_shape()
def Calculate_Vol(*args):
    """Compute the selected shape's volume and show it in the result labels.

    Reads the Selected_unit / Selected_shape globals set by the dropdown
    callbacks plus the entry fields, prints a console trace, and overlays
    the result labels (L7 cubic units, L8 litres).
    """
    # pick the divisor converting cubic <unit> to litres
    print('Unit Selected : ',Selected_unit)
    if Selected_unit =='mm':
        Liters_Constant = float(1000000) # mm^3 per litre
        print('Liters Constant : ', Liters_Constant)
    elif Selected_unit == 'cm':
        Liters_Constant = float(1000) # cm^3 per litre
        print('Liters Constant : ', Liters_Constant)
    elif Selected_unit == 'in':
        Liters_Constant = float(61.024) # in^3 per litre (approx.)
        print('Liters Constant : ', Liters_Constant)
    # NOTE(review): if the unit dropdown still shows "Select",
    # Liters_Constant is never bound and the division below raises
    # NameError — confirm whether validation happens before this point.
    RADIUS = float(radiusEntry.get()) # doubles as Length for cuboid
    PI=22/7 # crude approximation of pi used by all shapes
    if Selected_shape == ('Cylinder'):
        HEIGHT = float(heightEntry.get())
        CubicUnits=(PI*RADIUS*RADIUS*HEIGHT) # V = pi r^2 h
        print('Calculate volume for : ',Selected_shape)
        print('Radius : ',RADIUS)
        print('Height : ',HEIGHT)
    elif Selected_shape == ('Cone'):
        HEIGHT = float(heightEntry.get())
        CubicUnits=(PI*RADIUS*RADIUS*HEIGHT/3) # V = pi r^2 h / 3
        print('Calculate volume for : ',Selected_shape)
        print('Radius : ',RADIUS)
        print('Height : ',HEIGHT)
    elif Selected_shape == ('Cuboid'):
        HEIGHT = float(heightEntry.get())
        WIDTH = float(widthEntry.get())
        CubicUnits=(RADIUS*WIDTH*HEIGHT) # V = l w h (RADIUS holds length)
        print('Calculate volume for : ',Selected_shape)
        print('Length : ',RADIUS)
        print('Width : ',WIDTH)
        print('Height : ',HEIGHT)
    elif Selected_shape == ('Sphere'):
        CubicUnits=(4/3*PI*RADIUS*RADIUS*RADIUS) # V = 4/3 pi r^3
        print('Calculate volume for : ',Selected_shape)
        print('Radius : ',RADIUS)
    # convert cubic units to litres and round both for display
    Liters=(CubicUnits/Liters_Constant)
    Round_Liters = float(round(Liters,3))
    Round_CubicUnits=float( round(CubicUnits,3))
    print('CubicUnits : ',Round_CubicUnits)
    print('Liters : ',Round_Liters)
    print('\n')
    # overlay fresh result labels on top of any previous ones
    L7=Label(text=f"{Round_CubicUnits} {unitVarible.get()}3 ",font=('Helvetica',12,'bold'),bg="#FFFFFF",width=25, height=3).place(x=30,y=310)
    L8=Label(text=f"{Round_Liters} Liters ",font=('Helvetica',12,'bold'),bg="#FFFFFF",width=25, height=2).place(x=30,y=360)
def CLEAR(*args):
    """Reset the unit selection, all input fields and the result labels."""
    unitVarible.set("Select") # back to the placeholder
    # NOTE(review): these are DoubleVars being set to '' — Tk tolerates it
    # but a numeric reset (e.g. 0.0) may be intended; confirm.
    heightValue.set("")
    radiusValue.set("")
    lengthValue.set("")
    widthValue.set("")
    # local names only; they do not clear any state outside this function
    Round_CubicUnits=()
    Round_Liters=()
    # overlay blank labels on top of the previous results
    L7=Label(text=f"",font=('Helvetica',12,'bold'),bg="#FFFFFF",width=25, height=3).place(x=30,y=310)
    L8=Label(text=f"",font=('Helvetica',12,'bold'),bg="#FFFFFF",width=25, height=2).place(x=30,y=360)
    print("Clear\n")
CLEAR() # start with a clean form
# --- Graphical user interface (layout) ---------------------------------
# Labels
L0=Label(root,font=('Helvetica',17,'bold'),text="Volume Calculator",justify=CENTER,bg="#1b6a97",borderwidth=3, relief="raised",fg='white',width=100) # header banner
L0.pack(padx=2)
L1=Label(text="Shape",font=20).place(x=20,y=60) # shape selector caption
L2=Label(text="Height",font=20)
L3=Label(text="Length",font=20) # relabelled to "Radius" for round shapes
L4=Label(text="Width",font=20)
L5=Label(text="Units",font=20)
L6=Label(text="Results",font=("Courier", 16, "bold"),fg='#1b6a97')
# (labels above are positioned by show_selected_shape)
# shape drop-down menu
dropDownMenu1 = OptionMenu(root,shapeVarible,*shape_List, command = show_selected_shape )
dropDownMenu1.pack(expand=True)
dropDownMenu1.place(x=80,y=60)
# unit drop-down menu (positioned by show_selected_shape)
dropDownMenu = OptionMenu(root,unitVarible,*unit_List, command = show_selected_unit )
# entry fields, shared by all shapes (positioned by show_selected_shape)
heightEntry=Entry(root,textvariable=heightValue,width=20,bd=3,font=20)
radiusEntry=Entry(root,textvariable=radiusValue,width=20,bd=3,font=20) # doubles as Length
widthEntry=Entry(root,textvariable=widthValue,width=20,bd=3,font=20)
# buttons
Clear_Button=Button(text ="Clear",font=('Helvetica',12), bg="#1b6a97",fg="white",width=8,height=1, command = CLEAR).place(x=30,y=430)
Cal_Button=Button(text ="Calculate",font=('Helvetica',12), bg="#1b6a97",fg="white",width=8,height=1, command = Calculate_Vol ).place(x=200,y=430)
root.mainloop() # enter the Tk event loop
|
daangn/redis-memory-analyzer | rma/rule/Hash.py | import statistics
from itertools import tee
from tqdm import tqdm
from rma.redis import *
from rma.helpers import pref_encoding, make_total_row, progress_iterator
class HashStatEntry(object):
    """Memory statistics for a single Redis hash key."""

    def __init__(self, info, redis):
        """
        :param key_name:
        :param RmaRedis redis:
        :return:
        """
        key_name = info['name']
        self.keys = []
        self.values = []
        self.encoding = info["encoding"]
        # pull every field/value pair of the hash
        for key, value in redis.hscan_iter(key_name, '*'):
            self.keys.append(key)
            self.values.append(value)
        self.count = len(self.keys)
        # tee() gives each aggregate below its own copy of the length
        # generators, so the sums/min/max can all consume one pass each
        args, args2, args3 = tee((len(x) for x in self.keys), 3)
        m, m2, m3 = tee((len(x) for x in self.values), 3)
        self.fieldUsedBytes = sum(args)
        # overhead and aligned sizes depend on the hash's internal encoding
        if self.encoding == REDIS_ENCODING_ID_HASHTABLE:
            self.system = dict_overhead(self.count)
            self.fieldAlignedBytes = sum(map(size_of_aligned_string, self.keys))
            self.valueAlignedBytes = sum(map(size_of_aligned_string, self.values))
        elif self.encoding == REDIS_ENCODING_ID_ZIPLIST:
            self.system = ziplist_overhead(self.count)
            self.fieldAlignedBytes = sum(map(size_of_ziplist_aligned_string, self.keys))
            self.valueAlignedBytes = sum(map(size_of_ziplist_aligned_string, self.values))
        else:
            raise Exception('Panic', 'Unknown encoding %s in %s' % (self.encoding, key_name))
        self.valueUsedBytes = sum(m)
        # min()/max() raise ValueError on an empty hash; record None instead
        try:
            self.fieldMin = min(args2)
        except ValueError:
            self.fieldMin = None
            pass
        try:
            self.fieldMax = max(args3)
        except ValueError:
            self.fieldMax = None
            pass
        try:
            self.valueMin = min(m2)
        except ValueError:
            self.valueMin = None
        try:
            self.valueMax = max(m3)
        except ValueError:
            self.valueMax = None
class HashAggregator(object):
    """Aggregate HashStatEntry objects for one key pattern."""

    def __init__(self, all_obj, total):
        self.total_elements = total
        # Materialise once instead of fanning the iterable out with tee();
        # every aggregate below reads the same list.
        entries = list(all_obj)
        self.encoding = pref_encoding([e.encoding for e in entries], redis_encoding_id_to_str)
        self.system = sum(e.system for e in entries)
        self.fieldUsedBytes = sum(e.fieldUsedBytes for e in entries)
        self.fieldAlignedBytes = sum(e.fieldAlignedBytes for e in entries)
        if total == 0:
            self.fieldAvgCount = 0
        elif total > 1:
            self.fieldAvgCount = statistics.mean(e.count for e in entries)
        else:
            # single entry: min() of one element equals its count
            self.fieldAvgCount = min(e.count for e in entries)
        self.valueUsedBytes = sum(e.valueUsedBytes for e in entries)
        self.valueAlignedBytes = sum(e.valueAlignedBytes for e in entries)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        # never suppress exceptions
        return False
class Hash(object):
    """Pattern-level memory analysis for Redis hash keys."""

    def __init__(self, redis):
        """
        :param RmaRedis redis:
        :return:
        """
        self.redis = redis

    def analyze(self, keys, total=0):
        """Aggregate hash statistics per key pattern.

        :param keys: mapping of pattern -> list of key-info dicts
        :param total: overall key count, used only to size the progress bar
        :return: dict with 'headers' and sorted 'data' rows plus a totals row
        """
        key_stat = {
            'headers': ['Match', "Count", "Avg field count", "Key mem", "Real", "Ratio", "Value mem", "Real", "Ratio",
                        "System", "Encoding", "Total mem", "Total aligned"],
            'data': []
        }
        progress = tqdm(total=total,
                        mininterval=1,
                        desc="Processing Hash patterns",
                        leave=False)
        for pattern, data in keys.items():
            # lazy generator: each key is scanned as the aggregator consumes
            # it, ticking the progress bar along the way
            agg = HashAggregator(progress_iterator((HashStatEntry(x, self.redis) for x in data), progress), len(data))
            stat_entry = [
                pattern,
                len(data),
                agg.fieldAvgCount,
                agg.fieldUsedBytes,
                agg.fieldAlignedBytes,
                # guard against division by zero for empty hashes
                agg.fieldAlignedBytes / (agg.fieldUsedBytes if agg.fieldUsedBytes > 0 else 1),
                agg.valueUsedBytes,
                agg.valueAlignedBytes,
                agg.valueAlignedBytes / (agg.valueUsedBytes if agg.valueUsedBytes > 0 else 1),
                agg.system,
                agg.encoding,
                agg.fieldUsedBytes + agg.valueUsedBytes,
                agg.fieldAlignedBytes + agg.valueAlignedBytes + agg.system,
            ]
            key_stat['data'].append(stat_entry)
        # sort by total aligned size (column 12), largest first
        key_stat['data'].sort(key=lambda x: x[12], reverse=True)
        key_stat['data'].append(
            make_total_row(key_stat['data'], ['Total:', sum, 0, sum, sum, 0, sum, sum, 0, sum, '', sum, sum]))
        progress.close()
        return key_stat
|
amickael/pyopentdb | pyopentdb/enum/Difficulty.py | from enum import Enum, unique
@unique
class Difficulty(Enum):
    """API difficulty levels, valued by their wire-format string."""
    EASY = "easy"
    MEDIUM = "medium"
    HARD = "hard"


# Reverse lookup: wire-format string -> Difficulty member.
difficulty_map = {member.value: member for member in Difficulty}
|
amickael/pyopentdb | pyopentdb/api/__init__.py | from pyopentdb.api.OpenTDBClient import OpenTDBClient
|
amickael/pyopentdb | pyopentdb/model/QuestionResult.py | from dataclasses import dataclass
from typing import List
from copy import deepcopy
import html
from pyopentdb.model.Question import Question
from pyopentdb.enum import (
category_name_map,
difficulty_map,
question_type_map,
)
from pyopentdb.exc.QuestionError import QuestionError
@dataclass
class QuestionResult:
    """Raw question payload as returned by the Open Trivia DB API."""
    category: str
    type: str
    difficulty: str
    question: str
    correct_answer: str
    incorrect_answers: List[str]

    def __post_init__(self):
        # Unescape all HTML encoded characters in every text field
        self.category = html.unescape(self.category)
        self.type = html.unescape(self.type)
        self.difficulty = html.unescape(self.difficulty)
        self.question = html.unescape(self.question)
        self.correct_answer = html.unescape(self.correct_answer)
        self.incorrect_answers = [html.unescape(i) for i in self.incorrect_answers]

    def parse(self) -> Question:
        """Convert this raw payload into a validated Question.

        :raises QuestionError: if the category, type or difficulty string
            does not map to a known enum member.
        """
        # Parse raw strings to their enum members (None when unknown)
        category = category_name_map.get(self.category)
        question_type = question_type_map.get(self.type)
        difficulty = difficulty_map.get(self.difficulty)
        # Validate all three together so the error names the bad field
        enums = {
            "Category": [category, self.category],
            "Question Type": [question_type, self.type],
            "Difficulty": [difficulty, self.difficulty],
        }
        for enum, [parsed, original] in enums.items():
            if parsed is None:
                raise QuestionError(f'Invalid {enum} "{original}"')
        # Build the Question; deep-copy so this payload's incorrect_answers
        # list is not mutated by the append below (or Question's shuffle)
        choices = deepcopy(self.incorrect_answers)
        choices.append(self.correct_answer)
        return Question(
            category=category,
            question_type=question_type,
            difficulty=difficulty,
            question=self.question,
            answer=self.correct_answer,
            choices=choices,
        )
|
amickael/pyopentdb | pyopentdb/model/Question.py | import json
from dataclasses import dataclass, asdict
from enum import Enum
from random import shuffle
from typing import List, Union
from pyopentdb.enum import QuestionType, Difficulty, Category
from pyopentdb.exc import QuestionError
@dataclass
class Question:
    """A single trivia question with shuffled answer choices.

    ``answer_index`` is derived in ``__post_init__`` and points at ``answer``
    inside the shuffled ``choices`` list.
    """

    category: Category
    question_type: QuestionType
    difficulty: Difficulty
    question: str
    choices: List[str]
    answer: str
    answer_index: int = None  # filled in by __post_init__

    def __post_init__(self):
        # Randomise the choice order, then locate the correct answer.
        shuffle(self.choices)
        if self.answer not in self.choices:
            raise QuestionError(
                f"Answer ({self.answer}) is not in the list of choices ({self.choices})"
            )
        self.answer_index = self.choices.index(self.answer)

    def to_serializable(self, as_json: bool = False) -> Union[dict, str]:
        """Return a JSON-safe dict of this question (or a JSON string)."""
        def _plain(value):
            # Category wraps a namedtuple; expose only its display name.
            if isinstance(value, Category):
                return value.value.name
            if isinstance(value, Enum):
                return value.value
            return value

        data = {key: _plain(value) for key, value in asdict(self).items()}
        return json.dumps(data) if as_json is True else data
|
amickael/pyopentdb | pyopentdb/__init__.py | from pyopentdb.api import OpenTDBClient
from pyopentdb.enum import (
QuestionType,
Difficulty,
Category,
CategoryItem,
ResponseCode,
)
from pyopentdb.exc import APIError, QuestionError
from pyopentdb.model import Question
__author__ = "<NAME>"
__version__ = "0.0.4"
__description__ = "Python interface for the Open Trivia DB"
|
amickael/pyopentdb | pyopentdb/scripts/get_categories.py | <reponame>amickael/pyopentdb
"""Dev helper: print ``Category`` enum member definitions from the live API."""


def make_enum_name(name: str) -> str:
    """Derive an enum identifier from a category display name.

    Upper-cases the name, replaces spaces/colons/ampersands with
    underscores, then collapses the underscore runs left behind by
    separators such as " & " and ": ".

    >>> make_enum_name("Science & Nature")
    'SCIENCE_NATURE'
    """
    enum = name.upper().replace(" ", "_").replace(":", "_").replace("&", "_").strip()
    return enum.replace("___", "_").replace("__", "_")


def main() -> None:
    """Fetch the category list and print one CategoryItem line per category."""
    # Local import keeps this module importable when requests is absent.
    import requests

    req = requests.get("https://opentdb.com/api_category.php")
    data = req.json()["trivia_categories"]
    for cat in data:
        ident = cat["id"]
        name = cat["name"]
        print(f'{make_enum_name(name)} = CategoryItem({ident}, "{name}")')


if __name__ == "__main__":
    main()
|
amickael/pyopentdb | pyopentdb/enum/ResponseCode.py | <reponame>amickael/pyopentdb
from enum import Enum, unique
from collections import namedtuple
# Record describing one API response code: numeric code, short name, and the
# human-readable explanation published in the opentdb.com API docs.
ResponseCodeItem = namedtuple("ResponseCodeItem", ["code", "name", "description"])


@unique
class ResponseCode(Enum):
    """Response codes returned by the Open Trivia DB API."""

    SUCCESS = ResponseCodeItem(0, "Success", "Returned results successfully.")
    NO_RESULTS = ResponseCodeItem(
        1,
        "No Results",
        "Could not return results. The API doesn't have enough questions for your query.",
    )
    INVALID_PARAMETER = ResponseCodeItem(
        2,
        "Invalid Parameter",
        "Contains an invalid parameter. Arguments passed in are not valid.",
    )
    TOKEN_NOT_FOUND = ResponseCodeItem(
        3, "Token Not Found", "Session Token does not exist."
    )
    TOKEN_EMPTY = ResponseCodeItem(
        4,
        "Token Empty",
        "Session Token has returned all possible questions for the specified query. Resetting the Token is necessary.",
    )


# Reverse lookup: numeric API code -> ResponseCode member.
response_code_map = {i.value.code: i for i in ResponseCode}
|
amickael/pyopentdb | pyopentdb/model/QuestionSet.py | import json
from copy import deepcopy
from typing import List, Union
from pyopentdb.model.Question import Question
class QuestionSet:
    """A thin, list-like container of Question objects."""

    def __init__(self, questions: List[Question]):
        self.items = questions

    def __len__(self) -> int:
        return len(self.items)

    def __getitem__(self, item: int) -> Question:
        return self.items[item]

    def __setitem__(self, key: int, value: Question) -> Question:
        # Unusual for __setitem__, but this API returns the assigned value.
        self.items[key] = value
        return value

    def __delitem__(self, key) -> Question:
        # Hand back an independent snapshot (deep copy) of the removed item.
        removed = deepcopy(self.items[key])
        del self.items[key]
        return removed

    def __iter__(self) -> Question:
        return iter(self.items)

    def __str__(self):
        return str(self.items)

    def __repr__(self):
        return repr(self.items)

    def to_serializable(self, as_json: bool = False) -> Union[List[dict], str]:
        """Serialise every contained question; a JSON string when as_json=True."""
        serialised = [question.to_serializable() for question in self.items]
        return json.dumps(serialised) if as_json is True else serialised
|
amickael/pyopentdb | pyopentdb/api/OpenTDBClient.py | from typing import Union
from warnings import warn
import requests
from pyopentdb.enum import Category, Difficulty, QuestionType, ResponseCode
from pyopentdb.exc import APIError
from pyopentdb.model import QuestionResponse, QuestionSet
class OpenTDBClient:
    """HTTP client for the Open Trivia Database (https://opentdb.com).

    A session token is requested at construction and attached as a default
    query parameter, so the API avoids serving duplicate questions across
    calls made through the same client.
    """

    def __init__(self):
        # The Session carries default query params (the token) for all calls.
        self.session = requests.Session()
        self.request_token()

    def request_token(self):
        """Request a fresh session token and store it on the session params."""
        req = requests.get(
            "https://opentdb.com/api_token.php", params={"command": "request"}
        )
        # NOTE(review): a failed token request is silently ignored — later
        # calls then run without a token; confirm this best-effort is intended.
        if req.ok:
            self.session.params.update({"token": req.json().get("token")})

    def reset_token(self):
        """Reset the current token so the API may reuse all questions."""
        req = requests.get(
            f"https://opentdb.com/api_token.php",
            params={"command": "reset", "token": self.session.params.get("token")},
        )
        if req.ok:
            self.session.params.update({"token": req.json().get("token")})

    def get_questions(
        self,
        amount: int = 10,
        category: Union[Category, int] = None,
        difficulty: Union[Difficulty, str] = None,
        question_type: Union[QuestionType, str] = None,
        retry: int = 5,
    ) -> QuestionSet:
        """Fetch questions, retrying on HTTP failures and token problems.

        Args:
            amount: number of questions, 1..50 (API limit).
            category: Category member or raw numeric category id.
            difficulty: Difficulty member or raw API string ("easy", ...).
            question_type: QuestionType member or raw API string.
            retry: extra attempts allowed after a failed request.

        Returns:
            QuestionSet of parsed questions (possibly empty on NO_RESULTS).

        Raises:
            APIError: if amount is out of range or retries are exhausted.
        """
        # Validate amount
        if not 1 <= amount <= 50:
            raise APIError("Amount must be between 1 and 50, inclusive")
        # Parse enums
        if isinstance(category, Category):
            category = category.value.id
        if isinstance(difficulty, Difficulty):
            difficulty = difficulty.value
        if isinstance(question_type, QuestionType):
            question_type = question_type.value
        # Make HTTP request
        status = 200
        for i in range(retry + 1):
            req = self.session.get(
                "https://opentdb.com/api.php",
                params={
                    "amount": amount,
                    "category": category,
                    "difficulty": difficulty,
                    "type": question_type,
                },
            )
            if req.ok:
                resp = QuestionResponse(**req.json())
                # Token exhausted: reset it and retry (consumes one attempt).
                if resp.response_code == ResponseCode.TOKEN_EMPTY:
                    self.reset_token()
                    continue
                # Token unknown/expired: request a fresh one and retry.
                elif resp.response_code == ResponseCode.TOKEN_NOT_FOUND:
                    self.request_token()
                    continue
                elif resp.response_code != ResponseCode.SUCCESS:
                    # Non-fatal API condition (e.g. NO_RESULTS): warn and fall
                    # through to return whatever results came back.
                    warn(resp.response_code.value.description)
                return resp.results
            else:
                # HTTP-level failure: remember the last status and retry.
                status = req.status_code
                warn(
                    f"API returned status code {req.status_code}, retrying... ({i}/{retry})"
                )
                continue
        # Raise error if max retries exceeded
        raise APIError(
            f"API returned status code {status}, max retries exceeded, stopping."
        )

    def get_question_count(self, category: Union[Category, int] = None) -> dict:
        """Return global question counts, or the counts for one category.

        NOTE(review): implicitly returns None when the HTTP request fails;
        callers must handle that (or this should raise APIError instead).
        """
        if category is None:
            req = self.session.get("https://opentdb.com/api_count_global.php")
        else:
            if isinstance(category, Category):
                category = category.value.id
            req = self.session.get(
                "https://opentdb.com/api_count.php", params={"category": category}
            )
        if req.ok:
            return req.json()
|
amickael/pyopentdb | pyopentdb/enum/QuestionType.py | <filename>pyopentdb/enum/QuestionType.py
from enum import Enum, unique
@unique
class QuestionType(Enum):
    """Question formats supported by the Open Trivia DB API."""

    MULTIPLE = "multiple"
    TRUE_FALSE = "boolean"


# Reverse lookup: API type string -> QuestionType member.
question_type_map = {member.value: member for member in QuestionType}
|
amickael/pyopentdb | pyopentdb/model/__init__.py | from pyopentdb.model.Question import Question
from pyopentdb.model.QuestionResponse import QuestionResponse
from pyopentdb.model.QuestionResult import QuestionResult
from pyopentdb.model.QuestionSet import QuestionSet
|
amickael/pyopentdb | setup.py | from setuptools import setup, find_packages
import pyopentdb
# The PyPI long description comes straight from the README.
with open("README.md", "r") as f:
    readme = f.read()
# Runtime dependencies are pinned in requirements.txt, one per line.
with open("requirements.txt", "r") as f:
    requirements = [i.rstrip() for i in f.readlines()]
setup(
    name="pyopentdb",
    # Version/description/author are sourced from the package itself so the
    # metadata stays in sync with pyopentdb/__init__.py.
    version=pyopentdb.__version__,
    description=pyopentdb.__description__,
    long_description=readme,
    long_description_content_type="text/markdown",
    author=pyopentdb.__author__,
    author_email="<EMAIL>",
    license="MIT",
    platforms=["NT", "POSIX"],
    url="https://github.com/amickael/pyopentdb",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
    ],
    python_requires=">=3.6",
)
|
amickael/pyopentdb | pyopentdb/enum/Category.py | <reponame>amickael/pyopentdb
from collections import namedtuple
from enum import Enum, unique
# Record for one trivia category: numeric API id, display name, a decorative
# emoji, and an accessibility (aria) label describing that emoji.
CategoryItem = namedtuple("CategoryItem", ["id", "name", "emoji", "aria"])


@unique
class Category(Enum):
    """All question categories exposed by the Open Trivia DB API.

    NOTE(review): the emoji literals below appear mojibake'd in this copy of
    the file (e.g. Thai characters where emoji are expected) — likely a file
    encoding mishap; verify against the original UTF-8 source.
    """

    GENERAL_KNOWLEDGE = CategoryItem(9, "General Knowledge", "๐ง ", "brain")
    ENTERTAINMENT_BOOKS = CategoryItem(10, "Entertainment: Books", "๐", "books")
    ENTERTAINMENT_FILM = CategoryItem(11, "Entertainment: Film", "๐๏ธ", "film")
    ENTERTAINMENT_MUSIC = CategoryItem(12, "Entertainment: Music", "๐ธ", "guitar")
    ENTERTAINMENT_MUSICALS_THEATRES = CategoryItem(
        13, "Entertainment: Musicals & Theatres", "๐ญ", "masks"
    )
    ENTERTAINMENT_TELEVISION = CategoryItem(
        14, "Entertainment: Television", "๐บ", "television"
    )
    ENTERTAINMENT_VIDEO_GAMES = CategoryItem(
        15, "Entertainment: Video Games", "๐น๏ธ", "joystick"
    )
    ENTERTAINMENT_BOARD_GAMES = CategoryItem(
        16, "Entertainment: Board Games", "๐ฒ", "dice"
    )
    SCIENCE_NATURE = CategoryItem(17, "Science & Nature", "๐ฌ", "microscope")
    SCIENCE_COMPUTERS = CategoryItem(18, "Science: Computers", "๐ป", "laptop")
    SCIENCE_MATHEMATICS = CategoryItem(19, "Science: Mathematics", "๐งฎ", "abacus")
    MYTHOLOGY = CategoryItem(20, "Mythology", "๐ฑ", "trident")
    SPORTS = CategoryItem(21, "Sports", "โฝ", "soccer ball")
    GEOGRAPHY = CategoryItem(22, "Geography", "๐บ๏ธ", "world map")
    HISTORY = CategoryItem(23, "History", "๐", "scroll")
    POLITICS = CategoryItem(24, "Politics", "๐ณ๏ธ", "ballot box")
    ART = CategoryItem(25, "Art", "๐จ", "palette")
    CELEBRITIES = CategoryItem(26, "Celebrities", "๐ธ", "camera")
    ANIMALS = CategoryItem(27, "Animals", "๐", "dog")
    VEHICLES = CategoryItem(28, "Vehicles", "๐๏ธ", "race car")
    ENTERTAINMENT_COMICS = CategoryItem(29, "Entertainment: Comics", "๐ฆธ", "superhero")
    SCIENCE_GADGETS = CategoryItem(30, "Science: Gadgets", "๐ฑ", "mobile phone")
    ENTERTAINMENT_JAPANESE_ANIME_MANGA = CategoryItem(
        31, "Entertainment: Japanese Anime & Manga", "๐ฏ๐ต", "japanese flag"
    )
    ENTERTAINMENT_CARTOON_ANIMATIONS = CategoryItem(
        32, "Entertainment: Cartoon & Animations", "๐ฑ๐ญ", "cat and mouse"
    )


# Reverse lookups by numeric API id and by display name (as the API returns it).
category_id_map = {i.value.id: i for i in Category}
category_name_map = {i.value.name: i for i in Category}
|
amickael/pyopentdb | pyopentdb/enum/__init__.py | from pyopentdb.enum.QuestionType import QuestionType, question_type_map
from pyopentdb.enum.Difficulty import Difficulty, difficulty_map
from pyopentdb.enum.Category import (
Category,
CategoryItem,
category_id_map,
category_name_map,
)
from pyopentdb.enum.ResponseCode import ResponseCode, response_code_map
|
amickael/pyopentdb | pyopentdb/exc/__init__.py | <reponame>amickael/pyopentdb<filename>pyopentdb/exc/__init__.py
from pyopentdb.exc.QuestionError import QuestionError
from pyopentdb.exc.APIError import APIError
|
amickael/pyopentdb | pyopentdb/model/QuestionResponse.py | from typing import List
from pyopentdb.enum import response_code_map
from pyopentdb.model.QuestionResult import QuestionResult
from pyopentdb.model.QuestionSet import QuestionSet
class QuestionResponse:
    """Wrapper for one opentdb.com /api.php response body."""

    def __init__(self, response_code: int, results: List[dict]):
        # Map the numeric code onto a ResponseCode member (None if unknown).
        self.response_code = response_code_map.get(response_code)
        # Parse each raw result dict into a Question, collected in a QuestionSet.
        self.results = QuestionSet([QuestionResult(**i).parse() for i in results])
|
dmcskim/hmp_download | hmp_download.py | #!/usr/bin/env python3
""" download_hmp.py: Downloads Human Microbiome Project (HMP) data files from
a manifest file. """
__author__ = '<NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.0.1'
__status__ = 'Development'
from numpy import array
from pandas import read_csv
import wget
from hashlib import md5
from os.path import isfile, isdir
from os import remove, mkdir
def calcMD5(ifile, block_size=2**20):
    """Return the hex MD5 digest of *ifile*, or None if it cannot be read.

    Args:
        ifile: path to the file to hash.
        block_size: bytes read per chunk (default 1 MiB) so large downloads
            are hashed without loading them fully into memory.

    Returns:
        The hexadecimal digest string, or None when the file is missing or
        unreadable (callers treat that as a checksum mismatch).
    """
    md5c = md5()
    try:
        with open(ifile, 'rb') as ihand:
            while True:
                data = ihand.read(block_size)
                if not data:
                    break
                md5c.update(data)
    # Narrowed from a bare `except:` — only I/O problems should map to None;
    # programming errors (e.g. bad argument types) should still surface.
    except OSError:
        return None
    return md5c.hexdigest()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Download HMP files from manifest.')
    parser.add_argument('manifest', nargs=1, type=str,
                        help='Manifest file from portal.hmpdacc.org')
    parser.add_argument('-o', '--out_dir', type=str, default='data',
                        help='Location for downloaded files to be saved.')
    args = parser.parse_args()
    # Load the tab-separated manifest exported from portal.hmpdacc.org.
    wanted = read_csv(args.manifest[0], sep='\t')
    # Ensure the save directory exists.
    if not isdir(args.out_dir):
        mkdir(args.out_dir)
    done = set()
    # TODO(review): the [:5] slice looks like a leftover debugging limit —
    # confirm and remove it to download the entire manifest.
    for x in wanted.index[:5]:
        urls = wanted.loc[x, 'urls']
        cmd5 = wanted.loc[x, 'md5']
        fid = wanted.loc[x, 'file_id']
        # Each manifest row lists several mirrors; pick the first HTTP one.
        candidates = array(urls.split(','))
        is_http = ['http' in u for u in candidates]
        url = candidates[is_http][0]
        fname = url.split('/')[-1]
        print('\nDownloading {0}.\n'.format(fname))
        if not isfile(args.out_dir + '/' + fname) and fid not in done:
            # Download the file into the output directory.
            f2name = wget.download(url, out=args.out_dir)
            print('\n{0} saved.\n'.format(f2name))
            # Verify the checksum; keep only files matching the manifest.
            tmd5 = calcMD5(f2name)
            if tmd5 == cmd5:
                done.add(fid)
            else:
                # BUG FIX: delete the corrupt download by its *path*.  The
                # original called remove(fid), i.e. tried to delete by file ID.
                remove(f2name)
|
dmcskim/hmp_download | setup.py | #!/usr/bin/env python
"""Download HMP data files."""
from os.path import dirname
# Keyword arguments for setup(); populated below depending on whether
# setuptools is available.
setup_args = {}
try:
    from setuptools import setup
    # Dependencies for easy_install:
    setup_args.update(
        install_requires=[
            'pandas >= 0.20.2',
            'numpy >= 1.13.3',
            'wget >= 3.0',
        ])
except ImportError:
    # Fall back to distutils, which has no install_requires support.
    from distutils.core import setup
# Directory containing this setup.py; used to locate the script to install.
DIR = (dirname(__file__) or '.') + '/'
setup_args.update(
    name='hmp_download',
    version='0.1',
    # The package description is the module docstring at the top of this file.
    description=__doc__,
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://github.com/dmcskim/hmp_download',
    scripts=[
        DIR + 'hmp_download.py',
    ])
setup(**setup_args)
|
trnolan/simple-messaging-api | run.py | # stdlib
import os
# project
from messaging_app import app
if __name__ == '__main__':
    # HTTP_PORT arrives from the environment as a string (or None when unset).
    # Coerce explicitly with a sane default instead of relying on the web
    # server's implicit conversion; 0.0.0.0 binds all interfaces (container use).
    app.run(host='0.0.0.0', port=int(os.environ.get('HTTP_PORT', 5000)))
trnolan/simple-messaging-api | messaging_app/__init__.py | # stdlib
import datetime
import logging
import os
# 3p
from flask import Flask, request
from flask.json import jsonify
from flask_jwt import JWT, jwt_required
import json
import pika
app = Flask(__name__)

# RabbitMQ broker settings; "amqp" is the broker hostname (presumably the
# docker-compose service name — confirm against the deployment config).
# NOTE(review): '<PASSWORD>' looks like an unfilled template placeholder —
# confirm real credentials are injected at deploy time, not committed.
params = pika.ConnectionParameters(host='amqp', credentials=pika.PlainCredentials(username='amqp', password='<PASSWORD>'), port=5672)

# JWT setup: tokens are signed with JWT_USER_SECRET and sent as "Bearer <token>".
app.config.update(
    SECRET_KEY=os.environ.get('JWT_USER_SECRET'),
    JWT_AUTH_HEADER_PREFIX='Bearer',
    JWT_REQUIRED_CLAIMS=[])
def _auth_handler():
    """
    Required JWT method.

    flask_jwt demands an authentication handler; this service issues tokens
    elsewhere, so the handler never authenticates anyone and returns None.
    """
    return None
def _identity_handler(jwt_payload):
    """
    Required JWT method.

    Returns the decoded JWT payload unchanged as the request identity.
    """
    return jwt_payload


# Wire the handlers into flask_jwt (this also registers its auth endpoint).
jwt = JWT(app, _auth_handler, _identity_handler)
def _format_error(error_message, status_code=None):
    """Build a JSON error response: {"error": {"message": ...}}.

    Args:
        error_message: human-readable description of the failure.
        status_code: optional HTTP status to set on the response.

    BUG FIX: `status_code` was previously logged but never applied, so every
    error response went out as HTTP 200.  It is now set on the response when
    provided (callers that omit it still get the Flask default of 200).
    """
    response = jsonify({
        'error': {
            'message': error_message
        }
    })
    if status_code is not None:
        response.status_code = status_code
    logging.error(f"Responding with {status_code}: {response}")
    return response
def _format_result(result):
    """Wrap a successful result as a {"result": ...} JSON response (HTTP 200)."""
    response = jsonify({
        'result': result
    })
    logging.info(f"Responding with 200: {response}")
    return response
def _rabbitmq_helper():
    """
    Helper method that establishes a connection with rabbitmq and returns
    a channel to work with.

    NOTE(review): the BlockingConnection created here is neither stored nor
    closed — callers close only the channel — so each call leaks one TCP
    connection.  Consider exposing/closing the connection as well.
    """
    rabbitmq_connection = pika.BlockingConnection(parameters=params)
    return rabbitmq_connection.channel()
def _send_message_helper(message, destination, sender, chat_name='private'):
    """
    Helper method to format and send a message over rabbitmq.

    Publishes a JSON body (message, UTC timestamp, chat_name, sender) to the
    `amq.topic` exchange with the recipient's username as the routing key.

    FIX: the connection is now closed as well — previously only the channel
    was closed, leaking one TCP connection per message sent.
    """
    body = {"message": message, "timestamp": str(datetime.datetime.utcnow()), "chat_name": chat_name, "sender": sender}
    connection = pika.BlockingConnection(parameters=params)
    try:
        channel = connection.channel()
        channel.basic_publish(exchange='amq.topic', routing_key=destination, body=json.dumps(body))
    finally:
        # Closing the connection also cleanly closes its channels.
        connection.close()
@app.route('/sign-up', methods=['POST'])
@jwt_required()
def sign_up():
    """
    Accepts a JSON payload with a string username and returns 200 if username queue can be created
    Ex: {"username": "foo"}
    """
    data = request.get_json()
    username = data.get('username')
    if not username:
        return _format_error("Username required", 400)
    try:
        # Declare a per-user queue and bind it to the topic exchange so that
        # messages routed with the username as routing key land in it.
        rabbitmq_channel = _rabbitmq_helper()
        # TODO: Add check to see if username is taken already
        rabbitmq_channel.queue_declare(queue=username)
        rabbitmq_channel.queue_bind(queue=username, exchange='amq.topic', routing_key=username)
        # NOTE(review): only the channel is closed; the underlying connection
        # opened by _rabbitmq_helper is never closed — confirm/fix the leak.
        rabbitmq_channel.close()
    except Exception:
        return _format_error("Error declaring username", 400)
    return _format_result(f"Successfully signed up user {username}")
@app.route('/send-message', methods=['POST'])
@jwt_required()
def send_message():
    """
    Accepts a json payload with keys message, recipient and sender
    Ex: {"recipient": "foo", "message": "hi", "sender": "bar"}
    """
    data = request.get_json()
    destination = data.get('recipient')
    message = data.get('message')
    sender = data.get('sender')
    # Publishing requires a routing key; fail fast like /sign-up does.
    if not destination:
        return _format_error("Recipient required", 400)
    try:
        _send_message_helper(message, destination, sender)
    except Exception:
        # Consistency fix: pass a 400 status like the other error paths
        # (previously this error went out without a status code).
        return _format_error(f"Error sending message to {destination}", 400)
    return _format_result(f"Successfully sent message to {destination}")
@app.route('/send-group-message', methods=['POST'])
@jwt_required()
def send_group_mesage():
    """
    Accepts a json payload with keys chat_name, message, recipients, and sender
    Ex: {"chat_name": "team", "recipients": ["foo", "test"], "message": "hi", "sender": "bar"}

    NOTE(review): the function name has a typo ("mesage"); renaming it would
    change the Flask endpoint name (used by url_for), so it is documented
    rather than changed here.  Also note a missing "recipients" key yields
    destinations=None and the for-loop raises TypeError — confirm upstream
    validation exists.
    """
    data = request.get_json()
    destinations = data.get('recipients')
    chat_name = data.get('chat_name')
    message = data.get('message')
    sender = data.get('sender')
    # Fan out: publish one copy of the message per recipient queue.
    for username in destinations:
        try:
            _send_message_helper(message, username, sender, chat_name)
        except Exception:
            # Aborts on first failure; earlier recipients have already
            # received their copy at this point.
            return _format_error(f"Error sending message to {username}")
    return _format_result(f"Successfully sent message to {str(destinations)}")
@app.route('/get-messages/<username>', methods=['GET'])
@jwt_required()
def get_messages(username):
    """
    GET endpoint which expects a single username parameter.

    Drains the user's queue (messages are acknowledged as they are read)
    and returns them as a list of stringified (method, properties, body)
    tuples.
    """
    messages = []
    channel = _rabbitmq_helper()
    try:
        while True:
            queued_message = channel.basic_get(username, auto_ack=True)
            # basic_get returns a (None, None, None) tuple once the queue is
            # empty; the tuple itself is truthy, so test its first element.
            # (This also removes the old append-then-delete-last hack.)
            if not queued_message[0]:
                break
            messages.append(str(queued_message))
    except Exception:
        # BUG FIX: the message was missing its f-prefix, so clients received
        # the literal text "{username}" instead of the actual name.
        return _format_error(f"Error getting messages for username {username}", 400)
    return _format_result(messages)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.