import os
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING
from ..cloudpath import CloudPath, NoStatError, register_path_class
if TYPE_CHECKING:
from .gsclient import GSClient
@register_path_class("gs")
class GSPath(CloudPath):
"""Class for representing and operating on Google Cloud Storage URIs, in the style of the
Python standard library's [`pathlib` module](https://docs.python.org/3/library/pathlib.html).
Instances represent a path in GS with filesystem path semantics, and convenient methods allow
for basic operations like joining, reading, writing, iterating over contents, etc. This class
almost entirely mimics the [`pathlib.Path`](https://docs.python.org/3/library/pathlib.html#pathlib.Path)
interface, so most familiar properties and methods should be available and behave in the
expected way.
The [`GSClient`](../gsclient/) class handles authentication with GCP. If a client instance is
not explicitly specified on `GSPath` instantiation, a default client is used. See `GSClient`'s
documentation for more details.
"""
cloud_prefix: str = "gs://"
client: "GSClient"
@property
def drive(self) -> str:
return self.bucket
def is_dir(self) -> bool:
return self.client._is_file_or_dir(self) == "dir"
def is_file(self) -> bool:
return self.client._is_file_or_dir(self) == "file"
def mkdir(self, parents=False, exist_ok=False):
# not possible to make empty directory on cloud storage
pass
def touch(self):
if self.exists():
self.client._move_file(self, self)
else:
tf = TemporaryDirectory()
p = Path(tf.name) / "empty"
p.touch()
self.client._upload_file(p, self)
tf.cleanup()
def stat(self):
meta = self.client._get_metadata(self)
if meta is None:
raise NoStatError(
f"No stats available for {self}; it may be a directory or not exist."
)
try:
mtime = meta["updated"].timestamp()
except KeyError:
mtime = 0
return os.stat_result(
(
None, # mode
None, # ino
self.cloud_prefix, # dev,
None, # nlink,
None, # uid,
None, # gid,
meta.get("size", 0), # size,
None, # atime,
mtime, # mtime,
None, # ctime,
)
)
@property
def bucket(self) -> str:
return self._no_prefix.split("/", 1)[0]
@property
def blob(self) -> str:
key = self._no_prefix_no_drive
# key should never have starting slash for
# use with google-cloud-storage, etc.
if key.startswith("/"):
key = key[1:]
return key
@property
def etag(self):
return self.client._get_metadata(self).get("etag")
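# A usage sketch (not part of the class): assumes default-credential GCP auth
# and a readable bucket; all names below are illustrative.
#
#     p = GSPath("gs://my-bucket/data/file.txt")
#     p.bucket            # "my-bucket"
#     p.blob              # "data/file.txt"
#     p.is_file()         # True if the blob exists
#     p.stat().st_size    # size taken from blob metadata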
|
"""
Distributions based on circuits with independent inputs.
"""
from __future__ import division
from dit import Distribution
import dit
def Unq():
"""
A distribution with unique information.
"""
pmf = [1/4] * 4
outcomes = [
('a', 'b', 'ab'),
('a', 'B', 'aB'),
('A', 'b', 'Ab'),
('A', 'B', 'AB')
]
d = Distribution(outcomes, pmf)
return d
def Rdn():
"""
A distribution with redundant information.
"""
pmf = [1/2, 1/2]
outcomes = ['000', '111']
d = Distribution(outcomes, pmf)
return d
def Xor():
"""
A distribution with synergistic information, [0] xor [1] = [2]
"""
pmf = [1/4] * 4
outcomes = ['000', '011', '101', '110']
d = Distribution(outcomes, pmf)
return d
def And(k=2):
"""
[0] and [1] = [2]
"""
d = dit.uniform_distribution(k, ['01'])
d = dit.distconst.modify_outcomes(d, lambda x: ''.join(x))
d = dit.insert_rvf(d, lambda x: '1' if all(map(bool, map(int, x))) else '0')
return d
def Or(k=2):
"""
[0] or [1] = [2]
"""
d = dit.uniform_distribution(k, ['01'])
d = dit.distconst.modify_outcomes(d, lambda x: ''.join(x))
d = dit.insert_rvf(d, lambda x: '1' if any(map(bool, map(int, x))) else '0')
return d
def RdnXor():
"""
Concatenation of Rdn() and Xor(). Distribution has both redundant and
synergistic information.
"""
pmf = [1/8] * 8
outcomes = [
('r0', 'r0', 'r0'),
('r0', 'r1', 'r1'),
('r1', 'r0', 'r1'),
('r1', 'r1', 'r0'),
('R0', 'R0', 'R0'),
('R0', 'R1', 'R1'),
('R1', 'R0', 'R1'),
('R1', 'R1', 'R0'),
]
d = Distribution(outcomes, pmf)
return d
def ImperfectRdn():
"""
Like Rdn() with a small off-term.
"""
pmf = [.499, .5, .001]
outcomes = [('0', '0', '0'), ('1', '1', '1'), ('0', '1', '0')]
d = Distribution(outcomes, pmf)
return d
def Subtle():
"""
The Subtle distribution.
"""
pmf = [1/3] * 3
outcomes = [('0', '0', '00'), ('1', '1', '11'), ('0', '1', '01')]
d = Distribution(outcomes, pmf)
return d
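# Minimal usage sketch (assumes the dit package's shannon.entropy helper):
# Xor() has 2 bits of joint entropy, and its output variable alone carries
# 1 bit, none of which is visible in either input separately.
if __name__ == '__main__':
    d = Xor()
    print(dit.shannon.entropy(d))       # 2.0 (bits, joint)
    print(dit.shannon.entropy(d, [2]))  # 1.0 (bits, output marginal)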
|
from models import User, Post
# delete user from post, user where post.user_id = user.id
query = (Post & User).delete(User)  # MySQL supports multi-table DELETE; SQLite3 does not
|
import asyncio
import pytest
from . import assert_browsing_context
pytestmark = pytest.mark.asyncio
CONTEXT_CREATED_EVENT = "browsingContext.contextCreated"
async def test_not_unsubscribed(bidi_session, current_session):
await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
# Track all received browsingContext.contextCreated events in the events array
events = []
async def on_event(method, data):
events.append(data)
remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)
handle = current_session.new_window(type_hint="tab")
await asyncio.sleep(0.5)
assert len(events) == 0
remove_listener()
@pytest.mark.parametrize("type_hint", ["tab", "window"])
async def test_new_context(bidi_session, current_session, wait_for_event, type_hint):
    # Unsubscribe in case a previous test subscribed to the event
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
on_entry = wait_for_event(CONTEXT_CREATED_EVENT)
top_level_context_id = current_session.new_window(type_hint=type_hint)
context_info = await on_entry
assert_browsing_context(
context_info,
children=None,
context=top_level_context_id,
url="about:blank",
parent=None,
)
async def test_evaluate_window_open_without_url(
bidi_session, current_session, wait_for_event
):
    # Unsubscribe in case a previous test subscribed to the event
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
on_entry = wait_for_event(CONTEXT_CREATED_EVENT)
current_session.execute_script("""window.open();""")
context_info = await on_entry
assert_browsing_context(
context_info,
children=None,
context=None,
url="about:blank",
parent=None,
)
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
async def test_evaluate_window_open_with_url(
bidi_session, current_session, wait_for_event, inline
):
    # Unsubscribe in case a previous test subscribed to the event
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
url = inline("<div>foo</div>")
await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
on_entry = wait_for_event(CONTEXT_CREATED_EVENT)
current_session.execute_script(
"""
const url = arguments[0];
window.open(url);
""",
args=[url],
)
context_info = await on_entry
assert_browsing_context(
context_info,
children=None,
context=None,
url="about:blank",
parent=None,
)
async def test_navigate_creates_iframes(bidi_session, current_session, wait_for_event, inline):
    # Unsubscribe in case a previous test subscribed to the event
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
events = []
top_level_context_id = current_session.window_handle
url_iframe1 = inline("<div>foo</div>")
url_iframe2 = inline("<div>bar</div>")
url_page = inline(
f"<iframe src='{url_iframe1}'></iframe><iframe src='{url_iframe2}'></iframe>"
)
async def on_event(method, data):
events.append(data)
remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)
await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
current_session.url = url_page
frame1_info = await wait_for_event(CONTEXT_CREATED_EVENT)
assert_browsing_context(
frame1_info,
children=None,
context=None,
url=url_iframe1,
parent=top_level_context_id,
)
frame2_info = await wait_for_event(CONTEXT_CREATED_EVENT)
assert_browsing_context(
frame2_info,
children=None,
context=None,
url=url_iframe2,
parent=top_level_context_id,
)
assert len(events) == 2
remove_listener()
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
async def test_navigate_creates_nested_iframes(
bidi_session, current_session, wait_for_event, inline
):
    # Unsubscribe in case a previous test subscribed to the event
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
events = []
top_level_context_id = current_session.window_handle
url_nested_iframe = inline("<div>foo</div>")
    url_iframe = inline(f"<iframe src='{url_nested_iframe}'></iframe>")
url_page = inline(f"<iframe src='{url_iframe}'></iframe>")
async def on_event(method, data):
events.append(data)
remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)
await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
current_session.url = url_page
frame_info = await wait_for_event(CONTEXT_CREATED_EVENT)
assert_browsing_context(
frame_info,
children=None,
context=None,
url=url_iframe,
parent=top_level_context_id,
)
nested_frame_info = await wait_for_event(CONTEXT_CREATED_EVENT)
assert_browsing_context(
nested_frame_info,
children=None,
context=None,
url=url_nested_iframe,
parent=frame_info["context"],
)
assert len(events) == 2
remove_listener()
await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])
|
#! /usr/bin/env python
import sys
eps = '<epsilon>'
spa = '<space>'
phi = '<phi>'
rho = '<rho>'
unk = '<unk>'
cost = 1
def make_lexicon(ifile):
    s = 2
    for line in ifile:
        word, label = line.split()
        if int(label) <= 1:
            continue  # epsilon
        for i, letter in enumerate(word):
            if i == 0:
                print(0, s, letter, eps)
            else:
                print(s, s + 1, letter, eps)
                s = s + 1
        print(s, 1, phi, eps, cost)  # unknown tokens will match
        print(s, 1, phi, eps, cost)  # non-consuming phi symbols
        print(s, 0, spa, word)  # whitespace required (even at the end)
        s = s + 1
    print(0, 1, phi, eps, cost)  # first letter does not match any word
    print(1, 1, rho, eps)  # consume rest of the unknown token
    print(1, 0, spa, unk)  # whitespace required (even at the end)
    print(0)
if __name__ == '__main__':
make_lexicon(sys.stdin)
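# Example: for a hypothetical stdin line "dog 5" (word, count), make_lexicon
# emits AT&T-style text arcs of the form "src dst input output [weight]":
#   0 2 d <epsilon>
#   2 3 o <epsilon>
#   3 4 g <epsilon>
#   4 1 <phi> <epsilon> 1
#   4 1 <phi> <epsilon> 1
#   4 0 <space> dog
# plus the shared fallback arcs from states 0 and 1 and the final state "0".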
|
import atexit
import os
import tempfile
import unittest
from pyramid.compat import PY3
if PY3: # pragma: no cover
import builtins as __builtin__
else:
import __builtin__
class TestPServeCommand(unittest.TestCase):
def setUp(self):
from pyramid.compat import NativeIO
self.out_ = NativeIO()
self.pid_file = None
def tearDown(self):
if self.pid_file and os.path.exists(self.pid_file):
os.remove(self.pid_file)
def out(self, msg):
self.out_.write(msg)
def _get_server(*args, **kwargs):
def server(app):
return ''
return server
def _getTargetClass(self):
from pyramid.scripts.pserve import PServeCommand
return PServeCommand
def _makeOne(self, *args):
effargs = ['pserve']
effargs.extend(args)
cmd = self._getTargetClass()(effargs)
cmd.out = self.out
return cmd
def _makeOneWithPidFile(self, pid):
self.pid_file = tempfile.mktemp()
inst = self._makeOne()
with open(self.pid_file, 'w') as f:
f.write(str(pid))
return inst
def test_remove_pid_file_verbose(self):
inst = self._makeOneWithPidFile(os.getpid())
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=1)
self._assert_pid_file_removed(verbose=True)
def test_remove_pid_file_not_verbose(self):
inst = self._makeOneWithPidFile(os.getpid())
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=0)
self._assert_pid_file_removed(verbose=False)
def test_remove_pid_not_a_number(self):
inst = self._makeOneWithPidFile('not a number')
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=1)
self._assert_pid_file_removed(verbose=True)
def test_remove_pid_current_pid_is_not_written_pid(self):
inst = self._makeOneWithPidFile(os.getpid())
inst._remove_pid_file('99999', self.pid_file, verbosity=1)
self._assert_pid_file_not_removed('')
def test_remove_pid_current_pid_is_not_pid_in_file(self):
inst = self._makeOneWithPidFile('99999')
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=1)
msg = 'PID file %s contains 99999, not expected PID %s'
self._assert_pid_file_not_removed(msg % (self.pid_file, os.getpid()))
def test_remove_pid_no_pid_file(self):
inst = self._makeOne()
self.pid_file = 'some unknown path'
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=1)
self._assert_pid_file_removed(verbose=False)
def test_remove_pid_file_unlink_exception(self):
inst = self._makeOneWithPidFile(os.getpid())
self._remove_pid_unlink_exception(inst)
msg = [
'Removing PID file %s' % (self.pid_file),
'Cannot remove PID file: (Some OSError - unlink)',
'Stale PID removed']
self._assert_pid_file_not_removed(msg=''.join(msg))
with open(self.pid_file) as f:
self.assertEqual(f.read(), '')
def test_remove_pid_file_stale_pid_write_exception(self):
inst = self._makeOneWithPidFile(os.getpid())
self._remove_pid_unlink_and_write_exceptions(inst)
msg = [
'Removing PID file %s' % (self.pid_file),
'Cannot remove PID file: (Some OSError - unlink)',
'Stale PID left in file: %s ' % (self.pid_file),
'(Some OSError - open)']
self._assert_pid_file_not_removed(msg=''.join(msg))
with open(self.pid_file) as f:
self.assertEqual(int(f.read()), os.getpid())
def test_record_pid_verbose(self):
self._assert_record_pid(verbosity=2, msg='Writing PID %d to %s')
def test_record_pid_not_verbose(self):
self._assert_record_pid(verbosity=1, msg='')
def _remove_pid_unlink_exception(self, inst):
old_unlink = os.unlink
def fake_unlink(filename):
raise OSError('Some OSError - unlink')
try:
os.unlink = fake_unlink
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=1)
finally:
os.unlink = old_unlink
def _remove_pid_unlink_and_write_exceptions(self, inst):
old_unlink = os.unlink
def fake_unlink(filename):
raise OSError('Some OSError - unlink')
run_already = []
old_open = __builtin__.open
def fake_open(*args):
if not run_already:
run_already.append(True)
return old_open(*args)
raise OSError('Some OSError - open')
try:
os.unlink = fake_unlink
__builtin__.open = fake_open
inst._remove_pid_file(os.getpid(), self.pid_file, verbosity=1)
finally:
os.unlink = old_unlink
__builtin__.open = old_open
def _assert_pid_file_removed(self, verbose=False):
self.assertFalse(os.path.exists(self.pid_file))
msg = 'Removing PID file %s' % (self.pid_file) if verbose else ''
self.assertEqual(self.out_.getvalue(), msg)
def _assert_pid_file_not_removed(self, msg):
self.assertTrue(os.path.exists(self.pid_file))
self.assertEqual(self.out_.getvalue(), msg)
def _assert_record_pid(self, verbosity, msg):
old_atexit = atexit.register
def fake_atexit(*args):
pass
self.pid_file = tempfile.mktemp()
pid = os.getpid()
inst = self._makeOne()
inst.options.verbose = verbosity
try:
atexit.register = fake_atexit
inst.record_pid(self.pid_file)
finally:
atexit.register = old_atexit
msg = msg % (pid, self.pid_file) if msg else ''
self.assertEqual(self.out_.getvalue(), msg)
with open(self.pid_file) as f:
self.assertEqual(int(f.read()), pid)
def test_run_no_args(self):
inst = self._makeOne()
result = inst.run()
self.assertEqual(result, 2)
self.assertEqual(self.out_.getvalue(), 'You must give a config file')
def test_run_stop_daemon_no_such_pid_file(self):
path = os.path.join(os.path.dirname(__file__), 'wontexist.pid')
inst = self._makeOne('--stop-daemon', '--pid-file=%s' % path)
inst.run()
msg = 'No PID file exists in %s' % path
self.assertEqual(self.out_.getvalue(), msg)
def test_run_stop_daemon_bad_pid_file(self):
path = __file__
inst = self._makeOne('--stop-daemon', '--pid-file=%s' % path)
inst.run()
msg = 'Not a valid PID file in %s' % path
self.assertEqual(self.out_.getvalue(), msg)
def test_run_stop_daemon_invalid_pid_in_file(self):
fn = tempfile.mktemp()
with open(fn, 'wb') as tmp:
tmp.write(b'9999999')
tmp.close()
inst = self._makeOne('--stop-daemon', '--pid-file=%s' % fn)
inst.run()
msg = 'PID in %s is not valid (deleting)' % fn
self.assertEqual(self.out_.getvalue(), msg)
def test_get_options_with_command(self):
inst = self._makeOne()
inst.args = ['foo', 'stop', 'a=1', 'b=2']
result = inst.get_options()
self.assertEqual(result, {'a': '1', 'b': '2'})
def test_get_options_no_command(self):
inst = self._makeOne()
inst.args = ['foo', 'a=1', 'b=2']
result = inst.get_options()
self.assertEqual(result, {'a': '1', 'b': '2'})
def test_parse_vars_good(self):
from pyramid.tests.test_scripts.dummy import DummyApp
inst = self._makeOne('development.ini', 'a=1', 'b=2')
inst.loadserver = self._get_server
app = DummyApp()
def get_app(*args, **kwargs):
app.global_conf = kwargs.get('global_conf', None)
inst.loadapp = get_app
inst.run()
self.assertEqual(app.global_conf, {'a': '1', 'b': '2'})
def test_parse_vars_bad(self):
inst = self._makeOne('development.ini', 'a')
inst.loadserver = self._get_server
self.assertRaises(ValueError, inst.run)
class Test_read_pidfile(unittest.TestCase):
def _callFUT(self, filename):
from pyramid.scripts.pserve import read_pidfile
return read_pidfile(filename)
def test_read_pidfile(self):
filename = tempfile.mktemp()
try:
with open(filename, 'w') as f:
f.write('12345')
result = self._callFUT(filename)
self.assertEqual(result, 12345)
finally:
os.remove(filename)
def test_read_pidfile_no_pid_file(self):
result = self._callFUT('some unknown path')
self.assertEqual(result, None)
def test_read_pidfile_not_a_number(self):
result = self._callFUT(__file__)
self.assertEqual(result, None)
class Test_main(unittest.TestCase):
def _callFUT(self, argv):
from pyramid.scripts.pserve import main
return main(argv, quiet=True)
def test_it(self):
result = self._callFUT(['pserve'])
self.assertEqual(result, 2)
class TestLazyWriter(unittest.TestCase):
def _makeOne(self, filename, mode='w'):
from pyramid.scripts.pserve import LazyWriter
return LazyWriter(filename, mode)
def test_open(self):
filename = tempfile.mktemp()
try:
inst = self._makeOne(filename)
fp = inst.open()
self.assertEqual(fp.name, filename)
finally:
fp.close()
os.remove(filename)
def test_write(self):
filename = tempfile.mktemp()
try:
inst = self._makeOne(filename)
inst.write('hello')
finally:
with open(filename) as f:
data = f.read()
self.assertEqual(data, 'hello')
inst.close()
os.remove(filename)
def test_writeline(self):
filename = tempfile.mktemp()
try:
inst = self._makeOne(filename)
inst.writelines('hello')
finally:
with open(filename) as f:
data = f.read()
self.assertEqual(data, 'hello')
inst.close()
os.remove(filename)
def test_flush(self):
filename = tempfile.mktemp()
try:
inst = self._makeOne(filename)
inst.flush()
fp = inst.fileobj
self.assertEqual(fp.name, filename)
finally:
fp.close()
os.remove(filename)
class Test__methodwrapper(unittest.TestCase):
def _makeOne(self, func, obj, type):
from pyramid.scripts.pserve import _methodwrapper
return _methodwrapper(func, obj, type)
def test___call__succeed(self):
def foo(self, cls, a=1): return 1
class Bar(object): pass
wrapper = self._makeOne(foo, Bar, None)
result = wrapper(a=1)
self.assertEqual(result, 1)
def test___call__fail(self):
def foo(self, cls, a=1): return 1
class Bar(object): pass
wrapper = self._makeOne(foo, Bar, None)
self.assertRaises(AssertionError, wrapper, cls=1)
|
def test_sum_two_correctly():
assert 2 + 2 == 4
def test_sum_two_incorrectly():
# Expected to fail. Replace by
# assert 3 + 3 == 6
assert 3 + 3 == 5
|
import os
import sys
import json

# Locate the json/ directory: either a sibling of this script's directory
# or a sibling of its parent.
currentpath = sys.path[0]
root = os.path.dirname(currentpath)
if 'json' in os.listdir(root):
    json_path = os.path.join(root, 'json')
else:
    json_path = os.path.join(os.path.dirname(root), 'json')
config_path = os.path.join(json_path, 'config.json')

def set_config(key, value):
    with open(config_path, 'r') as config_file:
        config = json.load(config_file)
    config[key] = value
    with open(config_path, 'w') as config_file:
        json.dump(config, config_file, indent=4)

def get_config(keyword):
    # json.load instead of eval(): eval is unsafe and cannot parse JSON
    # literals such as true/false/null.
    with open(config_path, 'r') as config_file:
        config = json.load(config_file)
    return config.get(keyword)
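# Hypothetical usage sketch (assumes the json/ directory's config.json already
# exists and contains a JSON object):
#
#     set_config('debug', True)
#     get_config('debug')   # -> True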
|
import contextlib
import io
import json
import os
import sys
import textwrap
from peru.async_helpers import raises_gathered
import peru.cache
import peru.compat
import peru.error
import peru.main
from peru.parser import DEFAULT_PERU_FILE_NAME
import peru.rule
import peru.scope
import shared
from shared import run_peru_command, assert_contents
PERU_MODULE_ROOT = os.path.abspath(
os.path.join(os.path.dirname(peru.__file__)))
class SyncTest(shared.PeruTest):
def setUp(self):
self.test_dir = shared.create_dir()
self.peru_dir = os.path.join(self.test_dir, '.peru')
def tearDown(self):
shared.assert_clean_tmp(self.peru_dir)
def write_yaml(self, unformatted_yaml, *format_args, dir=None):
yaml = textwrap.dedent(unformatted_yaml.format(*format_args))
if dir is None:
dir = self.test_dir
with open(os.path.join(dir, DEFAULT_PERU_FILE_NAME), 'w') as f:
f.write(yaml)
def do_integration_test(self,
args,
expected,
*,
cwd=None,
**peru_cmd_kwargs):
if not cwd:
cwd = self.test_dir
output = run_peru_command(args, cwd, **peru_cmd_kwargs)
assert_contents(
self.test_dir,
expected,
excludes=[DEFAULT_PERU_FILE_NAME, '.peru'])
return output
def test_basic_sync(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
imports:
foo: subdir
''', module_dir)
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
# Running it again should be a no-op.
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
# Running it with a dirty working copy should be an error.
shared.write_files(self.test_dir, {'subdir/foo': 'dirty'})
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
def test_no_cache_flag(self):
foo_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
imports:
foo: subdir
''', foo_dir)
# Sync the foo module once.
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
# Change the contents of foo and sync again. Because foo is cached, we
# shouldn't see any changes.
shared.write_files(foo_dir, {'foo': 'woo'})
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
# Now sync with --no-cache. This time we should see the changes.
self.do_integration_test(['sync', '--no-cache'], {'subdir/foo': 'woo'})
def test_sync_from_subdir(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
# Use a relative module path, to make sure it gets resolved
# relative to the project root and not the dir where peru was
# called.
cp module relative_foo:
path: {}
imports:
relative_foo: subdir
''', os.path.relpath(module_dir, start=self.test_dir))
subdir = os.path.join(self.test_dir, 'a', 'b')
peru.compat.makedirs(subdir)
run_peru_command(['sync'], subdir)
self.assertTrue(
os.path.isdir(os.path.join(self.test_dir, '.peru')),
msg=".peru dir didn't end up in the right place")
assert_contents(os.path.join(self.test_dir, 'subdir'), {'foo': 'bar'})
def test_conflicting_imports(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {0}
# same as foo
cp module bar:
path: {0}
imports:
foo: subdir
bar: subdir
''', module_dir)
with self.assertRaises(peru.cache.MergeConflictError):
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
def test_empty_imports(self):
module_dir = shared.create_dir({'foo': 'bar'})
empty_yaml = '''\
cp module foo:
path: {}
'''.format(module_dir)
nonempty_yaml = '''\
cp module foo:
path: {}
imports:
foo: ./
'''.format(module_dir)
self.write_yaml(empty_yaml)
self.do_integration_test(['sync'], {})
# Now test switching back and forth between non-empty and empty.
self.write_yaml(nonempty_yaml)
self.do_integration_test(['sync'], {'foo': 'bar'})
# Back to empty.
self.write_yaml(empty_yaml)
self.do_integration_test(['sync'], {})
def test_import_module_defined_in_another_module(self):
# Project B contains project A
dir_a = shared.create_dir({'afile': 'stuff'})
dir_b = shared.create_dir()
# Create the peru.yaml file for B.
self.write_yaml(
'''\
cp module a:
path: {}
''',
dir_a,
dir=dir_b)
# Now create the peru.yaml file in the actual test project.
self.write_yaml(
'''\
imports:
b.a: a_via_b/
cp module b:
path: {}
''', dir_b)
self.do_integration_test(['sync'], {'a_via_b/afile': 'stuff'})
# Test the error message from an invalid module.
self.write_yaml(
'''\
imports:
b.missing_module: some_path
cp module b:
path: {}
''', dir_b)
try:
self.do_integration_test(['sync'], {})
except peru.error.PrintableError as e:
assert 'b.missing_module' in e.message
else:
assert False, 'should throw invalid module error'
def test_recursive_imports(self):
# Project B contains project A
dir_a = shared.create_dir({'afile': 'aaa'})
dir_b = shared.create_dir({'exports/bfile': 'bbb'})
# Create the peru.yaml file for B.
self.write_yaml(
'''\
imports:
a: exports/where_b_put_a
cp module a:
path: {}
''',
dir_a,
dir=dir_b)
# Now create the peru.yaml file in the actual test project.
self.write_yaml(
'''\
imports:
b: where_c_put_b
cp module b:
# recursive is false by default
path: {}
export: exports # omit the peru.yaml file from b
''', dir_b)
self.do_integration_test(['sync'], {'where_c_put_b/bfile': 'bbb'})
# Repeat the same test with explicit 'recursive' settings.
self.write_yaml(
'''\
imports:
b: where_c_put_b
cp module b:
path: {}
pick: exports/where_b_put_a
export: exports # omit the peru.yaml file from b
recursive: true
''', dir_b)
self.do_integration_test(['sync'],
{'where_c_put_b/where_b_put_a/afile': 'aaa'})
self.write_yaml(
'''\
imports:
b: where_c_put_b
cp module b:
path: {}
export: exports # omit the peru.yaml file from b
recursive: false
''', dir_b)
self.do_integration_test(['sync'], {'where_c_put_b/bfile': 'bbb'})
def test_recursive_import_error(self):
'''Errors that happen inside recursively-fetched targets should have
context information about the targets that caused them. This test is
especially important for checking that context isn't lost in
GatheredExceptions.'''
# Project NOTABLE_NAME has a BAD_MODULE in it.
dir_notable = shared.create_dir()
# Create the peru.yaml file for NOTABLE_NAME.
self.write_yaml(
'''\
imports:
BAD_MODULE: ./
git module BAD_MODULE:
bad_field: stuff
# The error we get here will actually be that `url` is missing.
''',
dir=dir_notable)
# Now make our test project import it.
self.write_yaml(
'''\
imports:
NOTABLE_NAME: ./notable
cp module NOTABLE_NAME:
recursive: true
path: {}
''', dir_notable)
with self.assertRaises(peru.error.PrintableError) as cm:
run_peru_command(['sync'], self.test_dir)
self.assertIn("NOTABLE_NAME", cm.exception.message)
self.assertIn("BAD_MODULE", cm.exception.message)
def test_peru_file_field(self):
# Project B contains project A
dir_a = shared.create_dir({'afile': 'stuff'})
# Create project B with an unusual YAML filename.
dir_b = shared.create_dir({
'alternate.yaml':
textwrap.dedent('''\
cp module a:
path: {}
'''.format(dir_a))
})
# Now create the peru.yaml file in the actual test project.
self.write_yaml(
'''\
imports:
b.a: a_via_b/
cp module b:
path: {}
peru file: alternate.yaml
''', dir_b)
self.do_integration_test(['sync'], {'a_via_b/afile': 'stuff'})
def test_module_rules(self):
module_dir = shared.create_dir({'a/b': '', 'c/d': ''})
yaml = '''\
cp module foo:
path: {}
rule get_a:
export: a
rule get_c:
export: c
imports:
foo|get_a: ./
'''.format(module_dir)
self.write_yaml(yaml)
self.do_integration_test(['sync'], {'b': ''})
# Run it again with a different import to make sure we clean up.
yaml_different = yaml.replace('foo|get_a', 'foo|get_c')
self.write_yaml(yaml_different)
self.do_integration_test(['sync'], {'d': ''})
def test_rule_with_picked_files(self):
content = {
name: ''
for name in
['foo', 'bar', 'special', 'baz/bing', 'baz/boo/a', 'baz/boo/b']
}
module_dir = shared.create_dir(content)
self.write_yaml(
'''\
cp module foo:
path: {}
rule filter:
pick:
- "**/*oo"
- special
imports:
foo|filter: ./
''', module_dir)
filtered_content = {
name: ''
for name in [
'foo',
'special',
'baz/boo/a',
'baz/boo/b',
]
}
self.do_integration_test(['sync'], filtered_content)
def test_rule_with_picked_files_that_do_not_exist(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
pick: idontexist
imports:
foo: ./
''', module_dir)
with raises_gathered(peru.rule.NoMatchingFilesError):
self.do_integration_test(['sync'], {})
def test_rule_with_exported_files_that_are_not_picked(self):
content = {
name: ''
for name in ['foo', 'bar', 'baz/bing', 'baz/boo/a', 'baz/boo/b']
}
module_dir = shared.create_dir(content)
self.write_yaml(
'''\
cp module foo:
path: {}
pick: foo
export: baz/
imports:
foo: ./
''', module_dir)
with raises_gathered(peru.rule.NoMatchingFilesError):
self.do_integration_test(['sync'], {})
def test_rule_with_dropped_files(self):
content = {'foo': 'one', 'bar': 'two'}
module_dir = shared.create_dir(content)
self.write_yaml(
'''\
cp module foobar:
path: {}
rule filter:
drop: foo
imports:
foobar|filter: ./
''', module_dir)
filtered_content = {'bar': 'two'}
self.do_integration_test(['sync'], filtered_content)
def test_drop_then_pick_is_an_error(self):
'''We want drop to run before pick, so that deleting a bunch of stuff
and then trying to pick it turns into an error. The opposite execution
order would make this silently succeed. See the discussion at
https://github.com/buildinspace/peru/issues/150#issuecomment-212580912.
'''
content = {'foo': 'stuff'}
module_dir = shared.create_dir(content)
self.write_yaml(
'''\
cp module foobar:
path: {}
drop: foo
pick: foo
imports:
foobar: ./
''', module_dir)
with raises_gathered(peru.rule.NoMatchingFilesError):
run_peru_command(['sync'], self.test_dir)
def test_rule_with_executable(self):
contents = {'a.txt': '', 'b.txt': '', 'c.foo': ''}
module_dir = shared.create_dir(contents)
self.write_yaml(
'''\
cp module foo:
path: {}
executable: "*.txt"
imports:
foo: ./
''', module_dir)
self.do_integration_test(['sync'], contents)
for f in ('a.txt', 'b.txt'):
shared.assert_executable(os.path.join(self.test_dir, f))
def test_rule_with_move(self):
module_dir = shared.create_dir({'a': 'foo', 'b/c': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
move:
a: newa
b: newb
imports:
foo: ./
''', module_dir)
self.do_integration_test(['sync'], {'newa': 'foo', 'newb/c': 'bar'})
def test_rule_with_move_error(self):
module_dir = shared.create_dir()
self.write_yaml(
'''\
cp module foo:
path: {}
move:
doesntexist: also_nonexistent
imports:
foo: ./
''', module_dir)
with raises_gathered(peru.rule.NoMatchingFilesError) as cm:
self.do_integration_test(['sync'], {
'newa': 'foo',
'newb/c': 'bar'
})
assert 'doesntexist' in cm.exception.message
def test_rule_with_copied_files(self):
content = {'foo': 'foo', 'bar/baz': 'baz'}
module_dir = shared.create_dir(content)
self.write_yaml(
'''\
cp module foo:
path: {}
copy:
foo: foo-copy
bar:
- bar-copy-1
- bar-copy-2
imports:
foo: ./
''', module_dir)
copied_content = {
'foo': 'foo',
'bar/baz': 'baz',
'foo-copy': 'foo',
'bar-copy-1/baz': 'baz',
'bar-copy-2/baz': 'baz'
}
self.do_integration_test(['sync'], copied_content)
def test_alternate_cache(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
imports:
foo: subdir
''', module_dir)
cache_dir = shared.create_dir()
env = {'PERU_CACHE_DIR': cache_dir}
self.do_integration_test(['sync'], {'subdir/foo': 'bar'}, env=env)
self.assertTrue(os.path.exists(os.path.join(cache_dir, 'plugins')))
self.assertTrue(os.path.exists(os.path.join(cache_dir, 'trees')))
self.assertTrue(os.path.exists(os.path.join(cache_dir, 'keyval')))
self.assertFalse(os.path.exists(os.path.join(self.peru_dir, 'cache')))
def test_override(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
imports:
foo: ./
''', module_dir)
override_dir = shared.create_dir({'foo': 'override'})
# Set the override.
run_peru_command(['override', 'add', 'foo', override_dir],
self.test_dir)
# Confirm that the override is configured.
output = run_peru_command(['override'], self.test_dir)
self.assertEqual(output, 'foo: {}\n'.format(override_dir))
# Make sure 'override list' gives the same output as 'override'.
output = run_peru_command(['override', 'list'], self.test_dir)
self.assertEqual(output, 'foo: {}\n'.format(override_dir))
# Same as above, but as JSON (with --json flag).
output = run_peru_command(['override', '--json'], self.test_dir)
override_dict = json.loads(output)
self.assertEqual(override_dict, {'foo': override_dir})
# Run the sync with --no-overrides and confirm nothing changes. Also
# check that there's no overrides-related output.
output = self.do_integration_test(['sync', '--no-overrides'],
{'foo': 'bar'})
self.assertNotIn('overrides', output)
# Now run the sync normally and confirm that the override worked. Also
# confirm that we mentioned the override in output, and that the unused
# overrides warning is not printed.
output = self.do_integration_test(['sync'], {'foo': 'override'})
self.assertIn('overrides', output)
self.assertNotIn('WARNING unused overrides', output)
# Delete the override.
run_peru_command(['override', 'delete', 'foo'], self.test_dir)
# Confirm that the override was deleted.
output = run_peru_command(['override'], self.test_dir)
self.assertEqual(output, '')
# Rerun the sync and confirm the original content is back.
self.do_integration_test(['sync'], {'foo': 'bar'})
# Add a bogus override and confirm the unused overrides warning is
# printed.
run_peru_command(['override', 'add', 'bogus', override_dir],
self.test_dir)
output = self.do_integration_test(['sync'], {'foo': 'bar'})
self.assertIn('WARNING unused overrides', output)
def test_override_after_regular_sync(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
imports:
foo: ./
''', module_dir)
# First, do a regular sync.
self.do_integration_test(['sync'], {'foo': 'bar'})
# Now, add an override, and confirm that the new sync works.
override_dir = shared.create_dir({'foo': 'override'})
run_peru_command(['override', 'add', 'foo', override_dir],
self.test_dir)
self.do_integration_test(['sync'], {'foo': 'override'})
def test_override_recursive(self):
# Module A just includes the file 'foo'.
module_a_dir = shared.create_dir({'foo': 'bar'})
# Module B imports module A.
module_b_dir = shared.create_dir()
self.write_yaml(
'''\
cp module A:
path: {}
imports:
A: A/
''',
module_a_dir,
dir=module_b_dir)
# Module C (in self.test_dir) imports module B, and also directly
# imports module A. When we set an override for module A below, we'll
# want to check that *both* of these imports get overridden.
self.write_yaml(
'''\
cp module B:
path: {}
recursive: true
# Note that module business happens before rule business, so
# 'drop: peru.yaml' will not affect the recursion, just the
# final output.
drop: peru.yaml
imports:
B.A: A/
B: B/
''', module_b_dir)
# First, do a regular sync.
self.do_integration_test(['sync'], {
'A/foo': 'bar',
'B/A/foo': 'bar',
})
# Now set an override for B.A.
override_dir = shared.create_dir({'foo': 'override'})
run_peru_command(['override', 'add', 'B.A', override_dir],
self.test_dir)
# Now do another sync. *Both* the directly imported copy of A *and* the
# copy synced inside of B should be overridden.
self.do_integration_test(['sync'], {
'A/foo': 'override',
'B/A/foo': 'override',
})
def test_relative_override_from_subdir(self):
self.write_yaml('''\
empty module foo:
imports:
foo: ./
''')
# Create some subdirs inside the project.
subdir = os.path.join(self.test_dir, 'a', 'b')
peru.compat.makedirs(subdir)
# Create an override dir outside the project.
override_dir = shared.create_dir({'foo': 'override'})
# Set the override from inside subdir, using the relative path that's
# valid from that location. Peru is going to store this path in
# .peru/overrides/ at the root, so this tests that we resolve the
# stored path properly.
relative_path = os.path.relpath(override_dir, start=subdir)
run_peru_command(['override', 'add', 'foo', relative_path], subdir)
# Confirm that the right path is stored on disk.
expected_stored_path = os.path.relpath(
override_dir, start=self.test_dir)
with open(os.path.join(self.peru_dir, 'overrides', 'foo')) as f:
actual_stored_path = f.read()
self.assertEqual(expected_stored_path, actual_stored_path)
# Confirm that `peru override` prints output that respects the cwd.
output = run_peru_command(['override'], subdir)
self.assertEqual('foo: {}\n'.format(relative_path), output)
# Confirm that syncing works.
self.do_integration_test(['sync'], {'foo': 'override'}, cwd=subdir)
def test_override_excludes_dotperu(self):
self.write_yaml('''\
empty module foo:
imports:
foo: ./
''')
override_dir = shared.create_dir({
'foo': 'override',
'.peru/bar': 'baz'
})
run_peru_command(['override', 'add', 'foo', override_dir],
self.test_dir)
self.do_integration_test(['sync'], {'foo': 'override'})
def test_rules_in_override(self):
module_dir = shared.create_dir({'a/b': 'c'})
yaml = '''
imports:
foo|get_a: ./
cp module foo:
path: {}
rule get_a:
export: a
'''
self.write_yaml(yaml, module_dir)
override_dir = shared.create_dir({'a/b': 'override'})
run_peru_command(['override', 'add', 'foo', override_dir],
self.test_dir)
self.do_integration_test(['sync'], {'b': 'override'})
def test_missing_name_errors(self):
self.write_yaml('''
imports:
thingabc: path
''')
with self.assertRaises(peru.error.PrintableError) as cm:
self.do_integration_test(['sync'], {})
assert "thingabc" in cm.exception.message
self.write_yaml('''
imports:
thingabc|rulexyz: path
git module thingabc:
url: http://example.com
''')
with self.assertRaises(peru.error.PrintableError) as cm:
self.do_integration_test(['sync'], {})
assert "rulexyz" in cm.exception.message, cm.exception.message
def test_copy(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
''', module_dir)
# Do a simple copy and check the results.
self.do_integration_test(['copy', 'foo', '.'], {'foo': 'bar'})
# Running the same copy again should fail, because of conflicts.
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
self.do_integration_test(['copy', 'foo', '.'], {'foo': 'bar'})
# Passing the --force flag should pave over conflicts.
self.do_integration_test(['copy', '--force', 'foo', '.'],
{'foo': 'bar'})
def test_copy_nested(self):
# Project B contains project A
dir_a = shared.create_dir({'afile': 'stuff'})
dir_b = shared.create_dir()
# Create the peru.yaml file for B.
self.write_yaml(
'''\
cp module a:
path: {}
''',
dir_a,
dir=dir_b)
# Now create the peru.yaml file in the actual test project.
self.write_yaml(
'''\
cp module b:
path: {}
''', dir_b)
self.do_integration_test(['copy', 'b.a', '.'], {'afile': 'stuff'})
def test_clean(self):
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
imports:
foo: ./
cp module foo:
path: {}
''', module_dir)
self.do_integration_test(['clean'], {})
self.do_integration_test(['sync'], {'foo': 'bar'})
shared.write_files(self.test_dir, {'foo': 'DIRTY'})
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
self.do_integration_test(['clean'], {})
self.do_integration_test(['clean', '--force'], {})
def test_help(self):
flag_output = run_peru_command(['--help'], self.test_dir)
self.assertEqual(peru.main.__doc__, flag_output)
command_output = run_peru_command(['help'], self.test_dir)
self.assertEqual(peru.main.__doc__, command_output)
clean_help = peru.main.COMMAND_DOCS['clean']
pre_flag_output = run_peru_command(['-h', 'clean'], self.test_dir)
self.assertEqual(clean_help, pre_flag_output)
post_flag_output = run_peru_command(['clean', '-h'], self.test_dir)
self.assertEqual(clean_help, post_flag_output)
buffer = io.StringIO()
with redirect_stderr(buffer):
run_peru_command(['foobarbaz'], self.test_dir, expected_error=1)
self.assertEqual(peru.main.__doc__, buffer.getvalue())
def test_version(self):
version_output = run_peru_command(["--version"], self.test_dir)
self.assertEqual(peru.main.get_version(), version_output.strip())
def test_duplicate_keys_warning(self):
self.write_yaml('''\
git module foo:
git module foo:
''')
buffer = io.StringIO()
with redirect_stderr(buffer):
run_peru_command(['sync'], self.test_dir)
assert ('WARNING' in buffer.getvalue())
assert ('git module foo' in buffer.getvalue())
# Make sure --quiet suppresses the warning.
buffer = io.StringIO()
with redirect_stderr(buffer):
run_peru_command(['sync', '--quiet'], self.test_dir)
# Don't literally check that stderr is empty, because that could get
# tripped up on other Python warnings (like asyncio taking too long).
assert 'git module foo' not in buffer.getvalue()
def test_lastimports_timestamp(self):
module_dir = shared.create_dir({'foo': 'bar'})
template = '''\
cp module foo:
path: {}
imports:
foo: {}
'''
self.write_yaml(template, module_dir, "subdir1")
self.do_integration_test(['sync'], {'subdir1/foo': 'bar'})
lastimports = os.path.join(self.test_dir, '.peru', 'lastimports')
def get_timestamp():
return os.stat(lastimports).st_mtime
original_timestamp = get_timestamp()
# Running it again should be a no-op. Assert that the lastimports
# timestamp hasn't changed.
self.do_integration_test(['sync'], {'subdir1/foo': 'bar'})
assert get_timestamp() == original_timestamp, \
"Expected an unchanged timestamp."
# Modify peru.yaml and sync again. This should change the timestamp.
self.write_yaml(template, module_dir, "subdir2")
self.do_integration_test(['sync'], {'subdir2/foo': 'bar'})
assert get_timestamp() > original_timestamp, \
"Expected an updated timestamp."
def test_number_of_git_commands(self):
'''A no-op sync should be a single git command. Also check that index
files are deleted after any sync error.'''
module_dir = shared.create_dir({'foo': 'bar'})
self.write_yaml(
'''\
cp module foo:
path: {}
imports:
foo: subdir
''', module_dir)
index_path = os.path.join(self.test_dir, '.peru/lastimports.index')
# The first sync should take multiple operations and create a
# lastimports.index file.
peru.cache.DEBUG_GIT_COMMAND_COUNT = 0
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
assert peru.cache.DEBUG_GIT_COMMAND_COUNT > 1, \
'The first sync should take multiple operations.'
assert os.path.exists(index_path), \
'The first sync should create an index file.'
# The second sync should reuse the index file and only take one
# operation.
peru.cache.DEBUG_GIT_COMMAND_COUNT = 0
self.do_integration_test(['sync'], {'subdir/foo': 'bar'})
assert peru.cache.DEBUG_GIT_COMMAND_COUNT == 1, \
'The second sync should take only one operation.'
assert os.path.exists(index_path), \
'The second sync should preserve the index file.'
# Now force an error. This should delete the index file.
with open(os.path.join(self.test_dir, 'subdir/foo'), 'w') as f:
f.write('dirty')
with self.assertRaises(peru.cache.DirtyWorkingCopyError):
run_peru_command(['sync'], self.test_dir)
assert not os.path.exists(index_path), \
'The error should delete the index file.'
# Fix the error and resync with new module contents. This should
# recreate the index file with the current tree and then succeed,
# rather than using an empty index and treating the current files as
# conflicting.
with open(os.path.join(self.test_dir, 'subdir/foo'), 'w') as f:
f.write('bar')
with open(os.path.join(module_dir, 'foo'), 'w') as f:
f.write('new bar')
self.do_integration_test(['sync', '--no-cache'],
{'subdir/foo': 'new bar'})
assert os.path.exists(index_path), \
'The index should have been recreated.'
def test_module_list(self):
self.write_yaml('''\
git module foo:
url: blah
git module bar:
url: blah
''')
output = run_peru_command(['module'], self.test_dir)
self.assertEqual(output, "bar\nfoo\n")
output = run_peru_command(['module', 'list'], self.test_dir)
self.assertEqual(output, "bar\nfoo\n")
output = run_peru_command(['module', 'list', '--json'], self.test_dir)
self.assertEqual(output, '["bar", "foo"]\n')
@contextlib.contextmanager
def redirect_stderr(f):
old_stderr = sys.stderr
sys.stderr = f
try:
yield
finally:
sys.stderr = old_stderr
|
# Get instance
import instaloader
import json
L = instaloader.Instaloader(max_connection_attempts=0)
# Login or load session
username = ''
password = ''
L.login(username, password) # (login)
# Obtain profile metadata
instagram_target = ''
profile = instaloader.Profile.from_username(L.context, instagram_target)
following_list = []
count=1
for followee in profile.get_followees():
username = followee.username
following_list.append(username)
print(str(count) + ". " + username)
count = count + 1
following_list_json = json.dumps(following_list)
with open("list_following_" + instagram_target + ".json", "w") as f:
    f.write(following_list_json)
print("done")
print("check the JSON output in the file: list_following_" + instagram_target + ".json")
|
"""
This module combines schema and yaml parser into one, to provide better error
messages through a single entrypoint `load`.
Used for parsing dvc.yaml, dvc.lock and .dvc files.
Not to be confused with strictyaml, a python library with similar motivations.
"""
from typing import TYPE_CHECKING, Any, Callable, List, TypeVar
from dvc.exceptions import DvcException, PrettyDvcException
from dvc.utils.serialize import (
EncodingError,
YAMLFileCorruptedError,
parse_yaml,
)
if TYPE_CHECKING:
from rich.syntax import Syntax
from rich.text import Text
from ruamel.yaml import StreamMark
from dvc.fs.base import BaseFileSystem
_T = TypeVar("_T")
def _prepare_cause(cause: str) -> "Text":
from rich.text import Text
return Text(cause, style="bold")
def _prepare_code_snippets(code: str, start_line: int = 1) -> "Syntax":
from rich.syntax import Syntax
return Syntax(
code,
"yaml",
start_line=start_line,
theme="ansi_dark",
word_wrap=True,
line_numbers=True,
indent_guides=True,
)
class YAMLSyntaxError(PrettyDvcException, YAMLFileCorruptedError):
def __init__(self, path: str, yaml_text: str, exc: Exception) -> None:
self.path: str = path
self.yaml_text: str = yaml_text
self.exc: Exception = exc
super().__init__(self.path)
def __pretty_exc__(self, **kwargs: Any) -> None:
from ruamel.yaml.error import MarkedYAMLError
from dvc.ui import ui
from dvc.utils import relpath
exc = self.exc.__cause__
if not isinstance(exc, MarkedYAMLError):
raise ValueError("nothing to pretty-print here. :)")
source = self.yaml_text.splitlines()
def prepare_linecol(mark: "StreamMark") -> str:
return f"in line {mark.line + 1}, column {mark.column + 1}"
def prepare_message(message: str, mark: "StreamMark" = None) -> "Text":
cause = ", ".join(
[message.capitalize(), prepare_linecol(mark) if mark else ""]
)
return _prepare_cause(cause)
def prepare_code(mark: "StreamMark") -> "Syntax":
line = mark.line + 1
code = "" if line > len(source) else source[line - 1]
return _prepare_code_snippets(code, line)
lines: List[object] = []
if hasattr(exc, "context"):
if exc.context_mark is not None:
lines.append(
prepare_message(str(exc.context), exc.context_mark)
)
if exc.context_mark is not None and (
exc.problem is None
or exc.problem_mark is None
or exc.context_mark.name != exc.problem_mark.name
or exc.context_mark.line != exc.problem_mark.line
or exc.context_mark.column != exc.problem_mark.column
):
lines.extend([prepare_code(exc.context_mark), ""])
if exc.problem is not None:
lines.append(
prepare_message(str(exc.problem), exc.problem_mark)
)
if exc.problem_mark is not None:
lines.append(prepare_code(exc.problem_mark))
if lines and lines[-1]:
lines.insert(0, "")
lines.insert(0, f"[red]'{relpath(self.path)}' structure is corrupted.")
for message in lines:
ui.error_write(message, styled=True)
class YAMLValidationError(DvcException):
def __init__(self, exc):
super().__init__(str(exc))
def validate(data: _T, schema: Callable[[_T], _T], _text: str = None) -> _T:
from voluptuous import MultipleInvalid
try:
return schema(data)
except MultipleInvalid as exc:
raise YAMLValidationError(str(exc))
def load(
path: str,
schema: Callable[[_T], _T] = None,
fs: "BaseFileSystem" = None,
encoding: str = "utf-8",
round_trip: bool = False,
) -> Any:
open_fn = fs.open if fs else open
try:
with open_fn(path, encoding=encoding) as fd: # type: ignore
text = fd.read()
data = parse_yaml(text, path, typ="rt" if round_trip else "safe")
except UnicodeDecodeError as exc:
raise EncodingError(path, encoding) from exc
except YAMLFileCorruptedError as exc:
cause = exc.__cause__
raise YAMLSyntaxError(path, text, exc) from cause
if schema:
# not returning validated data, as it may remove
# details from CommentedMap that we get from roundtrip parser
validate(data, schema, text)
return data, text
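# Minimal usage sketch (hypothetical schema import; `load` returns both the
# parsed data and the raw text, so callers can reuse the text for error
# context):
#
#     from dvc.schema import COMPILED_LOCKFILE_SCHEMA
#     data, text = load("dvc.lock", schema=COMPILED_LOCKFILE_SCHEMA)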
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Timer.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QMessageBox
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(570, 138)
Form.setStyleSheet("background-color: rgb(255, 255, 255);")
self.pushButton = QtWidgets.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(20, 40, 111, 71))
font = QtGui.QFont()
font.setPointSize(20)
self.pushButton.setFont(font)
self.pushButton.clicked.connect(lambda:self.whichbtn(self.pushButton))
self.pushButton.setCheckable(True) # Make pushButton Checkable
self.pushButton.setObjectName("pushButton")
self.lineEdit = QtWidgets.QLineEdit(Form)
self.lineEdit.setGeometry(QtCore.QRect(160, 40, 104, 70))
font = QtGui.QFont()
font.setPointSize(28)
self.lineEdit.setFont(font)
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setObjectName("lineEdit") # lineEdit for Hour value
self.lineEdit_2 = QtWidgets.QLineEdit(Form)
self.lineEdit_2.setGeometry(QtCore.QRect(300, 40, 104, 70))
font = QtGui.QFont()
font.setPointSize(28)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2") # lineEdit for Minute value
self.lineEdit_3 = QtWidgets.QLineEdit(Form)
self.lineEdit_3.setGeometry(QtCore.QRect(440, 40, 104, 70))
font = QtGui.QFont()
font.setPointSize(28)
self.lineEdit_3.setFont(font)
self.lineEdit_3.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_3.setObjectName("lineEdit_3") # lineEdit for Second value
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(270, 50, 20, 41))
font = QtGui.QFont()
font.setPointSize(44)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(410, 50, 20, 41))
font = QtGui.QFont()
font.setPointSize(44)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(190, 10, 52, 21))
font = QtGui.QFont()
font.setPointSize(16)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(320, 10, 71, 21))
font = QtGui.QFont()
font.setPointSize(16)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Form)
self.label_5.setGeometry(QtCore.QRect(450, 10, 81, 21))
font = QtGui.QFont()
font.setPointSize(16)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
# Initial value for Counter
self.timerCnt= self.i=self.j=self.k=0
# Make QTimer
        self.qTimer = QTimer() # Timer driving the countdown
# Set interval to 1 s
self.qTimer.setInterval(1000) # 1000 ms = 1 s
# Connect timeout signal to signal handler
self.qTimer.timeout.connect(self.timerOn)
def timerOn(self):
self.timerCnt +=1
self.getSecond()
    def getSecond(self):
        # Borrow when the seconds digit hits zero: take a minute if one is
        # left, otherwise take an hour and refill the minutes.
        if self.i == 0:
            if self.j > 0:
                self.i = 60
                self.j -= 1
            elif self.k > 0:
                self.i = 60
                self.j = 59
                self.k -= 1
        # Decrease second value by 1 each timer tick
        self.i -= 1
self.lineEdit_3.setText(str(self.i))
self.lineEdit_2.setText(str(self.j))
self.lineEdit.setText(str(self.k))
if(self.i==0 and self.j ==0 and self.k ==0):
self.qTimer.stop() # Stop timer if all of the value has reached zero
self.timerCnt=0 # Reset the counter value
            self.pushButton.setChecked(False) # Reset the pushButton to unchecked
self.pushButton.setText("START")
self.lineEdit.setText('00')
self.lineEdit_2.setText('00')
self.lineEdit_3.setText('00')
print("Timer STOP")
def whichbtn(self,b): # Read the signal from QPushButton
print ("Button "+b.text())
if self.pushButton.isChecked() :
self.t_sec=self.lineEdit_3.text() # Save the input from lineEdit
self.i =int(self.t_sec) # Variable for second
self.t_min=self.lineEdit_2.text()
self.j =int(self.t_min) # Variable for minute
self.t_hour=self.lineEdit.text()
self.k=int(self.t_hour) # Variable for hour
print(self.k,self.j,self.i)
self.pushButton.setText("STOP")
# Check the input value first
if (self.i==0 and self.j == 0 and self.k == 0):
self.clickMethod() # Show message box
self.pushButton.setChecked(False)
self.pushButton.setText("START")
else:
self.qTimer.start() # Start timer
else:
self.pushButton.setText("START")
self.qTimer.stop() # Stop timer
self.timerCnt=0
self.lineEdit.setText(str('00'))
self.lineEdit_2.setText(str('00'))
self.lineEdit_3.setText(str('00'))
def clickMethod(self): # Notification window function
msg=QMessageBox()
msg.about(msg, "Timer Failed", "Please fill the amount of time first !")
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.pushButton.setText(_translate("Form", "START"))
self.lineEdit.setInputMask(_translate("Form", "99"))
self.lineEdit.setText(_translate("Form", "00"))
self.lineEdit_2.setInputMask(_translate("Form", "99"))
self.lineEdit_2.setText(_translate("Form", "00"))
self.lineEdit_3.setInputMask(_translate("Form", "99"))
self.lineEdit_3.setText(_translate("Form", "00"))
self.label.setText(_translate("Form", ":"))
self.label_2.setText(_translate("Form", ":"))
self.label_3.setText(_translate("Form", "Hour"))
self.label_4.setText(_translate("Form", "Minute"))
self.label_5.setText(_translate("Form", "Second"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
import math
import params
MASS = params.STAR_MASS
accuracy = params.accuracy
# Radius in cm
def sonic_radius(t):
return 2.0e11 * 2e6 / t * MASS
# Sound speed in km/s
def sound_speed(t):
return 180 * math.sqrt(t / 2.0e6)
def right_part(r, t):
rs = sonic_radius(t)
return 4.0 * math.log(1.0 * r / rs) + 4.0 * rs / r - 3
def left_part(u, t):
a0 = sound_speed(t)
a0srq = a0 * a0
uu = 1.0 * u * u / a0srq
return uu - math.log(uu)
def get(r, t):
rp = right_part(r, t)
rs = sonic_radius(t)
cs = sound_speed(t)
if r < rs:
left = 0.1
right = cs
else:
left = cs
right = 1000
def eq(u):
return left_part(u, t) - rp
middle = (left + right) / 2
err = eq(middle)
index = 0
while math.fabs(err) > accuracy:
if eq(left) * eq(middle) < 0:
right = middle
else:
left = middle
middle = 1.0 * (left + right) / 2.0
err = eq(middle)
index += 1
if index == 1000:
break
return middle
def getsgs(r, t):
return get(r, t) * 1e5
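# Usage sketch (relies on the same hypothetical `params` module imported
# above): wind speed at 1 AU for a 2 MK corona, via the bisection in get().
if __name__ == '__main__':
    AU = 1.496e13  # cm
    print(get(AU, 2.0e6))     # km/s
    print(getsgs(AU, 2.0e6))  # cm/s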
|
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1, SellarDis2
class SellarMDAConnect(om.Group):
def setup(self):
cycle = self.add_subsystem('cycle', om.Group(), promotes_inputs=['x', 'z'])
cycle.add_subsystem('d1', SellarDis1(), promotes_inputs=['x', 'z'])
cycle.add_subsystem('d2', SellarDis2(), promotes_inputs=['z'])
cycle.connect('d1.y1', 'd2.y1')
######################################
# This is a "forgotten" connection!!
######################################
#cycle.connect('d2.y2', 'd1.y2')
cycle.set_input_defaults('x', 1.0)
cycle.set_input_defaults('z', np.array([5.0, 2.0]))
# Nonlinear Block Gauss Seidel is a gradient free solver
cycle.nonlinear_solver = om.NonlinearBlockGS()
self.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes_inputs=['x', 'z'])
self.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'))
self.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'))
self.connect('cycle.d1.y1', ['obj_cmp.y1', 'con_cmp1.y1'])
self.connect('cycle.d2.y2', ['obj_cmp.y2', 'con_cmp2.y2'])
prob = om.Problem()
prob.model = SellarMDAConnect()
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
# prob.driver.options['maxiter'] = 100
prob.driver.options['tol'] = 1e-8
prob.set_solver_print(level=0)
prob.model.add_design_var('x', lower=0, upper=10)
prob.model.add_design_var('z', lower=0, upper=10)
prob.model.add_objective('obj_cmp.obj')
prob.model.add_constraint('con_cmp1.con1', upper=0)
prob.model.add_constraint('con_cmp2.con2', upper=0)
prob.setup()
prob.set_val('x', 2.0)
prob.set_val('z', [-1., -1.])
prob.run_driver()
print('minimum found at')
print(prob.get_val('x')[0])
print(prob.get_val('z'))
print('minimum objective')
print(prob.get_val('obj_cmp.obj')[0]) |
from abc import ABC, abstractmethod
import queue
class Sort(ABC):
def __init__(self, array):
self.array = array.copy()
self.steps = queue.Queue()
@abstractmethod
def sort(self):
pass
def swap(self, i, j):
self.steps.put((i, j))
self.array[i], self.array[j] = self.array[j], self.array[i]
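# A minimal concrete subclass, shown as a sketch: bubble sort routes every
# exchange through swap(), so self.steps records the full move sequence for
# later replay (e.g., by a step-by-step visualizer).
class BubbleSort(Sort):
    def sort(self):
        n = len(self.array)
        for i in range(n):
            for j in range(n - i - 1):
                if self.array[j] > self.array[j + 1]:
                    self.swap(j, j + 1)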
|
### Util library for general file handling
def replaceTextInFile(filepath, oldText, newText):
    """Replace all occurrences of some text in a plaintext file with some other text.

    filepath -- the path of the plaintext file
    oldText -- the text to replace
    newText -- the replacement text
    """
    with open(filepath, "rt") as file:
        newContents = file.read().replace(oldText, newText)
    with open(filepath, "wt") as file:
        file.write(newContents)
|
import os
ROOT_DIR = os.path.split(os.path.dirname(os.path.abspath(os.getcwd())))[0]
PROJECT_DIR = os.path.dirname(os.path.abspath(os.getcwd()))
def create_tax_level_files(tax_level_name, replacements_dict, fold):
for root, dirs, files in os.walk(os.path.join(ROOT_DIR, "data", "GME", "finest", fold)):
for file in files:
if "finest_space.bmes" in file:
with open(os.path.join(root, file), "r") as fine_f:
fname = file.replace("finest", tax_level_name)
with open(os.path.join(ROOT_DIR, "data", "GME", tax_level_name, fold, fname), "w") as binary_f:
                        for line in fine_f.readlines():
                            if line:
                                l = line.split()
                                try:
                                    token, prefix, suffix = l[0], l[1].split("-")[0], l[1].split("-")[1]
                                    for (super_label, sublabels) in replacements_dict:
                                        if any(x in l[1] for x in sublabels):
                                            binary_f.write(f"{token} {prefix}-{super_label}\n")
                                            break
                                except Exception:
                                    binary_f.write(line)
                            else:
                                binary_f.write(line)
def create_tax_level_for_test(tax_level_name, replacements_dict):
test_path = os.path.join(ROOT_DIR, "data", "GME", "finest", "test_finest_space.bmes")
new_test_path = os.path.join(ROOT_DIR, "data", "GME", tax_level_name, "test_finest_space.bmes")
with open(test_path, "r") as fine_f:
with open(new_test_path, "w") as binary_f:
for line in fine_f.readlines():
if line.strip():
l = line.split()
token = l[0]
label = l[1].split("-")
if len(label) > 1:
prefix = label[0]
suffix = label[1]
for (super_label, sublabels) in replacements_dict:
if suffix in sublabels:
binary_f.write(f"{token} {prefix}-{super_label}\n")
break
else:
binary_f.write(line)
else:
binary_f.write(line)
def convert_data_to_taxonomy_level(tax_level_name, replacements_dict):
gme_basepath = os.path.join(ROOT_DIR, "data", "GME")
if not os.path.isdir(os.path.join(gme_basepath, tax_level_name)):
os.mkdir(os.path.join(gme_basepath, tax_level_name))
    for fold in range(5):
        fold = str(fold)
        if not os.path.isdir(os.path.join(gme_basepath, tax_level_name, fold)):
            os.mkdir(os.path.join(gme_basepath, tax_level_name, fold))
    create_tax_level_for_test(tax_level_name, replacements_dict)
    for fold in range(5):
        create_tax_level_files(tax_level_name, replacements_dict, str(fold))
if __name__ == "__main__":
modal_not_modal = [("modal", ["teleological", "deontic", "priority", "buletic",
"epistemic", "circumstantial", "ability",
"epistemic_circumstantial", "buletic_teleological", "ability_circumstantial"])]
priority_vs_plausibility = [
("priority", ["teleological", "deontic", "priority", "buletic"]),
("plausibility", ["epistemic", "circumstantial", "ability"])
]
fine_grained = [
("deontic", ["deontic", "priority"]),
("intetional", ["buletic_teleological", "teleological", "buletic"]),
("circumstantial", ["circumstantial"]),
("ability", ["ability_circumstantial", "ability"]),
("epistemic", ["epistemic_circumstantial", "epistemic"])
]
finest_grained = [
("priority", ["priority", "buletic_teleological"]),
("deontic", ["deontic"]),
("epistemic", ["epistemic_circumstantial", "epistemic"]),
("ability", ["ability", "ability_circumstantial"]),
("buletic", ["buletic"]),
("teleological", ["teleological"]),
("circumstantial", ["circumstantial"]),
]
plausibility_others = [
("deontic", ["deontic", "priority"]),
("intetional", ["buletic_teleological", "teleological", "buletic"]),
("plausibility", ["epistemic_circumstantial", "ability_circumstantial", "circumstantial",
"ability", "epistemic"])
]
    # This is an example of how to call the converter. To add more versions, create a list of tuples as above.
convert_data_to_taxonomy_level("priority_vs_plausibility", priority_vs_plausibility)
|
from fastapi import APIRouter
from src.handler.getinformationbyconfig.contract.request import Params
from src.handler.getinformationbyconfig.handler import Handler as HandlerInformationByConfig
information_router = APIRouter()
@information_router.get("/information/config/{language}/{config_name}")
async def information_by_config(language: str, config_name: str):
params = Params(language, config_name)
return HandlerInformationByConfig().handler(params)
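# Example request (a sketch; the path values are hypothetical):
#   GET /information/config/en/default
# resolves to HandlerInformationByConfig().handler(Params("en", "default")).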
|
# Read a 4x4 board and report whether any length-3 line (row, column, or
# diagonal) has exactly one empty cell '.' with 'x' in the other two cells.
N = 4
board = [input() for _ in range(N)]
for i in range(N):
    for j in range(N):
        cand = []
        if j + 2 < N:
            cand.append([board[i][j+k] for k in range(3)])
        if i + 2 < N:
            cand.append([board[i+k][j] for k in range(3)])
        if i + 2 < N and j + 2 < N:
            cand.append([board[i+k][j+k] for k in range(3)])
        if i + 2 < N and j - 2 >= 0:
            cand.append([board[i+k][j-k] for k in range(3)])
        for row in cand:
            for k in range(3):
                # The line qualifies if cell k is empty and the other two are 'x'.
                if all(c == 'x' if idx != k else c == '.' for idx, c in enumerate(row)):
                    print("YES")
                    quit()
print("NO")
|
expected_output = {"mac_aging_time": 120}
|
import cv2
from numpy import expand_dims
from keras.models import load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from matplotlib import pyplot
from matplotlib.patches import Rectangle
from keras import backend as bak
import numpy as np
import os
bak.clear_session()
# Just disables the warning, doesn't enable AVX/FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# img_counter = 0
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
    for i in range(grid_h*grid_w):
        row = i // grid_w
        col = i % grid_w
for b in range(nb_box):
            # 4th element is the objectness score
            objectness = netout[int(row)][int(col)][b][4]
            if objectness <= obj_thresh:
                continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
            # remaining elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
new_w, new_h = net_w, net_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
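# Quick sanity check (hypothetical boxes, not part of the original script):
# two unit squares offset by half a side intersect in 0.25, union 1.75, so
# bbox_iou(BoundBox(0, 0, 1, 1), BoundBox(0.5, 0.5, 1.5, 1.5)) ~= 0.143.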
def draw_boxes(filename, v_boxes, v_labels, v_scores):
# load the image
data = pyplot.imread(filename)
# plot the image
pyplot.imshow(data)
# get the context for drawing boxes
ax = pyplot.gca()
# plot each box
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='white')
# draw the box
ax.add_patch(rect)
# draw text and score in top left corner
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
pyplot.text(x1, y1, label, color='white')
# show the plot
pyplot.show()
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def preprocess_input(image, net_h, net_w):
#new_h, new_w, _ = image.shape
new_h = 480
new_w = 640
# determine the new size of the image
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)/new_w
new_w = net_w
else:
new_w = (new_w * net_h)/new_h
new_h = net_h
# resize the image to the new size
resized = cv2.resize(image[:,:,::-1]/255., (int(new_w), int(new_h)))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[int((net_h-new_h)//2):int((net_h+new_h)//2), int((net_w-new_w)//2):int((net_w+new_w)//2), :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
# load and prepare an image
def load_image_pixels(filename, shape):
# load the image to get its shape
image = load_img(filename)
width, height = image.size
# load the image with the required size
image = load_img(filename, target_size=shape)
# convert to numpy array
image = img_to_array(image)
# scale pixel values to [0, 1]
image = image.astype('float32')
image /= 255.0
# add a dimension so that we have one sample
image = expand_dims(image, 0)
return image, width, height
# get all of the results above a threshold
def get_boxes(boxes, labels, thresh):
v_boxes, v_labels, v_scores = list(), list(), list()
# enumerate all boxes
for box in boxes:
# enumerate all possible labels
for i in range(len(labels)):
# check if the threshold for this label is high enough
if box.classes[i] > thresh:
v_boxes.append(box)
v_labels.append(labels[i])
v_scores.append(box.classes[i]*100)
# don't break, many labels may trigger for one box
return v_boxes, v_labels, v_scores
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck",
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana",
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# set webcam (device index 1; use 0 for the default camera)
cam = cv2.VideoCapture(1)
# load yolov3 model
model = load_model('model.h5',compile=False)
k = cv2.waitKey(1)
# define the expected input shape for the model
input_w, input_h = 416, 416
# define the probability threshold for detected objects
class_threshold = 0.6
# define the anchors
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
while True:
    try:
        os.remove("opencv_frame.png")
    except OSError:
        pass
    # Poll the keyboard each pass; ESC (key code 27) exits the loop.
    k = cv2.waitKey(1)
    if k % 256 == 27:
        print("Escape hit, closing...")
        break
ret, frame = cam.read()
if not ret:
print("Failed to capture frame")
break
# define our new photo
cv2.imwrite('opencv_frame.png', frame)
photo_filename = 'opencv_frame.png'
# load and prepare image
image, image_w, image_h = load_image_pixels(photo_filename, (input_w, input_h))
new_image=preprocess_input(frame, input_h, input_w)
yhat = model.predict(new_image)
boxes = []
for i in range(len(yhat)):
boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, input_h, input_w)
correct_yolo_boxes(boxes, 480, 640, input_h, input_w)
do_nms(boxes, 0.5)
v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
#for i in range(len(v_boxes)):
# print(v_labels[i], v_scores[i])
draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
cv2.imshow('frame',frame)
cam.release()
cv2.destroyAllWindows() |
#!/usr/bin/env python3
import numpy
import pyscf.pbc.gto as gto
from pyscf.pbc import scf, dft
import h5py
import sys
cell = gto.Cell()
cell.verbose = 5
alat0 = 3.6
cell.a = (numpy.ones((3,3))-numpy.eye(3))*alat0 / 2.0
cell.atom = (('C',0,0,0),('C',numpy.array([0.25,0.25,0.25])*alat0))
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.mesh = [25,25,25]
cell.build()
nk = [2,2,2]
kpts = cell.make_kpts(nk)
mf = scf.KRHF(cell, kpts=kpts)
mf.chkfile = 'scf.chk'
mf.kernel()
from afqmctools.utils.linalg import get_ortho_ao
hcore = mf.get_hcore()
fock = (hcore + mf.get_veff())
X, nmo_per_kpt = get_ortho_ao(cell,kpts,1e-14)
with h5py.File(mf.chkfile, 'r+') as fh5:
fh5['scf/hcore'] = hcore
fh5['scf/fock'] = fock
fh5['scf/orthoAORot'] = X
fh5['scf/nmo_per_kpt'] = nmo_per_kpt
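# A read-back sketch (assumes only the keys written above): reopen the
# checkpoint and load the stored Fock matrix for post-processing.
# with h5py.File('scf.chk', 'r') as fh5:
#     fock = fh5['scf/fock'][:]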
|
import os
import csv
# CSVfile pathway
budget_csv = os.path.join('..', 'Resources', 'budget_data.csv')
# Initializing the variables
total_months = 0
total_revenue = 0
changes = []
date_tally = []
# Open the CSV and read it in
with open(budget_csv, newline = '') as csvfile:
csvreader = csv.reader(csvfile, delimiter = ',')
next(csvreader, None)
    row = next(csvreader)
    # Seed totals with the first data row; month-over-month changes start
    # from the second row
    previous_profit = int(row[1])
    total_months = total_months + 1
    total_revenue = total_revenue + int(row[1])
for row in csvreader:
total_months = total_months + 1
total_revenue = total_revenue + int(row[1])
# Compare monthly performance to prior months
change = int(row[1]) - previous_profit
changes.append(change)
previous_profit = int(row[1])
date_tally.append(row[0])
# Calculating the average change and locating the months of the extremes
average_change = sum(changes) / len(changes)
high = max(changes)
low = min(changes)
greatest_increase_month = date_tally[changes.index(high)]
greatest_decrease_month = date_tally[changes.index(low)]
# Prints the values in terminal
print("Financial Analysis")
print("----------------------------")
print("Total Months: " + str(total_months))
print("Total: $" + str(total_revenue))
print("Average Change: $" + str(average_change))
print(f"Greatest Increase in Profits:, {greatest_increase_month}, (${high})")
print(f"Greatest Decrease in Profits:, {greatest_decrease_month}, (${low})")
# Exports values to a text file
with open("PyBankOutput.txt", "w") as PyBank:
    PyBank.write("Financial Analysis")
    PyBank.write('\n' + "----------------------------")
    PyBank.write('\n' + "Total Months: " + str(total_months))
    PyBank.write('\n' + "Total: $" + str(total_revenue))
    PyBank.write('\n' + "Average Change: $" + str(average_change))
    PyBank.write('\n' + f"Greatest Increase in Profits: {greatest_increase_month} (${high})")
    PyBank.write('\n' + f"Greatest Decrease in Profits: {greatest_decrease_month} (${low})")
import os
import glob
import shutil
import unittest
from inspect import getfile, currentframe
import autoload.vim_python_test_runner as sut
class VimTestRunnerForDjangoTests(unittest.TestCase):
def setUp(self):
dirs_to_make = [
"/tmp/project_app_only/example_app1/tests/", "/tmp/project_app_name_and_env/example_app1/tests/",
"/tmp/bad_project_no_files/example_app1/tests/", "/tmp/bad_project_no_config_file/example_app1/tests/",
"/tmp/bad_project_no_app/example_app1/tests/", "/tmp/bad_project_no_path_to_tests/example_app1/tests/",
"/tmp/project_multiple_apps/example_app1/tests/", "/tmp/bad_project_multiple_invalid_apps/example_app1/tests/",
"/tmp/project_nested_test_dirs/example_app1/tests/nested1/", "/tmp/project_contains_app_name/app_name/tests/",
"/tmp/project_failfast/example_app/tests/", "/tmp/project_nocapture/example_app/tests/",
"/tmp/project_with_dots/example.app.something/tests/", "/tmp/django_runner_project_app/example_app1/tests/"
]
contents_to_write = [
("/tmp/project_app_only/.vim-django", '{"app_name": "example_app1"}'),
("/tmp/project_app_only/manage.py", "#Place holder"),
("/tmp/project_app_name_and_env/.vim-django", '{"app_name": "example_app1", "environment": "test"}'),
("/tmp/project_app_name_and_env/manage.py", "#Place holder"),
("/tmp/bad_project_no_config_file/manage.py", "#Place holder"),
("/tmp/bad_project_no_app/.vim-django", '{"bad_field": "example_app1"}'),
("/tmp/bad_project_no_app/manage.py", "#Place holder"),
("/tmp/bad_project_no_path_to_tests/.vim-django", '{"app_name": "example_app1"}'),
("/tmp/bad_project_no_path_to_tests/manage.py", "#Place holder"),
("/tmp/project_multiple_apps/.vim-django", '{"app_name": "other_app, example_app1, example_app2"}'),
("/tmp/project_multiple_apps/manage.py", "#Place holder"),
("/tmp/bad_project_multiple_invalid_apps/.vim-django", '{"app_name": "other_app1, other_app2, other_app3"}'),
("/tmp/bad_project_multiple_invalid_apps/manage.py", "#Place holder"),
("/tmp/project_nested_test_dirs/.vim-django", '{"app_name": "example_app1, example_app2"}'),
("/tmp/project_nested_test_dirs/manage.py", "#Place holder"),
("/tmp/project_contains_app_name/.vim-django", '{"app_name": "example_app1, app_name"}'),
("/tmp/project_contains_app_name/manage.py", "#Place holder"),
("/tmp/project_failfast/.vim-django", '{"app_name": "example_app", "flags": ["failfast"]}'),
("/tmp/project_failfast/manage.py", "#Place holder"),
("/tmp/project_nocapture/.vim-django", '{"app_name": "example_app", "flags": ["nocapture"]}'),
("/tmp/project_nocapture/manage.py", "#Place holder"),
("/tmp/project_with_dots/.vim-django", '{"app_name": "example.app.something"}'),
("/tmp/project_with_dots/manage.py", "#Place holder"),
("/tmp/django_runner_project_app/.vim-django", '{"app_name": "example_app1", "test-runner": "django"}'),
("/tmp/django_runner_project_app/manage.py", "#Place holder")
]
for directory in dirs_to_make:
os.makedirs(directory)
for needed_file in contents_to_write:
with open(needed_file[0], "w") as f:
f.write(needed_file[1])
def tearDown(self):
for a_dir in glob.glob("/tmp/*project_*"):
shutil.rmtree(a_dir)
def test_find_vim_django_file(self):
return_value = sut.find_path_to_file("/tmp/project_app_only/example_app1/tests", ".vim-django")
self.assertEqual(return_value, "/tmp/project_app_only/.vim-django")
def test_can_not_find_vim_django_file(self):
with self.assertRaises(sut.NoVimDjango):
sut.find_path_to_file("/tmp/bad_project_no_files/example_app1/tests", ".vim-django", sut.NoVimDjango)
def test_find_manage_py(self):
return_value = sut.find_path_to_file("/tmp/project_app_only/example_app1/tests", "manage.py")
self.assertEqual(return_value, "/tmp/project_app_only/manage.py")
def test_can_not_find_manage_py(self):
with self.assertRaises(sut.NotDjango):
sut.find_path_to_file("/tmp/bad_project_no_files/example_app1/tests", "manage.py", sut.NotDjango)
def test_get_valid_class_name(self):
current_line1 = 17
current_line2 = 24
current_buffer = self.build_buffer_helper()
self.assertEqual("Example1", sut.get_current_method_and_class(current_line1, current_buffer)[0])
self.assertEqual("Example2", sut.get_current_method_and_class(current_line2, current_buffer)[0])
def test_get_current_method_and_class_returns_false_for_class_when_not_in_class(self):
current_buffer = self.build_buffer_helper()
self.assertEqual(False, sut.get_current_method_and_class(2, current_buffer)[0])
def test_get_valid_method_name(self):
should_return_dummy2 = 15
should_return_dummy1b = 27
current_buffer = self.build_buffer_helper()
self.assertEqual("dummy2", sut.get_current_method_and_class(should_return_dummy2, current_buffer)[1])
self.assertEqual("dummy1b", sut.get_current_method_and_class(should_return_dummy1b, current_buffer)[1])
def test_get_current_method_and_class_returns_false_when_not_in_method(self):
current_buffer = self.build_buffer_helper()
self.assertEqual(False, sut.get_current_method_and_class(25, current_buffer)[1])
def test_get_app_name(self):
app_name = sut.get_json_field_from_config_file("/tmp/project_app_only/example_app1/tests", "app_name")
self.assertEqual("example_app1", app_name)
def test_get_env_name_when_present(self):
app_name = sut.get_json_field_from_config_file("/tmp/project_app_name_and_env/example_app1/tests", "environment")
self.assertEqual("test", app_name)
def test_get_env_name_returns_false_when_not_provided(self):
app_name = sut.get_json_field_from_config_file("/tmp/project_app_only/example_app1/tests", "environment")
self.assertEqual(False, app_name)
def test_get_command_to_run_the_current_app_when_manage_py_found_and_app_name_provided_and_no_env_specified(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_app(current_dir)
self.assertEqual("/tmp/project_app_only/manage.py test example_app1", command_to_run)
def test_get_command_to_run_the_current_app_when_manage_py_found_and_app_name_and_env_specified(self):
current_dir = '/tmp/project_app_name_and_env/example_app1/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_app(current_dir)
self.assertEqual("/tmp/project_app_name_and_env/manage.py test test example_app1", command_to_run)
def test_get_command_to_run_the_current_app_when_config_file_not_properly_formated(self):
current_dir = '/tmp/bad_project_no_app/example_app1/tests/test_file.py'
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_app(current_dir)
def test_get_command_to_run_the_current_app_when_config_file_not_present(self):
current_dir = '/tmp/bad_project_no_config_file/example_app1/tests/test_file.py'
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_app(current_dir)
def test_get_command_to_run_the_current_app_when_manage_py_not_found(self):
current_dir = '/tmp/bad_project_no_files/example_app1/tests/test_file.py'
with self.assertRaises(sut.NotDjango):
sut.get_command_to_run_the_current_app(current_dir)
def test_get_command_to_run_the_current_file_when_manage_py_found_and_app_name_provided_and_no_env_specified(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
expected_return_value = "/tmp/project_app_only/manage.py test example_app1.tests.test_file"
command_returned = sut.get_command_to_run_the_current_file(current_dir)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_the_current_file_when_manage_py_found_and_app_name_and_env_specified(self):
current_dir = '/tmp/project_app_name_and_env/example_app1/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_file(current_dir)
self.assertEqual("/tmp/project_app_name_and_env/manage.py test test example_app1.tests.test_file", command_to_run)
pass
def test_get_command_to_run_the_current_file_when_config_file_not_properly_formated(self):
current_dir = '/tmp/bad_project_no_app/example_app1/tests/test_file.py'
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_file(current_dir)
def test_get_command_to_run_the_current_file_when_config_file_not_present(self):
current_dir = '/tmp/bad_project_no_config_file/example_app1/tests/test_file.py'
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_file(current_dir)
def test_get_command_to_run_the_current_file_when_manage_py_not_found(self):
current_dir = '/tmp/bad_project_no_files/example_app1/tests/test_file.py'
with self.assertRaises(sut.NotDjango):
sut.get_command_to_run_the_current_file(current_dir)
def test_get_command_to_run_the_current_class_with_manage_py_app_name_but_no_env_specified(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_app_only/manage.py test example_app1.tests.test_file:Example1"
self.assertEqual(expected_return_value, sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer))
def test_get_command_to_run_the_current_class_with_manage_py_app_name_and_env_specified(self):
current_dir = '/tmp/project_app_name_and_env/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_app_name_and_env/manage.py test test example_app1.tests.test_file:Example1"
self.assertEqual(expected_return_value, sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer))
def test_get_command_to_run_the_current_class_when_config_not_properly_formated_no_app_name(self):
current_dir = '/tmp/bad_project_no_app/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_class_when_config_not_present(self):
        current_dir = '/tmp/bad_project_no_config_file/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_class_when_manage_py_not_found(self):
current_dir = '/tmp/bad_project_no_files/example_app1/tests/test_file.py'
current_line = 17
        current_buffer = self.build_buffer_helper()
        with self.assertRaises(sut.NotDjango):
            sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_method_with_manage_py_app_name_but_no_env_specified(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_app_only/manage.py test example_app1.tests.test_file:Example1.dummy2"
self.assertEqual(expected_return_value, sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer))
def test_get_command_to_run_the_current_method_with_manage_py_app_name_and_env_specified(self):
current_dir = '/tmp/project_app_name_and_env/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_app_name_and_env/manage.py test test example_app1.tests.test_file:Example1.dummy2"
self.assertEqual(expected_return_value, sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer))
def test_get_command_to_run_the_current_method_when_config_not_properly_formated(self):
current_dir = '/tmp/bad_project_no_app/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_method_when_config_not_present(self):
        current_dir = '/tmp/bad_project_no_config_file/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_method_when_manage_py_not_found(self):
current_dir = '/tmp/bad_project_no_files/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NotDjango):
sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_app_when_multiple_apps_are_listed_and_a_valid_app_name_is_in_config_file(self):
current_dir = "/tmp/project_multiple_apps/example_app1/tests/test_file.py"
expected_return_value = "/tmp/project_multiple_apps/manage.py test example_app1"
command_returned = sut.get_command_to_run_the_current_app(current_dir)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_the_current_file_when_multiple_apps_are_listed_and_a_valid_app_name_is_in_config_file(self):
current_dir = "/tmp/project_multiple_apps/example_app1/tests/test_file.py"
expected_return_value = "/tmp/project_multiple_apps/manage.py test example_app1.tests.test_file"
command_returned = sut.get_command_to_run_the_current_file(current_dir)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_the_current_class_when_multiple_apps_are_listed_and_a_valid_app_name_is_in_config_file(self):
current_dir = "/tmp/project_multiple_apps/example_app1/tests/test_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_multiple_apps/manage.py test example_app1.tests.test_file:Example1"
command_returned = sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_the_current_method_when_multiple_apps_are_listed_and_a_valid_app_name_is_in_config_file(self):
current_dir = "/tmp/project_multiple_apps/example_app1/tests/test_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_multiple_apps/manage.py test example_app1.tests.test_file:Example1.dummy2"
command_returned = sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_the_current_app_when_multiple_apps_are_listed_and_a_valid_app_name_is_not_in_config_file(self):
current_dir = "/tmp/bad_project_multiple_invalid_apps/example_app1/tests/test_file.py"
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_app(current_dir)
def test_get_command_to_run_the_current_file_when_multiple_apps_are_listed_and_a_valid_app_name_is_not_in_config_file(self):
current_dir = "/tmp/bad_project_multiple_invalid_apps/example_app1/tests/test_file.py"
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_file(current_dir)
def test_get_command_to_run_the_current_class_when_multiple_apps_are_listed_and_a_valid_app_name_is_not_in_config_file(self):
current_dir = "/tmp/bad_project_multiple_invalid_apps/example_app1/tests/test_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
def test_get_command_to_run_the_current_method_when_multiple_apps_are_listed_and_a_valid_app_name_is_not_in_config_file(self):
current_dir = "/tmp/bad_project_multiple_invalid_apps/example_app1/tests/test_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
with self.assertRaises(sut.NoVimDjango):
sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
def test_get_command_to_run_current_app_when_tests_are_in_a_nested_directory(self):
current_dir = "/tmp/project_nested_test_dirs/example_app1/tests/nested1/test_nested_file.py"
expected_return_value = "/tmp/project_nested_test_dirs/manage.py test example_app1"
command_returned = sut.get_command_to_run_the_current_app(current_dir)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_current_file_when_tests_are_in_a_nested_directory(self):
current_dir = "/tmp/project_nested_test_dirs/example_app1/tests/nested1/test_nested_file.py"
expected_return_value = "/tmp/project_nested_test_dirs/manage.py test example_app1.tests.nested1.test_nested_file"
command_returned = sut.get_command_to_run_the_current_file(current_dir)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_current_class_when_tests_are_in_a_nested_directory(self):
current_dir = "/tmp/project_nested_test_dirs/example_app1/tests/nested1/test_nested_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_nested_test_dirs/manage.py test example_app1.tests.nested1.test_nested_file:Example1"
command_returned = sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_current_method_when_tests_are_in_a_nested_directory(self):
current_dir = "/tmp/project_nested_test_dirs/example_app1/tests/nested1/test_nested_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_nested_test_dirs/manage.py test example_app1.tests.nested1.test_nested_file:Example1.dummy2"
command_returned = sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_current_class_when_current_line_occurs_in_file_more_than_once(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
current_line = 42
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_app_only/manage.py test example_app1.tests.test_file:Example3"
self.assertEqual(expected_return_value, sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer))
def test_get_command_to_run_current_method_when_current_line_occurs_in_file_more_than_once(self):
current_dir = '/tmp/project_app_name_and_env/example_app1/tests/test_file.py'
current_line = 50
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_app_name_and_env/manage.py test test example_app1.tests.test_file:Example3.double_dummy"
self.assertEqual(expected_return_value, sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer))
def test_get_command_to_run_the_current_class_when_project_name_contains_the_app_name(self):
current_dir = "/tmp/project_contains_app_name/app_name/tests/test_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_contains_app_name/manage.py test app_name.tests.test_file:Example1"
command_returned = sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_the_current_app_when_failfast_is_set_to_true_in_config_file(self):
current_dir = '/tmp/project_failfast/example_app/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_app(current_dir)
self.assertEqual("/tmp/project_failfast/manage.py test --failfast example_app", command_to_run)
def test_get_command_to_run_the_current_app_when_nocapture_is_set_to_true_in_config_file(self):
current_dir = '/tmp/project_nocapture/example_app/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_app(current_dir)
self.assertEqual("/tmp/project_nocapture/manage.py test --nocapture example_app", command_to_run)
def test_get_command_to_run_current_app_writes_command_to_cache_file_when_successfully_called(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_app(current_dir)
last_command = self.get_cached_command()
self.assertEqual(command_to_run, last_command)
def test_get_command_to_run_current_file_writes_command_to_cache_file_when_successfully_called(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_file(current_dir)
last_command = self.get_cached_command()
self.assertEqual(command_to_run, last_command)
def test_get_command_to_run_current_class_writes_command_to_cache_file_when_successfully_called(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
command_to_run = sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
last_command = self.get_cached_command()
self.assertEqual(command_to_run, last_command)
def test_get_command_to_run_current_method_writes_command_to_cache_file_when_successfully_called(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
command_to_run = sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
last_command = self.get_cached_command()
self.assertEqual(command_to_run, last_command)
def test_get_command_to_rerun_last_tests_returns_the_command_last_used_to_run_tests(self):
current_dir = '/tmp/project_app_only/example_app1/tests/test_file.py'
command_to_run = sut.get_command_to_run_the_current_app(current_dir)
last_command = sut.get_command_to_rerun_last_tests()
self.assertEqual(command_to_run, last_command)
def test_get_command_to_run_current_method_when_app_name_has_dots(self):
current_dir = "/tmp/project_with_dots/example.app.something/tests/test_dot_file.py"
current_line = 17
current_buffer = self.build_buffer_helper()
expected_return_value = "/tmp/project_with_dots/manage.py test example.app.something.tests.test_dot_file:Example1.dummy2"
command_returned = sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
self.assertEqual(command_returned, expected_return_value)
def test_get_command_to_run_current_class_writes_using_django_default_test_runner_when_runner_set_to_django_in_vim_django_file(self):
        current_dir = '/tmp/django_runner_project_app/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
command_to_run = sut.get_command_to_run_the_current_class(current_dir, current_line, current_buffer)
last_command = self.get_cached_command()
self.assertEqual(command_to_run, last_command)
def test_get_command_to_run_current_method_writes_using_django_default_test_runner_when_runner_set_to_django_in_vim_django_file(self):
current_dir = '/tmp/django_runner_project_app/example_app1/tests/test_file.py'
current_line = 17
current_buffer = self.build_buffer_helper()
command_to_run = sut.get_command_to_run_the_current_method(current_dir, current_line, current_buffer)
last_command = self.get_cached_command()
self.assertEqual(command_to_run, last_command)
def build_buffer_helper(self):
current_dir = os.path.dirname(os.path.abspath(getfile(currentframe())))
with open("{}/dummy_test_file.py".format(current_dir), "r") as f:
current_buffer = []
for line in f.readlines():
current_buffer.append(line)
return current_buffer
def get_cached_command(self):
with open("/tmp/vim_python_test_runner_cache", "r") as f:
return f.read()
|
#! /usr/bin/env python3
"""Create a TAGS file dla Python programs, usable przy GNU Emacs.
usage: eptags pyfiles...
The output TAGS file jest usable przy Emacs version 18, 19, 20.
Tagged are:
- functions (even inside other defs albo classes)
- classes
eptags warns about files it cannot open.
eptags will nie give warnings about duplicate tags.
BUGS:
Because of tag duplication (methods przy the same name w different
classes), TAGS files are nie very useful dla most object-oriented
python projects.
"""
zaimportuj sys,re
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]'
matcher = re.compile(expr)
def treat_file(filename, outfp):
"""Append tags found w file named 'filename' to the open file 'outfp'"""
spróbuj:
fp = open(filename, 'r')
wyjąwszy OSError:
sys.stderr.write('Cannot open %s\n'%filename)
zwróć
charno = 0
lineno = 0
tags = []
size = 0
dopóki 1:
line = fp.readline()
jeżeli nie line:
przerwij
lineno = lineno + 1
m = matcher.search(line)
jeżeli m:
tag = m.group(0) + '\177%d,%d\n' % (lineno, charno)
tags.append(tag)
size = size + len(tag)
charno = charno + len(line)
outfp.write('\f\n%s,%d\n' % (filename,size))
dla tag w tags:
outfp.write(tag)
def main():
outfp = open('TAGS', 'w')
dla filename w sys.argv[1:]:
treat_file(filename, outfp)
jeżeli __name__=="__main__":
main()
|
"""
widgets_layout component
"""
from zoom import DynamicComponent
class WidgetsLayout(DynamicComponent):
pass
|
from pytest import raises
from riotcli.main import RiotCliTest
def test_riotcli():
# test riotcli without any subcommands or arguments
with RiotCliTest() as app:
app.run()
assert app.exit_code == 0
def test_riotcli_debug():
# test that debug mode is functional
argv = ['--debug']
with RiotCliTest(argv=argv) as app:
app.run()
assert app.debug is True
def test_config():
# test config without arguments
argv = ['config']
with RiotCliTest(argv=argv) as app:
app.run()
# test command1 with arguments
argv = ['config', 'list']
with RiotCliTest(argv=argv) as app:
app.run()
        data, output = app.last_rendered
|
import array as arr
numa = arr.array('i',[1,2,3,4,5])
numb = arr.array('i',[10,20,30,40,50,60,70,80])
nummc = numa+numb
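# Note: '+' concatenates array.array objects only when both operands share the
# same typecode; mixing, e.g., 'i' and 'f' arrays raises a TypeError.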
print(nummc) |
import sys
import boto3
import requests
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.sql.functions import col, expr, when, round
from pyspark.sql.types import LongType
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
path="income-ny/year=2016/income.csv"
raw_bucket='<<userid>>-raw' #replace with your user id here
s3 = boto3.resource('s3')
bucket = s3.Bucket(raw_bucket)
objs = list(bucket.objects.filter(Prefix=path))
if len(objs) > 0 and objs[0].key == path:
print("Object "+path+" already exists!")
else:
print("Starting download")
income_url='https://www.irs.gov/pub/irs-soi/16zpallagi.csv'
income_csv=requests.get(income_url).text
    # Method 1: Object.put()
    obj = s3.Object(raw_bucket, path)
    obj.put(Body=income_csv)
print("Done")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 11:05:33 2018
@author: jhodges
This file contains classes and functions to read MODIS Level 3 data and
locate multiple data tiles onto a single larger grid.
Results can be queried from the database or a specific time. If a static query
time is given, the best estimated value at that time will be returned. If a
time range is given, the average value across the time interval will be
returned.
"""
import glob
import pyhdf.SD as phdf
import xml.etree.ElementTree as ET
import datetime as dt
from scipy.ndimage.interpolation import zoom
import numpy as np
import util_common as uc
import re
import sys
import math
import scipy.interpolate as scpi
def coordinatesFromTile(tile):
    ''' This function will return the latitude and longitude MODIS Level 3
    tile coordinates from the tile name in the format 'h00v00'
    '''
lon = int(tile[1:3])
lat = int(tile[4:])
return lat, lon
def loadGPolygon(file):
''' This function will return the corner latitude and longitudes from a
MODIS Level 3 metadata xml file.
'''
tree = ET.parse(file)
root = tree.getroot()
ps = root[2][9][0][0][0]
p = []
for i in range(0,4):
p.append([float(ps[i][0].text),float(ps[i][1].text)])
return p
def loadXmlDate(file):
''' This function will return the start and end dates from a MODIS Level 3
metadata xml file.
'''
tree = ET.parse(file)
root = tree.getroot()
DT = root[2][8]
fmt = '%Y-%m-%d-%H:%M:%S'
enddate = DT[1].text+'-'+DT[0].text.split('.')[0]
startdate = DT[3].text+'-'+DT[2].text.split('.')[0]
enddate = dt.datetime.strptime(enddate,fmt)
startdate = dt.datetime.strptime(startdate,fmt)
return startdate, enddate
def arrangeGPolygon(p,topleft=1,topright=2,botleft=0,botright=3):
''' This function will rearrange GPolygon points into a human readable
format.
'''
plat = np.array([[p[topleft][1],p[topright][1]],[p[botleft][1],p[botright][1]]])
plon = np.array([[p[topleft][0],p[topright][0]],[p[botleft][0],p[botright][0]]])
return plat, plon
def interpGPolygon(plat,plon,pixels=1200):
    ''' This function will interpolate the 2x2 coordinate matrices to
    pixel x pixel matrices using bilinear interpolation. Note, this function
should not be used with MODIS Level 3 data as the grid is non-linear. Use
invertModisTile instead.
'''
lat = zoom(plat,pixels/2,order=1)
lon = zoom(plon,pixels/2,order=1)
return lat, lon
def loadSdsData(file,sdsname):
''' This function will open an hdf4 file and return the data stored in
the sdsname attribute.
'''
f = phdf.SD(file,phdf.SDC.READ)
sds_obj = f.select(sdsname)
data = sds_obj.get()
return data
def returnDataFile(file):
f = phdf.SD(file,phdf.SDC.READ)
return f
def findXmlTimes(datadir,tiles):
''' This function finds the start and end times of each .hdf.xml file
in datadir within the first tile.
'''
files = glob.glob(datadir+'*'+tiles[0]+'*'+'.hdf')
startdates = []
enddates = []
for file in files:
startdate, enddate = loadXmlDate(file+'.xml')
startdates.append(startdate)
enddates.append(enddate)
return [startdates, enddates], files
def findQueryDateTime(files,dates,queryDateTime):
''' findQueryDateTime: This function takes a list containing start and end
datetimes returns the index of the list which contains a queryDateTime.
If no match is found, returns None.
Using timedeltas from datetime.datetime would have been better.
Unfortunately, that gave an error when the day was the same and the hour
difference was negative since the negative was stored in the day part of
the structure.
'''
index = None
queryDay = queryDateTime.timetuple().tm_yday+((queryDateTime.hour*60+queryDateTime.minute)*60+queryDateTime.second)/(24*60*60)
for i in range(0,len(dates[0])):
lowYearDiff = queryDateTime.year-dates[0][i].year
highYearDiff = dates[1][i].year-queryDateTime.year
lowDay = dates[0][i].timetuple().tm_yday+((dates[0][i].hour*60+dates[0][i].minute)*60+dates[0][i].second)/(24*60*60)
highDay = dates[1][i].timetuple().tm_yday+((dates[1][i].hour*60+dates[1][i].minute)*60+dates[1][i].second)/(24*60*60)
if lowYearDiff < 0:
lowDay = 367
elif lowYearDiff > 0:
lowDay = lowDay-uc.daysInYear(dates[0][i].year)
if highYearDiff < 0:
highDay = 0
elif highYearDiff > 0:
highDay = highDay+uc.daysInYear(dates[0][i].year-1)
if queryDay >= lowDay and queryDay <= highDay:
index = i
#print(dates[0][i],dates[1][i])
if index is not None:
tile = extractTileFromFile(files[index])
datename = files[index].split(tile)[0][-8:-1]
else:
print("Did not find queryDateTime.")
datename = None
return datename
def removeUnlistedTilesFromFiles(datadir,datename,tiles,use_all=False):
''' This will remove tiles which were not included in the list from the
list of files. If the use_all argument is active, it will instead
update the list of tiles to include all files found in the file names.
'''
files = glob.glob(datadir+'*'+datename+'*'+'.hdf')
if use_all:
tiles = findAllTilesFromFiles(files)
updated_files = []
for file in files:
use_file = False
for tile in tiles:
if tile in file:
use_file = True
if use_file:
updated_files.append(file)
return updated_files, tiles
def extractTileFromFile(file):
''' This function uses regular expressions to find .h00v00. in a filename
to extract the MODIS tile.
'''
    m = re.search(r'\.h\d\dv\d\d\.',file)
tile = m.group(0)[1:-1]
return tile
def findAllTilesFromFiles(files):
''' This function finds all MODIS tiles in a list of file names
'''
tiles = []
for file in files:
tile = extractTileFromFile(file)
tiles.append(tile)
return list(set(tiles))
def findAllTilesFromDir(datadir):
    ''' This function finds all MODIS tiles present in a data directory
'''
files = glob.glob(datadir+'*.hdf')
tiles = []
for file in files:
tile = extractTileFromFile(file)
tiles.append(tile)
return list(set(tiles))
def activeFireDayIndex(dates,queryDateTime):
''' This function finds the index of the queryDateTime within the range
of dates of the (.hdf) file.
'''
index = None
queryDay = queryDateTime.timetuple().tm_yday
lowDay = dates[0].timetuple().tm_yday
highDay = dates[1].timetuple().tm_yday
lowYearDiff = queryDateTime.year-dates[0].year
highYearDiff = dates[1].year-queryDateTime.year
if lowYearDiff == 0:
index = queryDay-lowDay
elif highYearDiff == 0:
index = 8-(highDay-queryDay)
else:
print("Is query within range for the file?")
return index
def invertModisTile(tile,pixels=1200):
''' This function will create a pixel x pixel matrix for latitude and
longitude using the tile name. This algorithm is presented in the
Active Fire Index User Guide.
'''
R=6371007.181
T=1111950
xmin=-20015109
ymax=10007555
w=T/pixels
lat_lnsp = np.linspace(0,pixels-1,pixels)
lon_lnsp = np.linspace(0,pixels-1,pixels)
lon_grid, lat_grid = np.meshgrid(lon_lnsp,lat_lnsp)
H = float(tile[1:3])
V = float(tile[4:])
lat = (ymax-(lat_grid+0.5)*w-V*T)/R*(180/math.pi)
lon = ((lon_grid+0.5)*w+H*T+xmin)/(R*np.cos(lat/180*math.pi))*(180/math.pi)
return lat, lon
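def demoInvertModisTile():
    ''' A minimal usage sketch (not part of the original module): build the
    coordinate grid for tile h08v05 and print the top-left corner values.
    '''
    lat, lon = invertModisTile('h08v05')
    print(lat[0,0], lon[0,0])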
def buildContour(files,queryDateTime,
sdsname='FireMask',
composite=True,
greedyMethod=False):
''' This function will combine measurements from multiple
MODIS tiles into a single dataset. The list of file names should
correspond to the same time and be for different tiles. The file names
should reference the (.hdf) files.
'''
#print(files[0])
pixels = loadSdsData(files[0],sdsname).shape[1]
tiles = findAllTilesFromFiles(files)
tiles_grid_dict, tiles_grid = uc.mapTileGrid(tiles,pixels,coordinatesFromTile)
tiles_data = tiles_grid.copy()
tiles_lat = tiles_grid.copy()
tiles_lon = tiles_grid.copy()
for file in files:
p = loadGPolygon(file+'.xml')
startdate, enddate = loadXmlDate(file+'.xml')
plat, plon = arrangeGPolygon(p)
if not composite:
day_index = activeFireDayIndex([startdate,enddate],queryDateTime)
data = loadSdsData(file,sdsname)
if day_index < data.shape[0]:
data = data[day_index,:,:]
else:
print("Required day index does not have data included.")
print("\tdata.shape:\t",data.shape)
print("\tday_index:\t",day_index)
data = None
else:
data = loadSdsData(file,sdsname)
tile = extractTileFromFile(file)
if greedyMethod:
lat, lon = interpGPolygon(plat,plon,pixels=pixels)
else:
lat, lon = invertModisTile(tile)
if data is not None:
tiles_data = uc.fillTileGrid(tiles_data,tiles_grid_dict,tile,data,pixels)
tiles_lat = uc.fillTileGrid(tiles_lat,tiles_grid_dict,tile,lat,pixels)
tiles_lon = uc.fillTileGrid(tiles_lon,tiles_grid_dict,tile,lon,pixels)
#tiles_lat = uc.fillEmptyCoordinates(tiles_lat,tiles,pixels,coordinatesFromTile)
#tiles_lon = uc.fillEmptyCoordinates(tiles_lon,tiles,pixels,coordinatesFromTile)
return tiles_lat, tiles_lon, tiles_data
def findQuerySdsData(queryDateTime,
datadir="G:/WildfireResearch/data/aqua_vegetation/",
tiles=['h08v04','h08v05','h09v04'],
composite=False,
use_all=False,
sdsname='1 km 16 days NDVI'):
''' This function will find the specified sdsname for each tile in tiles
within the datadir and find the closest to the queryDateTime. Matrices
    of the latitude, longitude, and data are returned.
'''
# Arrange files and tiles
if tiles is None:
tiles = findAllTilesFromDir(datadir)
dates, files = findXmlTimes(datadir,tiles)
datename = findQueryDateTime(files,dates,queryDateTime)
files, tiles = removeUnlistedTilesFromFiles(datadir,datename,tiles,use_all=use_all)
# Load all tiles at the queryDateTime
lat,lon,data = buildContour(files,queryDateTime,sdsname=sdsname,composite=composite)
return lat, lon, data
def geolocateCandidates(lat,lon,data):
''' This function extracts latitude and longitude corresponding to points
in the binary mask data.
'''
r,c = np.where(data > 0)
pts = []
coords = []
for i in range(0,len(r)):
ptlat = lat[r[i],c[i]]
ptlon = lon[r[i],c[i]]
ptdat = data[r[i],c[i]]
pts.append([ptlat,ptlon,ptdat])
coords.append([r[i],c[i]])
    coords = np.array(np.squeeze(coords),dtype=int)
pts = np.array(pts)
return pts, coords
def compareCandidates(old_pts,new_pts,dist_thresh=0.5):
''' This function compares two sets of points to return minimum distance
to a point in the new_pts set from an old_pt. dist_thresh is the minimum
distance away for two points to be considered a match in degrees.
NOTE: 1 degree is approximately 69 miles, or 111 km
NOTE: Modis resolution is approximately 1km
'''
matched_pts = []
if old_pts.shape[0] != 0 and new_pts.shape[0] != 0:
for i in range(0,old_pts.shape[0]):
squared = np.power(new_pts[:,0:2]-old_pts[i,0:2],2)
summed = np.sum(squared,axis=1)
rooted = np.power(summed,0.5)
min_dist = np.min(rooted)
if min_dist <= dist_thresh:
matched_pts.append([i,min_dist*111,np.argmin(rooted)])
matched_pts = np.array(matched_pts)
return matched_pts
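def demoCompareCandidates():
    ''' A minimal sketch (not part of the original module): two single-point
    sets 0.1 degrees (~11 km) apart fall inside dist_thresh and match.
    '''
    old_pts = np.array([[34.0,-118.0,1.0]])
    new_pts = np.array([[34.1,-118.0,1.0]])
    print(compareCandidates(old_pts,new_pts))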
def buildOneDayContour(files,sdsname='sur_refl_b01',targetPixels=1200):
pixels = loadSdsData(files[0],sdsname).shape[1]
zoomLevel = targetPixels/pixels
tiles = findAllTilesFromFiles(files)
tiles_grid_dict, tiles_grid = uc.mapTileGrid(tiles,targetPixels,coordinatesFromTile)
tiles_data = tiles_grid.copy()
tiles_lat = tiles_grid.copy()
tiles_lon = tiles_grid.copy()
for file in files:
data = loadSdsData(file,sdsname)
data = zoom(data,zoomLevel)
tile = extractTileFromFile(file)
lat, lon = invertModisTile(tile,pixels=targetPixels)
if data is not None:
tiles_data = uc.fillTileGrid(tiles_data,tiles_grid_dict,tile,data,targetPixels)
tiles_lat = uc.fillTileGrid(tiles_lat,tiles_grid_dict,tile,lat,targetPixels)
tiles_lon = uc.fillTileGrid(tiles_lon,tiles_grid_dict,tile,lon,targetPixels)
return tiles_lat, tiles_lon, tiles_data
def list2stats(datas,name=''):
    dataMedian = np.nanmedian(datas,axis=0)
dataMean = np.nanmean(datas,axis=0)
dataMin = np.nanmin(datas,axis=0)
dataMax = np.nanmax(datas,axis=0)
uc.dumpPickle([dataMin,dataMax,dataMedian,dataMean],name)
return dataMin, dataMax, dataMedian, dataMean
def generateVegetationStats(datadir="G:/WildfireResearch/data/aqua_reflectance/",
outdir="E:/projects/wildfire-research/data-test/",
tiles=['h08v04','h08v05','h09v04']):
''' This function will store out images with the min, max, median, and mean
values of VIGR, NDVI, VARI, and NDI16. These are needed for moisture
content estimation.
'''
files = glob.glob(datadir+'*.hdf')
dates = []
for file in files:
dates.append(file.split("//")[1].split('.')[1])
dates = list(set(dates))
ndvis = []
varis = []
ndi16s = []
vigrs = []
    for i in range(0,len(dates)):
        date = dates[i]
files = glob.glob(datadir+'/*'+date+'*.hdf')
goodFiles = []
for file in files:
tileCheck = False
for tile in tiles:
if tile in file:
tileCheck = True
if tileCheck:
goodFiles.append(file)
lat,lon,rho1 = buildOneDayContour(goodFiles,sdsname='sur_refl_b01')
lat,lon,rho2 = buildOneDayContour(goodFiles,sdsname='sur_refl_b02')
lat,lon,rho3 = buildOneDayContour(goodFiles,sdsname='sur_refl_b03')
lat,lon,rho4 = buildOneDayContour(goodFiles,sdsname='sur_refl_b04')
lat,lon,rho6 = buildOneDayContour(goodFiles,sdsname='sur_refl_b06')
num_ndvi = np.array(rho2-rho1,dtype=np.float32)
den_ndvi = np.array(rho2+rho1,dtype=np.float32)
ndvi = np.zeros(num_ndvi.shape)
ndvi[den_ndvi > 0] = num_ndvi[den_ndvi > 0]/den_ndvi[den_ndvi > 0]
ndvis.append(ndvi)
num_vari = rho4-rho1
den_vari = rho4+rho1-rho3
vari = np.zeros(num_vari.shape)
vari[den_vari > 0] = num_vari[den_vari > 0]/den_vari[den_vari > 0]
varis.append(vari)
num_ndi16 = rho2-rho6
den_ndi16 = rho2+rho6
ndi16 = np.zeros(num_ndi16.shape)
ndi16[den_ndi16 > 0] = num_ndi16[den_ndi16 > 0]/den_ndi16[den_ndi16 > 0]
ndi16s.append(ndi16)
num_vigr = rho4-rho1
den_vigr = rho4+rho1
vigr = np.zeros(num_vigr.shape)
vigr[den_vigr > 0] = num_vigr[den_vigr > 0]/den_vigr[den_vigr > 0]
vigrs.append(vigr)
vigrMin, vigrMax, vigrMedian, vigrMean = list2stats(vigrs,name=outdir+'vigrStats2016.pkl')
ndviMin, ndviMax, ndviMedian, ndviMean = list2stats(ndvis,name=outdir+'ndviStats2016.pkl')
variMin, variMax, variMedian, variMean = list2stats(varis,name=outdir+'variStats2016.pkl')
ndi16Min, ndi16Max, ndi16Median, ndi16Mean = list2stats(ndi16s,name=outdir+'ndi16Stats2016.pkl')
uc.dumpPickle([dates,lat,lon,vigrs],outdir+'vigrAll.pkl')
uc.dumpPickle([dates,lat,lon,ndvis],outdir+'ndvisAll.pkl')
uc.dumpPickle([dates,lat,lon,varis],outdir+'varisAll.pkl')
uc.dumpPickle([dates,lat,lon,ndi16s],outdir+'ndi16sAll.pkl')
return dates, ndvis, varis, ndi16s, vigrs
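# Hedged reference for the band-ratio indices computed above, using MODIS
# surface reflectance band numbering: NDVI = (rho2-rho1)/(rho2+rho1),
# VARI = (rho4-rho1)/(rho4+rho1-rho3), NDI16 = (rho2-rho6)/(rho2+rho6), and
# VIGR = (rho4-rho1)/(rho4+rho1). Pixels with a non-positive denominator are
# left at 0 rather than divided.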
def getLfmChap(vari,lfmLowerThresh=0,lfmUpperThresh=200,
vigrFile="E:/projects/wildfire-research/data-test/vigrStats2016.pkl"):
    ''' This function returns the chaparral live fuel moisture estimate based
        on the VARI measurement.
    '''
vigrMin, vigrMax, vigrMedian, vigrMean = uc.readPickle(vigrFile)
lfm = 97.8+471.6*vari-293.9*vigrMedian-816.2*vari*(vigrMax-vigrMin)
lfm[lfm<lfmLowerThresh] = lfmLowerThresh
lfm[lfm>lfmUpperThresh] = lfmUpperThresh
return lfm
def getLfmCss(vari,lfmLowerThresh=0,lfmUpperThresh=200,
ndi16File="E:/projects/wildfire-research/data-test/ndi16Stats2016.pkl",
ndviFile="E:/projects/wildfire-research/data-test/ndviStats2016.pkl"):
    ''' This function returns the coastal sage scrub live fuel moisture
        estimate based on the VARI measurement.
    '''
ndi16Min, ndi16Max, ndi16Median, ndi16Mean = uc.readPickle(ndi16File)
ndviMin, ndviMax, ndviMedian, ndviMean = uc.readPickle(ndviFile)
lfm = 179.2 + 1413.9*vari-450.5*ndi16Median-1825.2*vari*(ndviMax-ndviMin)
lfm[lfm<lfmLowerThresh] = lfmLowerThresh
lfm[lfm>lfmUpperThresh] = lfmUpperThresh
return lfm
def buildCanopyData(datadir='G:/WildfireResearch/data/terra_canopy/',
outdir = "E:/projects/wildfire-research/data-test/",
sdsname='Percent_Tree_Cover',
outname='canopy.pkl'):
ds = 1
method='linear'
files = glob.glob(datadir+'/*.hdf')
#f = returnDataFile(files[0])
lat,lon,data = buildOneDayContour(files,sdsname=sdsname,targetPixels=1200)
data[lat==0] = np.nan
lat[lat == 0] = np.nan
lon[lon == 0] = np.nan
data[data > 100] = 100
lat = np.reshape(lat,(lat.shape[0]*lat.shape[1]))
lon = np.reshape(lon,(lon.shape[0]*lon.shape[1]))
values = np.reshape(data,(data.shape[0]*data.shape[1]))
inds = np.where(~np.isnan(lat) & ~np.isnan(lon) & ~np.isnan(values))
lat = lat[inds]
lon = lon[inds]
values = values[inds]
pts = np.zeros((len(lat),2))
pts[:,0] = lat
pts[:,1] = lon
newpts, sz = getCustomGrid(reshape=True)
remapped = scpi.griddata(pts[0::ds],values[0::ds],newpts,method=method)
data = np.reshape(remapped,(sz[0],sz[1]))
latitude, longitude = getCustomGrid(reshape=False)
uc.dumpPickle([latitude,longitude,data],outdir+outname)
return latitude, longitude, data
def getCustomGrid(lat_lmt = [30,44],
lon_lmt = [-126,-112],
pxPerDegree = 120,
ds=1,
method='nearest',
reshape=False):
    ''' This function generates a custom regular latitude/longitude grid.
    '''
lat_lnsp = np.linspace(np.min(lat_lmt),np.max(lat_lmt),
(np.max(lat_lmt)-np.min(lat_lmt)+1)*pxPerDegree)
lon_lnsp = np.linspace(np.min(lon_lmt),np.max(lon_lmt),
(np.max(lon_lmt)-np.min(lon_lmt)+1)*pxPerDegree)
lon_grid, lat_grid = np.meshgrid(lon_lnsp,lat_lnsp)
if reshape:
lon_lnsp2 = np.reshape(lon_grid,(lon_grid.shape[0]*lon_grid.shape[1]))
lat_lnsp2 = np.reshape(lat_grid,(lat_grid.shape[0]*lat_grid.shape[1]))
newpts = np.zeros((len(lat_lnsp2),2))
newpts[:,0] = lat_lnsp2
newpts[:,1] = lon_lnsp2
sz = lat_grid.shape
return newpts, sz
else:
return lat_grid, lon_grid
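# Hedged sketch of how getCustomGrid pairs with scipy's griddata (the same
# pattern buildCanopyData uses): build the flattened target grid, interpolate
# scattered (lat, lon) samples onto it, then reshape back to 2-D. The three
# sample points here are invented for illustration only.
def demoRegridToCustomGrid():
    pts = np.array([[32.0, -120.0], [40.0, -115.0], [36.0, -118.0]])
    values = np.array([1.0, 2.0, 3.0])
    newpts, sz = getCustomGrid(reshape=True)
    remapped = scpi.griddata(pts, values, newpts, method='nearest')
    return np.reshape(remapped, (sz[0], sz[1]))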
if __name__ == '__main__':
    ''' case 0: loads MODIS vegetation index at queryDateTime and plots it for
            the whole United States
        case 1: loads MODIS active fires at queryDateTime and plots them for
            California
        case 2: loads MODIS vegetation index, active fires, and burned area
            at queryDateTime for California
        case 3: loads MODIS active fires at 365 consecutive queryDateTimes
            and saves the results
    '''
# User inputs
queryDateTime = dt.datetime(year=2017,month=7,day=9,hour=6,minute=00)
case = 1
if case == 0:
tiles = None
states = 'All'
#Find vegetation index at queryDateTime
vi_lat,vi_lon,vi_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=True,
datadir="G:/WildfireResearch/data/aqua_vegetation/",
sdsname='1 km 16 days NDVI')
vi_fig = uc.plotContourWithStates(vi_lat,vi_lon,vi_data,states=states,label='VI')
vi_mem = (sys.getsizeof(vi_data)+sys.getsizeof(vi_lat)+sys.getsizeof(vi_lon))/1024**2
print("VI File Size: %.4f MB"%(vi_mem))
if case == 1:
tiles = ['h08v04','h08v05','h09v04']
states = 'California'
# Find activefires at queryDateTime
af_lat,af_lon,af_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=False,
datadir="G:/WildfireResearch/data/aqua_daily_activefires/",
sdsname='FireMask')
af_fig = uc.plotContourWithStates(af_lat,af_lon,af_data,states=states,
clim=np.linspace(0,9,10),label='AF',
xlim=[-121.5, -118.5], ylim=[33.5, 36.5], saveFig=True)
af_mem = (sys.getsizeof(af_data)+sys.getsizeof(af_lat)+sys.getsizeof(af_lon))/1024**2
print("AF File Size: %.4f MB"%(af_mem))
if case == 2:
tiles = ['h08v04','h08v05','h09v04']
states = 'California'
# Find activefires at queryDateTime
af_lat,af_lon,af_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=False,
datadir="G:/WildfireResearch/data/aqua_daily_activefires/",
sdsname='FireMask')
#Find vegetation index at queryDateTime
vi_lat,vi_lon,vi_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=True,
datadir="G:/WildfireResearch/data/aqua_vegetation/",
sdsname='1 km 16 days NDVI')
#Find burned area at queryDateTime
ba_lat,ba_lon,ba_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=True,
datadir="G:/WildfireResearch/data/modis_burnedarea/",
sdsname='burndate')
af_fig = uc.plotContourWithStates(af_lat,af_lon,af_data,states=states,
clim=np.linspace(0,9,10),label='AF')
vi_fig = uc.plotContourWithStates(vi_lat,vi_lon,vi_data,states=states,label='VI')
ba_fig = uc.plotContourWithStates(ba_lat,ba_lon,ba_data,states=states,label='BA')
vi_mem = (sys.getsizeof(vi_data)+sys.getsizeof(vi_lat)+sys.getsizeof(vi_lon))/1024**2
af_mem = (sys.getsizeof(af_data)+sys.getsizeof(af_lat)+sys.getsizeof(af_lon))/1024**2
ba_mem = (sys.getsizeof(ba_data)+sys.getsizeof(ba_lat)+sys.getsizeof(ba_lon))/1024**2
total_mem = vi_mem+af_mem+ba_mem
print("VI, AF, BA, Total File Size: %.4f,%.4f,%.4f,%.4f MB"%(vi_mem,af_mem,ba_mem,total_mem))
if case == 3:
tiles = ['h08v04','h08v05','h09v04']
states = 'California'
# Find activefires at queryDateTime
#queryDateTime = dt.datetime(year=2016,month=1,day=1,hour=12,minute=0)
outdir = 'E:\\projects\\forensics\\parkfield\\'
for i in range(0,365):
af_name = outdir+'AF2_'+queryDateTime.isoformat()[0:13]+'.png'
af_lat,af_lon,af_data = findQuerySdsData(queryDateTime,tiles=tiles,composite=False,
datadir="G:/WildfireResearch/data/terra_daily_activefires/",
sdsname='FireMask')
if af_data is not None:
af_fig = uc.plotContourWithStates(af_lat,af_lon,af_data,states=states,
clim=np.linspace(0,9,10),label='AF',
saveFig=True,saveName=af_name)
af_mem = (sys.getsizeof(af_data)+sys.getsizeof(af_lat)+sys.getsizeof(af_lon))/1024**2
data_mask = af_data.copy()
data_mask[data_mask < 7] = 0
                pts, _ = geolocateCandidates(af_lat,af_lon,data_mask)
                if i > 0 and old_pts.shape[0] > 0:
                    match_pts = compareCandidates(old_pts,pts)
                    if match_pts.shape[0] > 0:
                        print("Time %s found %.0f matches with the closest %.4f km."%(queryDateTime.isoformat(),match_pts.shape[0],np.min(match_pts[:,1])))
                old_pts = pts
            else:
                old_pts = np.array([])
            # advance to the next day whether or not data was available
            queryDateTime = queryDateTime + dt.timedelta(days=1)
#print(match_pts)
print("AF File Size: %.4f MB"%(af_mem))
if case == 4:
datadir = "E:/projects/wildfire-research/data-test/"
dates, lat, lon, varis = uc.readPickle(datadir+'varisAll.pkl')
for i in range(0,1):#len(varis)):
lfm_chap = getLfmChap(varis[i])
#lfm_css = getLfmCss(varis[i])
uc.plotContourWithStates(lat,lon,lfm_chap,
clim=np.linspace(0,200,11))
#saveFig=True,saveName=datadir+"lfmCss_"+dates[i]+".png",)
if case == 5:
lat, lon, data = buildCanopyData()
uc.plotContourWithStates(lat,lon,data,clim=np.linspace(0,100,11))
"""
datadir = 'G:/WildfireResearch/data/terra_canopy/'
outdir = "E:/projects/wildfire-research/data-test/"
files = glob.glob(datadir+'/*.hdf')
#f = returnDataFile(files[0])
lat,lon,data = buildOneDayContour(files,sdsname='Percent_Tree_Cover',targetPixels=1200)
data[lat==0] = np.nan
lat[lat == 0] = np.nan
lon[lon == 0] = np.nan
data[data > 100] = 100
uc.plotContourWithStates(lat,lon,data,clim=np.linspace(0,100,11))
uc.dumpPickle([lat,lon,data],outdir+'canopy.pkl')
"""
|
# -*- coding: utf-8 -*-
from gos.executable_containers import ExecutableContainer
class Pipeline(ExecutableContainer):
entries_type_names = None
type_name = "pipeline"
|
from collections import defaultdict
import mock
from searx.engines import google_images
from searx.testing import SearxTestCase
class TestGoogleImagesEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 1
dicto['safesearch'] = 1
dicto['time_range'] = ''
params = google_images.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
dicto['safesearch'] = 0
params = google_images.request(query, dicto)
self.assertNotIn('safe', params['url'])
def test_response(self):
self.assertRaises(AttributeError, google_images.response, None)
self.assertRaises(AttributeError, google_images.response, [])
self.assertRaises(AttributeError, google_images.response, '')
self.assertRaises(AttributeError, google_images.response, '[]')
html = r"""
["rg_s",["dom","\u003Cstyle\u003E.rg_kn,.rg_s{}.rg_bx{display:-moz-inline-box;display:inline-block;margin-top:0;margin-right:12px;margin-bottom:12px;margin-left:0;overflow:hidden;position:relative;vertical-align:top;z-index:1}.rg_meta{display:none}.rg_l{display:inline-block;height:100%;position:absolute;text-decoration:none;width:100%}.rg_l:focus{outline:0}.rg_i{border:0;color:rgba(0,0,0,0);display:block;-webkit-touch-callout:none;}.rg_an,.rg_anbg,.rg_ilm,.rg_ilmbg{right:0;bottom:0;box-sizing:border-box;-moz-box-sizing:border-box;color:#fff;font:normal 11px arial,sans-serif;line-height:100%;white-space:nowrap;width:100%}.rg_anbg,.rg_ilmbg{background:rgba(51,51,51,0.8);margin-left:0;padding:2px 4px;position:absolute}.rg_ilmn{bottom:0;display:block;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.rg_ilm{display:none}#rg_s.rg_kn .rg_l:focus .rg_ilm{display:block}.rg_kn .rg_bx:hover .rg_ilm,.rg_bx:hover .rg_anbg{display:none}.rg_bx:hover .rg_ilm,.rg_anbg,.rg_kn .rg_bx:hover .rg_anbg{display:block}\u003C\/style\u003E\u003Cdiv eid=\"qlKuV-T3BoqksAHMnaroAw\" id=\"isr_scm_0\" style=\"display:none\"\u003E\u003C\/div\u003E\u003Cdiv data-cei=\"qlKuV-T3BoqksAHMnaroAw\" class=\"rg_add_chunk\"\u003E\u003C!--m--\u003E\u003Cdiv class=\"rg_di rg_bx rg_el ivg-i\" data-ved=\"0ahUKEwjk9PCm-7zOAhUKEiwKHcyOCj0QMwgCKAAwAA\"\u003E\u003Ca jsaction=\"fire.ivg_o;mouseover:str.hmov;mouseout:str.hmou\" class=\"rg_l\" style=\"background:rgb(170,205,240)\"\u003E\u003Cimg data-sz=\"f\" name=\"5eykIeMjmCk7xM:\" src=\"https:\/\/encrypted-tbn0.gstatic.com\/images?q=tbn\" class=\"rg_i rg_ic\" alt=\"Image result for south\" jsaction=\"load:str.tbn\" onload=\"google.aft\u0026\u0026google.aft(this)\"\u003E\u003Cdiv class=\"_aOd rg_ilm\"\u003E\u003Cdiv class=\"rg_ilmbg\"\u003E\u003Cspan class=\"rg_ilmn\"\u003E 566\u0026nbsp;\u0026#215;\u0026nbsp;365 - en.wikipedia.org \u003C\/span\u003E\u003C\/div\u003E\u003C\/div\u003E\u003C\/a\u003E\u003Cdiv class=\"rg_meta\"\u003E{\"id\":\"5eykIeMjmCk7xM:\",\"isu\":\"en.wikipedia.org\",\"itg\":false,\"ity\":\"png\",\"oh\":365,\"ou\":\"https:\/\/upload.wikimedia.org\/wikipedia\/commons\/e\/e4\/Us_south_census.png\",\"ow\":566,\"pt\":\"Southern United States - Wikipedia, the free encyclopedia\",\"rid\":\"cErfE02-v-VcAM\",\"ru\":\"https:\/\/en.wikipedia.org\/wiki\/Southern_United_States\",\"s\":\"The Southern United States as defined by the United States Census Bureau.\",\"sc\":1,\"th\":180,\"tu\":\"https:\/\/encrypted-tbn0.gstatic.com\/images?q\\u003dtbn\",\"tw\":280}\u003C\/div\u003E\u003C\/div\u003E\u003C!--n--\u003E\u003C!--m--\u003E\u003Cdiv class=\"rg_di rg_bx rg_el ivg-i\" data-ved=\"0ahUKEwjk9PCm-7zOAhUKEiwKHcyOCj0QMwgDKAEwAQ\"\u003E\u003Ca jsaction=\"fire.ivg_o;mouseover:str.hmov;mouseout:str.hmou\" class=\"rg_l\" style=\"background:rgb(249,252,249)\"\u003E\u003Cimg data-sz=\"f\" name=\"eRjGCc0cFyVkKM:\" src=\"https:\/\/encrypted-tbn2.gstatic.com\/images?q=tbn:ANd9GcSI7SZlbDwdMCgGXzJkpwgdn9uL41xUJ1IiIcKs0qW43_Yp0EhEsg\" class=\"rg_i rg_ic\" alt=\"Image result for south\" jsaction=\"load:str.tbn\" onload=\"google.aft\u0026\u0026google.aft(this)\"\u003E\u003Cdiv class=\"_aOd rg_ilm\"\u003E\u003Cdiv class=\"rg_ilmbg\"\u003E\u003Cspan class=\"rg_ilmn\"\u003E 2000\u0026nbsp;\u0026#215;\u0026nbsp;1002 - commons.wikimedia.org \u003C\/span\u003E\u003C\/div\u003E\u003C\/div\u003E\u003C\/a\u003E\u003Cdiv 
class=\"rg_meta\"\u003E{\"id\":\"eRjGCc0cFyVkKM:\",\"isu\":\"commons.wikimedia.org\",\"itg\":false,\"ity\":\"png\",\"oh\":1002,\"ou\":\"https:\/\/upload.wikimedia.org\/wikipedia\/commons\/thumb\/8\/84\/South_plate.svg\/2000px-South_plate.svg.png\",\"ow\":2000,\"pt\":\"File:South plate.svg - Wikimedia Commons\",\"rid\":\"F8TVsT2GBLb6RM\",\"ru\":\"https:\/\/commons.wikimedia.org\/wiki\/File:South_plate.svg\",\"s\":\"This image rendered as PNG in other widths: 200px, 500px, 1000px, 2000px.\",\"sc\":1,\"th\":159,\"tu\":\"https:\/\/encrypted-tbn2.gstatic.com\/images?q\\u003dtbn:ANd9GcSI7SZlbDwdMCgGXzJkpwgdn9uL41xUJ1IiIcKs0qW43_Yp0EhEsg\",\"tw\":317}\u003C\/div\u003E\u003C\/div\u003E\u003C!--n--\u003E\u003C\/div\u003E"]]""" # noqa
response = mock.Mock(text=html)
results = google_images.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
self.assertEqual(results[0]['title'], u'Southern United States - Wikipedia, the free encyclopedia')
self.assertEqual(results[0]['url'], 'https://en.wikipedia.org/wiki/Southern_United_States')
self.assertEqual(results[0]['img_src'],
'https://upload.wikimedia.org/wikipedia/commons/e/e4/Us_south_census.png')
self.assertEqual(results[0]['content'],
'The Southern United States as defined by the United States Census Bureau.')
self.assertEqual(results[0]['thumbnail_src'],
'https://encrypted-tbn0.gstatic.com/images?q=tbn')
|
import numpy as np
import pandas as pd
import cPickle as pickle
import os
import scipy.io as sio
import matplotlib
matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 20})
def calc_sprns_by_range_and_plot(r_data,inds_range,sp_flag):
rshape = np.shape(r_data)
print np.shape(r_data)
if sp_flag=='pop':
sp_ind = 0
elif sp_flag=='lt':
sp_ind = 1
    else:
        raise ValueError('Error: unknown sparseness flag')
n_frames = rshape[sp_ind]
rates_array = r_data[inds_range,:]
r_data_sq = rates_array**2
nr = (np.sum(rates_array,sp_ind)/n_frames)**2
dr = (np.sum(r_data_sq,sp_ind)/n_frames)
    S = (1 - nr/dr)/(1-(1.0/n_frames))  # 1.0 forces float division under Python 2
S=S[~np.isnan(S)]
if sp_ind == 1:
plt.figure()
plt.hist(S)
plt.show()
else:
plt.figure()
plt.plot(S)
plt.show()
return S[~np.isnan(S)]
def evaluate_and_plot_sparseness_by_cell_type(sim_data,r_data,sp_flag):
ctype_list = ['Scnn1a','Rorb','Nr5a1','PV1','PV2','LIF_exc','LIF_inh','all_bp_exc','all_bp_inh']
ctr = 0
fig,ax_list = plt.subplots(3,3)
for ii in range(3):
for jj in range(3):
ax = ax_list[ii,jj]
            if ctr < len(ctype_list):
ctype_str = ctype_list[ctr]
#print sim_data['cells_file']
S = calc_sprns_by_cell_type(sim_data['cells_file'],r_data,ctype_str,sp_flag)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
if sp_flag=='lt':
mu=np.mean(S)
median = np.median(S)
sigma=np.std(S)
textstr = '$\mu=%.2f$\n$\mathrm{median}=%.2f$\n$\sigma=%.2f$'%(mu, median, sigma)
#ax.hist(S)
spr_hist, bins = np.histogram(S, bins=np.linspace(0, 1.0, 10))
ax.plot(bins[:-1], spr_hist)
ax.set_ylim((0, 8000.0))
# place a text box in upper left in axes coords
ax.text(0.25, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.set_title(ctype_str)
ctr = ctr+1
elif sp_flag=='pop':
mu=np.mean(S)
median = np.median(S)
sigma=np.std(S)
textstr = '$\mu=%.5f$\n$\mathrm{median}=%.5f$\n$\sigma=%.5f$'%(mu, median, sigma)
ax.plot(S)
ax.set_ylim([0.7,1])
# place a text box in upper left in axes coords
ax.text(0.25, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.set_title(ctype_str)
ctr = ctr+1
                else:
                    raise ValueError('Error: unknown sparseness flag')
plt.savefig(sim_data['f_out_spr_hist_eps'], format='eps')
plt.show()
def calc_sprns_by_cell_type(cells_file,r_data,ctype_str,sp_flag):
cells_db = pd.read_csv(cells_file, sep=' ')
rshape = np.shape(r_data)
if sp_flag=='pop':
sp_ind = 0
elif sp_flag=='lt':
sp_ind = 1
    else:
        raise ValueError('Error: unknown sparseness flag')
if ctype_str=='all_bp_exc':
ct_inds_1 = np.array(np.where(cells_db['type']=='Scnn1a'))
ct_inds_2 = np.array(np.where(cells_db['type']=='Rorb'))
ct_inds_3 = np.array(np.where(cells_db['type']=='Nr5a1'))
ct_inds = np.concatenate((ct_inds_1[0],ct_inds_2[0],ct_inds_3[0]))
elif ctype_str =='all_bp_inh':
ct_inds_1 = np.array(np.where(cells_db['type']=='PV1'))
ct_inds_2 = np.array(np.where(cells_db['type']=='PV2'))
ct_inds = np.concatenate((ct_inds_1[0],ct_inds_2[0]))
else:
ct_inds = np.array(np.where(cells_db['type']==ctype_str))
ct_inds = ct_inds[0]
rates_array = r_data[ct_inds]
n_frames = rshape[sp_ind]
r_data_sq = rates_array**2
nr = (np.sum(rates_array,sp_ind)/n_frames)**2
dr = (np.sum(r_data_sq,sp_ind)/n_frames)
    S = (1 - nr/dr)/(1-(1.0/n_frames))  # 1.0 forces float division under Python 2
return S[~np.isnan(S)]
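# Hedged numeric sketch of the sparseness measure used above (a Treves-Rolls
# style index): S = (1 - (sum(r)/N)**2 / (sum(r**2)/N)) / (1 - 1.0/N). The
# rates below are made-up values, not simulation output; a population where a
# single unit fires and the rest are silent gives S = 1.
def demo_sparseness():
    r = np.array([5.0, 0.0, 0.0, 0.0])
    n = float(r.size)
    nr = (np.sum(r) / n) ** 2
    dr = np.sum(r ** 2) / n
    print((1 - nr / dr) / (1 - 1.0 / n))  # -> 1.0 for this maximally sparse case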
def compute_fr_array_mov(cells_file, spk_f_names, f_out_r, t_start, t_stop, bin_size,ntr):
cells_db = pd.read_csv(cells_file, sep=' ')
t_bins = np.arange(t_start, t_stop, bin_size)
r_data = np.zeros( (len(cells_db.index), t_bins[:-1].size) )
t = np.array([])
gids = np.array([])
for f_name in spk_f_names:
#f_name = spk_f_names
print 'Processing file %s.' % (f_name)
data = np.genfromtxt(f_name, delimiter=' ')
if (data.size == 0):
t_tmp = np.array([])
gids_tmp = np.array([])
elif (data.size == 2):
t_tmp = np.array([data[0]])
gids_tmp = np.array([data[1]])
else:
t_tmp = data[:, 0]
gids_tmp = data[:, 1]
t = np.concatenate( (t, t_tmp) )
gids = np.concatenate( (gids, gids_tmp) )
for k_t, t_bin in enumerate(t_bins[:-1]):
print 'Computing rates in bins; working on bin %d of %d.' % (k_t, t_bins[:-1].size)
ind = np.intersect1d( np.where(t >= t_bin), np.where(t < (t_bin + bin_size)) )
t_tmp = t[ind]
gids_tmp = gids[ind]
df = pd.DataFrame( {'gid': gids_tmp, 't': t_tmp} )
df_tmp = df.groupby('gid').count() * 1000.0 / bin_size/ntr # Time is in ms and rate is in Hz.
df_tmp.columns = ['rates']
for gid in df_tmp.index:
r_data[gid, k_t] = df_tmp['rates'].loc[gid]
np.save(f_out_r, r_data)
def compute_fr_array_gratings(cells_file, spk_f_names, f_out_r, t_start, t_stop, bin_size,ntr):
cells_db = pd.read_csv(cells_file, sep=' ')
t_bins = np.arange(t_start, t_stop, bin_size)
r_data = np.zeros( (len(cells_db.index), t_bins[:-1].size) )
t = np.array([])
gids = np.array([])
for f_name in spk_f_names:
#f_name = spk_f_names
print 'Processing file %s.' % (f_name)
data = np.genfromtxt(f_name, delimiter=' ')
if (data.size == 0):
t_tmp = np.array([])
gids_tmp = np.array([])
elif (data.size == 2):
t_tmp = np.array([data[0]])
gids_tmp = np.array([data[1]])
else:
t_tmp = data[:, 0]
gids_tmp = data[:, 1]
t = np.concatenate( (t, t_tmp) )
gids = np.concatenate( (gids, gids_tmp) )
for k_t, t_bin in enumerate(t_bins[:-1]):
print 'Computing rates in bins; working on bin %d of %d.' % (k_t, t_bins[:-1].size)
ind = np.intersect1d( np.where(t >= t_bin), np.where(t < (t_bin + bin_size)) )
t_tmp = t[ind]
gids_tmp = gids[ind]
df = pd.DataFrame( {'gid': gids_tmp, 't': t_tmp} )
df_tmp = df.groupby('gid').count() * 1000.0 / bin_size/ntr # Time is in ms and rate is in Hz.
df_tmp.columns = ['rates']
for gid in df_tmp.index:
r_data[gid, k_t] = df_tmp['rates'].loc[gid]
np.save(f_out_r, r_data)
def create_nat_movie_sim_dict(base_dir,sys_name):
st_frame_list = ['1530','3600','5550']
end_frame_list = ['1680','3750','5700']
sim_dict_list = {}
for kk in range(len(st_frame_list)):
st_frame = st_frame_list[kk]
end_frame = end_frame_list[kk]
f1_str = st_frame+'_to_'+end_frame+'_'
expt_str = sys_name+'_toe'+st_frame
# Decide which simulations we are doing analysis for.
sim_dict = {}
f2 = '_sdlif_z101/spk.dat'
sim_dict[expt_str] = {'cells_file': '/allen/aibs/mat/antona/network/14-simulations/9-network/build/'+sys_name+'.csv',
't_start': 500.0,
't_stop': 5000.0,
'bin_size':33.3,
'N_trials':10,
#'f_1': base_dir+'simulations_'+sys_name+'/natural_movies/output_'+sys_name+'_TouchOfEvil_frames_'+f1_str,
'f_1': base_dir+'simulation_'+sys_name+'/output_'+sys_name+'_TouchOfEvil_frames_'+f1_str,
'f_2': f2,
'f_out_r': 'sparseness/LIF' + expt_str+'_r.npy',
'f_out_spr_hist_eps': 'sparseness/LIF' + expt_str + 'spr_hist.eps'}
sim_dict_list[kk] = sim_dict
return sim_dict_list
def create_grating_sim_dict(base_dir,sys_name):
gc_list = ['8','38','68']
sim_dict_list = {}
for kk in range(len(gc_list)):
f1_str = gc_list[kk]
expt_str = sys_name+'grating_g'+f1_str
# Decide which simulations we are doing analysis for.
sim_dict = {}
f2 = '_sdlif_z101/spk.dat'
sim_dict[expt_str] = {'cells_file': '/allen/aibs/mat/antona/network/14-simulations/9-network/build/'+sys_name+'.csv',
't_start': 500.0,
't_stop': 3000.0,
'bin_size':33.3,
'N_trials':10,
# 'f_1': base_dir+'simulations_'+sys_name+'/gratings/output_'+sys_name+'_g'+f1_str+'_',
'f_1': base_dir+'simulation_'+sys_name+'/output_'+sys_name+'_g'+f1_str+'_',
'f_2': f2,
'f_out_r': 'sparseness/LIF' + expt_str + '_r_v2.npy',
'f_out_spr_hist_eps': 'sparseness/LIF' + expt_str + 'spr_hist.eps'}
sim_dict_list[kk] = sim_dict
return sim_dict_list
def sparseness_nat(input_dict,sprns_type, plot_only_flag):
for kk in range(len(input_dict)):
sim_dict = input_dict[kk]
for sim_key in sim_dict.keys():
sim_data = sim_dict[sim_key]
if plot_only_flag!=1:
spk_f_names = []
for i in xrange(sim_data['N_trials']):
spk_f_names.append('%s%d%s' % (sim_data['f_1'], i, sim_data['f_2']))
compute_fr_array_mov(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
# compute_fr_array_imgs(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
# compute_fr_array_gratings(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
print sim_data['f_out_r']
r_data = np.load(sim_data['f_out_r'])
evaluate_and_plot_sparseness_by_cell_type(sim_data,r_data,sprns_type)
#calc_sprns_by_range_and_plot(r_data,np.arange(0,8500,1),'lt')
def sparseness_gratings(input_dict,sprns_type, plot_only_flag):
for kk in range(len(input_dict)):
sim_dict = input_dict[kk]
for sim_key in sim_dict.keys():
sim_data = sim_dict[sim_key]
if plot_only_flag!=1:
spk_f_names = []
for i in xrange(sim_data['N_trials']):
spk_f_names.append('%s%d%s' % (sim_data['f_1'], i, sim_data['f_2']))
# compute_fr_array_mov(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
# compute_fr_array_imgs(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
compute_fr_array_gratings(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
print sim_data['f_out_r']
r_data = np.load(sim_data['f_out_r'])
evaluate_and_plot_sparseness_by_cell_type(sim_data,r_data,sprns_type)
#calc_sprns_by_range_and_plot(r_data,np.arange(0,8500,1),'lt')
if __name__ == '__main__':
base_dir = '/allen/aibs/mat/ZiqiangW/analysis_intFire1/'
sys_list = ['ll2']
plot_only_flag = 0 #1 #0
for ss in range(len(sys_list)):
sys_name=sys_list[ss]
nat_sim_dict = create_nat_movie_sim_dict(base_dir,sys_name)
sparseness_nat(nat_sim_dict,'lt',plot_only_flag)
grating_sim_dict = create_grating_sim_dict(base_dir,sys_name)
sparseness_gratings(grating_sim_dict,'lt',plot_only_flag)
|
import unittest
from Solutions import Lesson15
class CountDistinctSlicesTests(unittest.TestCase):
def test_count_distinct_slices_example_01(self):
m = 6
a = [3, 4, 5, 5, 2]
res = Lesson15.count_distinct_slices(m, a)
self.assertEqual(9, res)
|
"""Define tests for the Airzone coordinator."""
from unittest.mock import patch
from aioairzone.exceptions import AirzoneError
from homeassistant.components.airzone.const import DOMAIN
from homeassistant.components.airzone.coordinator import SCAN_INTERVAL
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import utcnow
from .util import CONFIG, HVAC_MOCK
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_coordinator_client_connector_error(hass: HomeAssistant) -> None:
"""Test ClientConnectorError on coordinator update."""
entry = MockConfigEntry(domain=DOMAIN, data=CONFIG)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.airzone.AirzoneLocalApi.get_hvac",
return_value=HVAC_MOCK,
) as mock_hvac:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
mock_hvac.assert_called_once()
mock_hvac.reset_mock()
mock_hvac.side_effect = AirzoneError
async_fire_time_changed(hass, utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
mock_hvac.assert_called_once()
state = hass.states.get("sensor.despacho_temperature")
assert state.state == STATE_UNAVAILABLE
|
# Generated by Django 3.1.11 on 2021-06-08 07:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20210608_1047'),
('schools', '0003_auto_20210530_1028'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('organizations', '0003_auto_20210526_1138'),
]
operations = [
migrations.AlterField(
model_name='organizationmember',
name='organization',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='organization_member', to='organizations.organization'),
),
migrations.CreateModel(
name='SchoolActivityOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('CANCELLED', 'Cancelled'), ('PENDING_ADMIN_APPROVAL', 'Pending Admin Approval'), ('APPROVED', 'Approved')], default='PENDING_ADMIN_APPROVAL', max_length=50, verbose_name='status')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school_activity_orders', to='organizations.activity')),
('last_updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='last_updated_by_me_orders', to=settings.AUTH_USER_MODEL)),
('requested_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='requested_orders', to=settings.AUTH_USER_MODEL)),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school_activity_orders', to='schools.school')),
],
),
migrations.CreateModel(
name='SchoolActivityGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='name')),
('description', models.CharField(max_length=255, verbose_name='description')),
('container_only', models.BooleanField(default=False)),
('activity_order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activity_groups', to='organizations.schoolactivityorder')),
('consumers', models.ManyToManyField(related_name='activity_groups', to='users.Consumer')),
],
),
migrations.AddConstraint(
model_name='schoolactivityorder',
constraint=models.UniqueConstraint(fields=('school', 'activity'), name='unique_order'),
),
]
|
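# Nested loops that draw the digit 4 with asterisks; expected output:
#   *     *
#   *     *
#   *     *
#     * * *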
for row in range(4):
for col in range(4):
if col%3==0 and row<3 or row==3 and col>0:
print('*', end = ' ')
else:
print(' ', end = ' ')
print()
|
# ===============================================================================
# @file: human_perception.py
# @note: This script is for model design of human perception of movement
# @author: Ziga Miklosic
# @date: 13.01.2021
# @brief:      Evaluation of a human movement perception model based on the
# "Vehicle modelling and washout filter tuning for the Chalmers Vehicle
# Simulator" thesis.
# ===============================================================================
# ===============================================================================
# IMPORTS
# ===============================================================================
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import freqz, bilinear
from filters.filter_utils import FunctionGenerator
from filters.iir_filter import IIR
# ===============================================================================
# CONSTANTS
# ===============================================================================
## ****** USER CONFIGURATIONS ******
## Sample frequency
# Sample frequency of real system
#
# Unit: Hz
SAMPLE_FREQ = 100.0
# Ideal sample frequency
# As a reference to sample rate constrained embedded system
#
# Unit: Hz
IDEAL_SAMPLE_FREQ = 1000.0
## Time window
#
# Unit: second
TIME_WINDOW = 10
## Input signal shape
INPUT_SIGNAL_FREQ = 0.1
INPUT_SIGNAL_AMPLITUDE = 9.81/4
INPUT_SIGNAL_OFFSET = INPUT_SIGNAL_AMPLITUDE
INPUT_SIGNAL_PHASE = -0.25
## Mux input signal
INPUT_SIGNAL_SELECTION = FunctionGenerator.FG_KIND_RECT
## Number of samples in time window
SAMPLE_NUM = int(( IDEAL_SAMPLE_FREQ * TIME_WINDOW ) + 1.0 )
## Parameters of the vestibular system
VESTIBULAR_ROLL_TL = 6.1
VESTIBULAR_ROLL_TS = 0.1
VESTIBULAR_ROLL_TA = 30.0
VESTIBULAR_PITCH_TL = 5.3
VESTIBULAR_PITCH_TS = 0.1
VESTIBULAR_PITCH_TA = 30.0
VESTIBULAR_YAW_TL = 10.2
VESTIBULAR_YAW_TS = 0.1
VESTIBULAR_YAW_TA = 30.0
VESTIBULAR_X_TL = 5.33
VESTIBULAR_X_TS = 0.66
VESTIBULAR_X_TA = 13.2
VESTIBULAR_X_K = 0.4
VESTIBULAR_Y_TL = 5.33
VESTIBULAR_Y_TS = 0.66
VESTIBULAR_Y_TA = 13.2
VESTIBULAR_Y_K = 0.4
VESTIBULAR_Z_TL = 5.33
VESTIBULAR_Z_TS = 0.66
VESTIBULAR_Z_TA = 13.2
VESTIBULAR_Z_K = 0.4
## ****** END OF USER CONFIGURATIONS ******
# ===============================================================================
# FUNCTIONS
# ===============================================================================
# ===============================================================================
# @brief: Calculate rotation perception coefficients
#
#           h(s) = (1/Ts * s^2) / ( s^3 + (1/Ta + 1/Tl + 1/Ts)*s^2 + (1/(Tl*Ts) + 1/(Tl*Ta) + 1/(Ta*Ts))*s + 1/(Ta*Tl*Ts) )
#
# @param[in]: Tl, Ts, Ta - Coefficient in the semicircular canals sensation model
# @param[in]: fs - Sample frequency
# @return: b,a - Array of b,a IIR coefficients
# ===============================================================================
def calc_rot_mov_coefficient(Tl, Ts, Ta, fs):
    b, a = bilinear( [0, 1/Ts, 0, 0], [1, (1/Ta + 1/Tl + 1/Ts), (1/(Tl*Ts) + 1/(Tl*Ta) + 1/(Ta*Ts)), 1/(Ta*Tl*Ts)], fs )  # parenthesized so the s-coefficient matches 1/(Tl*Ts) etc. in h(s)
return b, a
# ===============================================================================
# @brief: Calculate linear movement perception coefficients
#
#           h(s) = ((K*Ta)*s + K) / ( (Tl*Ts)*s^2 + (Tl + Ts)*s + 1 )
#
# @param[in]: Tl, Ts, Ta, K - Coefficients in the otolith model
# @param[in]: fs - Sample frequency
# @return: b,a - Array of b,a IIR coefficients
# ===============================================================================
def calc_lin_mov_coefficient(Tl, Ts, Ta, K, fs):
b, a = bilinear( [0, K*Ta, K], [ Tl*Ts, Tl+Ts, 1], fs )
return b, a
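# ===============================================================================
# Hedged sanity check (not from the thesis): at DC the otolith transfer
# function above reduces to h(0) = K, so the discretized filter's response at
# the zero-frequency bin should equal K. Uses the X-axis parameter values.
# ===============================================================================
def check_lin_mov_dc_gain():
    b, a = calc_lin_mov_coefficient( VESTIBULAR_X_TL, VESTIBULAR_X_TS, VESTIBULAR_X_TA, VESTIBULAR_X_K, SAMPLE_FREQ )
    w, h = freqz( b, a, 4096 )
    print( abs( h[0] ))     # expect K = 0.4 (bilinear preserves DC gain)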
# ===============================================================================
# CLASSES
# ===============================================================================
## Vestibular system
class VestibularSystem:
# ===============================================================================
# @brief: Init vestibular system
#
# @return: void
# ===============================================================================
def __init__(self):
## Parameters of the vestibular system
VESTIBULAR_ROLL_TL = 6.1
VESTIBULAR_ROLL_TS = 0.1
VESTIBULAR_ROLL_TA = 30.0
VESTIBULAR_PITCH_TL = 5.3
VESTIBULAR_PITCH_TS = 0.1
VESTIBULAR_PITCH_TA = 30.0
VESTIBULAR_YAW_TL = 10.2
VESTIBULAR_YAW_TS = 0.1
VESTIBULAR_YAW_TA = 30.0
VESTIBULAR_X_TL = 5.33
VESTIBULAR_X_TS = 0.66
VESTIBULAR_X_TA = 13.2
VESTIBULAR_X_K = 0.4
VESTIBULAR_Y_TL = 5.33
VESTIBULAR_Y_TS = 0.66
VESTIBULAR_Y_TA = 13.2
VESTIBULAR_Y_K = 0.4
VESTIBULAR_Z_TL = 5.33
VESTIBULAR_Z_TS = 0.66
VESTIBULAR_Z_TA = 13.2
VESTIBULAR_Z_K = 0.4
# Rotation movement coefficient
_roll_b, _roll_a = self.__calc_rot_mov_coefficient( VESTIBULAR_ROLL_TL, VESTIBULAR_ROLL_TS, VESTIBULAR_ROLL_TA, SAMPLE_FREQ )
_pitch_b, _pitch_a = self.__calc_rot_mov_coefficient( VESTIBULAR_PITCH_TL, VESTIBULAR_PITCH_TS, VESTIBULAR_PITCH_TA, SAMPLE_FREQ )
_yaw_b, _yaw_a = self.__calc_rot_mov_coefficient( VESTIBULAR_YAW_TL, VESTIBULAR_YAW_TS, VESTIBULAR_YAW_TA, SAMPLE_FREQ )
# Linear movement coefficient
_x_b, _x_a = self.__calc_lin_mov_coefficient( VESTIBULAR_X_TL, VESTIBULAR_X_TS, VESTIBULAR_X_TA, VESTIBULAR_X_K, SAMPLE_FREQ )
_y_b, _y_a = self.__calc_lin_mov_coefficient( VESTIBULAR_Y_TL, VESTIBULAR_Y_TS, VESTIBULAR_Y_TA, VESTIBULAR_Y_K, SAMPLE_FREQ )
_z_b, _z_a = self.__calc_lin_mov_coefficient( VESTIBULAR_Z_TL, VESTIBULAR_Z_TS, VESTIBULAR_Z_TA, VESTIBULAR_Z_K, SAMPLE_FREQ )
self._roll_filt = IIR( a=_roll_a, b=_roll_b, order=3 )
self._pitch_filt = IIR( a=_pitch_a, b=_pitch_b, order=3 )
self._yaw_filt = IIR( a=_yaw_a, b=_yaw_b, order=3 )
self._x_filt = IIR( a=_x_a, b=_x_b, order=2 )
self._y_filt = IIR( a=_y_a, b=_y_b, order=2 )
self._z_filt = IIR( a=_z_a, b=_z_b, order=2 )
# ===============================================================================
    # @brief:      Update vestibular system
#
#
# @param[in]: a - Input accelerations
# @param[in]: w - Input angular velocities
# @return: af, wf - Sensed accelerations and angular velocities
# ===============================================================================
def update(self, a, w):
af_x = self._x_filt.update( a[0] )
af_y = self._y_filt.update( a[1] )
af_z = self._z_filt.update( a[2] )
af = [af_x, af_y, af_z]
wf_x = self._roll_filt.update( w[0] )
wf_y = self._pitch_filt.update( w[1] )
wf_z = self._yaw_filt.update( w[2] )
wf = [wf_x, wf_y, wf_z]
return af, wf
# ===============================================================================
# @brief: Calculate rotation perception coefficients
#
    #           h(s) = (1/Ts * s^2) / ( s^3 + (1/Ta + 1/Tl + 1/Ts)*s^2 + (1/(Tl*Ts) + 1/(Tl*Ta) + 1/(Ta*Ts))*s + 1/(Ta*Tl*Ts) )
#
# @param[in]: Tl, Ts, Ta - Coefficient in the semicircular canals sensation model
# @param[in]: fs - Sample frequency
# @return: b,a - Array of b,a IIR coefficients
# ===============================================================================
def __calc_rot_mov_coefficient(self, Tl, Ts, Ta, fs):
        b, a = bilinear( [0, 1/Ts, 0, 0], [1, (1/Ta + 1/Tl + 1/Ts), (1/(Tl*Ts) + 1/(Tl*Ta) + 1/(Ta*Ts)), 1/(Ta*Tl*Ts)], fs )  # parenthesized so the s-coefficient matches 1/(Tl*Ts) etc. in h(s)
return b, a
# ===============================================================================
# @brief: Calculate linear movement perception coefficients
#
    #           h(s) = ((K*Ta)*s + K) / ( (Tl*Ts)*s^2 + (Tl + Ts)*s + 1 )
#
# @param[in]: Tl, Ts, Ta, K - Coefficients in the otolith model
# @param[in]: fs - Sample frequency
# @return: b,a - Array of b,a IIR coefficients
# ===============================================================================
def __calc_lin_mov_coefficient(self, Tl, Ts, Ta, K, fs):
b, a = bilinear( [0, K*Ta, K], [ Tl*Ts, Tl+Ts, 1], fs )
return b, a
# ===============================================================================
# MAIN ENTRY
# ===============================================================================
if __name__ == "__main__":
# Time array
_time, _dt = np.linspace( 0.0, TIME_WINDOW, num=SAMPLE_NUM, retstep=True )
# Rotation movement coefficient
_roll_b, _roll_a = calc_rot_mov_coefficient( VESTIBULAR_ROLL_TL, VESTIBULAR_ROLL_TS, VESTIBULAR_ROLL_TA, SAMPLE_FREQ )
_pitch_b, _pitch_a = calc_rot_mov_coefficient( VESTIBULAR_PITCH_TL, VESTIBULAR_PITCH_TS, VESTIBULAR_PITCH_TA, SAMPLE_FREQ )
_yaw_b, _yaw_a = calc_rot_mov_coefficient( VESTIBULAR_YAW_TL, VESTIBULAR_YAW_TS, VESTIBULAR_YAW_TA, SAMPLE_FREQ )
# Linear movement coefficient
_x_b, _x_a = calc_lin_mov_coefficient( VESTIBULAR_X_TL, VESTIBULAR_X_TS, VESTIBULAR_X_TA, VESTIBULAR_X_K, SAMPLE_FREQ )
_y_b, _y_a = calc_lin_mov_coefficient( VESTIBULAR_Y_TL, VESTIBULAR_Y_TS, VESTIBULAR_Y_TA, VESTIBULAR_Y_K, SAMPLE_FREQ )
_z_b, _z_a = calc_lin_mov_coefficient( VESTIBULAR_Z_TL, VESTIBULAR_Z_TS, VESTIBULAR_Z_TA, VESTIBULAR_Z_K, SAMPLE_FREQ )
# Filters
_roll_filt = IIR( a=_roll_a, b=_roll_b, order=3 )
_pitch_filt = IIR( a=_pitch_a, b=_pitch_b, order=3 )
_yaw_filt = IIR( a=_yaw_a, b=_yaw_b, order=3 )
_x_filt = IIR( a=_x_a, b=_x_b, order=2 )
# Get frequency characteristics
N = 256
_roll_w, _roll_h = freqz( _roll_b, _roll_a, 4096 * N )
#_pitch_w, _pitch_h = freqz( _pitch_b, _pitch_a, 4096 * N )
#_yaw_w, _yaw_h = freqz( _yaw_b, _yaw_a, 4096 * N )
_x_w, _x_h = freqz( _x_b, _x_a, 4096 * N )
#_y_w, _y_h = freqz( _y_b, _y_a, 4096 * N )
#_z_w, _z_h = freqz( _z_b, _z_a, 4096 * N )
# Vestibular system
_vest_sys = VestibularSystem()
# Filter input/output
_x = [ 0 ] * SAMPLE_NUM
_x_d = [0]
_y_d_roll = [0]
_y_d_x = [0]
    # Accelerations: one history list per axis (the former "* 3" replicated
    # references to the same three lists)
    _y_d_a_sens = [[0], [0], [0]]
    # Angular rates: one history list per axis
    _y_d_w_sens = [[0], [0], [0]]
# Generate inputs
_fg = FunctionGenerator( INPUT_SIGNAL_FREQ, INPUT_SIGNAL_AMPLITUDE, INPUT_SIGNAL_OFFSET, INPUT_SIGNAL_PHASE, INPUT_SIGNAL_SELECTION )
# Down sample
_downsamp_cnt = 0
_downsamp_samp = [0]
_d_time = [0]
# Generate stimuli signals
for n in range(SAMPLE_NUM):
#_x[n] = ( _fg.generate( _time[n] ))
# Some custom signal
if _time[n] < 1.0:
_x[n] = 0.0
elif _time[n] < 2.0:
_x[n] = _x[n-1] + 0.5 / IDEAL_SAMPLE_FREQ
elif _time[n] < 3.0:
_x[n] = 0.5
elif _time[n] < 4.0:
_x[n] = _x[n-1] - 0.5 / IDEAL_SAMPLE_FREQ
elif _time[n] < 10.0:
_x[n] = 0
else:
_x[n] = 0
# Apply filter
for n in range(SAMPLE_NUM):
# Mux input signals
#_x[n] = _signa_mux.out( INPUT_SIGNAL_SELECTION, [ _sin_x[n], _rect_x[n] ] )
# Down sample to SAMPLE_FREQ
if _downsamp_cnt >= (( 1 / ( _dt * SAMPLE_FREQ )) - 1 ):
_downsamp_cnt = 0
# Utils
_downsamp_samp.append(0)
_d_time.append( _time[n])
_x_d.append( _x[n] )
# Rotation sensed
_y_d_roll.append( _roll_filt.update( _x[n] ))
_y_d_x.append( _x_filt.update( _x[n] ))
a_sens, w_sens = _vest_sys.update( [ _x[n], 0, 0 ], [ _x[n], 0, 0 ] )
            # use a separate index so the outer sample loop's "n" is not shadowed
            for k in range(3):
                _y_d_a_sens[k].append( a_sens[k] )
                _y_d_w_sens[k].append( w_sens[k] )
else:
_downsamp_cnt += 1
# Calculate frequency response
_roll_w = ( _roll_w / np.pi * SAMPLE_FREQ / 2) # Hz
#_pitch_w = ( _pitch_w / np.pi * SAMPLE_FREQ / 2) # Hz
#_yaw_w = ( _yaw_w / np.pi * SAMPLE_FREQ / 2) # Hz
_x_w = ( _x_w / np.pi * SAMPLE_FREQ / 2) # Hz
#_y_w = ( _y_w / np.pi * SAMPLE_FREQ / 2) # Hz
#_z_w = ( _z_w / np.pi * SAMPLE_FREQ / 2) # Hz
# For conversion to rad/s
_roll_w = 2*np.pi*_roll_w
#_pitch_w = 2*np.pi*_pitch_w
#_yaw_w = 2*np.pi*_yaw_w
_x_w = 2*np.pi*_x_w
#_y_w = 2*np.pi*_y_w
#_z_w = 2*np.pi*_z_w
# Calculate phases & convert to degrees
_roll_angle = np.unwrap( np.angle(_roll_h) ) * 180/np.pi
#_pitch_angle = np.unwrap( np.angle(_pitch_h) ) * 180/np.pi
#_yaw_angle = np.unwrap( np.angle(_yaw_h) ) * 180/np.pi
_x_angle = np.unwrap( np.angle(_x_h) ) * 180/np.pi
#_y_angle = np.unwrap( np.angle(_y_h) ) * 180/np.pi
#_z_angle = np.unwrap( np.angle(_z_h) ) * 180/np.pi
plt.style.use(['dark_background'])
## ==============================================================================================
# Rotation motion plots
## ==============================================================================================
fig, ax = plt.subplots(2, 1)
fig.suptitle( "ROTATION MOVEMENT MODEL\n fs: " + str(SAMPLE_FREQ) + "Hz", fontsize=16 )
ax[0].plot(_roll_w, 20 * np.log10(abs(_roll_h)), "w")
ax[0].grid(alpha=0.25)
ax[0].set_xscale("log")
ax[0].set_xlim(1e-3, SAMPLE_FREQ/2)
ax[0].set_ylim(-80, 2)
ax[0].set_ylabel("Magnitude [dB]", color="w", fontsize=14)
ax[0].set_xlabel("Frequency [rad/s]", fontsize=14)
ax_00 = ax[0].twinx()
ax_00.plot(_roll_w, _roll_angle, "r")
ax_00.set_ylabel("Phase [deg]", color="r", fontsize=14)
ax_00.set_xscale("log")
ax_00.grid(alpha=0.25)
ax_00.set_xlim(1e-3, SAMPLE_FREQ/2)
ax_00.set_xlabel("Frequency [rad/s]", fontsize=14)
ax[1].plot( _time, _x, "r", label="Input" )
ax[1].plot( _d_time, _y_d_roll, ".-y", label="Sensed")
ax[1].plot( _d_time, _y_d_w_sens[0], "--w", label="Sensed")
ax[1].grid(alpha=0.25)
ax[1].set_xlim(0, 8)
ax[1].legend(loc="upper right")
ax[1].grid(alpha=0.25)
ax[1].set_xlabel("Time [s]", fontsize=14)
ax[1].set_ylabel("rotation, sensed rotation [rad/s]", fontsize=14)
## ==============================================================================================
# Linear motion plots
## ==============================================================================================
fig, ax = plt.subplots(2, 1)
fig.suptitle( "LINEAR MOVEMENT MODEL\n fs: " + str(SAMPLE_FREQ) + "Hz", fontsize=16 )
ax[0].plot(_x_w, 20 * np.log10(abs(_x_h)), 'w')
ax[0].grid(alpha=0.25)
ax[0].set_xscale("log")
ax[0].set_xlim(1e-3, SAMPLE_FREQ/2)
ax[0].set_ylim(-40, 1)
ax[0].set_ylabel("Magnitude [dB]", color="w", fontsize=14)
ax[0].set_xlabel("Frequency [rad/s]", fontsize=14)
ax_00 = ax[0].twinx()
ax_00.plot(_x_w, _x_angle, "r")
ax_00.set_ylabel("Phase [deg]", color="r", fontsize=14)
ax_00.set_xscale("log")
ax_00.grid(alpha=0.25)
ax_00.set_xlim(1e-3, SAMPLE_FREQ/2)
ax_00.set_xlabel("Frequency [rad/s]")
ax[1].plot( _time, _x, "r", label="Input" )
ax[1].plot( _d_time, _y_d_x, ".-y", label="Sensed")
ax[1].plot( _d_time, _y_d_a_sens[0], "--w", label="Sensed")
ax[1].grid(alpha=0.25)
ax[1].set_xlim(0, 8)
ax[1].legend(loc="upper right")
ax[1].grid(alpha=0.25)
ax[1].set_xlabel("Time [s]", fontsize=14)
ax[1].set_ylabel("acceleration, sensed force [mm^2]", fontsize=14)
plt.show()
# ===============================================================================
# END OF FILE
# ===============================================================================
|
import json
import boto3
import re
import os
import sys
import compare_output
from . import s3_compares
# Returns False for commands (such as compare) that parse their own
# region/account arguments, so the default extraction step can be bypassed.
def extract_from_text(text):
if 'compare' in text and text.count('in') > 1:
return False
else:
return True
def main(text):
regionList = ['us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1', 'ap-southeast-2']
region = regionList[0]
awsKeyId = None
awsSecretKey = None
awsSessionToken = None
loadedbuckets = dict()
tokens = []
the_account = None
the_role = None
region = None
text.pop(0) # remove command name
if len(text) == 0:
return "You did not supply a query to run"
if text[0] == 'help':
return information()
    # extract_from_text returns False for commands that parse their own region
    # and account (e.g. compare), in which case this extraction is bypassed
if extract_from_text(text):
if 'in' in text:
while text[-1] != 'in':
tokens.append(text.pop())
extractedRegion = re.search(r'[a-z]{2}-[a-z]+-[1-9]{1}', " ".join(tokens))
if extractedRegion:
region = extractedRegion.group()
tokens.remove(region)
text.remove('in')
# default loading of bucket values
config = None
if os.path.isfile("./aws.config"):
with open("aws.config") as f:
config = json.load(f)
    if config is not None and config.get('s3'):
for account in config['s3']['Accounts']:
if account["RoleArn"] == "" and account["AccountName"] == "":
loadedbuckets = account["Buckets"]
    # Look at the command to see if it references a different account and, if
    # so, set the_account, the_role, and loadedbuckets accordingly
if len(tokens) > 0 and config != None:
for account in config['s3']['Accounts']:
if account['AccountName'] in tokens:
the_account = account['AccountName']
the_role = account["RoleArn"]
loadedbuckets = account['Buckets']
tokens.remove(account['AccountName'])
if len(tokens) > 0:
return "Could not resolve " + " ".join(tokens)
elif len(tokens) > 0:
return "Could not locate aws.config file"
if the_role:
sts_client = boto3.client('sts')
assumedRole = sts_client.assume_role(RoleArn=the_role, RoleSessionName="AssumedRole")
awsKeyId = assumedRole['Credentials']['AccessKeyId']
awsSecretKey = assumedRole['Credentials']['SecretAccessKey']
awsSessionToken = assumedRole['Credentials']['SessionToken']
session = boto3.session.Session(aws_access_key_id=awsKeyId,
aws_secret_access_key=awsSecretKey,
aws_session_token=awsSessionToken)
s3 = session.client("s3")
if 'list' in text:
text.remove("list")
ret = ""
if 'buckets' in text:
try:
s3_buckets = s3.list_buckets()['Buckets']
except Exception as e:
print(e)
return "Could not list buckets in " + region
if len(s3_buckets) == 0:
return "There are no s3 buckets associated with this region: " + region
# create list of buckets to output to slack
for bucket in s3_buckets:
for b in loadedbuckets[region]:
if bucket['Name'] == b['bucketname']:
ret = ret + str(bucket['Name']) + "\n"
return ret
if 'files' in text:
text.remove('files')
print(text)
if "filter" in text:
if len(text) == 3:
lookup = text[text.index("filter")+1]
text.remove(text[text.index("filter")+1])
text.remove('filter')
else:
return "Filter is missing lookup directories"
one_bucket_search = False
for b in loadedbuckets[region]:
try:
paginator = s3.get_paginator('list_objects_v2')
if len(text) == 1:
page_iterator = paginator.paginate(Bucket=text[0])
ret = ret + "\n\nBucket: " + str(text[0])
one_bucket_search = True
else:
page_iterator = paginator.paginate(Bucket=b['bucketname'])
ret = ret + "\n\nBucket: " + str(b['bucketname'])
for page in page_iterator:
for item in page['Contents']:
page_item = [_f for _f in item['Key'].split('/') if _f]
lookup_array = [_f for _f in lookup.split('/') if _f]
if len(page_item) == len(lookup_array)+1:
l_iterator = 0
check_match = 0
while l_iterator < len(lookup_array):
if page_item[l_iterator] == lookup_array[l_iterator]:
check_match = check_match + 1
l_iterator = l_iterator + 1
if check_match == len(lookup_array):
ret = ret + "\n\nobject: " + item['Key'] +"\nLast Modified: "+item['LastModified'].strftime('%m/%d/%Y %H:%M:%S')
                # if the user requested one bucket specifically, then return data for just that bucket
if one_bucket_search:
return ret
except Exception as e:
print(e)
return "Could not list buckets in " + region
else:
# all top directories will be returned in the buckets or bucket if specified
s3_dictionary = []
one_bucket_search = False
for b in loadedbuckets[region]:
try:
paginator = s3.get_paginator('list_objects_v2')
if len(text) == 1:
page_iterator = paginator.paginate(Bucket=text[0])
ret = ret + "\n\nBucket: " + str(text[0])
one_bucket_search = True
else:
page_iterator = paginator.paginate(Bucket=b['bucketname'])
ret = ret + "\n\nBucket: " + str(b['bucketname'])
for page in page_iterator:
for item in page['Contents']:
page_item = item['Key'].split('/')[0]
if page_item not in s3_dictionary:
ret = ret +"\n"+page_item
s3_dictionary.append(page_item)
if one_bucket_search:
return ret
except Exception as e:
print(e)
return "Could not list buckets in " + region
return ret
elif 'compare' in text:
print(text)
text.remove("compare")
if "with" in text and (len([_f for _f in text if _f]) == 9 or len([_f for _f in text if _f]) == 7):
master_args = [_f for _f in text[:text.index("with")] if _f]
team_args = [_f for _f in text[text.index("with") + 1:] if _f]
master_args_eval = eval_args(master_args, regionList)
team_args_eval = eval_args(team_args, regionList)
if master_args_eval and team_args_eval:
config = None
# load config file
if os.path.isfile("./aws.config"):
with open("aws.config") as f:
config = json.load(f)
if config:
master_data = get_in_s3_compare_data(config, master_args, master_args_eval)
team_data = get_in_s3_compare_data(config, team_args, team_args_eval)
else:
return "Config file was not loaded"
if master_data and team_data:
compared_data = s3_compares.main_eb_check_versions(master_data, team_data)
return compare_output.slack_payload(compared_data, get_team_name(team_data))
else:
return "Values could not be retrieved from operation, 'Jarvis eb help'"
else:
return "Invalid region or account information entered"
else:
return "Invalid arguments entered to complete comparison"
else:
return "I did not understand the query. Please try again."
def about():
return "This plugin returns requested information regarding AWS s3 Buckets"
def information():
return """This plugin returns various information about clusters and services hosted on s3.
The format of queries is as follows:
jarvis s3 list buckets <in region/account> [sendto <user or channel>]
jarvis s3 list files [<bucket>] <in region/account> [sendto <user or channel>]
jarvis s3 compare [<bucket>] within <region> <account> with [<bucket>] within <account> [sendto <user or channel>]"""
def eval_args(args, regionList):
args = [_f for _f in args if _f]
# this indicates user did not specify a bucket
if len(args) == 3:
if args.index("within") == 0 and args[1] in regionList:
return 1
else:
return 0
# this indicates user specified a bucket
elif len(args) == 4:
if args.index("within") == 1 and args[2] in regionList:
return 2
else:
return 0
else:
return 0
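# Hedged examples of what eval_args accepts; the account and bucket names are
# invented. It returns 1 for "within <region> <account>", 2 for
# "<bucket> within <region> <account>", and 0 otherwise.
def demo_eval_args():
    regions = ['us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1', 'ap-southeast-2']
    print(eval_args(['within', 'us-east-1', 'dev-account'], regions))                # 1: no bucket given
    print(eval_args(['my-bucket', 'within', 'us-east-1', 'dev-account'], regions))   # 2: bucket given
    print(eval_args(['within', 'nowhere-1', 'dev-account'], regions))                # 0: unknown region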
# get the dev account role for specified account_name
def config_get_account_rolearn(account_name, config):
role_arn = None
for account in config['eb']['Accounts']:
if account['AccountName'] == account_name:
role_arn = account['RoleArn']
return role_arn
return role_arn
# if the user specifies a bucket this will attach directories from that bucket to the data to be compared
def get_data_for_specific_bucket(dataset, bucket):
arr = []
for data in dataset:
if data['bucketname'] == bucket:
arr.append(data)
return arr
# if the role arn is blank, use team names to search for account
def check_team_name(account, result, bucket=None):
check_result = False
for dir in account['Buckets'][result['region']]:
if 'team_name' in dir:
if result['AccountName'] == dir['team_name']:
result['RoleArn'] = account['RoleArn']
# set the directory
if account['Buckets']:
for region in account['Buckets']:
if region == result['region']:
if bucket:
result['Directory_List'] = get_data_for_specific_bucket(account['Buckets'][region], bucket)
else:
result['Directory_List'] = account['Buckets'][region]
check_result = True
return check_result
def get_in_s3_compare_data(config, args, args_eval):
if args_eval == 1:
# values from user arguments
result = dict()
result['region'] = args[1]
result['AccountName'] = args[2]
elif args_eval == 2:
result = dict()
result['region'] = args[2]
result['AccountName'] = args[3]
result['Bucket'] = args[0]
if config.get('s3'):
config = config['s3']['Accounts']
else:
return "Config file not loaded properly"
for account in config:
if 'RoleArn' in result:
break
elif result['AccountName'] == account['AccountName'] and result['region'] in account['Buckets']:
result['RoleArn'] = account['RoleArn']
# set the directory
if account['Buckets']:
for region in account['Buckets']:
if region == result['region']:
                        # if the user specified a bucket, then search just in that bucket
if 'Bucket' in result:
result['Directory_List'] = get_data_for_specific_bucket(account['Buckets'][region], result['Bucket'])
else:
result['Directory_List'] = account['Buckets'][region]
break
elif result['region'] in account['Buckets']:
if 'Bucket' in result:
if check_team_name(account, result, result['Bucket']):
if result['RoleArn']:
break
else:
if check_team_name(account, result):
if result['RoleArn']:
break
    # reset the result if any required field failed to resolve
    if 'RoleArn' not in result or 'AccountName' not in result or len(result.get('Directory_List', [])) < 1:
result = dict()
return result
def get_team_name(m_data):
    for data in m_data['Directory_List']:
        if 'team_name' in data:
            return data['team_name']
    return None
|
import pandas as pd
import numpy as np
df = pd.read_csv("C:/Users/Rui/Desktop/test-DEseq2.csv")
# Signed significance metric: -log10(padj) carrying the sign of the fold change
df['sign'] = np.where(df['log2FoldChange'] > 0, 1, -1)
df['metric'] = -np.log10(df['padj']) * df['sign']
df2 = df[['Unnamed: 0', 'metric']]
df2 = df2.fillna(value=0)
# DataFrame.sort() was removed from pandas; sort_values() is its replacement
df3 = df2.sort_values(by='metric')
df3.to_csv("file.rnk", sep='\t', header=False, index=False)
|
from threading import Timer
import logging
import time
def thread_work():
logging.debug("Поехали!")
if __name__=="__main__":
logging.basicConfig(
level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
    my_timer1 = Timer(0.3, thread_work)
    my_timer1.name = "MyThreadTimer-1"
    my_timer2 = Timer(0.3, thread_work)
    my_timer2.name = "MyThreadTimer-2"
    logging.debug("Starting the timers!")
    my_timer1.start()
    my_timer2.start()
    logging.debug(f"Delay before cancelling {my_timer2.name}")
time.sleep(0.2)
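    # Timer.cancel() only stops a timer whose interval has not yet elapsed;
    # the 0.2 s sleep above is shorter than the 0.3 s interval, so timer 2
    # is cancelled before it fires while timer 1 still runs.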
logging.debug(f"Отмена потока - {my_timer2.getName()}")
my_timer2.cancel()
logging.debug("Завершение") |
# import the opencv library
import cv2
import numpy as np
import blindfold_solver2
def get_color(colors):
blue = colors[0, 1, 1]
red = colors[1, 1, 1]
yellow = colors[2, 1, 1]
orange = colors[3, 1, 1]
white = colors[4, 1, 1]
green = colors[5, 1, 1]
f_c = []
color_options = 'bryowg'
col_string = ''
for l in range(0, 6):
for j in range(0,3):
col_string = ''
for i in range(0,3):
guessed_color = 0
color_dist = 10000
for k in range(0, 6):
dist = pow(colors[k, 1, 1, 0]-colors[l, i, j, 0] , 2) + pow(colors[k, 1, 1, 1]-colors[l, i, j, 1] , 2) + pow(colors[k, 1, 1, 2]-colors[l, i, j, 2] , 2)
if dist < color_dist:
color_dist = dist
guessed_color = k
col_string += color_options[guessed_color]
# print(colors[0, i, j])
f_c.append(col_string)
print(f_c)
blindfold_solver2.solve(f_c[0:3], f_c[3:6], f_c[6:9], f_c[9:12], f_c[12:15], f_c[15:18])
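# Hedged illustration of the nearest-centre classification above: each sticker
# is assigned the face letter whose centre-cell BGR value (sampled during the
# scan) has the smallest squared distance. The BGR triples here are invented.
def demo_nearest_color():
    centers = {'b': (200, 60, 30), 'r': (30, 30, 200)}
    sample = (40, 35, 190)
    best = min(centers, key=lambda k: sum((c - s) ** 2 for c, s in zip(centers[k], sample)))
    print(best)  # -> 'r'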
# def color(r, g, b):
# define a video capture object
vid = cv2.VideoCapture(0)
ret, frame = vid.read()
f_height = frame.shape[0]
f_width = frame.shape[1]
cent_w = int(f_width/2)
cent_h = int(f_height/2)
f_min = min(f_height, f_width)
size = int(f_min/30)
shift = int(f_min/6)
color_b = 0
color_g = 0
color_r = 0
face_num = 0
colors = np.zeros((6, 3, 3, 3) ,dtype=int)
while(True):
# Capture the video frame
# by frame
ret, frame = vid.read()
for i in range(-1,2):
for j in range(-1,2):
frame = cv2.rectangle(frame, (cent_w - size + shift * i, cent_h - size + shift * j),
(cent_w + size + shift * i, cent_h + size + shift * j),
(255,255,255), 3)
# Display the resulting frame
cv2.imshow('frame', frame)
    # press 's' to capture the colors of the current face;
    # press 'q' to quit the capture loop
let = cv2.waitKey(1)
if let == ord('s'):
print('screenshot')
for i in range(-1,2):
for j in range(-1,2):
color_b = np.mean(frame[(cent_h - size + shift * j):(cent_h + size + shift * j),
(cent_w - size + shift * i):(cent_w + size + shift * i), 0])
color_g = np.mean(frame[(cent_h - size + shift * j):(cent_h + size + shift * j),
(cent_w - size + shift * i):(cent_w + size + shift * i), 1])
color_r = np.mean(frame[(cent_h - size + shift * j):(cent_h + size + shift * j),
(cent_w - size + shift * i):(cent_w + size + shift * i), 2])
# colors.append(None)
# colors[i, j, face_num] = [color_r, color_g, color_b]
colors[face_num, i+1, j+1] = [color_r, color_g, color_b]
# get_color(colors)
face_num += 1
if let == ord('q'):
break
# After the loop release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
get_color(colors) |
from matplotlib import pyplot
from csv import reader
from dateutil import parser
with open('table_amzn.csv', 'r') as f:
    data = list(reader(f))
# Skip the header row; parse timestamps and cast prices to float so the
# y-axis is numeric rather than lexicographically ordered strings
price = [float(i[5]) for i in data[1:]]
time = [parser.parse(i[0]) for i in data[1:]]
pyplot.plot(time, price)
pyplot.show()
|
from hokusai.lib.command import command
from hokusai.lib.common import print_green, print_red
from hokusai.lib.exceptions import HokusaiError
from hokusai.services.ecr import ECR, ClientError
@command()
def retag(tag_to_change, tag_to_match):
ecr = ECR()
if not ecr.project_repo_exists():
raise HokusaiError("Project repo does not exist. Aborting.")
try:
ecr.retag(tag_to_match, tag_to_change)
print_green("Updated ECR '%s' tag to point to the image that '%s' tag points to." % (tag_to_change, tag_to_match), newline_after=True)
except (ValueError, ClientError) as e:
raise HokusaiError("Updating ECR tag failed due to the error: '%s'" % str(e))
|
import unittest
import datetime
from pyalgotrade.broker import optbroker
from pyalgotrade import broker as stockbroker
from pyalgotrade.broker.optbroker import optfillstrategy
from pyalgotrade.broker import fillstrategy as stockfillstrategy
from pyalgotrade.broker.optbroker import optbacktesting
import broker_backtesting_opt_test
from pyalgotrade import bar
class BaseTestCase(unittest.TestCase):
TestInstrument = "orcl"
class FreeFunctionsTestCase(BaseTestCase):
def testStopOrderTriggerBuy(self):
barsBuilder = broker_backtesting_opt_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is below
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 5, 5, 5)), None)
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 6, 4, 5)), None)
# High touches
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 10, 4, 9)), 10)
# High penetrates
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 11, 4, 9)), 10)
# Open touches
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is above
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(11, 12, 4, 9)), 11)
# Bar gaps above
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(12, 13, 11, 12)), 12)
def testStopOrderTriggerSell(self):
barsBuilder = broker_backtesting_opt_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is above
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 15, 15, 15)), None)
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 16, 11, 15)), None)
# Low touches
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 16, 10, 11)), 10)
# Low penetrates
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 16, 9, 11)), 10)
# Open touches
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is below
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(9, 12, 4, 9)), 9)
# Bar gaps below
self.assertEqual(stockfillstrategy.get_stop_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(8, 9, 6, 9)), 8)
def testLimitOrderTriggerBuy(self):
barsBuilder = broker_backtesting_opt_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is above
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 15, 15, 15)), None)
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 16, 11, 15)), None)
# Low touches
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 16, 10, 11)), 10)
# Low penetrates
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 16, 9, 11)), 10)
# Open touches
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is below
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(9, 12, 4, 9)), 9)
# Bar gaps below
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.BUY, 10, False, barsBuilder.nextBar(8, 9, 6, 9)), 8)
def testLimitOrderTriggerSell(self):
barsBuilder = broker_backtesting_opt_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is below
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 5, 5, 5)), None)
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 6, 4, 5)), None)
# High touches
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 10, 4, 9)), 10)
# High penetrates
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 11, 4, 9)), 10)
# Open touches
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is above
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(11, 12, 4, 9)), 11)
# Bar gaps above
self.assertEqual(stockfillstrategy.get_limit_price_trigger(optbroker.Order.Action.SELL, 10, False, barsBuilder.nextBar(12, 13, 11, 12)), 12)
class DefaultStrategyTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.barsBuilder = broker_backtesting_opt_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
self.strategy = optfillstrategy.OptionDefaultStrategy()
def __getFilledMarketOrder(self, quantity, price):
order = optbacktesting.OptionMarketOrder(
optbroker.Order.Action.BUY,
BaseTestCase.TestInstrument,
quantity,
False,
optbroker.OptionOrder.Right.CALL,
20,
"2016-01-01",
stockbroker.IntegerTraits()
)
order.setState(stockbroker.Order.State.ACCEPTED)
order.addExecutionInfo(stockbroker.OrderExecutionInfo(price, quantity, 0, datetime.datetime.now()))
return order
def testVolumeLimitPerBar(self):
volume = 100
self.strategy.onBars(None, self.barsBuilder.nextBars(11, 12, 4, 9, volume))
        self.assertEqual(self.strategy.getVolumeLeft()[BaseTestCase.TestInstrument], 25)
        self.assertEqual(self.strategy.getVolumeUsed()[BaseTestCase.TestInstrument], 0)
        self.strategy.onOrderFilled(None, self.__getFilledMarketOrder(24, 11))
        self.assertEqual(self.strategy.getVolumeLeft()[BaseTestCase.TestInstrument], 1)
        self.assertEqual(self.strategy.getVolumeUsed()[BaseTestCase.TestInstrument], 24)
        with self.assertRaisesRegex(Exception, "Invalid fill quantity 25. Not enough volume left 1"):
            self.strategy.onOrderFilled(None, self.__getFilledMarketOrder(25, 11))
        self.assertEqual(self.strategy.getVolumeLeft()[BaseTestCase.TestInstrument], 1)
        self.assertEqual(self.strategy.getVolumeUsed()[BaseTestCase.TestInstrument], 24) |
# sql/functions.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import types as sqltypes, schema
from .expression import (
ClauseList, Function, _literal_as_binds, literal_column, _type_from_args,
cast, extract
)
from . import operators
from .visitors import VisitableType
from .. import util
_registry = util.defaultdict(dict)
def register_function(identifier, fn, package="_default"):
"""Associate a callable with a particular func. name.
This is normally called by _GenericMeta, but is also
available by itself so that a non-Function construct
can be associated with the :data:`.func` accessor (i.e.
CAST, EXTRACT).
"""
reg = _registry[package]
reg[identifier] = fn
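# Hedged usage sketch (names assumed): after
#   register_function("my_func", MyFunction)
# the class resolves through the func accessor as func.my_func().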
class _GenericMeta(VisitableType):
def __init__(cls, clsname, bases, clsdict):
cls.name = name = clsdict.get('name', clsname)
cls.identifier = identifier = clsdict.get('identifier', name)
package = clsdict.pop('package', '_default')
# legacy
if '__return_type__' in clsdict:
cls.type = clsdict['__return_type__']
register_function(identifier, cls, package)
super(_GenericMeta, cls).__init__(clsname, bases, clsdict)
class GenericFunction(util.with_metaclass(_GenericMeta, Function)):
"""Define a 'generic' function.
A generic function is a pre-established :class:`.Function`
class that is instantiated automatically when called
by name from the :data:`.func` attribute. Note that
calling any name from :data:`.func` has the effect that
a new :class:`.Function` instance is created automatically,
given that name. The primary use case for defining
a :class:`.GenericFunction` class is so that a function
of a particular name may be given a fixed return type.
It can also include custom argument parsing schemes as well
as additional methods.
Subclasses of :class:`.GenericFunction` are automatically
registered under the name of the class. For
example, a user-defined function ``as_utc()`` would
be available immediately::
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.types import DateTime
class as_utc(GenericFunction):
type = DateTime
print select([func.as_utc()])
User-defined generic functions can be organized into
packages by specifying the "package" attribute when defining
:class:`.GenericFunction`. Third party libraries
containing many functions may want to use this in order
to avoid name conflicts with other systems. For example,
if our ``as_utc()`` function were part of a package
"time"::
class as_utc(GenericFunction):
type = DateTime
package = "time"
The above function would be available from :data:`.func`
using the package name ``time``::
print select([func.time.as_utc()])
A final option is to allow the function to be accessed
from one name in :data:`.func` but to render as a different name.
The ``identifier`` attribute will override the name used to
access the function as loaded from :data:`.func`, but will retain
the usage of ``name`` as the rendered name::
class GeoBuffer(GenericFunction):
type = Geometry
package = "geo"
name = "ST_Buffer"
identifier = "buffer"
The above function will render as follows::
>>> print func.geo.buffer()
ST_Buffer()
.. versionadded:: 0.8 :class:`.GenericFunction` now supports
automatic registration of new functions as well as package
and custom naming support.
.. versionchanged:: 0.8 The attribute name ``type`` is used
to specify the function's return type at the class level.
Previously, the name ``__return_type__`` was used. This
name is still recognized for backwards-compatibility.
"""
coerce_arguments = True
def __init__(self, *args, **kwargs):
parsed_args = kwargs.pop('_parsed_args', None)
if parsed_args is None:
parsed_args = [_literal_as_binds(c) for c in args]
self.packagenames = []
self._bind = kwargs.get('bind', None)
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *parsed_args).self_group()
self.type = sqltypes.to_instance(
kwargs.pop("type_", None) or getattr(self, 'type', None))
register_function("cast", cast)
register_function("extract", extract)
class next_value(GenericFunction):
"""Represent the 'next value', given a :class:`.Sequence`
    as its single argument.
Compiles into the appropriate function on each backend,
or will raise NotImplementedError if used on a backend
that does not provide support for sequences.
"""
type = sqltypes.Integer()
name = "next_value"
def __init__(self, seq, **kw):
assert isinstance(seq, schema.Sequence), \
"next_value() accepts a Sequence object as input."
self._bind = kw.get('bind', None)
self.sequence = seq
@property
def _from_objects(self):
return []
class AnsiFunction(GenericFunction):
def __init__(self, **kwargs):
GenericFunction.__init__(self, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
kwargs.setdefault('type_', _type_from_args(args))
kwargs['_parsed_args'] = args
GenericFunction.__init__(self, *args, **kwargs)
class coalesce(ReturnTypeFromArgs):
pass
class max(ReturnTypeFromArgs):
pass
class min(ReturnTypeFromArgs):
pass
class sum(ReturnTypeFromArgs):
pass
class now(GenericFunction):
type = sqltypes.DateTime
class concat(GenericFunction):
type = sqltypes.String
class char_length(GenericFunction):
type = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, arg, **kwargs)
class random(GenericFunction):
pass
class count(GenericFunction):
"""The ANSI COUNT aggregate function. With no arguments,
emits COUNT \*.
"""
type = sqltypes.Integer
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = literal_column('*')
GenericFunction.__init__(self, expression, **kwargs)
class current_date(AnsiFunction):
type = sqltypes.Date
class current_time(AnsiFunction):
type = sqltypes.Time
class current_timestamp(AnsiFunction):
type = sqltypes.DateTime
class current_user(AnsiFunction):
type = sqltypes.String
class localtime(AnsiFunction):
type = sqltypes.DateTime
class localtimestamp(AnsiFunction):
type = sqltypes.DateTime
class session_user(AnsiFunction):
type = sqltypes.String
class sysdate(AnsiFunction):
type = sqltypes.DateTime
class user(AnsiFunction):
type = sqltypes.String
|
import os
from flask import current_app, url_for
from . import core
@core.app_context_processor
def inject_variables():
return dict(SSL=current_app.config['SSL'])
@core.app_template_filter('strftime')
def _jinja2_filter_datetime(date, fmt=None):
if date is None:
return '-'
native = date.replace(tzinfo=None)
fmt = fmt or '%d.%m.%Y %H:%M:%S'
return native.strftime(fmt)
@core.app_context_processor
def override_url_for():
return dict(
url_for=dated_url_for
)
def dated_url_for(endpoint, **values):
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
file_path = os.path.join(current_app.root_path,
endpoint, filename)
if os.path.exists(file_path):
values['q'] = int(os.stat(file_path).st_mtime)
return url_for(endpoint, **values)
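# Hedged example of the cache-busting above (filename assumed): in a template,
# url_for('static', filename='style.css') renders roughly as
# /static/style.css?q=1612345678, with q taken from the file's mtime.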
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 21:04:40 2021
@author: katie
"""
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import svd_tools_copy as svdt
import scipy as sp
import scipy.sparse
from PIL import Image
'''
Embed a watermark in using the Liu & Tan algorithm
'''
def embed_watermark(mat, watermark, scale=1):
mat_rows, mat_columns = mat.shape
watermark_rows, watermark_columns = watermark.shape
    if mat_rows < watermark_rows or mat_columns < watermark_columns:
        raise ValueError('Watermark must be smaller than matrix')
mat_u, mat_s, mat_vh = la.svd(mat)
mat_num_sv = len(mat_s)
# Compute the rectangular "diagonal" singular value matrix
mat_s_matrix = np.pad(np.diag(mat_s),
[(0, mat_rows - mat_num_sv), (0, mat_columns - mat_num_sv)])
watermark_padded = np.pad(watermark,
[(0, mat_rows - watermark_rows), (0, mat_columns - watermark_columns)])
mat_s_matrix_watermarked = mat_s_matrix + scale * watermark_padded
watermarked_u, watermarked_s, watermarked_vh = la.svd(mat_s_matrix_watermarked)
watermarked_num_sv = len(watermarked_s)
watermarked_s_matrix = np.pad(np.diag(watermarked_s),
[(0, mat_rows - watermarked_num_sv), (0, mat_columns - watermarked_num_sv)])
mat_watermarked = mat_u @ watermarked_s_matrix @ mat_vh
return mat_watermarked, watermarked_u, mat_s_matrix, watermarked_vh
def embed_watermark_jain(mat, watermark, scale=1, term=False):
mat_rows, mat_columns = mat.shape
watermark_rows, watermark_columns = watermark.shape
    if mat_rows < watermark_rows or mat_columns < watermark_columns:
        raise ValueError('Watermark must be smaller than matrix')
mat_u, mat_s, mat_vh = la.svd(mat)
mat_num_sv = len(mat_s)
# Compute the rectangular "diagonal" singular value matrix
mat_s_matrix = np.pad(np.diag(mat_s),
[(0, mat_rows - mat_num_sv), (0, mat_columns - mat_num_sv)])
# Pad watermark to match the sizes
watermark_padded = np.pad(watermark,
[(0, mat_rows - watermark_rows), (0, mat_columns - watermark_columns)])
watermark_u, watermark_s, watermark_vh = la.svd(watermark_padded)
watermark_num_sv = len(watermark_s)
    watermark_rows, watermark_columns = mat.shape  # reuse the host matrix's shape to pad the watermark's SVD factors
watermark_s_matrix = np.pad(np.diag(watermark_s), [(0, watermark_rows - watermark_num_sv), (0, watermark_columns - watermark_num_sv)])
watermark_pcs = watermark_u @ watermark_s_matrix
mat_s_matrix_watermarked = mat_s_matrix + scale * watermark_pcs
mat_watermarked = mat_u @ mat_s_matrix_watermarked @ mat_vh
jain_term = mat_u @ watermark_u @ watermark_s_matrix @ mat_vh
if term:
return mat_watermarked, watermark_vh, jain_term
else:
return mat_watermarked, watermark_vh
def extract_watermark_jain(mat_watermarked, mat_original, watermark_vh, scale):
mat_u, mat_s, mat_vh = la.svd(mat_original)
watermark_pcs = (mat_u.conj().T @ (mat_watermarked - mat_original) @ mat_vh.conj().T) / scale
#watermark_pcs = (la.inv(mat_u) @ (mat_watermarked - mat_original) @ la.inv(mat_vh)) / scale
return watermark_pcs @ watermark_vh
def embed_watermark_jain_mod(mat, watermark, scale=1, term=False):
mat_rows, mat_columns = mat.shape
watermark_rows, watermark_columns = watermark.shape
    if mat_rows < watermark_rows or mat_columns < watermark_columns:
        raise ValueError('Watermark must be smaller than matrix')
mat_u, mat_s, mat_vh = la.svd(mat)
mat_num_sv = len(mat_s)
# Compute the rectangular "diagonal" singular value matrix
mat_s_matrix = np.pad(np.diag(mat_s),
[(0, mat_rows - mat_num_sv), (0, mat_columns - mat_num_sv)])
# Pad watermark to match the sizes
watermark_padded = np.pad(watermark,
[(0, mat_rows - watermark_rows), (0, mat_columns - watermark_columns)])
watermark_u, watermark_s, watermark_vh = la.svd(watermark_padded)
watermark_num_sv = len(watermark_s)
    watermark_rows, watermark_columns = mat.shape  # reuse the host matrix's shape to pad the watermark's SVD factors
watermark_s_matrix = np.pad(np.diag(watermark_s), [(0, watermark_rows - watermark_num_sv), (0, watermark_columns - watermark_num_sv)])
mat_watermarked = mat + scale * watermark_u @ watermark_s_matrix @ mat_vh
jain_mod_term = watermark_u @ watermark_s_matrix @ mat_vh
if term:
return mat_watermarked, watermark_vh, jain_mod_term
else:
return mat_watermarked, watermark_vh
def extract_watermark_jain_mod(mat_watermarked, mat_original, watermark_vh, scale):
mat_u, mat_s, mat_vh = la.svd(mat_original)
return (mat_watermarked - mat_original) @ la.inv(mat_vh) @ watermark_vh / scale
def extract_watermark(mat_watermarked, watermarked_u, mat_s_matrix, watermarked_vh, scale):
_, watermarked_s, _ = la.svd(mat_watermarked)
mat_watermarked_rows, mat_watermarked_cols = mat_watermarked.shape
num_sv = len(watermarked_s)
watermarked_s_matrix = np.pad(np.diag(watermarked_s),
[(0, mat_watermarked_rows - num_sv), (0, mat_watermarked_cols - num_sv)])
return (watermarked_u @ watermarked_s_matrix @ watermarked_vh - mat_s_matrix) / scale
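# Hedged usage sketch (not in the original module): a Liu & Tan embed/extract
# round trip on random data should recover the (padded) watermark, since
# embed_watermark returns exactly the factors extract_watermark needs.
def _liu_tan_roundtrip_sketch(scale=0.1):
    rng = np.random.default_rng(0)
    host = rng.random((8, 8))
    mark = rng.random((4, 4))
    wm, wm_u, s_matrix, wm_vh = embed_watermark(host, mark, scale=scale)
    recovered = extract_watermark(wm, wm_u, s_matrix, wm_vh, scale)
    return np.allclose(recovered[:4, :4], mark)  # expected: True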
|
from scrapy.item import Field, Item
class DefaultsItem(Item):
"""
DefaultsItem - Item with default values.
Adds default value support for unset Field values.
    Example:
        class MyItem(DefaultsItem):
            url = Field(default='http://example.com/')
            id = Field(default=0)
            text = Field(default=None)
    """
def __getitem__(self, key):
try:
return self._values[key]
except KeyError:
field = self.fields[key]
if 'default' in field:
return field['default']
raise
class FlexItem(Item):
"""
FlexItem - Item with automatic Field declaration.
Allows arbitrary field names without prior declaration.
    Example:
        class MyItem(FlexItem):
            pass
    """
def __setitem__(self, key, value):
if key not in self.fields:
self.fields[key] = Field()
self._values[key] = value
# Completely dynamic creation of Item classes
# https://scrapy.readthedocs.org/en/latest/topics/practices.html#dynamic-creation-of-item-classes
from scrapy.item import DictItem, Field
def create_item_class(class_name, field_list):
field_dict = {}
for field_name in field_list:
field_dict[field_name] = Field()
return type(class_name, (DictItem,), field_dict)
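# Hedged usage sketch (names assumed):
#   ProductItem = create_item_class('ProductItem', ['name', 'price'])
#   item = ProductItem(name='Widget', price=9.99)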
|
# munmap.py
import gc
import os
import sys
import time
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
import config
import memory
import mmu
# obtain current execution state
debugger = Debugger()
execution_state = debugger.getCurrentExecutionContext()
def cleanup():
if mmu.page_table is not None:
del mmu.page_table
gc.collect()
def start_prolog():
    # disable the munmap breakpoint
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_munmap:
brk_object.disable()
def end_prolog():
# enable the munmap breakpoint
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_munmap:
brk_object.enable()
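# The prolog pair above brackets the handler: the munmap breakpoint is disabled
# on entry and re-enabled just before resuming the target, presumably so the
# handler cannot re-trigger itself.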
def munmap():
# -- HEAD -- #
start_prolog()
# -- BODY -- #
    # only focus on the invocation from the app -> munmap
lr = int(execution_state.getRegisterService().getValue("LR")) & 0xffffffff
if not config.in_app_range(lr):
# -- TAIL -- #
end_prolog()
# continue the execution of the target application
execution_state.getExecutionService().resume()
cleanup()
return
# get parameters
start = int(execution_state.getRegisterService().getValue("R0")) & 0xffffffff
length = int(execution_state.getRegisterService().getValue("R1")) & 0xffffffff
config.log_print("[munmap] start = %0#10x, length = %0#10x" % (start, length))
# -- TAIL -- #
end_prolog()
# continue the execution of the target application
execution_state.getExecutionService().resume()
cleanup()
return
if __name__ == '__main__':
munmap()
sys.exit()
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
def recur(L, R):
            # both subtrees are empty
if not L and not R:
return True
            # one subtree is empty, or the values differ
if not L or not R or L.val != R.val:
return False
            # compare L's left with R's right, and L's right with R's left
return recur(L.left, R.right) and recur(L.right, R.left)
        # special case: empty root
if not root:
return True
return recur(root.left, root.right) |
from __future__ import division, print_function
import os, sys
import ROOT
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from FourTopNAOD.Kai.tools.toolbox import *
import collections, copy, json, math
from array import array
import multiprocessing
import inspect
import argparse
ROOT.PyConfig.IgnoreCommandLineOptions = True
parser = argparse.ArgumentParser(description='Test of Stitching module and post-stitching distributions')
parser.add_argument('--stage', dest='stage', action='store', type=str,
help='Stage to be processed: stitch or hist or plot')
args = parser.parse_args()
class StitchHist(Module):
def __init__(self, verbose=False, maxevt=-1, probEvt=None, mode="Flag", era="2017", channel="DL", weightMagnitude=1):
self.writeHistFile=True
self.verbose=verbose
#event counters
self.counter = 0
self.maxEventsToProcess=maxevt
# self.mode = mode
# if self.mode not in ["Flag", "Pass", "Fail"]:
# raise NotImplementedError("Not a supported mode for the Stitcher module: '{0}'".format(self.mode))
self.era = era
self.channel = channel
self.weightMagnitude=weightMagnitude
def beginJob(self,histFile=None,histDirName=None):
self.hName = histFile
if histFile == None or histDirName == None:
self.fillHists = False
Module.beginJob(self, None, None)
else:
self.fillHists = True
Module.beginJob(self,histFile,histDirName)
self.stitchPlot_PCond_nGenJets = ROOT.TH1D("stitchPlot_PCond_nGenJets", "nGenJet (pt > 30) Pass condition; nGenJets; Events", 18, 2, 20)
self.addObject(self.stitchPlot_PCond_nGenJets)
self.stitchPlot_PCond_GenHT = ROOT.TH1D("stitchPlot_PCond_GenHT", "GenHT (pt > 30, |#eta| < 2.4) Pass condition; Gen HT (GeV); Events", 800, 200, 600)
self.addObject(self.stitchPlot_PCond_GenHT)
self.stitchPlot_PCond_nGenLeps = ROOT.TH1D("stitchPlot_PCond_nGenLeps", "nGenLeps (LHE level) Pass condition; nGenLeps; Events", 10, 0, 10)
self.addObject(self.stitchPlot_PCond_nGenLeps)
self.stitchPlot_PCond_AllVar = ROOT.TH3D("stitchPlot_PCond_AllVar", "nGenLeps, nGenJets, GenHT Pass condition; nGenLeps; nGenJets; GenHT ",
6, 0, 6, 6, 5, 12, 12, 300., 600.)
self.addObject(self.stitchPlot_PCond_AllVar)
self.stitchPlot_FCond_nGenJets = ROOT.TH1D("stitchPlot_FCond_nGenJets", "nGenJet (pt > 30) Fail condition; nGenJets; Events", 18, 2, 20)
self.addObject(self.stitchPlot_FCond_nGenJets)
self.stitchPlot_FCond_GenHT = ROOT.TH1D("stitchPlot_FCond_GenHT", "GenHT (pt > 30, |#eta| < 2.4) Fail condition; Gen HT (GeV); Events", 800, 200, 600)
self.addObject(self.stitchPlot_FCond_GenHT)
self.stitchPlot_FCond_nGenLeps = ROOT.TH1D("stitchPlot_FCond_nGenLeps", "nGenLeps (LHE level) Fail condition; nGenLeps; Events", 10, 0, 10)
self.addObject(self.stitchPlot_FCond_nGenLeps)
self.stitchPlot_FCond_AllVar = ROOT.TH3D("stitchPlot_FCond_AllVar", "nGenLeps, nGenJets, GenHT Fail condition; nGenLeps; nGenJets; GenHT ",
6, 0, 6, 6, 5, 12, 12, 300., 600.)
self.addObject(self.stitchPlot_FCond_AllVar)
# self.stitchPlot_nGenLepsPart = ROOT.TH1D("stitchPlot_nGenLeps", "nGenLeps (e(1) or mu (1) or #tau (2)); nGenLeps; Events", 10, 0, 10)
# self.addObject(self.stitchPlot_nGenLepsPart)
def endJob(self):
if hasattr(self, 'objs') and self.objs != None:
prevdir = ROOT.gDirectory
self.dir.cd()
for obj in self.objs:
obj.Write()
prevdir.cd()
if hasattr(self, 'histFile') and self.histFile != None :
self.histFile.Close()
# def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
# self.varDict = [('passStitchSL', 'O', 'Passes Single Lepton Stitch cuts'),
    # ('passStitchDL', 'O', 'Passes Dilepton Stitch cuts'),
# ('passStitchCondition', 'O', 'Passes or fails stitch cuts appropriately for the sample in this channel and era')
# ]
# if self.mode == "Flag":
# if not self.out:
# raise RuntimeError("No Output file selected, cannot flag events for Stitching")
# else:
# for name, valType, valTitle in self.varDict:
# self.out.branch("ESV_%s"%(name), valType, title=valTitle)
def analyze(self, event): #called by the eventloop per-event
"""process event, return True (go to next module) or False (fail, go to next event)"""
#Increment counter and skip events past the maxEventsToProcess, if larger than -1
self.counter +=1
if -1 < self.maxEventsToProcess < self.counter:
return False
###############################
### Collections and Objects ###
###############################
weight = math.copysign(self.weightMagnitude, getattr(event, "genWeight"))
gens = Collection(event, "GenPart")
genjets = Collection(event, "GenJet")
lheparts = Collection(event, "LHEPart")
#Stitch variables
nGL = 0
nGJ = 0
GenHT = 0
for gj, jet in enumerate(genjets):
if jet.pt > 30:
nGJ += 1
if abs(jet.eta) < 2.4:
GenHT += jet.pt
for lhep in lheparts:
if lhep.pdgId in set([-15, -13, -11, 11, 13, 15]):
nGL += 1
if self.fillHists:
if getattr(event, "ESV_passStitchCondition"):
# self.stitchPlot_nGenLepsPart.Fill(nGLgen, weight)
self.stitchPlot_PCond_nGenLeps.Fill(nGL, weight)
self.stitchPlot_PCond_nGenJets.Fill(nGJ, weight)
self.stitchPlot_PCond_GenHT.Fill(GenHT, weight)
self.stitchPlot_PCond_AllVar.Fill(nGL, nGJ, GenHT, weight)
elif not getattr(event, "ESV_passStitchCondition"):
# self.stitchPlot_nGenLepsPart.Fill(nGLgen, weight)
self.stitchPlot_FCond_nGenLeps.Fill(nGL, weight)
self.stitchPlot_FCond_nGenJets.Fill(nGJ, weight)
self.stitchPlot_FCond_GenHT.Fill(GenHT, weight)
self.stitchPlot_FCond_AllVar.Fill(nGL, nGJ, GenHT, weight)
return True
class Stitcher(Module):
def __init__(self, verbose=False, maxevt=-1, probEvt=None, mode="Flag", era="2017", channel="DL", condition="Pass"):
self.writeHistFile=True
self.verbose=verbose
self.probEvt = probEvt
if probEvt:
self.verbose = True
#event counters
self.counter = 0
self.maxEventsToProcess=maxevt
self.mode = mode
if self.mode not in ["Flag", "Pass", "Fail"]:
raise NotImplementedError("Not a supported mode for the Stitcher module: '{0}'".format(self.mode))
self.era = era
self.channel = channel
self.condition = condition
# print("Stitcher is in mode '{0}' for era '{1}', channel '{2}', with condition '{3}'".format(self.mode, self.era, self.channel, self.condition))
self.bits = {'isPrompt':0b000000000000001,
'isDecayedLeptonHadron':0b000000000000010,
'isTauDecayProduct':0b000000000000100,
                     'isPromptTauDecayProduct':0b000000000001000,
'isDirectTauDecayProduct':0b000000000010000,
'isDirectPromptTauDecayProduct':0b000000000100000,
'isDirectHadronDecayProduct':0b000000001000000,
'isHardProcess':0b000000010000000,
'fromHardProcess':0b000000100000000,
'isHardProcessTauDecayProduct':0b000001000000000,
'isDirectHardProcessTauDecayProduct':0b000010000000000,
'fromHardProcessBeforeFSR':0b000100000000000,
'isFirstCopy':0b001000000000000,
'isLastCopy':0b010000000000000,
'isLastCopyBeforeFSR':0b100000000000000
}
self.stitchDict = {'2016': {'SL': {'nGenJets': None,
'nGenLeps': None,
'GenHT': None},
'DL': {'nGenJets': None,
'nGenLeps': None,
'GenHT': None}
},
'2017': {'SL': {'nGenJets': 9,
'nGenLeps': 1,
'GenHT': 500},
'DL': {'nGenJets': 7,
'nGenLeps': 2,
'GenHT': 500}
},
'2018': {'SL': {'nGenJets': None,
'nGenLeps': None,
'GenHT': None},
'DL': {'nGenJets': None,
'nGenLeps': None,
'GenHT': None}
}
}
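        # Hedged reading of the cuts above for 2017 DL: an event passes with
        # exactly 2 LHE-level leptons, at least 7 GenJets (pt > 30) and
        # GenHT >= 500 GeV (cf. analyze below)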
self.stitchSL = self.stitchDict[self.era]['SL']
self.stitchDL = self.stitchDict[self.era]['DL']
def beginJob(self,histFile=None,histDirName=None):
self.hName = histFile
if histFile == None or histDirName == None:
self.fillHists = False
Module.beginJob(self, None, None)
else:
self.fillHists = True
Module.beginJob(self,histFile,histDirName)
self.stitch_PCond_nGenJets = ROOT.TH1D("stitch_PCond_nGenJets", "nGenJet (pt > 30) Pass condition; nGenJets; Events", 18, 2, 20)
self.addObject(self.stitch_PCond_nGenJets)
self.stitch_PCond_GenHT = ROOT.TH1D("stitch_PCond_GenHT", "GenHT (pt > 30, |#eta| < 2.4) Pass condition; Gen HT (GeV); Events", 800, 200, 600)
self.addObject(self.stitch_PCond_GenHT)
self.stitch_PCond_nGenLeps = ROOT.TH1D("stitch_PCond_nGenLeps", "nGenLeps (LHE level) Pass condition; nGenLeps; Events", 10, 0, 10)
self.addObject(self.stitch_PCond_nGenLeps)
self.stitch_PCond_AllVar = ROOT.TH3D("stitch_PCond_AllVar", "nGenLeps, nGenJets, GenHT Pass condition; nGenLeps; nGenJets; GenHT ",
6, 0, 6, 6, 5, 12, 12, 300., 600.)
self.addObject(self.stitch_PCond_AllVar)
self.stitch_FCond_nGenJets = ROOT.TH1D("stitch_FCond_nGenJets", "nGenJet (pt > 30) Fail condition; nGenJets; Events", 18, 2, 20)
self.addObject(self.stitch_FCond_nGenJets)
self.stitch_FCond_GenHT = ROOT.TH1D("stitch_FCond_GenHT", "GenHT (pt > 30, |#eta| < 2.4) Fail condition; Gen HT (GeV); Events", 800, 200, 600)
self.addObject(self.stitch_FCond_GenHT)
self.stitch_FCond_nGenLeps = ROOT.TH1D("stitch_FCond_nGenLeps", "nGenLeps (LHE level) Fail condition; nGenLeps; Events", 10, 0, 10)
self.addObject(self.stitch_FCond_nGenLeps)
self.stitch_FCond_AllVar = ROOT.TH3D("stitch_FCond_AllVar", "nGenLeps, nGenJets, GenHT Fail condition; nGenLeps; nGenJets; GenHT ",
6, 0, 6, 6, 5, 12, 12, 300., 600.)
self.addObject(self.stitch_FCond_AllVar)
# self.stitch_nGenLepsPart = ROOT.TH1D("stitch_nGenLeps", "nGenLeps (e(1) or mu (1) or #tau (2)); nGenLeps; Events", 10, 0, 10)
# self.addObject(self.stitch_nGenLepsPart)
def endJob(self):
if hasattr(self, 'objs') and self.objs != None:
prevdir = ROOT.gDirectory
self.dir.cd()
for obj in self.objs:
obj.Write()
prevdir.cd()
if hasattr(self, 'histFile') and self.histFile != None :
self.histFile.Close()
def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
self.out = wrappedOutputTree
self.varDict = [('passStitchSL', 'O', 'Passes Single Lepton Stitch cuts'),
                        ('passStitchDL', 'O', 'Passes Dilepton Stitch cuts'),
('passStitchCondition', 'O', 'Passes or fails stitch cuts appropriately for the sample in this channel and era')
]
if self.mode == "Flag":
if not self.out:
raise RuntimeError("No Output file selected, cannot flag events for Stitching")
else:
for name, valType, valTitle in self.varDict:
self.out.branch("ESV_%s"%(name), valType, title=valTitle)
elif self.mode == "Pass" or self.mode == "Fail":
pass
def analyze(self, event): #called by the eventloop per-event
"""process event, return True (go to next module) or False (fail, go to next event)"""
#Increment counter and skip events past the maxEventsToProcess, if larger than -1
self.counter +=1
if -1 < self.maxEventsToProcess < self.counter:
return False
if self.probEvt:
if event.event != self.probEvt:
print("Skipping...")
return False
###############################
### Collections and Objects ###
###############################
gens = Collection(event, "GenPart")
genjets = Collection(event, "GenJet")
lheparts = Collection(event, "LHEPart")
#Stitch variables
nGL = 0
nGJ = 0
GenHT = 0
for gj, jet in enumerate(genjets):
if jet.pt > 30:
nGJ += 1
if abs(jet.eta) < 2.4:
GenHT += jet.pt
for lhep in lheparts:
if lhep.pdgId in set([-15, -13, -11, 11, 13, 15]):
nGL += 1
# nGLgen = 0
# for gp, gen in enumerate(gens):
# if abs(gen.pdgId) in set([11, 13]) and gen.status == 1:
# nGLgen += 1
# elif abs(gen.pdgId) in set([15]) and gen.status == 2:
# nGLgen += 1
        passStitch = {}
        # redundant cross-check of the stitch logic, kept from debugging
        crossCheckSL = nGL == self.stitchSL['nGenLeps'] and nGJ >= self.stitchSL['nGenJets'] and GenHT >= self.stitchSL['GenHT']
        crossCheckDL = nGL == self.stitchDL['nGenLeps'] and nGJ >= self.stitchDL['nGenJets'] and GenHT >= self.stitchDL['GenHT']
        passStitch['passStitchSL'] = (nGL == self.stitchSL['nGenLeps'] and nGJ >= self.stitchSL['nGenJets'] and GenHT >= self.stitchSL['GenHT'])
        passStitch['passStitchDL'] = (nGL == self.stitchDL['nGenLeps'] and nGJ >= self.stitchDL['nGenJets'] and GenHT >= self.stitchDL['GenHT'])
        if passStitch['passStitchSL'] != crossCheckSL:
            print("Stitch cross-check mismatch, SL!")
        if passStitch['passStitchDL'] != crossCheckDL:
            print("Stitch cross-check mismatch, DL!")
if self.condition == "Pass":
passStitch['passStitchCondition'] = passStitch['passStitch'+self.channel]
elif self.condition == "Fail":
passStitch['passStitchCondition'] = not passStitch['passStitch'+self.channel]
if self.fillHists:
if passStitch['passStitchCondition']:
# self.stitch_nGenLepsPart.Fill(nGLgen)
self.stitch_PCond_nGenLeps.Fill(nGL)
self.stitch_PCond_nGenJets.Fill(nGJ)
self.stitch_PCond_GenHT.Fill(GenHT)
self.stitch_PCond_AllVar.Fill(nGL, nGJ, GenHT)
else:
# self.stitch_nGenLepsPart.Fill(nGLgen)
self.stitch_FCond_nGenLeps.Fill(nGL)
self.stitch_FCond_nGenJets.Fill(nGJ)
self.stitch_FCond_GenHT.Fill(GenHT)
self.stitch_FCond_AllVar.Fill(nGL, nGJ, GenHT)
if self.verbose and self.counter % 100 == 0:
print("histFile: {3:s} pass SL: nGL[{0:s}] nGJ[{1:s}] GHT[{2:s}]".format(str(nGL == self.stitchSL['nGenLeps']),
str(nGJ >= self.stitchSL['nGenJets']),
str(GenHT >= self.stitchSL['GenHT']),
str(self.hName)))
print("histFile: {3:s} pass DL: nGL[{0:s}] nGJ[{1:s}] GHT[{2:s}]".format(str(nGL == self.stitchDL['nGenLeps']),
str(nGJ >= self.stitchDL['nGenJets']),
str(GenHT >= self.stitchDL['GenHT']),
str(self.hName)))
print("histFile: {5:s} nGL[{0:d}] nGJ[{1:d}] GHT[{2:f}] passSL[{3:s}] passDL[{4:s}]".format(nGL,
nGJ,
GenHT,
str(passStitch['passStitchSL']),
str(passStitch['passStitchDL']),
str(self.hName))
)
##########################
### Write out branches ###
##########################
if self.out and self.mode == "Flag":
for name, valType, valTitle in self.varDict:
self.out.fillBranch("ESV_%s"%(name), passStitch[name])
return True
elif self.mode == "Pass":
print("Testing event for passing")
return passStitch['passStitch'+self.channel]
elif self.mode == "Fail":
print("Testing event for failure")
return not passStitch['passStitch'+self.channel]
else:
raise NotImplementedError("No method in place for Stitcher module in mode '{0}'".format(self.mode))
#Dilepton Data
Tuples = []
filesTTDL=getFiles(query="dbs:/TTTo2L2Nu_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17NanoAODv4-PU2017_12Apr2018_Nano14Dec2018_new_pmx_102X_mc2017_realistic_v6-v1/NANOAODSIM",
redir="root://cms-xrd-global.cern.ch/")
filesTTDL = filesTTDL[0:6]
hNameTTDL="StitchingTTDLv7.root"
TTWeight = 88.341 * 1000 * 41.53 / (68875708 - 280100)
Tuples.append((filesTTDL, hNameTTDL, "2017", "DL", "Fail", "Flag", TTWeight)) #Needs weight. They all need weights
filesTTDLGF = getFiles(query="dbs:/TTTo2L2Nu_HT500Njet7_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17NanoAODv4-PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/NANOAODSIM",
redir="root://cms-xrd-global.cern.ch/")
filesTTDLGF = filesTTDLGF[0:12]
hNameTTDLGF="StitchingTTDLGFv7.root"
TTGFWeight = 1.32512 * 1000 * 41.53 / (8415626 - 42597)
Tuples.append((filesTTDLGF, hNameTTDLGF, "2017", "DL", "Pass", "Flag", TTGFWeight))
filesTTSL=["root://cms-xrd-global.cern.ch//store/mc/RunIIFall17NanoAODv4/TTToSemiLeptonic_TuneCP5_PSweights_13TeV-powheg-pythia8/NANOAODSIM/PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/00000/7BB010D2-1FE4-1D45-B5E0-ABC7A285E8FC.root"]
hNameTTSL="StitchingTTSLv7.root"
#Tuples.append((filesTTSL, hNameTTSL, "2017", "SL", "Fail", "Flag", FIXME))  # weight still to be computed
filesTTSLGF=["root://cms-xrd-global.cern.ch//store/mc/RunIIFall17NanoAODv4/TTToSemiLepton_HT500Njet9_TuneCP5_PSweights_13TeV-powheg-pythia8/NANOAODSIM/PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/90000/CD79F874-9C0A-6446-81A2-344B4C7B3EE9.root"]
hNameTTSLGF="StitchingTTSLGFv7.root"
#Tuples.append((filesTTSLGF, hNameTTSLGF, "2017", "SL", "Pass", "Flag", FIXME))  # weight still to be computed
def stitcher(fileList, hName=None, theEra="2021", theChannel="NL", theCondition="Nope", theMode="Bloop!"):
if hName == None:
hDirName = None
else:
hDirName = "plots"
p=PostProcessor(".",
fileList,
cut=None,
modules=[Stitcher(maxevt=300000, era=theEra, channel=theChannel, mode=theMode, condition=theCondition, verbose=False)],
# modules=[TopSystemPt(maxevt=100, wOpt=wOption)],
noOut=False,
haddFileName=hName,
histFileName="hist_"+hName,
histDirName=hDirName,
)
p.run()
def histogramer(fileList, hName=None, theEra="2021", theChannel="NL", weightMagnitude=1):
if hName == None:
hDirName = None
else:
hDirName = "plots"
p=PostProcessor(".",
fileList,
cut=None,
#Need the plotter, yo
modules=[StitchHist(maxevt=-1, era=theEra, channel=theChannel, verbose=False, weightMagnitude=weightMagnitude)],
noOut=True,
histFileName=hName,
histDirName=hDirName,
)
p.run()
def plotter(fileList, hName=None, theEra="2021", theChannel="NL", theCondition="Nope", weightMagnitude=1):
pass
if args.stage == 'stitch':
pList = []
for tup in Tuples:
p = multiprocessing.Process(target=stitcher, args=(tup[0], tup[1], tup[2], tup[3], tup[4], tup[5]))
pList.append(p)
p.start()
for p in pList:
p.join()
elif args.stage == 'hist':
pList = []
for tup in Tuples:
p = multiprocessing.Process(target=histogramer, args=([tup[1]], tup[1].replace(".root", "_post.root"), tup[2], tup[3], tup[6]))
pList.append(p)
p.start()
for p in pList:
p.join()
elif args.stage == 'plot':
pass
else:
print("Unsuppored stage selected, please choose 'stitch' (add pass/fail stitch branches), 'hist' (make histograms), or 'plot' (Plot the histograms)")
|
# re-exported here for easy reference
from .inference_model import (
MyoPINN,
FullPINN,
CombinedPINN,
MeshPINN,
FullMeshPINN,
)
|
a=10
b=a
print(id(a))
print(type(a))
print(id(b))
print(type(b))
print(a is b)
a=a+1
print(id(a))
print(id(b))
print(a is b)
print(a)
print(b)
x=[1,2,3]
y=x
print(id(x))
print(id(y))
print(x is y)
y.pop()
print(y)
print(x)
x.append("hola")
print(y)
print(x is y)
#WHEN I ASSIGN ONE IMMUTABLE OBJECT TO ANOTHER AND CHANGE ONE, ITS IDENTITY (id) CHANGES
#WHEN I ASSIGN ONE MUTABLE OBJECT TO ANOTHER AND CHANGE ONE, THEY STILL SHARE THE SAME IDENTITY (id) |
with open("map3.txt", "r") as f, open("tiles.txt", "w") as new_file:
    for line in f:
        for character in line:
            if character == "1":
                new_file.write("simple_grass ")
            elif character == "\n":
                new_file.write("\n")
            else:
                new_file.write("sand1 ")
|
import math
import numpy as np
import gym
import torch
from sacPD import PolicyNet
from tqdm import tqdm
torch.manual_seed(2021)
np.random.seed(2021)
sigma = 0.1
PI = math.pi
lr_pi = 0.0005
epoch = 1000 # options: 1000, 0
ckpt_path = 'checkpoints/ep-%d.pt' % epoch
max_speed = 8.0
'''
state:
cos(theta), sin(theta), thetadot
state bound:
theta ~ [0, 2 * pi], thetadot ~ [-8.0, 8.0]
'''
def add_noise(s, sigma=0.01):
    '''
    s: [cos(theta), sin(theta), thetadot]
    Add a small random perturbation to s.
    Let x denote theta and y the perturbation; then
    cos(x+y) = cos(x)cos(y) - sin(x)sin(y)
    sin(x+y) = sin(x)cos(y) + sin(y)cos(x)
    :param s: observation to perturb
    :param sigma: noise scale
    :return: perturbed observation
    '''
z = np.random.rand(2) * sigma
sinz = np.sin(z[0])
cosz = np.cos(z[0])
cos = cosz * s[0] - sinz * s[1]
sin = sinz * s[0] + cosz * s[1]
new_thetadot = s[2] + z[1]
return np.array([cos, sin, new_thetadot])
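# Hedged sanity sketch (not in the original script): rotating (cos, sin) by a
# small random angle keeps the observation on the unit circle.
def _add_noise_sanity_sketch():
    s = np.array([1.0, 0.0, 0.0])
    noisy = add_noise(s, sigma=0.05)
    return np.isclose(noisy[0] ** 2 + noisy[1] ** 2, 1.0)  # expected: True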
def transform(theta, thetadot):
return torch.cat([torch.cos(theta), torch.sin(theta), thetadot])
def angle_normalize(x):
return (((x+np.pi) % (2*np.pi)) - np.pi)
def PDcontrol(x):
theta = x[0] # angle_normalize(x[0])
thetadot = x[1]
Kp = 5.2
Kd = 5.2
u = - Kp * theta - Kd * thetadot
# 15 * np.sin(x[0] + np.pi)
return u
def main(sigma=0.01):
print(f'Noise :{sigma}')
env = gym.make('Pendulum-v0')
score = 0.0
print_interval = 20
sample_num = 1000
pbar = tqdm(range(sample_num), dynamic_ncols=True, smoothing=0.01)
scores = []
for n_epi in pbar:
s = env.reset()
state = env.state
done = False
while not done:
a = PDcontrol(state)
s_prime, r, done, info = env.step([a])
# env.render()
state = env.state + np.random.rand(2) * sigma
score += r
scores.append(score)
pbar.set_description(
(
f'Round : {n_epi} Score :{score}'
)
)
score = 0.0
env.close()
print(f'Mean of score: {np.mean(scores)}')
def eval_sac(model, sigma=0.01):
print(f'Noise: {sigma}')
# create environment
env = gym.make('Pendulum-v0')
score = 0.0
sample_num = 500
pbar = tqdm(range(sample_num), dynamic_ncols=True, smoothing=0.01)
scores = []
for n_epi in pbar:
s = env.reset()
done = False
while not done:
a, log_prob = model(torch.from_numpy(s).float())
s_prime, r, done, info = env.step([a.item()])
# env.render()
score += r
s = add_noise(s_prime, sigma)
scores.append(score)
pbar.set_description(
(
f'Round : {n_epi} Score :{score}'
)
)
score = 0.0
env.close()
print(f'Mean of score: {np.mean(scores)}')
if __name__ == '__main__':
# for i in range(9, 11):
# main(0.01 * i)
epoch = 2000 # options: 2000, 1000, 0
ckpt_path = 'checkpoints/sac-3-%d.pt' % epoch
layers = [3, 128, 128]
# initialize model
ckpt = torch.load(ckpt_path)
model = PolicyNet(lr_pi, layers)
print(layers)
print('Load weights from %s' % ckpt_path)
model.load_state_dict(ckpt['policy'])
for i in range(11):
eval_sac(model, 0.01 * i)
|
from .routes import Layout_Detector_BLUEPRINT
from .routes import Layout_Detector_BLUEPRINT_WF
#from .documentstructure import DOCUMENTSTRUCTURE_BLUEPRINT |
import numpy as np
from copy import deepcopy
from sys import exit
from PreFRBLE.file_system import *
from PreFRBLE.label import *
class Scenario:
"""
    class object defining a physical scenario:
    either the full LoS or an individual model,
    for an individual host redshift or as expected to be observed by a telescope, assuming a redshift population
"""
def __init__(self, IGM=[], Local=[], Host=[], Inter=[], inter=[], redshift=False, IGM_outer_scale=False, N_inter=False, f_IGM=False, telescope=False, population=False):
## required parameter for unique identification
if (redshift is not False and (telescope or population) ) or not ( redshift is not False or (telescope and population)):
exit( "scenario requires either individual redshift or, both, telescope and redshift population" )
## scenario identifier (specific redshift or telescope expectations)
self.identifier = {} ## container for easy access of all identifiers
self.redshift = redshift
self.telescope = telescope
self.population = population
## optional parameter
self.parameter = {} ## container for easy access of all parameters
self.IGM_outer_scale = IGM_outer_scale
self.N_inter = N_inter
self.f_IGM = f_IGM
## models for all regions
self.regions = {}
self.IGM = IGM
self.Local = Local
self.Host = Host
self.Inter = Inter ### Inter is used for intervening galaxies at random redshift, according to prior
self.inter = inter ### inter is used for intervening galaxies at specific redshift
@property
def redshift(self):
return self._redshift
@redshift.setter
def redshift(self, redshift):
self._redshift = redshift
self.scale_factor = (1+redshift)**-1
self.identifier['redshift'] = self.redshift
@property
def telescope(self):
return self._telescope
@telescope.setter
def telescope(self, telescope):
self._telescope = telescope
self.identifier['telescope'] = self.telescope
@property
def population(self):
return self._population
@population.setter
def population(self, population):
self._population = population
self.identifier['population'] = self.population
@property
def IGM_outer_scale(self):
return self._IGM_outer_scale
@IGM_outer_scale.setter
def IGM_outer_scale(self, IGM_outer_scale):
self._IGM_outer_scale = IGM_outer_scale
self.parameter['IGM_outer_scale'] = self.IGM_outer_scale
@property
def N_inter(self):
return self._N_inter
@N_inter.setter
def N_inter(self, N_inter):
self._N_inter = N_inter
self.parameter['N_inter'] = self.N_inter
@property
def f_IGM(self):
return self._f_IGM
@f_IGM.setter
def f_IGM(self, f_IGM):
self._f_IGM = f_IGM
self.parameter['f_IGM'] = self.f_IGM
@property
def IGM(self):
return self._IGM
@IGM.setter
def IGM(self, IGM):
if len(IGM):
self._IGM = [IGM] if type(IGM) in [str, np.str_, np.string_] else IGM
self.regions['IGM'] = self.IGM
@property
def Local(self):
return self._Local
@Local.setter
def Local(self, Local):
if len(Local):
self._Local = [Local] if type(Local) in [str, np.str_, np.string_] else Local
self.regions['Local'] = self.Local
@property
def Host(self):
return self._Host
@Host.setter
def Host(self, Host):
if len(Host):
self._Host = [Host] if type(Host) in [str, np.str_, np.string_] else Host
self.regions['Host'] = self.Host
@property
def Inter(self):
return self._Inter
@Inter.setter
def Inter(self, Inter):
if len(Inter):
self._Inter = [Inter] if type(Inter) in [str, np.str_, np.string_] else Inter
self.regions['Inter'] = self.Inter
@property
def inter(self):
return self._inter
@inter.setter
def inter(self, inter):
if len(inter):
self._inter = [inter] if type(inter) in [str, np.str_, np.string_] else inter
self.regions['inter'] = self.inter
def copy(self):
""" return deepcopy of object """
return deepcopy(self)
def Properties(self, identifier=True, parameter=True, regions=True):
""" return requested properties of Scenario """
res = {}
if identifier: res.update( self.identifier )
if parameter: res.update( self.parameter )
if regions: res.update( self.regions )
return res
def CorrectScenario(self, measure=''):
"""
        this function is used to correct scenario keys when reading data,
        since some models have their output stored under a different name,
        as some model changes do not affect all measures
"""
scenario = self.copy()
if 'IGM' in self.regions.keys():
## different alpha in IGM only affects RM, not DM, SM or tau
if not 'RM' in measure:
if 'alpha' in self.IGM[0]:
scenario.IGM = self.IGM[0].replace( scenario.IGM[0][:10], 'primordial' ) ### !! single IGM model in use is hardcoded, change this to compare different IGM models
else: ## however, RM is only saved for alpha
if 'primordial' in self.IGM:
scenario.IGM = self.IGM[0].replace( 'primordial', 'alpha1-3rd' ) ### !! single IGM model in use is hardcoded, change this to compare different IGM models
return scenario
def Region(self):
""" identify region described by scenario parameters """
if self.telescope: ## either show expected observation of given model or redshift distribution if no model is given
region = 'Telescope' if len(self.regions) else 'redshift'
elif len( self.regions ) > 1: ## several models combine to full LoS scenario
region = 'Full'
else: ## raw likelihood of single model is found in individual file
region = list( self.regions.keys() )[0]
return region
def File(self):
""" return correct likelihood likelihood file corresponding to given scenario """
return likelihood_files[self.Region()]
def Key(self, measure='' ):
""" return key used to read/write scenario in likelihood file """
if not measure:
exit( "Key requires measure, which is not part of Scenario" )
## care for some model parameters not affecting all measures, i. e. choose model representing the case
scenario = self.CorrectScenario( measure )
## we either use telescope and redshift population or a specific redshift
if hasattr(scenario, "telescope") and scenario.telescope:
key_elements = [scenario.telescope, scenario.population]
else:
key_elements = [ str(np.round(scenario.redshift,1)) ]
keys = list( scenario.regions.keys() )
extra = len(keys) > 1 or self.redshift == False or scenario.IGM == ['Pshirkov16']
for region in np.sort( keys ):
key_elements.append( '_'.join( scenario.regions[region] ) ) ## combine all models assumed for each region (e. g. to allow consideration of multiple source environments)
if extra: ### these extras are only needed to write full Likelihoods down, as the raw likelihoods are modified after reading
if region == 'Inter': ## in order to distinguish between intervening and host galaxies, which may use the same model
key_elements[-1] += '_{}Inter'.format( 'N' if scenario.N_inter else '' )
elif region == 'inter': ## in order to distinguish between intervening galaxies at specific (inter) or random redshift (Inter)
key_elements[-1] += '_inter'
elif region == 'IGM':
if scenario.f_IGM and scenario.f_IGM < 1 and measure not in ['tau','SM']:
key_elements[-1] += '_fIGM0{:.0f}'.format( scenario.f_IGM*10 )
if measure == 'tau' and scenario.IGM_outer_scale: ## tau depends on outer scale of turbulence L0, which can be changed in post-processing
key_elements[-1] += '_L0{:.0f}kpc'.format( scenario.IGM_outer_scale ) ### initially computed assuming L0 = 1 Mpc
key_elements.append( measure )
return '/'.join( key_elements)
def Label(self):
""" return plotting label of scenario """
label = ''
if len(self.regions) == 0:
return "{} with {}".format( self.population, self.telescope )
if len(self.regions) == 1: ## if only one region is considered, indicate that in the label
label += "{}: ".format( list(self.regions.keys())[0] )
        ## list all considered regions
for region in self.regions:
models = self.regions.get( region )
if len(models):
label += LabelRegion( models )
label = label[:-6]
        ## account for additional parameters
if 'IGM' in self.regions and len(self.IGM):
if self.f_IGM:
label += r", $f_{{\rm IGM}} = {}$".format( self.f_IGM )
if self.IGM_outer_scale:
label += r", $L_0 = {}$ kpc".format( self.IGM_outer_scale )
return label
properties_benchmark = { ## this is our benchmark scenario, fed to procedures as a kwargs-dict; models considered for the different regions are provided as lists (to allow multiple models in the same scenario, e.g. several types of progenitors; use mixed models only when you know what you are doing)
'redshift' : 0.1, ## Scenario must come either with a redshift or a pair of telescope and redshift population
'IGM' : ['primordial'], ## constrained numerical simulation of the IGM (more info in Hackstein et al. 2018, 2019 & 2020 )
'Host' : ['Rodrigues18'], ## ensemble of host galaxies according to Rodrigues et al . 2018
'Inter' : ['Rodrigues18'], ## same ensemble for intervening galaxies
'Local' : ['Piro18_wind'], ## local environment of magnetar according to Piro & Gaensler 2018
'N_inter' : True, ## if N_Inter = True, then intervening galaxies are considered realistically, i. e. according to the expected number of intervened LoS N_inter
'f_IGM' : 0.9, ## considering baryon content f_IGM=0.9
}
scenario_benchmark = Scenario( **properties_benchmark )
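## Hedged usage note: Scenario.Key assembles the storage key by joining the
## redshift (or telescope and population), the sorted per-region model names
## and the measure with '/', e.g. scenario_benchmark.Key(measure='DM')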
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
# useful additional packages
#import math tools
import numpy as np
# We import the tools to handle general Graphs
import networkx as nx
# We import plotting tools
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg' # Makes the images look nice")
# importing Qiskit
from qiskit import Aer, IBMQ
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
from qiskit.visualization import plot_histogram
# In[3]:
# Generating a 4-node graph with 5 weighted edges
n = 4
V = np.arange(0,n,1)
E =[(0,1,1.0),(1,2,1.0),(1,3,1.0),(2,3,1.0),(0,3,1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
# Generate plot of the Graph
colors = ['r' for node in G.nodes()]
default_axes = plt.axes(frameon=True)
pos = nx.spring_layout(G)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=1, ax=default_axes, pos=pos)
# In[14]:
# Evaluate the function
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma,a_beta)
#F1 =0.5*(5+ 8*((np.cos(0.5*a_gamma))**2)*(np.cos(0.5*(a_gamma+4*a_beta)))*(np.sin(1.5*a_gamma)-np.sin(0.5*a_gamma))*np.sin(2*a_beta)-0.5*((np.sin(2*a_gamma))**2)*((np.sin(2*a_beta))**2)+((np.cos(a_gamma))**2)*np.sin(a_gamma)*np.sin(4*a_beta))
#F1 = 8*np.cos(0.5*a_gamma)*2*np.cos(0.5(a_gamma+4*a_beta))(np.sin(1.5*a_gamma)-np.sin(0.5*a_gamma))*np.sin(2*a_beta)
# Grid search for the minimizing variables
F1 = (1/32)*(78 - 8*np.cos(a_gamma) + 8*np.cos(3*a_gamma) + 2*np.cos(4*a_gamma) + 10*np.cos(a_gamma - 4*a_beta) +
2*np.cos(3*a_gamma - 4*a_beta) + 8*np.cos(2*(a_gamma - 2*a_beta)) - np.cos(4*(a_gamma - a_beta)) +
2*np.cos(4*a_beta) - np.cos(4*(a_gamma + a_beta)) - 8*np.cos(2*(a_gamma + 2*a_beta)) -
2*np.cos(a_gamma + 4*a_beta) - 10*np.cos(3*a_gamma + 4*a_beta))
result = np.where(F1 == np.amax(F1))
print(result[0])
a = list(zip(result[0],result[1]))[0]
#gamma = a[0]*step_size;
#beta = a[1]*step_size;
gamma, beta = 0.5700000000000001, 0.31  # hard-coded from the grid search above
# Plot the expectation value F1
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(a_gamma, a_beta, F1, cmap=cm.coolwarm, linewidth=0, antialiased=True)
ax.set_zlim(1,4)
ax.zaxis.set_major_locator(LinearLocator(3))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
plt.show()
# The optimal parameters and the maximal expectation value can be extracted
print('\n --- OPTIMAL PARAMETERS --- \n')
print('The maximal expectation value is: M1 = %.03f' % np.amax(F1))
print('This is attained for gamma = %.03f and beta = %.03f' % (gamma,beta))
# In[15]:
# prepare the quantum and classical registers
QAOA = QuantumCircuit(len(V), len(V))
# apply the layer of Hadamard gates to all qubits
QAOA.h(range(len(V)))
QAOA.barrier()
# apply the Ising type gates with angle gamma along the edges in E
for i in range(1):  # a single QAOA layer (p = 1)
for edge in E:
k = edge[0]
l = edge[1]
QAOA.cx( l, k)
QAOA.u1(-gamma, k)
QAOA.cx( l, k)
QAOA.barrier()
QAOA.rx(2*beta, range(len(V)))
# Finally measure the result in the computational basis
QAOA.barrier()
QAOA.measure(range(len(V)),range(len(V)))
### draw the circuit for comparison
QAOA.draw(output='mpl')
# In[16]:
# Compute the value of the cost function
def cost_function_C(x, G):
    E = G.edges()
    if len(x) != len(G.nodes()):
        return np.nan
    C = 0
    for index in E:
        e1 = index[0]
        e2 = index[1]
        w = G[e1][e2]['weight']
        C = C + w * x[e1] * (1 - x[e2]) + w * x[e2] * (1 - x[e1])
return C
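# Hedged worked example (not in the original notebook): on the graph G above,
# the assignment x = [0, 1, 1, 0] cuts the edges (0,1), (1,3) and (2,3), so
# cost_function_C([0, 1, 1, 0], G) evaluates to 3.0.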
# In[17]:
# run on local simulator
backend = Aer.get_backend("qasm_simulator")
shots = 10000
simulate = execute(QAOA, backend=backend, shots=shots)
QAOA_results = simulate.result()
plot_histogram(QAOA_results.get_counts(),figsize = (8,6),bar_labels = False)
# In[18]:
# Evaluate the data from the simulator
counts = QAOA_results.get_counts()
avr_C = 0
max_C = [0,0]
hist = {}
for k in range(len(G.edges())+1):
hist[str(k)] = hist.get(str(k),0)
for sample in list(counts.keys()):
# use sampled bit string x to compute C(x)
x = [int(num) for num in list(sample)]
tmp_eng = cost_function_C(x,G)
# compute the expectation value and energy distribution
avr_C = avr_C + counts[sample]*tmp_eng
hist[str(round(tmp_eng))] = hist.get(str(round(tmp_eng)),0) + counts[sample]
# save best bit string
if( max_C[1] < tmp_eng):
max_C[0] = sample
max_C[1] = tmp_eng
M1_sampled = avr_C/shots
print('\n --- SIMULATION RESULTS ---\n')
print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1)))
print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1]))
print('The cost function is distributed as: \n')
plot_histogram(hist,figsize = (8,6),bar_labels = False)
# In[ ]:
|
#!/usr/bin/env python
#
# manage.py is used to launch the application and run other maintenance tasks
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
from flask_debugtoolbar import DebugToolbarExtension
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
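# Registers the Flask-Migrate commands, e.g. `python manage.py db init|migrate|upgrade`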
# add flask debug toolbar
toolbar = DebugToolbarExtension(app)
if __name__ == '__main__':
manager.run()
|
import random as rnd
def qLearning(self, start, goal):
q_table = []
qInit(q_table, self.world, goal)
printQTable(q_table)
printRewards(q_table)
alpha = 1   # learning rate
gamma = .8  # discount factor
for i in range(1000):
y = rnd.randint(0, len(q_table)-1)
x = rnd.randint(0, len(q_table[y])-1)
while(self.world[y][x] == "x" or q_table[y][x].getReward() == 100):
y = rnd.randint(0, len(q_table)-1)
x = rnd.randint(0, len(q_table[y])-1)
episode(self.world, q_table, q_table[y][x], 0, gamma, alpha)
printQTable(q_table)
traverseGrid(start,goal, q_table, self.world)
#gets neighbors, updates the reward, and then moves to the next state
#returns if depth exceeded, goal reached, or an invalid state reached
def episode(world, qt, state, depth, gamma, alpha):
if(depth>150):
#print("early death")
return
(x,y) = state.getPosition()
if(world[y][x] == "x"):
print("went to the obstacle")
return
if(qt[y][x].getReward() == 100):
return
neighbors = state.getActions()  # the list of actions available from this state
next_state, direction = nextState(qt, neighbors, state)  # pick the next state and the direction taken to get there
state.takeAction(direction) # add to the counter for taking the direction
reward = state.getActionReward(direction) + alpha*(next_state.getReward() + gamma*max([next_state.getActionReward(l) for l in next_state.getActions()]) - state.getActionReward(direction)) # * (1/state.numTaken(direction))
state.setActionReward(direction, max(reward, state.getActionReward(direction)))
episode(world, qt, next_state, depth+1, gamma, alpha)
#looks u/d/l/r for valid neighbors and adds them to a list
def getNeighbors(world, qt, state):
neighbors = []
pos = state.getPosition()
x = pos[0]
y = pos[1]
n = len(qt)
m = len(qt[y])
if(x+1 < m and world[y][x+1] != "x"):
neighbors.append(qt[y][x+1])
if(x-1 >= 0 and world[y][x-1] != "x"):
neighbors.append(qt[y][x-1])
if(y+1 < n and world[y+1][x] != "x"):
neighbors.append(qt[y+1][x])
if(y-1 >=0 and world[y-1][x] != "x"):
neighbors.append(qt[y-1][x])
return neighbors
#picks a random available action from the current state and returns the resulting state and the direction taken
def nextState(qt, neighbors, current):
index = rnd.randint(0, len(neighbors)-1)
direction = neighbors[index]
next_pos = None
(x,y) = current.getPosition()
if(direction == "left"):
next_pos = qt[y][x-1]
elif(direction == "right"):
next_pos = qt[y][x+1]
elif(direction == "up"):
next_pos = qt[y-1][x]
elif(direction == "down"):
next_pos = qt[y+1][x]
if(next_pos == None):
print(direction)
return next_pos, direction
def qInit(qt, world, goal):
n = len(world)
m = len(world[0])
for y in range(n):
ls = []
for x in range(m):
temp = State(x,y)
if(x+1 < m and world[y][x+1] != "x"):
temp.addAction("right", 0)
if(x-1 >= 0 and world[y][x-1] != "x"):
temp.addAction("left", 0)
if(y+1 < n and world[y+1][x] != "x"):
temp.addAction("down", 0)
if(y-1 >=0 and world[y-1][x] != "x"):
temp.addAction("up", 0)
#if((x+1 < m and world[y][x+1] == "x") or (x-1 >= 0 and world[y][x-1] == "x") or (y+1 < n and world[y+1][x] == "x") or (y-1 >=0 and world[y-1][x] == "x")):
# temp.addAction("invalid", 0)
# temp.setReward(-1)
if(y == goal[1] and x == goal[0]):
print("Goal state initialized at", goal)
temp.setReward(100)
ls.append(temp)
qt.append(ls)
def printQTable(qt):
for y in range(len(qt)):
for x in range(len(qt[y])):
string = "State " + str(qt[y][x].getPosition()) + "'s Actions: " + str(qt[y][x].getActionDict())
print(string)
def printRewards(qt):
for y in range(len(qt)):
string = ""
for x in range(len(qt[y])):
string += str(qt[y][x].getReward()) + " "
print(string)
def traverseGrid(start, goal, qt, world):
(xs,ys) = start
(xg,yg) = goal
reachedEnd = False
maxReward = 0
index = 0
i = 0
paths = []
while(not reachedEnd):
if(xs == xg and ys == yg):
reachedEnd = True
break
else:
neighbors = qt[ys][xs].getActions()
direction = neighbors[0]
for x in neighbors:
reward = qt[ys][xs].getActionReward(x)
if reward > maxReward:
maxReward = reward
index = i
direction = x
i+=1
print("Agent selects direction: ",direction)
if(rnd.randint(0,9) < 6):
wanted = direction
while(direction == wanted and len(neighbors) > 1):
pick = rnd.randint(0, len(neighbors)-1)
direction = neighbors[pick]
print("Agent moves in direction: ",direction)
next_pos = None
if(direction == "left"):
next_pos = qt[ys][xs-1]
elif(direction == "right"):
next_pos = qt[ys][xs+1]
elif(direction == "up"):
next_pos = qt[ys-1][xs]
elif(direction == "down"):
next_pos = qt[ys+1][xs]
print(next_pos.getPosition())
(xs,ys) = next_pos.getPosition()
#print("now",xs,ys)
paths.append((xs,ys))
return paths
class State:
def __init__(self, x, y):
self.position = (x, y)
self.actions = {}
self.reward = 0
self.visited = 0
def addAction(self,direction, value):
self.actions[direction] = [value, 0]
def setActionReward(self,direction,value):
#print("State " + str(self.position) + "reward for direction " + direction + " is being set to " + str(value))
self.actions[direction][1] = value
def getActionReward(self, direction):
return self.actions[direction][1]
def takeAction(self, direction):
if(direction in self.actions.keys()):
self.actions[direction][0] += 1
else:
print(self.actions.keys())
def numTaken(self, direction):
if(direction in self.actions.keys()):
return self.actions[direction][0]
else:
print("Invalid direction " + direction)
return 1
def getPosition(self):
return self.position
def getActions(self):
return list(self.actions.keys())
def getActionDict(self):
return self.actions
def setReward(self, reward):
self.reward = reward
def getReward(self):
return self.reward
|
""" A cache of recently created operations. """
class OperationsCache(dict):
""" A cache of recently created operations """
def __init__(self, size=256):
""" Creates new OperationsCache.
Args:
size: An integer specifying the maximum size of the cache.
"""
super(OperationsCache, self).__init__()
self.operations_list = []
self.max_size = size
def __setitem__(self, key, value):
""" Adds a new operation to the cache.
Args:
key: A string specifying the operation ID.
value: A dictionary containing the operation details.
"""
if key not in self:
# Track insertion order only for new keys so eviction never sees duplicate entries.
self.operations_list.append(key)
super(OperationsCache, self).__setitem__(key, value)
to_remove = len(self) - self.max_size
for _ in range(to_remove):
old_key = self.operations_list.pop(0)
del self[old_key]
|
# -------------------------------------------------------------------#
# Released under the MIT license (https://opensource.org/licenses/MIT)
# Contact: mrinal.haloi11@gmail.com
# Enhancement Copyright 2016, Mrinal Haloi
# -------------------------------------------------------------------#
import tensorflow as tf
from tqdm import tqdm
from core.history import History
from dataset.replay import ExperienceBuffer
from models.custom_model import Model
from core.base import Base
from utils import utils
class Player(Base):
def __init__(self, cfg, environment, sess, model_dir):
super(Player, self).__init__(cfg)
self.sess = sess
self.inputs = tf.placeholder('float32', [
None, self.cfg.screen_height, self.cfg.screen_width, self.cfg.history_length], name='inputs')
self.target_inputs = tf.placeholder('float32', [
None, self.cfg.screen_height, self.cfg.screen_width, self.cfg.history_length], name='target_inputs')
self.target_q_t = tf.placeholder('float32', [None], name='target_q_t')
self.action = tf.placeholder('int64', [None], name='action')
self.env = environment
self.history = History(self.cfg)
self.model_dir = model_dir
self.memory = ExperienceBuffer(cfg, self.model_dir)
self.learning_rate_minimum = 0.0001
self.double_q = True
def play(self, load_model=True, test_ep=None, num_step=100000, num_episodes=200, display=True):
model_q = Model()
model_target_q = Model(is_target_q=True)
end_points_q = model_q.model_def(self.inputs, self.env, name='main_q')
_ = model_target_q.model_def(
self.target_inputs, self.env, name='target_q')
init = tf.global_variables_initializer()
self.saver = tf.train.Saver(max_to_keep=None)
if load_model:
utils.load_model(self.saver, self.sess, self.model_dir)
else:
self.sess.run(init)
if test_ep is None:
test_ep = self.cfg.ep_test
if not display:
gym_dir = '/tmp/%s-%s' % (self.cfg.env_name, utils.get_time())
self.env.env.monitor.start(gym_dir)
best_reward, best_episode = 0, 0
for episode in range(num_episodes):
screen, reward, action, terminal = self.env.new_random_game()
current_reward = 0
for _ in range(self.cfg.history_length):
self.history.add(screen)
for t in tqdm(range(num_step), ncols=70):
# 1. predict
action = self.predict(
end_points_q['pred_action'], self.history.get(), ep=test_ep)
# 2. act
screen, reward, terminal = self.env.act(
action, is_training=False)
# 3. observe
self.history.add(screen)
current_reward += reward
if terminal:
break
if current_reward > best_reward:
best_reward = current_reward
best_episode = episode
print " [%d] Best reward : %d" % (best_episode, best_reward)
if not display:
self.env.env.monitor.close()
|
from flask import Blueprint
from flask_restful import Resource, Api
from coalaip import CoalaIp, entities
from coalaip_bigchaindb.plugin import Plugin
from web.utils import get_bigchaindb_api_url
coalaip = CoalaIp(Plugin(get_bigchaindb_api_url()))
work_views = Blueprint('work_views', __name__)
work_api = Api(work_views)
class WorkApi(Resource):
def get(self, entity_id):
work = entities.Work.from_persist_id(
entity_id, plugin=coalaip.plugin, force_load=True)
return work.to_jsonld()
work_api.add_resource(WorkApi, '/work/<entity_id>', strict_slashes=False)
|
"""
@author - Anirudh Sharma
"""
from typing import List
def fourSum(nums: List[int], target: int) -> List[List[int]]:
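# Sort-then-two-pointers approach: O(n^3) time after an O(n log n) sort, O(1) extra space besides the output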
# Resultant list
quadruplets = list()
# Base condition
if nums is None or len(nums) < 4:
return quadruplets
# Sort the array
nums.sort()
# Length of the array
n = len(nums)
# Loop for each element of the array
for i in range(0, n - 3):
# Check for skipping duplicates
if i > 0 and nums[i] == nums[i - 1]:
continue
# Reducing to three sum problem
for j in range(i + 1, n - 2):
# Check for skipping duplicates
if j != i + 1 and nums[j] == nums[j - 1]:
continue
# Left and right pointers
k = j + 1
l = n - 1
# Reducing to two sum problem
while k < l:
current_sum = nums[i] + nums[j] + nums[k] + nums[l]
if current_sum < target:
k += 1
elif current_sum > target:
l -= 1
else:
quadruplets.append([nums[i], nums[j], nums[k], nums[l]])
k += 1
l -= 1
while k < l and nums[k] == nums[k - 1]:
k += 1
while k < l and nums[l] == nums[l + 1]:
l -= 1
return quadruplets
if __name__ == '__main__':
print(fourSum([1, 0, -1, 0, -2, 2], 0))
print(fourSum([], 0))
print(fourSum([1, 2, 3, 4], 10))
print(fourSum([0, 0, 0, 0], 0))
|
from rest_framework import generics
from .serializers import AuthorDetailSerializer
from .serializers import AuthorListSerializer
from .models import Author
class AuthorCreateView(generics.CreateAPIView):
serializer_class = AuthorDetailSerializer
class AuthorListView(generics.ListAPIView):
serializer_class = AuthorListSerializer
queryset = Author.get_all()
class AuthorDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = AuthorDetailSerializer
queryset = Author.get_all() |
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
def pr_curve_writer(label, pred):
a = len(label)
b = 380000  # above this many samples, switch to sparse-matrix counting
curve_resolution=10000
linspace=np.linspace(0.0000, 1.0, curve_resolution,endpoint=True, dtype=np.float64)
TPR_array=np.zeros([curve_resolution], dtype=np.float64)
FPR_array=np.zeros([curve_resolution], dtype=np.float64)
PPV_array=np.zeros([curve_resolution], dtype=np.float64)
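# Sweep the decision threshold across [0, 1] and record FPR, TPR (recall), and PPV (precision) at each point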
if a>=b:
label1=csr_matrix(label)
label2=csr_matrix(1*np.logical_not(label))
print('calculating the first ROC space')
for i in range(curve_resolution):
print("creating binary array")
pred_ = np.where(pred >= linspace[i], np.ones_like(pred), np.zeros_like(pred))
pred2=1*np.logical_not(pred_)
#pred_=csc_matrix(pred_)
#print pred_
#print "calc logical and"
tp = label1.dot(pred_)
#print sum(tp)
fp = label2.dot(pred_)
#print fp
fn = label1.dot(pred2)
#print fn
tn = label2.dot(pred2)
#print tn
FPR_array[i] += np.true_divide(fp,tn+fp)
TPR_array[i] += np.true_divide(tp,tp+fn)
if tp+fp==0.0:
PPV_array[i]+=0.0
else:
PPV_array[i] += np.true_divide(tp,tp+fp)
else:
for i in range(curve_resolution):
pred_ = np.where(pred >= linspace[i], np.ones_like(pred), np.zeros_like(pred))
#print pred_
tp = np.logical_and(pred_, label)
fp = np.logical_and(pred_, np.logical_not(label))
fn = np.logical_and(np.logical_not(pred_), label)
tn = np.logical_and(np.logical_not(pred_), np.logical_not(label))
FPR_array[i] = np.true_divide(np.nansum(fp),
np.nansum(np.logical_or(tn, fp)))
TPR_array[i] = np.true_divide(np.nansum(tp),
np.nansum(np.logical_or(tp, fn)))
if np.nansum(np.logical_or(tp, fp))==0.0:
PPV_array[i]=0.0
else:
PPV_array[i] = np.true_divide(np.nansum(tp),
np.nansum(np.logical_or(tp, fp)))
# Trapezoidal integration of precision over recall gives the area under the PR curve
area=0.0
k=curve_resolution-1
for i in range(curve_resolution):
area += 0.500*(PPV_array[k]+PPV_array[k-1])*(TPR_array[k-1]-TPR_array[k])
k -= 1
if k==0:
break
return FPR_array, TPR_array, PPV_array, area
array_file='/home/fast/onimaru/data/prediction/network_constructor_deepsea_1d3_Tue_Sep_19_150851_2017.ckpt-10734_label_prediction.npz'
#genome_bed=''
np_in=np.load(array_file)
pred=np_in["prediction"]
label_array=np_in["label_array"]
if len(label_array.shape)==1:
num_label=1
else:
num_label=label_array.shape[1]
fpr_list=[]
tpr_list=[]
roc_auc_list=[]
precision_list=[]
recall_list=[]
average_precision_list=[]
if num_label>1:
for i in range(num_label):
fpr, tpr, ppv, area=pr_curve_writer(label_array[:,i], pred[:,i])
precision_list.append(ppv)
#tpr_list.append(tpr)
recall_list.append(tpr)
average_precision_list.append(area)
else:
fpr, tpr, ppv, area=pr_curve_writer(label_array, pred)
precision_list.append(ppv)
recall_list.append(tpr)
average_precision = area
average_precision_list.append(average_precision)
plt.figure(1, figsize=(8,8))
"""ax1=plt.subplot(211)
i=0
for i in range(num_label):
f,t,r=fpr_list[i],tpr_list[i],roc_auc_list[i]
plt.plot(f, t, color='darkorange',
label='ROC curve ('+str(i)+') (area = %0.2f)' % r)
i+=1
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.axis('equal')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic curve ('+str(model_name)+')')
plt.legend(loc="lower right")"""
#ax2=plt.subplot(212)
for i in range(num_label):
r, p, a = recall_list[i], precision_list[i], average_precision_list[i]
plt.plot(r, p, lw=2, color='navy', label='Precision-Recall curve ('+str(i)+') (area = %0.2f)' % a)
plt.axis('equal')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.00])
plt.xlim([0.0, 1.0])
#plt.title('Precision-Recall curve ('+str(model_name)+')')
plt.legend(loc="lower left")
#plt.savefig(out_dir+"ROC_space_curve_"+str(model_name)+".pdf", format='pdf')
plt.show()
|
## -*- coding: utf-8 -*-
import markdown as md
def preprocessing(s):
"""
Returns string where \\ is replaced by \\\ so that latex new lines work
"""
return s.replace(r'\\', r'\\\\')
def md_to_html(text_md):
"""
Returns HTML string
The input string should be of the raw 'r' type
"""
css = """
<style>
/* To position Latex MathJax expressions - otherwise left-aligned by default */
div.output_area .math_center .MathJax_Display {
text-align: center !important;
}
div.output_area .math_right .MathJax_Display {
text-align: right !important;
}
/* To center markdown table - otherwise left-aligned by default */
div.output_area .rendered_html .table_center table {
margin: auto;
}
</style>
"""
html = md.markdown(preprocessing(text_md).decode('utf-8'),
extensions=['markdown.extensions.tables'])
return css+html
|
#! python3
"""
This program displays the visual component of a pathfinding application.
The program allows users to click on squares to color them in and create
obstacles for a pathfinding algorithm to work around.
"""
import sys
import random
import pygame
from pygame.locals import *
from algorithms.dijkstra import Dijkstra
from algorithms.dijkstra_visual import Dijkstra_visual
# A Class for setting up buttons.
class Button():
def __init__(self, pos, size, label):
self.pos = pos
self.size = size
self.label = label
self.pressed = False
self.button_rect = pygame.Rect(self.pos, self.size)
self.button_text = font.render(
self.label, True, TEXT_COLOR, BUTTON_COLOR)
self.button_text_rect = self.button_text.get_rect()
self.button_text_rect.centerx = self.button_rect.centerx
self.button_text_rect.centery = self.button_rect.centery
self.button_pos = []
for x in range(self.pos[0], self.pos[0] + self.size[0]):
for y in range(self.pos[1], self.pos[1] + self.size[1]):
self.button_pos.append((x, y))
self.button_img = pygame.image.load('img/button.png')
self.button_pressed_img = pygame.image.load('img/button_pressed.png')
def draw_button(self):
if self.pressed == False:
self.button_text = font.render(
self.label, True, TEXT_COLOR, BUTTON_COLOR)
window_surface.blit(self.button_img, self.button_rect)
window_surface.blit(self.button_text, self.button_text_rect)
else:
self.button_text = font.render(
self.label, True, TEXT_COLOR, BUTTON_PRESSED_COLOR)
window_surface.blit(self.button_pressed_img, self.button_rect)
window_surface.blit(self.button_text, self.button_text_rect)
# Terminates the program.
def terminate():
pygame.quit()
sys.exit()
# Draws the window, banner, grid, and buttons.
def draw_window():
window_surface.fill(BACKGROUND_COLOR)
window_surface.blit(banner_surface, (0, WINDOW_HEIGHT))
banner_surface.fill(BANNER_COLOR)
pygame.draw.line(window_surface, (180, 180, 180),
(0, WINDOW_HEIGHT), (WINDOW_WIDTH, WINDOW_HEIGHT), 2)
draw_grid()
# Draws the buttons to the surface.
start_node_button.draw_button()
end_node_button.draw_button()
place_wall_button.draw_button()
erase_wall_button.draw_button()
generate_maze_button.draw_button()
clear_board_button.draw_button()
solve_button.draw_button()
visualize_button.draw_button()
# Creates or removes a wall block at the designated position.
def draw_wall(pos):
global path
x_grid = int(pos[0] / GRID_SIZE) * GRID_SIZE
y_grid = int(pos[1] / GRID_SIZE) * GRID_SIZE
if y_grid < WINDOW_HEIGHT:
wall_vertex = (x_grid, y_grid)
wall_rect = pygame.Rect(x_grid, y_grid, GRID_SIZE, GRID_SIZE)
if can_draw_wall == True:
if wall_vertex not in wall_vertices:
wall_vertices.append(wall_vertex)
walls.append(wall_rect)
path = []
elif can_erase_wall == True:
if wall_vertex in wall_vertices:
wall_vertices.remove(wall_vertex)
walls.remove(wall_rect)
path = []
else:
return
# Creates the start and end nodes.
def draw_node(pos):
global path
x_grid = int(pos[0] / GRID_SIZE) * GRID_SIZE
y_grid = int(pos[1] / GRID_SIZE) * GRID_SIZE
if y_grid < WINDOW_HEIGHT:
if can_draw_start == True:
nodes[0] = (x_grid, y_grid)
node_rect[0] = pygame.Rect(x_grid, y_grid, GRID_SIZE, GRID_SIZE)
path = []
if can_draw_end == True:
nodes[1] = (x_grid, y_grid)
node_rect[1] = pygame.Rect(x_grid, y_grid, GRID_SIZE, GRID_SIZE)
path = []
else:
return
# Finds the shortest path based on the chosen algorithm.
def find_path(nodes, wall_vertices):
if visualize_button.pressed == False:
return Dijkstra(nodes, wall_vertices, WINDOW_WIDTH, WINDOW_HEIGHT, GRID_SIZE)
else:
return Dijkstra_visual(nodes, wall_vertices, WINDOW_WIDTH, WINDOW_HEIGHT, GRID_SIZE, window_surface)
# Draws the shortest path found in find_path
def draw_path(path):
for vertex in path:
path_rect = pygame.Rect(vertex[0], vertex[1], GRID_SIZE, GRID_SIZE)
pygame.draw.rect(window_surface, PATH_COLOR, path_rect)
# Generates a random maze for the user
def generate_maze():
# Fills in every possible wall vertex
for x in range(0, WINDOW_WIDTH, GRID_SIZE):
for y in range(0, WINDOW_HEIGHT, GRID_SIZE):
wall_vertices.append((x, y))
walls.append(pygame.Rect(x, y, GRID_SIZE, GRID_SIZE))
for wall in walls:
pygame.draw.rect(window_surface, WALL_COLOR, wall)
pygame.display.update()
visited = []
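# clear_path carves passages with a randomized depth-first search (recursive backtracker),
# removing the wall between each visited cell and the neighbor it steps to.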
def clear_path(x, y):
index = [0, 1, 2, 3]
neighbor = [(x + 2 * GRID_SIZE, y), (x - 2 * GRID_SIZE, y),
(x, y + 2 * GRID_SIZE), (x, y - 2 * GRID_SIZE)]
neighbor_wall = [(x + GRID_SIZE, y), (x - GRID_SIZE, y),
(x, y + GRID_SIZE), (x, y - GRID_SIZE)]
random.shuffle(index)
for i in index:
if neighbor[i] not in visited:
visited.append((neighbor[i][0], neighbor[i][1]))
visited.append((neighbor_wall[i][0], neighbor_wall[i][1]))
try:
wall_vertices.remove(neighbor[i])
wall_vertices.remove(neighbor_wall[i])
walls.remove(pygame.Rect(
neighbor[i][0], neighbor[i][1], GRID_SIZE, GRID_SIZE))
walls.remove(pygame.Rect(
neighbor_wall[i][0], neighbor_wall[i][1], GRID_SIZE, GRID_SIZE))
if visualize_button.pressed == True:
draw_window()
for wall in walls:
pygame.draw.rect(window_surface, WALL_COLOR, wall)
pygame.display.update()
pygame.time.wait(10)
except ValueError:  # neighbor lies outside the grid or was already cleared
continue
clear_path(neighbor[i][0], neighbor[i][1])
clear_path(random.randrange(0, WINDOW_WIDTH, GRID_SIZE),
random.randrange(0, WINDOW_HEIGHT, GRID_SIZE))
# Creates a light grid in the drawing space for ease of use.
def draw_grid():
for x in range(0, WINDOW_WIDTH, GRID_SIZE):
pygame.draw.line(window_surface, GRID_COLOR,
(x, 0), (x, WINDOW_HEIGHT), 1)
for y in range(0, WINDOW_HEIGHT, GRID_SIZE):
pygame.draw.line(window_surface, GRID_COLOR,
(0, y), (WINDOW_WIDTH, y), 1)
# Displays a message if solve is clicked without a start and end node.
def display_error():
error_text = font.render(
"Select a start and an end node first.", True, TEXT_COLOR, ERROR_COLOR)
error_text_rect = error_text.get_rect()
error_text_rect.centerx = int(WINDOW_WIDTH / 2)
error_text_rect.centery = int(WINDOW_HEIGHT / 2)
window_surface.blit(error_text, error_text_rect)
"""
This block defines constants.
"""
WINDOW_WIDTH = 1000
WINDOW_HEIGHT = 600
BANNER_HEIGHT = 175
GRID_SIZE = 20
BUTTON_SIZE = (200, 50)
TEXT_COLOR = (250, 250, 250)
BACKGROUND_COLOR = (240, 240, 240)
BANNER_COLOR = (220, 220, 220)
WALL_COLOR = (0, 0, 0)
PATH_COLOR = (35, 235, 35)
PATHFINDING_COLOR = (35, 35, 235)
NODE_COLORS = [(235, 35, 35), (235, 235, 35)]
BUTTON_COLOR = (40, 120, 200)
BUTTON_PRESSED_COLOR = (30, 60, 180)
GRID_COLOR = (200, 200, 200)
ERROR_COLOR = (230, 23, 23)
pygame.init()
# Sets default settings.
can_draw_wall = True
drawing_wall = False
can_erase_wall = False
can_draw_start = False
can_draw_end = False
display_error_message = False
# Prepares empty structures to be used in later functions.
buttons = []
walls = []
wall_vertices = []
path = {}
nodes = [None, None]
node_rect = [None, None]
# Sets up display window and surfaces
pygame.display.set_caption("Pathfinder")
icon = pygame.image.load('img/icon.png')
pygame.display.set_icon(icon)
pygame.mouse.set_visible(True)
font = pygame.font.SysFont(None, 42)
window_surface = pygame.display.set_mode(
(WINDOW_WIDTH, WINDOW_HEIGHT + BANNER_HEIGHT))
banner_surface = pygame.Surface((WINDOW_WIDTH, BANNER_HEIGHT))
window_surface.blit(banner_surface, (0, WINDOW_HEIGHT))
# Creates the buttons
start_node_button = Button(
(40, WINDOW_HEIGHT + 25),
BUTTON_SIZE,
"Place Start"
)
end_node_button = Button(
(40, WINDOW_HEIGHT + BANNER_HEIGHT - 25 - BUTTON_SIZE[1]),
BUTTON_SIZE,
"Place End"
)
place_wall_button = Button(
(BUTTON_SIZE[0] + 80, WINDOW_HEIGHT + 25),
BUTTON_SIZE,
"Place Walls"
)
erase_wall_button = Button(
(BUTTON_SIZE[0] + 80, WINDOW_HEIGHT + BANNER_HEIGHT - 25 - BUTTON_SIZE[1]),
BUTTON_SIZE,
"Erase Walls"
)
generate_maze_button = Button(
(WINDOW_WIDTH - 2 * BUTTON_SIZE[0] - 80, WINDOW_HEIGHT + 25),
BUTTON_SIZE,
"Create Maze"
)
clear_board_button = Button(
(WINDOW_WIDTH - 2 * BUTTON_SIZE[0] - 80, WINDOW_HEIGHT +
BANNER_HEIGHT - 25 - BUTTON_SIZE[1]),
BUTTON_SIZE,
"Clear Board"
)
solve_button = Button(
(WINDOW_WIDTH - BUTTON_SIZE[0] - 40, WINDOW_HEIGHT + 25),
BUTTON_SIZE,
"Solve"
)
visualize_button = Button(
(WINDOW_WIDTH - BUTTON_SIZE[0] - 40,
WINDOW_HEIGHT + BANNER_HEIGHT - 25 - BUTTON_SIZE[1]),
BUTTON_SIZE,
"Visualize"
)
# Set the default starting button.
place_wall_button.pressed = True
# Main Loop
while True:
for event in pygame.event.get():
# Quits the program.
if event.type == QUIT:
terminate()
if event.type == MOUSEBUTTONDOWN:
if event.pos in start_node_button.button_pos:
can_draw_start = True
can_draw_wall = False
can_draw_end = False
can_erase_wall = False
display_error_message = False
start_node_button.pressed = True
end_node_button.pressed = False
place_wall_button.pressed = False
erase_wall_button.pressed = False
elif event.pos in end_node_button.button_pos:
can_draw_start = False
can_draw_wall = False
can_draw_end = True
can_erase_wall = False
display_error_message = False
start_node_button.pressed = False
end_node_button.pressed = True
place_wall_button.pressed = False
erase_wall_button.pressed = False
elif event.pos in place_wall_button.button_pos:
can_draw_start = False
can_draw_wall = True
can_draw_end = False
can_erase_wall = False
display_error_message = False
start_node_button.pressed = False
end_node_button.pressed = False
place_wall_button.pressed = True
erase_wall_button.pressed = False
elif event.pos in erase_wall_button.button_pos:
can_draw_start = False
can_draw_wall = False
can_draw_end = False
can_erase_wall = True
display_error_message = False
start_node_button.pressed = False
end_node_button.pressed = False
place_wall_button.pressed = False
erase_wall_button.pressed = True
elif event.pos in generate_maze_button.button_pos:
walls = []
wall_vertices = []
path = {}
nodes = [None, None]
node_rect = [None, None]
display_error_message = False
generate_maze()
elif event.pos in clear_board_button.button_pos:
walls = []
wall_vertices = []
path = {}
nodes = [None, None]
node_rect = [None, None]
elif event.pos in solve_button.button_pos:
if nodes[0] != None and nodes[1] != None:
display_error_message = False
path = find_path(nodes, wall_vertices)
else:
display_error_message = True
elif event.pos in visualize_button.button_pos:
if visualize_button.pressed == False:
display_error_message = False
visualize_button.pressed = True
else:
display_error_message = False
visualize_button.pressed = False
if can_draw_wall == True:
drawing_wall = True
draw_wall(event.pos)
if can_erase_wall == True:
drawing_wall = True
draw_wall(event.pos)
if can_draw_start == True or can_draw_end == True:
draw_node(event.pos)
if event.type == MOUSEBUTTONUP:
drawing_wall = False
if event.type == MOUSEMOTION:
if drawing_wall:
draw_wall(event.pos)
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
terminate()
draw_window()
# Draws the walls onto the screen.
for wall in walls:
pygame.draw.rect(window_surface, WALL_COLOR, wall)
# Draws the path taken or displays an error message.
if path:
draw_path(path)
elif display_error_message == True:
display_error()
# Draws start and end nodes.
for i, node in enumerate(node_rect):
if node != None:
pygame.draw.rect(window_surface, NODE_COLORS[i], node)
# Updates the display.
pygame.display.update()
|
#Challenge 024: Write a program that reads the name of a city and says whether or not it starts with the word "Santo".
#METHOD 1: Personal solution - covers all the edge cases.
city = str(input('Please tell us the city where you were born. ')).upper() #split() already discards leading and trailing spaces, so .strip() is not needed
lista = city.split()
print(f'Does its name start with Santo? {lista[0] == "SANTO"}.')
#METHOD 2: Instructor's solution - caution: cities like "Santorini" yield True even though the answer is false.
cid = str(input('In which city were you born? '))
print(f'Does the city start with Santo? {cid[:5].upper() == "SANTO"}')
|
from os import system
from time import sleep
def print_all_playlist(spotifyObject, user):
_ = system('clear')
all_playlist = spotifyObject.user_playlists(user["id"], limit=50)
items = all_playlist["items"]
compteur = 0
print("{0:2} | {1:80} | {2:20}".format('N°', 'Name', 'Owner'))
print(f'{"-"*112}')
for one_playlist in items:
print("{0:2} | {1:80} | {2:20}".format(compteur, one_playlist["name"], one_playlist["owner"]["display_name"]))
compteur += 1
print()
commande = input("Enter number of playlist: ")
commande = int(commande)
return(items[commande])
def know_rules_playlist(the_playlist, user):
owner_id = the_playlist["owner"]["id"]
if owner_id == user["id"]:
return True
else:
return False
def munu_playlist_setting():
_ = system('clear')
print('>>> What do you want to do ?')
print()
print("1 : Modify playlist")
print("2 : Create a new playlist")
print("3 : Back")
while True:
print()
commande = input("Enter your choice: ")
if commande == "1":
return commande
elif commande == "2":
return commande
elif commande == "3":
return commande
else:
print("Bad input !")
def create_new_playlist(spotifyObject, user):
_ = system('clear')
while True:
name = input("Enter a name for your playlist: ")
print()
while True:
wanna_description = input("Do you want a description ? y/n:")
if wanna_description == 'y' or wanna_description == 'yes':
print()
description = input("Write your description: ")
break
elif wanna_description == 'n' or wanna_description == 'no':
description = ""
break
else:
print("Bad input !")
print()
while True:
public_private = input("Do you wanna a public or a private playlist ? public / private : ")
if public_private == 'public':
public_private = True
break
elif public_private == 'private':
public_private = False
break
else:
print("Bad input !")
print()
_ = system('clear')
print("Your playlist name is: ",name)
print()
print("Your description: ", description)
print()
print("Your playst is ", public_private)
print()
while True:
confirm = input("Did you confirm that ? y/n")
if confirm == 'y' or confirm == 'yes':
break
elif confirm == 'n' or confirm == 'no':
break
else:
print("Bad input !")
print()
if confirm == 'y' or confirm == 'yes':
spotifyObject.user_playlist_create(user["id"],name,public_private,description)
break
elif confirm == 'n' or confirm == 'no':
pass
def print_rules(spotifyObject, know_rules,the_playlist,user):
_ = system('clear')
if know_rules == True:
while True:
_ = system('clear')
the_playlist = spotifyObject.playlist(the_playlist["id"])
print('>>> Playlist: ', the_playlist["name"] )
print('>>> What do you want to do ?')
print()
print("1 : Remove Song")
print("2 : Change Name")
print("3 : Change State")
print("4 : Change Description")
print("5 : Back")
print()
commande = input("Enter commande: ")
if commande == '1':
cancel = False
_ = system("clear")
all_track = spotifyObject.playlist_tracks(the_playlist["id"],limit=100)
compteur = 0
print("{0:2} | {1:80}".format('N°', 'Name'))
print(f'{"-"*112}')
for item in all_track["items"]:
print("{0:2} | {1:80}".format(compteur,item["track"]["name"]))
compteur+=1
while True:
print()
choise_song = input("Enter number of song: ")
print("x : Cancel")
try:
int_choise_song = int(choise_song)
if int_choise_song < compteur:  # valid track numbers are 0..compteur-1
break
else:
print("Bad input !")
pass
except ValueError:
if choise_song == 'x':
cancel = True
break
else:
print("Bad input !")
pass
if cancel == False:
the_track = [all_track["items"][int_choise_song]["track"]["id"]]
spotifyObject.user_playlist_remove_all_occurrences_of_tracks(user["id"],the_playlist["id"],the_track)
sleep(0.1)
elif commande == '2':
print()
new_name = input("Enter your new name: ")
spotifyObject.user_playlist_change_details(user["id"],the_playlist["id"],name=new_name)
sleep(0.2)
_ = system("clear")
elif commande == '3':
print()
print("Your state is public:", the_playlist["public"])
while True:
print()
change_state = input("Do you wanna change ? y/n ")
if change_state == 'y' or change_state == 'yes':
if the_playlist["public"] == True:
spotifyObject.user_playlist_change_details(user["id"],the_playlist["id"],public=False)
sleep(0.2)
else:
spotifyObject.user_playlist_change_details(user["id"],the_playlist["id"],public=True)
sleep(0.2)
break
elif change_state == 'n' or change_state == 'no':
break
else:
print("Bad input !")
print()
elif commande == '4':
if the_playlist["description"] == '':
print("Your description is empty !")
else:
print("Your description:")
print(the_playlist["description"])
while True:
print()
change_description = input("Do you wanna change ? y/n")
if change_description == 'y' or change_description == 'yes':
print()
change_description2 = input("Enter your description: ")
spotifyObject.user_playlist_change_details(user["id"],the_playlist["id"],description=change_description2)
sleep(0.2)
break
elif change_description == 'n' or change_description == 'no':
break
else:
print("Bad input !")
print()
elif commande == '5':
break
else:
_ = system('clear')
print("Bad input !")
print()
else:
while True:
print('>>> Playlist: ', the_playlist["name"] )
print('>>> What do you want to do ?')
print()
print("1 : UnFollow playlist")
print("2 : Add a song to another playlist: ")
print("3 : Back")
print()
commande = input("Enter commande: ") # and play the song
if commande == '1':
spotifyObject.user_playlist_unfollow(user["id"],the_playlist["id"])
break
elif commande == '2':
_ = system("clear")
all_track = spotifyObject.playlist_tracks(the_playlist["id"],limit=None)
compteur = 0
print("{0:2} | {1:80}".format('N°', 'Name'))
print(f'{"-"*112}')
for item in all_track["items"]:
print("{0:2} | {1:80}".format(compteur,item["track"]["name"]))
compteur+=1
while True:
print()
choise_song = input("Enter number of song: ")
try:
int_choise_song = int(choise_song)
if int_choise_song < compteur:  # valid track numbers are 0..compteur-1
break
else:
print("Bad input !")
pass
except ValueError:
print("Bad input !")
pass
while True:
playlist_to_set = print_all_playlist(spotifyObject, user)
know_rules = know_rules_playlist(playlist_to_set, user)
if know_rules == True:
the_track = [all_track["items"][int_choise_song]["track"]["id"]]
spotifyObject.user_playlist_add_tracks(user["id"],playlist_to_set["id"],the_track)
_ = system("clear")
break
else:
print()
print("You are not the owner of this playlist !")
print()
pass
elif commande == '3':
break
else:
_ = system('clear')
print("Bad input !")
print()
|
'''Redo CHALLENGE 35 (the triangles), adding a feature that shows which type of triangle will be formed:
– EQUILATERAL: all sides equal
– ISOSCELES: two sides equal, one different
– SCALENE: all sides different'''
primeiro = float(input('First segment: '))
segundo = float(input('Second segment: '))
terceiro = float(input('Third segment: '))
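# Triangle inequality: each side must be shorter than the sum of the other two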
if primeiro < segundo + terceiro and segundo < primeiro + terceiro and terceiro < primeiro + segundo:
print('The segments above CAN FORM a triangle: ', end='')
if primeiro == segundo == terceiro:
print('\033[1;36mEQUILATERAL!\033[m')
elif primeiro != segundo != terceiro != primeiro:
print('\033[1;33mSCALENE!\033[m')
else:
print('\033[1;31mISOSCELES!\033[m')
else:
print('The segments above CANNOT FORM a triangle')
|
from collections import OrderedDict
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import dash_table
from dash_docs import reusable_components as rc
from dash_docs import datasets
Display = rc.CreateDisplay({
'dash_table': dash_table,
'html': html,
'df': datasets.df_regions,
'df_regions': datasets.df_regions,
'df_election': datasets.df_election,
'df_long': datasets.df_long,
'df_long_columns': datasets.df_long_columns,
'df_15_columns': datasets.df_15_columns,
'df_moby_dick': datasets.df_moby_dick,
'df_numeric': datasets.df_numeric,
'pd': pd
})
layout = html.Div(
children=[
html.H1('DataTable Width & Column Width'),
html.H2('Default Width'),
rc.Markdown(
'''
By default, the table will expand to the width of its container.
The width of the columns is determined automatically in order to
accommodate the content in the cells.
'''
),
Display(
'''
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns]
)
'''
),
html.Hr(),
rc.Markdown(
'''
The default styles work well for a small number of columns and short
text. However, if you are rendering a large number of columns or
cells with long contents, then you'll need to employ one of the
following overflow strategies to keep the table within its container.
'''
),
rc.Markdown(
'''
## Wrapping onto Multiple Lines
If your cells contain text with spaces, then you can overflow
your content into multiple lines.
'''
),
Display(
'''
dash_table.DataTable(
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
},
data=df_election.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df_election.columns]
)
'''),
rc.Markdown(
'''
`style_cell` updates the styling for the data cells & the header cells.
To specify header styles, use `style_header`.
To specify data cell styles, use `style_data`.
This example keeps the header on a single line while wrapping the data cells.
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
style_data={
'whiteSpace': 'normal',
'height': 'auto',
},
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns]
)
'''),
rc.Markdown(
'''
### Denser Multi-Line Cells with Line-Height
If you are displaying lots of text in your cells, then you may want to
make the text appear a little more dense by shortening up the line-height.
By default (as above), it's around 22px. Here, it's 15px.
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
style_data={
'whiteSpace': 'normal',
'height': 'auto',
'lineHeight': '15px'
},
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns]
)
'''),
rc.Markdown(
'''
### Wrapping onto Multiple Lines while Constraining the Height of Cells
If your text is really long, then you can constrain the height of the
cells and display a tooltip when hovering over the cell.
'''
),
Display(
"""
df = df_moby_dick # no-display
result = dash_table.DataTable(
style_data={
'whiteSpace': 'normal',
},
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
css=[{
'selector': '.dash-spreadsheet td div',
'rule': '''
line-height: 15px;
max-height: 30px; min-height: 30px; height: 30px;
display: block;
overflow-y: hidden;
'''
}],
tooltip_data=[
{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
} for row in df.to_dict('records')
],
tooltip_duration=None,
style_cell={'textAlign': 'left'} # left align text in columns for readability
)
"""),
rc.Markdown(
'''
Hover over the cells to see the tooltip.
Why the `css`? Fixed height cells are tricky because, [by CSS 2.1 rules](https://www.w3.org/TR/CSS21/tables.html#height-layout),
the height of a table cell is "the minimum height required by the content".
So, here we are setting the height of the cell indirectly
by setting the div _within_ the cell.
In this example, we display two lines of data by setting the `line-height`
to be 15px and the height of each cell to be 30px.
The second sentence is cut off.
There are a few **limitations** with this method:
1. It is not possible to display ellipses with this method.
2. It is not possible to set a max-height. All of the cells need to be
the same height.
Subscribe to [plotly/dash-table#737](https://github.com/plotly/dash-table/issues/737) for updates or other workarounds
on this issue.
'''
),
rc.Markdown(
"""
## Overflowing Into Ellipses
Alternatively, you can keep the content on a single line but display
a set of ellipses if the content is too long to fit into the cell.
Here, `max-width` is set to 0. It could be any number; the only
important thing is that it is supplied. The behaviour will be
the same whether it is 0 or 50.
If you want to just hide the content instead of displaying ellipses,
then set `textOverflow` to `'clip'` instead of `'ellipsis'`.
"""
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_cell={
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 0
}
)
'''),
rc.Markdown(
'''
> In the example above, ellipses are not displayed for the header.
> We consider this a bug, subscribe to [plotly/dash-table#735](https://github.com/plotly/dash-table/issues/735) for updates.
'''),
rc.Markdown(
'''
### Ellipses & Tooltips
If you are display text data that is cut off by ellipses, then you can
include tooltips so that the full text appears on hover.
By setting `tooltip_duration` to `None`, the tooltip will persist
as long as the mouse pointer is above the cell, and it will disappear
when the pointer moves away. You can override this by passing in
a number in milliseconds (e.g. 2000 if you want it to disappear after
two seconds).
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_cell={
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 0,
},
tooltip_data=[
{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
} for row in df.to_dict('records')
],
tooltip_duration=None
)
'''),
rc.Markdown(
'''
## Horizontal Scroll
Instead of trying to fit all of the content in the container, you could
overflow the entire container into a scrollable container.
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_table={'overflowX': 'auto'},
)
'''),
rc.Markdown(
'''
Note how we haven't explicitly set the width of the individual columns
yet. The widths of the columns have been computed dynamically depending
on the width of the table and the width of the cell's contents.
In the example above, by providing a scrollbar, we're effectively
giving the table as much width as it needs in order to fit the entire
width of the cell contents on a single line.
'''
),
rc.Markdown('### Horizontal Scroll with Fixed-Width Columns & Cell Wrapping'),
rc.Markdown(
'''
Alternatively, you can fix the width of each column by adding `width`.
In this case, the column's width will be constant, even if its contents
are shorter or wider.
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_table={'overflowX': 'auto'},
style_cell={
'height': 'auto',
# all three widths are needed
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
'whiteSpace': 'normal'
}
)
'''),
rc.Markdown('### Horizontal Scroll with Fixed-Width & Ellipses'),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_table={'overflowX': 'auto'},
style_cell={
# all three widths are needed
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
}
)
'''),
rc.Markdown(
'''
### Horizontal Scrolling via Fixed Columns
You can also add a horizontal scrollbar to your table by fixing
the leftmost columns with `fixed_columns`.
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
fixed_columns={'headers': True, 'data': 1},
style_table={'minWidth': '100%'}
)
'''),
rc.Markdown(
'''
Here is the same example but with *fixed-width cells & ellipses*.
'''
),
Display(
'''
df = df_election # no-display
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
fixed_columns={ 'headers': True, 'data': 1 },
style_table={'minWidth': '100%'},
style_cell={
# all three widths are needed
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
}
)
'''),
rc.Markdown("## Setting Column Widths"),
rc.Markdown(
'''
### Percentage Based Widths
The widths of individual columns can be supplied through the
`style_cell_conditional` property. These widths can be specified as
percentages or fixed pixels.
'''
),
Display(
'''
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_cell_conditional=[
{'if': {'column_id': 'Date'},
'width': '30%'},
{'if': {'column_id': 'Region'},
'width': '30%'},
]
)
'''),
rc.Markdown(
'''
By default, the column width is the maximum of the percentage given
and the width of the content. So, if the content in the column is wide,
the column may be wider than the percentage given. This prevents overflow.
In the example below, note the first column is actually wider than 10%;
if it were shorter, the text "New York City" would overflow.
'''
),
Display(
'''
html.Div([
html.Div('10%', style={'backgroundColor': 'hotpink', 'color': 'white', 'width': '10%'}),
dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns if c != 'Date'],
style_cell_conditional=[
{'if': {'column_id': 'Region'},
'width': '10%'}
]
)
])
'''),
rc.Markdown(
'''
To force columns to be a certain width (even if that causes overflow)
use `table-layout: fixed`.
### Percentage Based Widths and `table-layout: fixed`
If you want all columns to have the same percentage-based width,
use `style_data` and `table-layout: fixed`.
'''
),
Display(
'''
dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
css=[{'selector': 'table', 'rule': 'table-layout: fixed'}],
style_cell={
'width': '{}%'.format(len(df.columns)),
'textOverflow': 'ellipsis',
'overflow': 'hidden'
}
)
'''),
rc.Markdown(
'''
Setting consistent percentage-based widths is a good option if you are using
`virtualization`, sorting (`sort_action`), or filtering (`filter_action`).
Without fixed column widths, the table will dynamically resize the
columns depending on the width of the data that is displayed.
**Limitations**
1. Percentage-based widths are not available with `fixed_rows` & `table-layout: fixed`.
See [plotly/dash-table#748](https://github.com/plotly/dash-table/issues/748)
2. Percentage-based widths with `fixed_rows` and without `table-layout: fixed`
has some issues when resizing the window. See [plotly/dash-table#747](https://github.com/plotly/dash-table/issues/747)
'''
),
rc.Markdown(
'''
### Individual Column Widths with Pixels
In this example, we set three columns to have fixed widths. The remaining
two columns will take up the remaining space.
'''),
Display(
'''
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_cell_conditional=[
{'if': {'column_id': 'Temperature'},
'width': '130px'},
{'if': {'column_id': 'Humidity'},
'width': '130px'},
{'if': {'column_id': 'Pressure'},
'width': '130px'},
]
)
'''),
rc.Markdown(
'''
### Overriding a Single Column's Width
You can set the width of all of the columns with `style_data` and
override a single column with `style_cell_conditional`.
'''
),
Display(
'''
result = dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_data={
'width': '100px',
'maxWidth': '100px',
'minWidth': '100px',
},
style_cell_conditional=[
{
'if': {'column_id': 'Region'},
'width': '250px'
},
],
style_table={
'overflowX': 'auto'
}
)
'''),
]
)
|
"""Defines the class for a node health check task"""
from __future__ import unicode_literals
import datetime
from django.conf import settings
from job.tasks.base_task import AtomicCounter
from job.tasks.node_task import NodeTask
from node.resources.node_resources import NodeResources
from node.resources.resource import Cpus, Mem
HEALTH_TASK_ID_PREFIX = 'scale_health'
COUNTER = AtomicCounter()
class HealthTask(NodeTask):
"""Represents a task that performs a health check on a node. This class is thread-safe.
"""
BAD_DAEMON_CODE = 2
LOW_DOCKER_SPACE_CODE = 3
BAD_LOGSTASH_CODE = 4
def __init__(self, framework_id, agent_id):
"""Constructor
:param framework_id: The framework ID
:type framework_id: string
:param agent_id: The agent ID
:type agent_id: string
"""
task_id = '%s_%s_%d' % (HEALTH_TASK_ID_PREFIX, framework_id, COUNTER.get_next())
super(HealthTask, self).__init__(task_id, 'Scale Health Check', agent_id)
self._uses_docker = False
self._docker_image = None
self._docker_params = []
self._is_docker_privileged = False
self._running_timeout_threshold = datetime.timedelta(minutes=15)
health_check_commands = []
# Check if docker version works (indicates if daemon is working)
bad_daemon_check = 'docker version'
bad_daemon_check = 'timeout -s SIGKILL 10s %s' % bad_daemon_check # docker version has 10 seconds to succeed
bad_daemon_check = '%s; if [[ $? != 0 ]]; then exit %d; fi' % (bad_daemon_check, HealthTask.BAD_DAEMON_CODE)
health_check_commands.append(bad_daemon_check)
# Check if docker ps works (also indicates if daemon is working)
docker_ps_check = 'docker ps'
docker_ps_check = 'timeout -s SIGKILL 10s %s' % docker_ps_check # docker ps has 10 seconds to succeed
docker_ps_check = '%s; if [[ $? != 0 ]]; then exit %d; fi' % (docker_ps_check, HealthTask.BAD_DAEMON_CODE)
health_check_commands.append(docker_ps_check)
# Check if Docker disk space is below 1 GiB (assumes /var/lib/docker, ignores check otherwise)
get_disk_space = 'df --output=avail /var/lib/docker | tail -1'
test_disk_space = 'test `%s` -lt 1048576; if [[ $? == 0 ]]; then exit %d; fi'
test_disk_space = test_disk_space % (get_disk_space, HealthTask.LOW_DOCKER_SPACE_CODE)
low_docker_space_check = 'if [[ -d /var/lib/docker ]]; then %s; fi' % test_disk_space
health_check_commands.append(low_docker_space_check)
# Check to ensure that fluentd is reachable
if settings.LOGGING_HEALTH_ADDRESS:
logging_check = 'timeout -s SIGKILL 5s curl %s; if [[ $? != 0 ]]; then exit %d; fi'
logging_check = logging_check % (settings.LOGGING_HEALTH_ADDRESS, HealthTask.BAD_LOGSTASH_CODE)
health_check_commands.append(logging_check)
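# Chain all checks into a single shell command; a failing check exits early with its specific error code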
self._command = ' && '.join(health_check_commands)
# Node task properties
self.task_type = 'health-check'
self.title = 'Node Health Check'
self.description = 'Checks the health status of the node'
def get_resources(self):
"""See :meth:`job.tasks.base_task.Task.get_resources`
"""
return NodeResources([Cpus(0.1), Mem(32.0)])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-13 02:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_label'),
]
operations = [
migrations.AlterField(
model_name='label',
name='adolescent_score',
field=models.DecimalField(decimal_places=2, default=0, max_digits=4),
),
migrations.AlterField(
model_name='label',
name='geriatric_score',
field=models.DecimalField(decimal_places=2, default=0, max_digits=4),
),
migrations.AlterField(
model_name='label',
name='mnchn_score',
field=models.DecimalField(decimal_places=2, default=0, max_digits=4),
),
migrations.AlterField(
model_name='label',
name='spec_pop_score',
field=models.DecimalField(decimal_places=2, default=0, max_digits=4),
),
]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
def bilinear_interp_mkldnn_np(input,
out_h,
out_w,
out_size=None,
actual_shape=None,
data_layout='NCHW'):
"""bilinear interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
batch_size, channel, in_h, in_w = input.shape
out = np.zeros((batch_size, channel, out_h, out_w))
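# Half-pixel-centers mapping: each output pixel samples source coordinate (dst + 0.5) * (in/out) - 0.5,
# then blends the four floor/ceil neighbors with vertical weight Wh and horizontal weight Ww.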
for oh in range(out_h):
h0 = int(math.floor((oh + 0.5) * in_h / out_h - 0.5))
h1 = int(math.ceil((oh + 0.5) * in_h / out_h - 0.5))
h0 = max(h0, 0)
h1 = min(h1, in_h - 1)
Wh = (oh + 0.5) * in_h / out_h - 0.5 - h0
for ow in range(out_w):
w0 = int(math.floor((ow + 0.5) * in_w / out_w - 0.5))
w1 = int(math.ceil((ow + 0.5) * in_w / out_w - 0.5))
w0 = max(w0, 0)
w1 = min(w1, in_w - 1)
Ww = (ow + 0.5) * in_w / out_w - 0.5 - w0
input_h0_w0 = input[:, :, h0, w0]
input_h1_w0 = input[:, :, h1, w0]
input_h0_w1 = input[:, :, h0, w1]
input_h1_w1 = input[:, :, h1, w1]
out[:, :, oh,
ow] = input_h0_w0 * (1 - Wh) * (1 - Ww) + input_h1_w0 * Wh * (
1 - Ww) + input_h0_w1 * (1 -
Wh) * Ww + input_h1_w1 * Wh * Ww
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(input.dtype)
@skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.")
class TestBilinearInterpMKLDNNOp(OpTest):
def init_test_case(self):
pass
def setUp(self):
self.op_type = "bilinear_interp_v2"
self.interp_method = 'bilinear'
self._cpu_only = True
self.use_mkldnn = True
self.input_shape = [1, 1, 2, 2]
self.data_layout = 'NCHW'
# priority: actual_shape > out_size > scale > out_h & out_w
self.out_h = 1
self.out_w = 1
self.scale = 2.0
self.out_size = None
self.actual_shape = None
self.init_test_case()
input_np = np.random.random(self.input_shape).astype("float32")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
scale_h = float(self.scale)
scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = self.scale[0]
scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
if scale_h > 0 and scale_w > 0:
out_h = int(in_h * scale_h)
out_w = int(in_w * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_mkldnn_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.data_layout)
if isinstance(self.scale, float):
self.scale = [self.scale, self.scale]
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'interp_method': self.interp_method,
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'data_layout': self.data_layout,
'use_mkldnn': self.use_mkldnn
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 2, 32, 16]
self.out_h = 27
self.out_w = 49
self.scale = [2.0, 3.0]
self.data_layout = 'NHWC'
class TestBilinearNeighborInterpMKLDNNCase2(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
class TestBilinearNeighborInterpCase3(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 128
self.scale = [0.1, 0.05]
class TestBilinearNeighborInterpCase4(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = [13.0, 15.0]
self.out_size = np.array([65, 129]).astype("int32")
class TestBilinearNeighborInterpCase5(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 9, 6]
self.out_h = 12
self.out_w = 12
self.out_size = np.array([13, 13]).astype("int32")
class TestBilinearNeighborInterpCase6(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 1.0
self.out_size = np.array([65, 129]).astype("int32")
class TestBilinearNeighborInterpSame(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 2.0
self.out_size = np.array([65, 129]).astype("int32")
if __name__ == "__main__":
from paddle import enable_static
enable_static()
unittest.main()
|
"""
Generate Dataset
1. Converting video to frames
2. Extracting features
3. Getting change points
4. User Summary ( for evaluation )
"""
import os, sys
sys.path.append('../')
from networks.CNN import ResNet
from utils.KTS.cpd_auto import cpd_auto
from tqdm import tqdm
import math
import cv2
import numpy as np
import h5py
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', type=str, required=True, help="path of the video file whose h5 needs to be generated.")
parser.add_argument('--h5_gen', type=str, required=True, help="path to the generated h5 file")
args = parser.parse_args()
class Generate_Dataset:
def __init__(self, video_path, save_path):
self.resnet = ResNet()
self.dataset = {}
self.video_list = []
self.video_path = ''
self.frame_root_path = './frames'
self.h5_file = h5py.File(save_path, 'w')
self._set_video_list(video_path)
print('Video path : {} H5 autogen path : {}'.format(video_path, save_path))
def _set_video_list(self, video_path):
if os.path.isdir(video_path):
self.video_path = video_path
self.video_list = os.listdir(video_path)
self.video_list.sort()
else:
self.video_path = ''
self.video_list.append(video_path)
for idx, file_name in enumerate(self.video_list):
self.dataset['video_{}'.format(idx+1)] = {}
self.h5_file.create_group('video_{}'.format(idx+1))
def _extract_feature(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224))
res_pool5 = self.resnet(frame)
frame_feat = res_pool5.cpu().data.numpy().flatten()
return frame_feat
def _get_change_points(self, video_feat, n_frame, fps):
print('n_frame {} fps {}'.format(n_frame, fps))
n = n_frame / math.ceil(fps)
m = int(math.ceil(n/2.0))
K = np.dot(video_feat, video_feat.T)
change_points, _ = cpd_auto(K, m, 1)
change_points = np.concatenate(([0], change_points, [n_frame-1]))
temp_change_points = []
for idx in range(len(change_points)-1):
segment = [change_points[idx], change_points[idx+1]-1]
if idx == len(change_points)-2:
segment = [change_points[idx], change_points[idx+1]]
temp_change_points.append(segment)
change_points = np.array(list(temp_change_points))
temp_n_frame_per_seg = []
for change_points_idx in range(len(change_points)):
n_frame = change_points[change_points_idx][1] - change_points[change_points_idx][0]
temp_n_frame_per_seg.append(n_frame)
n_frame_per_seg = np.array(list(temp_n_frame_per_seg))
return change_points, n_frame_per_seg
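    # Note: K above is the frame-feature Gram (kernel) matrix consumed by the KTS
    # change-point detector; each change_points row is an inclusive [start, end]
    # frame range and n_frame_per_seg holds the corresponding segment lengths.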
# TODO : save dataset
def _save_dataset(self):
pass
def generate_dataset(self):
for video_idx, video_filename in enumerate(tqdm(self.video_list)):
video_path = video_filename
if os.path.isdir(self.video_path):
video_path = os.path.join(self.video_path, video_filename)
video_basename = os.path.basename(video_path).split('.')[0]
            if not os.path.exists(os.path.join(self.frame_root_path, video_basename)):
                os.makedirs(os.path.join(self.frame_root_path, video_basename))
video_capture = cv2.VideoCapture(video_path)
fps = video_capture.get(cv2.CAP_PROP_FPS)
n_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
#frame_list = []
picks = []
video_feat = None
video_feat_for_train = None
for frame_idx in tqdm(range(n_frames-1)):
success, frame = video_capture.read()
if success:
frame_feat = self._extract_feature(frame)
if frame_idx % 15 == 0:
picks.append(frame_idx)
if video_feat_for_train is None:
video_feat_for_train = frame_feat
else:
video_feat_for_train = np.vstack((video_feat_for_train, frame_feat))
if video_feat is None:
video_feat = frame_feat
else:
video_feat = np.vstack((video_feat, frame_feat))
img_filename = "{}.jpg".format(str(frame_idx).zfill(5))
cv2.imwrite(os.path.join(self.frame_root_path, video_basename, img_filename), frame)
else:
break
video_capture.release()
change_points, n_frame_per_seg = self._get_change_points(video_feat, n_frames, fps)
# self.dataset['video_{}'.format(video_idx+1)]['frames'] = list(frame_list)
# self.dataset['video_{}'.format(video_idx+1)]['features'] = list(video_feat)
# self.dataset['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))
# self.dataset['video_{}'.format(video_idx+1)]['n_frames'] = n_frames
# self.dataset['video_{}'.format(video_idx+1)]['fps'] = fps
# self.dataset['video_{}'.format(video_idx+1)]['change_points'] = change_points
# self.dataset['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg
self.h5_file['video_{}'.format(video_idx+1)]['features'] = list(video_feat_for_train)
self.h5_file['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))
self.h5_file['video_{}'.format(video_idx+1)]['n_frames'] = n_frames
self.h5_file['video_{}'.format(video_idx+1)]['fps'] = fps
self.h5_file['video_{}'.format(video_idx+1)]['change_points'] = change_points
self.h5_file['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg
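            # resulting h5 layout per video: features (every 15th frame), picks,
            # n_frames, fps, change_points, n_frame_per_seg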
if __name__ == "__main__":
gen = Generate_Dataset(args.path, args.h5_gen)
gen.generate_dataset()
gen.h5_file.close()
|
DEBUG = True
SECRET_KEY = 'GUOLABHOMECEH8SNH20A0MU9060USDWEXVSYCL515UWCN409TLBKHCTQDIFEH36CE' # make sure to change this
|
import pytest
from discopy import Word
from discopy.rigid import Diagram, Id
from lambeq.core.types import AtomicType
from lambeq.reader import cups_reader, spiders_reader
@pytest.fixture
def sentence():
return 'This is a sentence'
@pytest.fixture
def words(sentence):
words = sentence.split()
assert len(words) == 4
return words
def test_spiders_reader(sentence, words):
S = AtomicType.SENTENCE
combining_diagram = spiders_reader.combining_diagram
assert combining_diagram.dom == S @ S and combining_diagram.cod == S
expected_diagram = (Diagram.tensor(*(Word(word, S) for word in words)) >>
combining_diagram @ Id(S @ S) >>
combining_diagram @ Id(S) >>
combining_diagram)
assert (spiders_reader.sentences2diagrams([sentence])[0] ==
spiders_reader.sentence2diagram(sentence) == expected_diagram)
def test_other_readers(sentence):
# since all the readers share behaviour, just test that they don't fail
assert cups_reader.sentence2diagram(sentence)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NetOneHot(nn.Module):
def __init__(self, output_dim, annotator_num, text_embedding_dim=768, **kwargs):
super().__init__()
self.text_embedding_dim = text_embedding_dim
self.fc1 = nn.Linear(text_embedding_dim + annotator_num, output_dim)
self.worker_onehots = nn.parameter.Parameter(torch.eye(annotator_num), requires_grad=False)
def forward(self, features):
x = features['embeddings']
annotator_ids = features['annotator_ids']
worker_onehots = self.worker_onehots[annotator_ids]
x = x.view(-1, self.text_embedding_dim)
x = self.fc1(torch.cat([x, worker_onehots], dim=1))
return x
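# A minimal usage sketch (shapes are illustrative, not from the original):
# >>> net = NetOneHot(output_dim=2, annotator_num=5)
# >>> batch = {'embeddings': torch.randn(3, 768),
# ...          'annotator_ids': torch.tensor([0, 4, 2])}
# >>> net(batch).shape
# torch.Size([3, 2])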
|
import requests, boto3, asyncio, math
async def upload_s3_async(init, end, i, url, bucket, key, s3, mpu):
print(f'Upload {i}')
    # download the file chunk
r = requests.get(url, headers={'Range':f'bytes={init}-{end}'})
    # upload to S3
return s3.upload_part(Bucket=bucket, Key=key, PartNumber=i, UploadId=mpu['UploadId'], Body=r.content)
async def upload_s3(url, bucket, key, s3, mpu):
    # split the file into 50 MB chunks
b = 52428800
t = int(requests.head(url).headers['Content-Length'])
parts, tasks = [], []
    for i in range(math.ceil(t/b)):
        init = i*b if i == 0 else i*b + 1
        end = min((i+1)*b, t)
        # merge a tail smaller than ~6 MiB into this part (S3 minimum part size)
        if t-end < 6291456: end = t
        tasks.append(asyncio.ensure_future(upload_s3_async(init, end, i+1, url, bucket, key, s3, mpu)))
        # stop once the final byte is scheduled, to avoid overlapping extra parts
        if end == t: break
    # run the collection of the file chunks
return await asyncio.gather(*tasks)
def lambda_handler(event, context):
    # URL passed in via the event parameter
url = event['url']
    # name of the bucket for the upload
bucket = 'dataops-impacta-dados-profernandosousa'
    # name of the file to be saved in the bucket
key = f'input/vacinas_{event["uf"]}.csv'
    # connection to S3
s3 = boto3.client('s3')
    # start the multipart upload
mpu = s3.create_multipart_upload(Bucket=bucket, Key=key)
    # download the file in parts
parts = asyncio.get_event_loop().run_until_complete(upload_s3(url, bucket, key, s3, mpu))
    # finalize the upload
part_info = {'Parts':
[{'PartNumber': i+1, 'ETag': p['ETag']} for i, p in enumerate(parts)]
}
s3.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=mpu['UploadId'], MultipartUpload=part_info)
return True
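# Hedged invocation sketch (field values are illustrative only):
#   lambda_handler({'url': 'https://example.org/vacinas.csv', 'uf': 'SP'}, None)
# S3 requires every part except the last to be at least 5 MiB, which is why a
# tail smaller than ~6 MiB is merged into the preceding part above.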
|
from django.shortcuts import render
import datetime
from .models import EnergyUsage, Event, Laptop
from django.db.models import Avg, Count, Max, Min, Sum
from django.http import HttpResponse
from django.utils import timezone
from django.core import serializers
from .EpicesParser import EpicesParser
from django.utils.dateparse import parse_date
from django.views.decorators.csrf import csrf_exempt
import pandas as pd
import requests, zipfile, io
import json
import pytz
import numpy as np
debug = True
old_usages = []
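# getMaxChargeLevelAt: within the hour containing `timestamp`, sum charge_level
# per username and return the largest per-user total (0 when there are no events).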
def getMaxChargeLevelAt(timestamp):
min_frame = timestamp.replace(minute=0, second=0, microsecond=0)
max_frame = timestamp.replace(minute=59, second=59, microsecond=0)
events = Event.objects.exclude(timestamp__lt=min_frame)
events = events.exclude(timestamp__gt=max_frame)
if len(events):
total_charge_by_user = list(events.values('username').annotate(Sum('charge_level')).aggregate(Max('charge_level__sum')).values())
return total_charge_by_user[0]
else:
return 0
def index(request):
return render(request, 'CairnFORM/index.html')
#timestamp = timezone.now()
#return getUsersNearFromTo(timestamp)
def sedyl(request):
frame = timezone.now()
min_frame = frame.replace(hour=0, minute=0, second=0)
max_frame = frame.replace(hour=23, minute=59, second=59)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
#if len(usages) == 0 :
# generateEnergyUsage(request)
# return getEnergyUsageFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2)
return render(request, 'sedyl/index.html', locals())
@csrf_exempt
def script(request):
if request.method == 'GET':
return HttpResponse("ERROR")
elif request.method == 'POST':
return HttpResponse("OK")
def study(request):
frame = timezone.now()
min_frame = frame.replace(hour=0, minute=0, second=0)
max_frame = frame.replace(hour=23, minute=59, second=59)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
events = Event.objects
events = events.exclude(timestamp__lt=min_frame)
events = events.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
events = events.order_by('timestamp')
return render(request, 'CairnFORM/etude.html', locals())
#timestamp = timezone.now()
#return getUsersNearFromTo(timestamp)
def vis(request):
frame = timezone.now()
min_frame = frame.replace(hour=0, minute=0, second=0)
max_frame = frame.replace(hour=23, minute=59, second=59)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
return render(request, 'CairnFORM/vis.html', locals())
def monitor1(request, hour1, hour2):
global debug
now_frame = timezone.now() + datetime.timedelta(hours=2)
min_frame = now_frame.replace(hour=8, minute=0, second=0, microsecond = 0)
max_frame = now_frame.replace(hour=17, minute=0, second=0, microsecond = 0)
#print(min_frame," ",now_frame," ",max_frame)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
min_frame = now_frame.replace(hour=int(hour1), minute=0, second=0, microsecond = 0)
max_frame = now_frame.replace(hour=int(hour2), minute=0, second=0, microsecond = 0)
if debug : print("monitor1 : ", usages)
#return HttpResponse(serializers.serialize("json", filter(usages)))
return render(request, 'CairnFORM/monitor.html', locals())
def monitor2(request, timestamp1, hour1, hour2):
now_frame = timezone.now() + datetime.timedelta(hours=2)
min_frame = datetime.datetime.strptime(timestamp1, "%Y-%m-%d").replace(hour=int(hour1), minute=0, second=0)
max_frame = datetime.datetime.strptime(timestamp1, "%Y-%m-%d").replace(hour=int(hour2), minute=0, second=0)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
return render(request, 'CairnFORM/monitor_past.html', locals())
def studyAt(request, timestamp):
frame = datetime.datetime.strptime(timestamp, "%Y-%m-%d")
min_frame = frame.replace(hour=0, minute=0, second=0)
max_frame = frame.replace(hour=23, minute=59, second=59)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
events = Event.objects
events = events.exclude(timestamp__lt=min_frame)
events = events.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
events = events.order_by('timestamp')
#if len(usages) == 0 :
# generateEnergyUsage(request)
# return getEnergyUsageFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2)
return render(request, 'CairnFORM/etude.html', locals())
def studyFromTo(request, timestamp1, timestamp2):
min_frame = datetime.datetime.strptime(timestamp1, "%Y-%m-%d")
max_frame = datetime.datetime.strptime(timestamp2, "%Y-%m-%d")
max_frame = max_frame.replace(hour=23, minute=59, second=59)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
events = Event.objects
events = events.exclude(timestamp__lt=min_frame)
events = events.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
events = events.order_by('timestamp')
#if len(usages) == 0 :
# generateEnergyUsage(request)
# return getEnergyUsageFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2)
return render(request, 'CairnFORM/etude.html', locals())
def jsonFromTo(request, timestamp1, timestamp2):
min_frame = datetime.datetime.strptime(timestamp1, "%Y-%m-%d")
max_frame = datetime.datetime.strptime(timestamp2, "%Y-%m-%d")
max_frame = max_frame.replace(hour=23, minute=59, second=59)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
events = Event.objects
events = events.exclude(timestamp__lt=min_frame)
events = events.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
events = events.order_by('timestamp')
#if len(usages) == 0 :
# generateEnergyUsage(request)
# return getEnergyUsageFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2)
return HttpResponse(serializers.serialize("json", events))
def users(request):
events = Event.objects.all()
return render(request, 'CairnFORM/users.html', locals())
def generateEnergyUsage(request):
epicesParser = EpicesParser('6bm-cRpuq5X3HwdphKU5x8lInv4')
epicesParser.update()
times = epicesParser.times
productions = epicesParser.forecasts
for timestamp, production in zip(times, productions):
        try:
            usage = EnergyUsage.objects.get(timestamp=timestamp)
            usage.raw_production = production
            usage.save()
        except EnergyUsage.DoesNotExist:
            usage = EnergyUsage(timestamp=timestamp, raw_production=production, production=0, consumption=0)
            usage.save()
return HttpResponse("Done.")
def getEnergyUsageFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2):
global debug
now_frame = timezone.now() + datetime.timedelta(hours=2)
now_frame = now_frame.replace(year=int(year1), month=int(month1), day=int(day1))
min_frame = now_frame.replace(year=int(year1), month=int(month1), day=int(day1), hour=8, minute=0, second=0, microsecond=0)
max_frame = now_frame.replace(year=int(year2), month=int(month2), day=int(day2), hour=17, minute=0, second=0, microsecond=0)
usages = EnergyUsage.objects
usages = usages.exclude(timestamp__lt=min_frame)
usages = usages.exclude(timestamp__gt=max_frame)
usages = usages.order_by('timestamp')
if debug : print("getEnergyUsageFromTo : ", usages)
return HttpResponse(serializers.serialize("json", usages))
def getEventFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2):
min_frame = timezone.now()
max_frame = timezone.now()
min_frame = min_frame.replace(year=int(year1), month=int(month1), day=int(day1), hour=int(hour1), minute=int(minute1), second=int(second1), microsecond=0)
max_frame = max_frame.replace(year=int(year2), month=int(month2), day=int(day2), hour=int(hour2), minute=int(minute2), second=int(second2), microsecond=0)
events = Event.objects
events = events.exclude(timestamp__lt=min_frame)
events = events.exclude(timestamp__gt=max_frame)
events = events.order_by('timestamp')
if len(events) > 0 :
return HttpResponse(serializers.serialize("json", events))
else :
return HttpResponse("Empty.")
def getEventByUsername(request, username):
events = Event.objects.filter(username=username)
events = events.order_by('timestamp')
if len(events) > 0 :
return HttpResponse(serializers.serialize("json", events))
else :
return HttpResponse("Empty")
def operate(start_timestamp):
start_timestamp = start_timestamp.replace(minute=0, second=0, microsecond=0)
now_timestamp = timezone.now()
while start_timestamp < now_timestamp:
consumption = min(1., getMaxChargeLevelAt(start_timestamp)/50)
        try:
            usage = EnergyUsage.objects.get(timestamp=start_timestamp)
            usage.consumption = consumption
            usage.save()
        except EnergyUsage.DoesNotExist:
            usage = EnergyUsage(timestamp=start_timestamp, raw_production=0, production=0, consumption=consumption)
            usage.save()
start_timestamp += datetime.timedelta(hours=1)
@csrf_exempt
def post(request):
if(request.method == "POST"):
try:
jsonEvent = json.loads(request.body.decode('utf-8'))
laptop = Laptop(
timestamp = jsonEvent['Timestamp'],
username = jsonEvent['Username'],
plugged = jsonEvent['Plugged'],
near = jsonEvent['Near'],
batteryLevel = jsonEvent['BatteryLevel'],
chargeLevel = jsonEvent['ChargeLevel'],
dischargeLevel = jsonEvent['DischargeLevel'],
availability = jsonEvent['Availability'],
batteryRechargeTime = jsonEvent['BatteryRechargeTime'],
batteryStatus = jsonEvent['BatteryStatus'],
caption = jsonEvent['Caption'],
chemistry = jsonEvent['Chemistry'],
configManagerErrorCode = jsonEvent['ConfigManagerErrorCode'],
configManagerUserConfig = jsonEvent['ConfigManagerUserConfig'],
creationClassName = jsonEvent['CreationClassName'],
description = jsonEvent['Description'],
designCapacity = jsonEvent['DesignCapacity'],
designVoltage = jsonEvent['DesignVoltage'],
deviceID = jsonEvent['DeviceID'],
errorCleared = jsonEvent['ErrorCleared'],
errorDescription = jsonEvent['ErrorDescription'],
estimatedChargeRemaining = jsonEvent['EstimatedChargeRemaining'],
estimatedRunTime = jsonEvent['EstimatedRunTime'],
expectedBatteryLife = jsonEvent['ExpectedBatteryLife'],
expectedLife = jsonEvent['ExpectedLife'],
fullChargeCapacity = jsonEvent['FullChargeCapacity'],
lastErrorCode = jsonEvent['LastErrorCode'],
maxRechargeTime = jsonEvent['MaxRechargeTime'],
name = jsonEvent['Name'],
PNPDeviceID = jsonEvent['PNPDeviceID'],
powerManagementSupported = jsonEvent['PowerManagementSupported'],
smartBatteryVersion = jsonEvent['SmartBatteryVersion'],
status = jsonEvent['Status'],
statusInfo = jsonEvent['StatusInfo'],
systemCreationClassName = jsonEvent['SystemCreationClassName'],
systemName = jsonEvent['SystemName'],
timeOnBattery = jsonEvent['TimeOnBattery'],
timeToFullCharge = jsonEvent['TimeToFullCharge']
)
laptop.save()
return HttpResponse('OK')
except Exception as e:
print(e)
return HttpResponse('NOT OK')
return HttpResponse('NOT OK')
def getLaptopByUsername(request, username):
laptops = Laptop.objects.filter(username=username)
laptops = laptops.order_by('timestamp')
if len(laptops) > 0 :
return HttpResponse(serializers.serialize("json", laptops))
else :
return HttpResponse("Empty")
def getLaptopFromTo(request, year1, month1, day1, hour1, minute1, second1, year2, month2, day2, hour2, minute2, second2):
min_frame = timezone.now()
max_frame = timezone.now()
min_frame = min_frame.replace(year=int(year1), month=int(month1), day=int(day1), hour=int(hour1), minute=int(minute1), second=int(second1), microsecond=0)
max_frame = max_frame.replace(year=int(year2), month=int(month2), day=int(day2), hour=int(hour2), minute=int(minute2), second=int(second2), microsecond=0)
laptops = Laptop.objects
laptops = laptops.exclude(timestamp__lt=min_frame)
laptops = laptops.exclude(timestamp__gt=max_frame)
laptops = laptops.order_by('timestamp')
if len(laptops) > 0 :
return HttpResponse(serializers.serialize("json", laptops))
else :
return HttpResponse("Empty.")
#def post(request, username, near, year, month, day, hour, minute, second, battery_level, charge_level, discharge_level, plugged):
# timestamp = timezone.now()
# timestamp=timestamp.replace(year=int(year), month=int(month), day=int(day), hour=int(hour), minute=int(minute), second=int(second))
# event = Event(timestamp=timestamp, username=username, near=near, battery_level=battery_level, charge_level=charge_level, discharge_level=discharge_level, plugged=plugged)
# event.save()
# if int(near) == 1 and int(charge_level) > 0:
# new_timestamp = timestamp.replace(minute=0, second=0, microsecond=0)
# consumption = min(1., getMaxChargeLevelAt(new_timestamp)/50.)
# try :
# usage = EnergyUsage.objects.get(timestamp=new_timestamp)
# usage.consumption=consumption
# usage.save()
# except :
# usage = EnergyUsage(timestamp=new_timestamp, raw_production = 0, production=0, consumption=consumption)
# usage.save()
# return HttpResponse('Ok')
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import os, time, json
from openml.apiconnector import APIConnector
from scipy.io.arff import loadarff
import numpy as np
import matplotlib.pylab as plt
from SGDDataset import SGDDataSet
def get_dataset(did):
home_dir = os.path.expanduser("~")
openml_dir = os.path.join(home_dir, ".openml")
cache_dir = os.path.join(openml_dir, "cache")
    with open(os.path.join(openml_dir, "apikey.txt"), 'r') as fh:
        key = fh.readline().rstrip('\n')
openml = APIConnector(cache_directory = cache_dir, apikey = key)
dataset = openml.download_dataset(did)
# print('Data-set name: %s'%dataset.name)
# print(dataset.description)
_, meta = loadarff(dataset.data_file)
target_attribute = dataset.default_target_attribute
target_attribute_names = meta[target_attribute][1]
X, y, attribute_names = dataset.get_dataset(target = target_attribute, return_attribute_names = True)
return X, y, attribute_names, target_attribute_names
if __name__ == '__main__':
## get dataset - MNIST
X, y, attribute_names, target_attribute_names = get_dataset(554)
# vectorize y
vec_y = np.zeros((y.shape[0], 10), dtype = np.int32)
for vec_y_i, y_i in zip(vec_y, y):
vec_y_i[y_i] = 1
## 60,000 as training data, 10,000 as test data
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = vec_y[:60000], vec_y[60000:]
train_data = SGDDataSet(X_train, y_train, dtype = tf.float32)
BATCH_SIZE = 1000
'''
A placeholder, a value that we'll input when we ask TensorFlow to run a computation.
Here None means that a dimension can be of any length.
'''
x = tf.placeholder(tf.float32, [None, 784])
y_true = tf.placeholder(tf.float32, [None, 10])
'''
A Variable is a modifiable tensor that lives in TensorFlow's graph of interacting operations.
It can be used and even modified by the computation. For machine learning applications,
one generally has the model parameters be Variables.
'''
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y_pred = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y_pred), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)
'''
add an operation to initialize the variables we created
'''
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
batch_xs, batch_ys = train_data.next_batch(BATCH_SIZE)
sess.run(train_step, feed_dict={x: batch_xs, y_true: batch_ys})
'''
Evaluating Our Model
'''
correct_prediction = tf.equal(tf.argmax(y_pred,1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: X_test, y_true: y_test}))
|
"""Test configuration fragment replacement."""
import pytest
from dictator.validators.replace import (
FragmentReplace,
AutoFragmentReplace,
FragmentError,
)
from dictator.validators.lists import SubListValidator
from dictator.config import validate_config
def test_fragment_replace():
"""Test manual fragment replacement."""
TEST_CONFIG = {
"my_key": "my_value",
"my_other_key": "REPLACETHIS_dontreplace",
}
TEST_REQ = {
"my_key": str,
"my_other_key": FragmentReplace({"REPLACETHIS": "my_key"}),
}
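    # expected (inferred from the mapping above): the "REPLACETHIS" fragment in
    # my_other_key is replaced by my_key's value -> "my_value_dontreplace"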
return validate_config(TEST_CONFIG, TEST_REQ)
def test_auto_fragment_replace():
"""Test automatic fragment replacement."""
TEST_CONFIG = {
"my_key": "my_value",
"my_other_key": "${my_key}_dontreplace",
}
TEST_REQ = {"my_key": str, "my_other_key": AutoFragmentReplace()}
return validate_config(TEST_CONFIG, TEST_REQ)
def test_parent_fragment_replace():
"""Test auto replacement with value from parent config."""
TEST_CONFIG = {
"my_key": "my_value",
"other_keys": [{"my_other_key": "${..my_key}_blabla"}],
}
TEST_REQ = {
"my_key": str,
"other_keys": SubListValidator(
{"my_other_key": AutoFragmentReplace()}
),
}
return validate_config(TEST_CONFIG, TEST_REQ)
def test_toplevel_fragment_replace():
"""Test auto replacement with value from toplevel config."""
TEST_CONFIG = {
"my_key": "my_value",
"other_keys": [{"my_other_key": "${:my_key}_blabla"}],
}
TEST_REQ = {
"my_key": str,
"other_keys": SubListValidator(
{"my_other_key": AutoFragmentReplace()}
),
}
return validate_config(TEST_CONFIG, TEST_REQ)
def test_fragment_replace_fail():
"""Test fragment replacement failure."""
TEST_CONFIG = {
"my_key": "my_value",
"my_other_key": "${my_key2}_dontreplace",
}
TEST_REQ = {"my_key": str, "my_other_key": AutoFragmentReplace()}
with pytest.raises(FragmentError):
validate_config(TEST_CONFIG, TEST_REQ)
return validate_config(TEST_CONFIG, TEST_REQ, my_key2="bla")
def test_fragment_key():
"""Test fragments which access keys."""
TEST_CONFIG = {
"my_key": {"foo": "bar"},
"my_other_key": "${my_key::foo}_dontreplace",
}
TEST_CONFIG_2 = {
"my_key": {"foo": "bar"},
"my_other_key": "${:my_key::foo}_dontreplace",
}
TEST_REQ = {"my_key": dict, "my_other_key": AutoFragmentReplace()}
validate_config(TEST_CONFIG, TEST_REQ)
return validate_config(TEST_CONFIG_2, TEST_REQ)
if __name__ == "__main__":
print(test_fragment_replace())
print(test_auto_fragment_replace())
print(test_parent_fragment_replace())
print(test_fragment_replace_fail())
print(test_toplevel_fragment_replace())
print(test_fragment_key())
|
import os
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
import SimpleITK as sitk
def dir_selector(folder_path='.'):
dirnames = [d for d in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path, d))]
selected_folder = st.sidebar.selectbox('Select a folder', dirnames)
if selected_folder is None:
return None
return os.path.join(folder_path, selected_folder)
def plot_slice(vol, slice_ix):
fig, ax = plt.subplots()
plt.axis('off')
selected_slice = vol[slice_ix, :, :]
ax.imshow(selected_slice, origin='lower', cmap='gray')
return fig
st.sidebar.title('DieSitCom')
dirname = dir_selector()
if dirname is not None:
try:
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(dirname)
reader.SetFileNames(dicom_names)
reader.LoadPrivateTagsOn()
reader.MetaDataDictionaryArrayUpdateOn()
data = reader.Execute()
img = sitk.GetArrayViewFromImage(data)
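        # SimpleITK's GetArrayViewFromImage returns a (slices, rows, cols) view,
        # so axis 0 is the slice dimension indexed by plot_slice above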
n_slices = img.shape[0]
        slice_ix = st.sidebar.slider('Slice', 0, n_slices - 1, n_slices // 2)
output = st.sidebar.radio('Output', ['Image', 'Metadata'], index=0)
if output == 'Image':
fig = plot_slice(img, slice_ix)
plot = st.pyplot(fig)
else:
metadata = dict()
for k in reader.GetMetaDataKeys(slice_ix):
metadata[k] = reader.GetMetaData(slice_ix, k)
df = pd.DataFrame.from_dict(metadata, orient='index', columns=['Value'])
st.dataframe(df)
except RuntimeError:
st.text('This does not look like a DICOM folder!')
|
from pathlib import Path
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.urls import reverse
from .forms import CommentForm
from .models import User, Webpage, Template, Comment
USERNAME = 'haha69420'
PASSWORD = 'foobar < xyzzy'
EMAIL = 'foo@example.com'
ADMIN_USERNAME = 'admin'
ADMIN_PASSWORD = 'hard123passwd'
class UserRegisterTest(TestCase):
def setUp(self):
# we create an account with the `USERNAME` and `PASSWORD` vars
params = {
'username': USERNAME,
'email': EMAIL,
'password1': PASSWORD,
'password2': PASSWORD
}
self.client.post(reverse('register'), params)
self.client.login(username=USERNAME, password=PASSWORD)
def test_user_exists(self):
self.assertEqual(User.objects.count(), 1)
user = User.objects.get(pk=1)
self.assertEqual(user.username, USERNAME)
self.assertEqual(user.email, EMAIL)
def test_register_and_home(self):
"""
Assumptions (AKA what this integration test tests):
- `/user/register` exists.
- ^ takes certain params.
- ^ also logs us in.
- `/` exists.
- ^ tells us our username.
- We are using django's built-in auth system.
"""
res = self.client.get('/').content
self.assertIn(USERNAME.encode(), res, 'Our username does not appear in the home view.')
self.client.logout()
res = self.client.get('/').content
self.assertNotIn(USERNAME.encode(), res, 'Our username was already in the home view.')
class UserViewsTest(TestCase):
fixtures = ['users.yaml', 'webpages.yaml', 'templates.yaml']
def test_user_detail(self):
user = User.objects.get(pk=1)
res = self.client.get(reverse('user-detail', kwargs={'pk': 1}))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['viewed_user'], user)
self.assertIsInstance(res.context['user_pages'][0], Webpage)
def test_user_update(self):
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.post(reverse('user-update', kwargs={'pk': 1}), {
'first_name': 'john', 'last_name': 'lemon', 'email': 'jl@sth.com'
})
self.assertEqual(res.status_code, 403)
res = self.client.post(reverse('user-update', kwargs={'pk': 2}), {
'first_name': 'john', 'last_name': 'lemon', 'email': 'jl@sth.com'
})
self.assertRedirects(res, '/')
user = User.objects.get(pk=2)
self.assertEqual(user.last_name, 'lemon')
def test_user_delete(self):
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.post(reverse('user-delete', kwargs={'pk': 2}))
self.assertRedirects(res, '/')
self.assertEqual(User.objects.count(), 1)
class WebpageViewsTest(TestCase):
fixtures = ['users.yaml', 'templates.yaml', 'webpages.yaml', 'comments.yaml']
def test_webpage_create_redirect(self):
res = self.client.get(reverse('webpage-create'))
self.assertRedirects(res, reverse('login') + '?next=' + reverse('webpage-create'))
def test_webpage_create(self):
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.get(reverse('webpage-create'))
self.assertEqual(res.status_code, 200)
image_path = Path(settings.BASE_DIR) / 'test_media' / 'images' / 'test.png'
image1 = SimpleUploadedFile(name='test.png', content=open(image_path, 'rb').read(), content_type='image/jpeg')
image2 = SimpleUploadedFile(name='test.png', content=open(image_path, 'rb').read(), content_type='image/jpeg')
image3 = SimpleUploadedFile(name='test.png', content=open(image_path, 'rb').read(), content_type='image/jpeg')
form_input = {
'name': 'asdf',
'template_used': '1',
'user_title': 'ASDF',
'user_text_1': 'zxcvasdfqwer',
'user_text_2': 'zxcvasdfqwer',
'user_text_3': 'zxcvasdfqwer',
'user_image_1': image1,
'user_image_2': image2,
'user_image_3': image3,
}
res = self.client.post(reverse('webpage-create'), form_input)
self.assertRedirects(res, reverse('webpage-view', kwargs={'pagename': 'asdf'}))
self.assertEqual(Webpage.objects.count(), 2)
webpage = Webpage.objects.get(pk=2)
self.assertEqual(webpage.name, form_input['name'])
def test_webpage_preview(self):
res = self.client.get(reverse('webpage-view', kwargs={'pagename': 'mypage'}))
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['webpage'], Webpage)
self.assertEqual(res.context['webpage'].user_title, 'thats my page')
def test_webpage_detail(self):
res = self.client.get(reverse('webpage-detail', kwargs={'pagename': 'mypage'}))
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['comment_form'], CommentForm)
self.assertIsInstance(res.context['comments'][0], Comment)
def test_webpage_list(self):
res = self.client.get(reverse('webpage-list'))
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['webpages'][0], Webpage)
def test_webpage_update(self):
self.client.login(username=ADMIN_USERNAME, password=ADMIN_PASSWORD)
res = self.client.get(reverse('webpage-update', kwargs={'pagename': 'mypage'}))
self.assertEqual(res.status_code, 200)
webpage = Webpage.objects.get(pk=1)
form_data = webpage.__dict__
form_data['user_title'] = 'abcd test 01234'
form_data['template_used'] = '1'
res = self.client.post(reverse('webpage-update', kwargs={'pagename': 'mypage'}), form_data)
self.assertRedirects(res, reverse('webpage-view', kwargs={'pagename': 'mypage'}))
webpage = Webpage.objects.get(pk=1)
self.assertEqual(webpage.user_title, 'abcd test 01234')
def test_webpage_delete(self):
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.post(reverse('webpage-delete', kwargs={'pagename': 'mypage'}))
self.assertEqual(res.status_code, 403)
self.client.login(username=ADMIN_USERNAME, password=ADMIN_PASSWORD)
res = self.client.post(reverse('webpage-delete', kwargs={'pagename': 'mypage'}))
self.assertRedirects(res, '/')
self.assertEqual(Webpage.objects.count(), 0)
class TemplateViewsTest(TestCase):
fixtures = ['users.yaml', 'templates.yaml']
    def setUp(self):
        self.client.login(username=USERNAME, password=PASSWORD)
        # NOTE: assumed to name a Template from the templates.yaml fixture (the
        # console theme referenced below); the original never set this attribute,
        # so the detail/delete tests would raise AttributeError.
        self.template_name = 'console'
def test_template_create(self):
"""
What this assumes:
- When you create a page the
available templates are
passed in. and in a certain
way.
- POST-ing `/templates/new`
creates a specified template
- The console theme exists.
"""
# example stylesheet
template_name = 'blahblahblah'
style = str(Path(settings.MEDIA_ROOT) / 'static' / 'themes' / 'console.css')
res = self.client.get(reverse('webpage-create'))
template_entries = [entry[1] for entry in res.context['form'].fields['template_used'].choices]
self.assertNotIn(template_name, template_entries)
self.client.post(reverse('template-create'), {'name': template_name, 'style_sheet': style})
res = self.client.get(reverse('webpage-create'))
template_entries = [entry[1] for entry in res.context['form'].fields['template_used'].choices]
self.assertIn(template_name, template_entries)
def test_template_detail(self):
res = self.client.get(reverse('template-detail', kwargs={'templatename': self.template_name}))
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['template'], Template)
def test_template_list(self):
res = self.client.get(reverse('template-list'))
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['templates'][0], Template)
def test_template_delete(self):
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.post(reverse('template-delete', kwargs={'templatename': self.template_name}))
self.assertEqual(res.status_code, 403)
self.client.login(username=ADMIN_USERNAME, password=ADMIN_PASSWORD)
res = self.client.post(reverse('template-delete', kwargs={'templatename': self.template_name}))
self.assertRedirects(res, '/')
self.assertEqual(Template.objects.count(), 1)
class CommentViewsTest(TestCase):
fixtures = ['users.yaml', 'webpages.yaml', 'templates.yaml', 'comments.yaml']
def test_comment_create(self):
form_data = {
'title': 'test comment',
'content': 'another test'
}
res = self.client.post(reverse('comment-create', kwargs={'pagename': 'mypage'}), form_data)
self.assertRedirects(
res, reverse('login') + '?next=' + reverse('webpage-detail', kwargs={'pagename': 'mypage'})
)
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.post(reverse('comment-create', kwargs={'pagename': 'mypage'}), form_data)
self.assertRedirects(res, reverse('webpage-detail', kwargs={'pagename': 'mypage'}))
self.assertEqual(Comment.objects.count(), 3)
def test_comment_delete(self):
self.client.login(username=USERNAME, password=PASSWORD)
res = self.client.post(reverse('comment-delete', kwargs={'pk': 1}))
self.assertEqual(res.status_code, 403)
self.client.login(username=ADMIN_USERNAME, password=ADMIN_PASSWORD)
res = self.client.post(reverse('comment-delete', kwargs={'pk': 1}))
self.assertRedirects(res, reverse('webpage-detail', kwargs={'pagename': 'mypage'}))
self.assertEqual(Comment.objects.count(), 1)
|
from userinput import userinput
import pytest
def test_ip(monkeypatch):
monkeypatch.setattr('builtins.input', lambda x: "127.0.0.1")
assert userinput("user_input", validator="ip", cache=False)
monkeypatch.setattr('builtins.input', lambda x: "")
with pytest.raises(ValueError):
userinput("user_input", validator="ip", cache=False, maximum_attempts=3)
monkeypatch.setattr('builtins.input', lambda x: "255.254.253.252")
with pytest.raises(ValueError):
        userinput("user_input", validator="ip", cache=False, maximum_attempts=3)
|
# coding: utf-8
import os
import sys
import re
import time
from pyspark import SparkConf, SparkContext  # important
from pyspark.sql import SQLContext
import pandas as pd
reload(sys)
sys.setdefaultencoding("utf-8")
# 0 add the envs # part0 important
if "SPARK_HOME" not in os.environ:
os.environ["SPARK_HOME"] = '/usr/hdp/2.3.4.0-3485/spark'
SPARK_HOME = os.environ["SPARK_HOME"]
sys.path.insert(0, os.path.join(SPARK_HOME, "python", "lib"))
sys.path.insert(0, os.path.join(SPARK_HOME, "python"))
# 2 process each parquet
if __name__ == "__main__":
start_time = time.time()
try:
        sc.stop()  # -------- important! otherwise defining sc below may raise an error --------
except:
pass
conf = SparkConf().setAppName("UserLabelGenerate")
#set("spark.driver.allowMultipleContexts", "true")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
rdd = sc.textFile("test/total_summary9")
rdd.take(3)
print rdd.count()
#sc.stop()
|
import json
import sys
from unittest.mock import MagicMock
import pytest
import Browser.keywords as interaction
from Browser.keywords import PlaywrightState
WARN_MESSAGE = (
"WARNING RobotFramework:logger.py:95 Direct assignment of values as 'secret' is deprecated. "
"Use special variable syntax to resolve variable. Example $var instead of ${var}.\n"
)
def test_fill_secret_in_plain_text(caplog):
    secrets = interaction.Interaction(MagicMock())
    secrets.fill_secret("selector", "password")
assert caplog.text == WARN_MESSAGE
def test_type_secret_in_plain_text(caplog):
    secrets = interaction.Interaction(MagicMock())
    secrets.type_secret("selector", "password")
assert caplog.text == WARN_MESSAGE
def test_type_secret_with_prefix(caplog):
    secrets = interaction.Interaction(MagicMock())
    secrets._replace_placeholder_variables = MagicMock(return_value="123")
    secrets.type_secret("selector", "$password")
    assert caplog.text == ""
    secrets.type_secret("selector", "%password")
assert caplog.text == ""
def test_fill_secret_with_prefix(caplog):
    secrets = interaction.Interaction(MagicMock())
    secrets._replace_placeholder_variables = MagicMock(return_value="123")
    secrets.fill_secret("selector", "$password")
    assert caplog.text == ""
    secrets.fill_secret("selector", "%password")
assert caplog.text == ""
@pytest.mark.skipif(sys.version_info.minor == 7, reason="Does not work with Python 3.7")
def test_http_credentials_in_new_context():
class Response:
contextOptions = json.dumps({'username': 'USERNAME', 'password': 'PWD'})
log = "Something here"
newBrowser = True
id = 123
ctx = MagicMock()
dummy_new_context = MagicMock(return_value=Response())
pw = PlaywrightState(ctx)
pw._new_context = dummy_new_context
pw.resolve_secret = MagicMock(
return_value={"username": "USERNAME", "password": "PWD"}
)
pw.new_context(httpCredentials={"username": "$username", "password": "$pwd"})
name, args, kwargs = dummy_new_context.mock_calls[0]
result_raw_options = json.loads(args[0])
assert result_raw_options["httpCredentials"]["username"] == "USERNAME"
assert result_raw_options["httpCredentials"]["password"] == "PWD"
|
#
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import logging
from s3replicationcommon.job import JobEvents
from s3replicationcommon.s3_common import S3RequestState
from s3replicationcommon.s3_get_object import S3AsyncGetObject
from s3replicationcommon.s3_put_object import S3AsyncPutObject
from s3replicationcommon.timer import Timer
_logger = logging.getLogger('s3replicator')
class ObjectReplicator:
def __init__(self, job, transfer_chunk_size_bytes, range_read_offset,
range_read_length, source_session, target_session) -> None:
"""Initialise."""
self._transfer_chunk_size_bytes = transfer_chunk_size_bytes
self._job_id = job.get_job_id()
self._request_id = self._job_id
self._timer = Timer()
self._range_read_offset = range_read_offset
self._range_read_length = range_read_length
        # A set of observers to watch for various notifications.
# To start with job completed (success/failure)
self._observers = {}
self._s3_source_session = source_session
self._object_source_reader = S3AsyncGetObject(
self._s3_source_session,
self._request_id,
job.get_source_bucket_name(),
job.get_source_object_name(),
int(job.get_source_object_size()),
self._range_read_offset,
self._range_read_length)
# Setup target site info
self._s3_target_session = target_session
self._object_writer = S3AsyncPutObject(
self._s3_target_session,
self._request_id,
job.get_target_bucket_name(),
job.get_source_object_name(),
int(job.get_source_object_size()))
self._object_target_reader = S3AsyncGetObject(
self._s3_target_session,
self._request_id,
job.get_target_bucket_name(),
job.get_source_object_name(),
int(job.get_source_object_size()),
self._range_read_offset,
self._range_read_length)
def get_execution_time(self):
"""Return total time for Object replication."""
return self._timer.elapsed_time_ms()
def setup_observers(self, label, observer):
self._observers[label] = observer
async def start(self):
# Start transfer
self._timer.start()
await self._object_writer.send(self._object_source_reader,
self._transfer_chunk_size_bytes)
self._timer.stop()
_logger.info(
"Replication completed in {}ms for job_id {}".format(
self._timer.elapsed_time_ms(), self._job_id))
# notify job state events
for label, observer in self._observers.items():
_logger.debug(
"Notify completion to observer with label[{}]".format(label))
if self._object_writer.get_state() == S3RequestState.PAUSED:
await observer.notify(JobEvents.STOPPED, self._job_id)
elif self._object_writer.get_state() == S3RequestState.ABORTED:
await observer.notify(JobEvents.ABORTED, self._job_id)
else:
await observer.notify(JobEvents.COMPLETED, self._job_id)
        # The original `if JobEvents.COMPLETED:` was always truthy; only verify
        # when the transfer was neither paused nor aborted.
        if self._object_writer.get_state() not in (S3RequestState.PAUSED, S3RequestState.ABORTED):
source_etag = self._object_source_reader.get_etag()
target_etag = self._object_writer.get_etag()
_logger.info(
"MD5 : Source {} and Target {}".format(
source_etag, target_etag))
# check md5 of source and replicated objects at target
if source_etag == target_etag:
_logger.info("MD5 matched for job_id {}".format(self._job_id))
else:
_logger.error(
"MD5 not matched for job_id {}".format(
self._job_id))
# check content length of source and target objects
# [system-defined metadata]
reader_generator = self._object_target_reader.fetch(
self._transfer_chunk_size_bytes)
async for _ in reader_generator:
pass
source_content_length = self._object_source_reader.get_content_length()
target_content_length = self._object_target_reader.get_content_length()
_logger.info(
"Content Length : Source {} and Target {}".format(
source_content_length,
target_content_length))
if source_content_length == target_content_length:
_logger.info(
"Content length matched for job_id {}".format(
self._job_id))
else:
_logger.error(
"Content length not matched for job_id {}".format(
self._job_id))
    def pause(self):
        """Pause the running object transfer."""
        pass  # XXX
    def resume(self):
        """Resume the running object transfer."""
        pass  # XXX
    def abort(self):
        """Abort the running object transfer."""
        self._object_writer.abort()
|
#!/usr/bin/python3
import getpass
import os
import re
import json
import yaml
import shutil
import subprocess
import threading
def clone_or_update_conan_center_index():
if not os.path.exists("conan-center-index"):
os.system("/bin/bash -c \"git clone https://github.com/conan-io/conan-center-index\"")
else:
os.chdir("conan-center-index")
os.system("/bin/bash -c \"git pull\"")
os.chdir("..")
def get_package_paths(package_name):
package_paths = []
version_yml = None
version_yml_path = os.path.join(
os.path.join("conan-center-index/recipes", package_name),
"config.yml")
with open(version_yml_path, "r") as f:
version_yml = yaml.safe_load(f)
for version in version_yml["versions"]:
package_paths.append(os.path.join(os.path.dirname(version_yml_path),
version_yml["versions"][version]["folder"]))
return list(set(package_paths))
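# Illustrative config.yml layout consumed by get_package_paths/get_versions
# (field names inferred from the access pattern above):
#   versions:
#     "1.2.3":
#       folder: all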
def get_versions(package_name):
versions = []
version_yml = None
version_yml_path = os.path.join(
os.path.join("conan-center-index/recipes", package_name),
"config.yml")
with open(version_yml_path, "r") as f:
version_yml = yaml.safe_load(f)
for version in version_yml["versions"]:
versions.append(str(version))
return versions
def get_source_versions(package_name):
package_paths = get_package_paths(package_name)
source_versions = []
for package_path in package_paths:
source_version_yml = None
source_version_yml_path = os.path.join(package_path, "conandata.yml")
if os.path.exists(source_version_yml_path):
with open(source_version_yml_path, "r") as f:
source_version_yml = yaml.safe_load(f)
source_versions = list(source_version_yml["sources"].keys())
return source_versions
def get_source_url(package_name, version):
package_paths = get_package_paths(package_name)
source_url = ""
for package_path in package_paths:
source_version_yml = None
source_version_yml_path = os.path.join(package_path, "conandata.yml")
if os.path.exists(source_version_yml_path):
with open(source_version_yml_path, "r") as f:
source_version_yml = yaml.safe_load(f)
if source_version_yml["sources"].get(version) is not None:
if type(source_version_yml["sources"].get(version)) is not list:
tmp_url = source_version_yml["sources"][version]["url"]
if type(tmp_url) is list:
source_url = tmp_url[len(tmp_url) - 1]
else:
source_url = tmp_url
else:
tmp_url = source_version_yml["sources"][version][0]["url"]
if type(tmp_url) is list:
source_url = tmp_url[len(tmp_url) - 1]
else:
source_url = tmp_url
return source_url
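# Illustrative conandata.yml layout handled above: a version entry may be a dict
# or a list of dicts, and "url" may be a string or a list (the last element wins):
#   sources:
#     "1.2.3":
#       url: "https://example.org/pkg-1.2.3.tar.gz"
#       sha256: "..."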
def load_conan_packages():
package_names = []
with open("conan_packages.json", "r") as f:
package_names = json.load(f)["packages"]
return package_names
def get_dependencies(package_name):
dependency_names = []
for package_path in get_package_paths(package_name):
conanfile_py_path = os.path.join(package_path, "conanfile.py")
with open(conanfile_py_path, "r") as f:
file_content = f.read()
dependency_names = re.findall(r'\s{3,4}self.requires\(\"([a-z0-9]+)/\S+\"\)', file_content)
for dependency_name in dependency_names:
dependency_names = dependency_names + get_dependencies(dependency_name)
return dependency_names
def download_source(package_name, version):
url = get_source_url(package_name, version)
if url != "":
print(package_name, version, url)
tmp = url.split("/")
        wget_cmd = "wget " + url + " -O " + "sources/" + package_name + "/" + version + "/" + tmp[len(tmp) - 1]
if not os.path.exists("sources/" + package_name + "/" + version + "/" + tmp[len(tmp) - 1]):
os.system(wget_cmd)
def be_ready_for_source_dir(package_name):
if not os.path.exists("sources"):
os.mkdir("sources")
package_source_dir = os.path.join("sources", package_name)
if not os.path.exists(package_source_dir):
os.mkdir(package_source_dir)
for version in get_source_versions(package_name):
version_dir = os.path.join(package_source_dir, version)
if not os.path.exists(version_dir):
os.mkdir(version_dir)
download_source(package_name, version)
def execute_conan_server():
return subprocess.Popen("gunicorn -b 0.0.0.0:9300 -w 4 -t 300 conans.server.server_launcher:app",
stdout=subprocess.PIPE, shell=True)
def copy_and_modify_package(package_name):
# username = getpass.getuser()
src_dir = os.path.join("conan-center-index/recipes", package_name)
dst_dir = os.path.join("modified_packages", package_name)
if not os.path.exists(dst_dir):
os.system("cp -r " + src_dir + " " + dst_dir)
for version_dir in get_package_paths(package_name):
conandata_yml_path = os.path.join(version_dir, "conandata.yml")
if os.path.exists(conandata_yml_path):
conandata_yml = ""
with open(conandata_yml_path, "r") as f:
conandata_yml = yaml.safe_load(f)
print("*******************" + package_name + "*******************")
for version in conandata_yml["sources"]:
modified_url = "http://pkgmachine.local:8080/sources/" + package_name + "/"
print(json.dumps(conandata_yml["sources"][version], indent=4))
                if type(conandata_yml["sources"][version]) is list:
                    for i, _ in enumerate(conandata_yml["sources"][version]):
                        # NOTE: reconstructed from a truncated original line; the
                        # assumed intent is to point each source at the local mirror.
                        tmp_url = conandata_yml["sources"][version][i]["url"]
                        last = tmp_url[-1] if type(tmp_url) is list else tmp_url
                        conandata_yml["sources"][version][i]["url"] = modified_url + version + "/" + last.split("/")[-1]
def start():
msg = '''
____ ___ _ _ _ _ _ ____ _____ ______ _______ ____ ____ _ _ ___ _ ____ _____ ____
/ ___/ _ \| \ | | / \ | \ | | / ___|| ____| _ \ \ / / ____| _ \ | __ )| | | |_ _| | | _ \| ____| _ \
| | | | | | \| | / _ \ | \| | \___ \| _| | |_) \ \ / /| _| | |_) | | _ \| | | || || | | | | | _| | |_) |
| |__| |_| | |\ |/ ___ \| |\ | ___) | |___| _ < \ V / | |___| _ < | |_) | |_| || || |___| |_| | |___| _ <
\____\___/|_| \_/_/ \_\_| \_| |____/|_____|_| \_\ \_/ |_____|_| \_\ |____/ \___/|___|_____|____/|_____|_| \_\
'''
print(msg)
if __name__ == '__main__':
start()
clone_or_update_conan_center_index()
package_names = load_conan_packages()
for package_name in package_names:
package_names = package_names + get_dependencies(package_name)
package_names = list(set(package_names))
for package_name in package_names:
be_ready_for_source_dir(package_name)
conan_server_process = execute_conan_server()
for package_name in package_names:
copy_and_modify_package(package_name)
conan_server_process.kill()
|
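# ROT13: rotate A-Z and a-z by 13 positions and pass everything else through;
# e.g. "Uryyb" maps to "Hello". Reads lines until EOF.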
while True:
try:
S = input()
res = ""
for k in S:
o = ord(k)
if o >= 65 and o <= 90: # A to Z
o += 13
if o > 90:
o = o - 90 + 64
res += chr(o)
elif o >= 97 and o <= 122: # a to z
o += 13
if o > 122:
o = o - 122 + 96
res += chr(o)
else:
res += k
print(res)
except:
break
|
from __future__ import print_function, absolute_import
import os
import gc
import sys
import time
import h5py
import scipy
import datetime
import argparse
import os.path as osp
import oneflow as flow
import oneflow.nn as nn
import shutil
import oneflow.typing as tp
from typing import Tuple
import oneflow.math as math
import numpy as np
import models
import models.getresnet as getresnet
import transforms.spatial_transforms as ST
import transforms.temporal_transforms as TT
import tools.data_manager as data_manager
from tools.video_loader import VideoDataset
from tools.losses import TestTripletLoss as TripletLoss
from tools.losses import _CrossEntropyLoss as _CrossEntropyLoss
from tools.utils import AverageMeter, Logger
from tools.eval_metrics import evaluate
from tools.samplers import RandomIdentitySampler
parser = argparse.ArgumentParser(description='Test AP3D using all frames')
# Datasets
parser.add_argument('--root', type=str, default='/content/mars/')
parser.add_argument('-d', '--dataset', type=str, default='mars')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=128)
# Augment
parser.add_argument("--model_load_dir", type=str, default='/content/resnet_v15_of_best_model_val_top1_77318', required=False,
help="model load directory")
parser.add_argument('--seq_len', type=int, default=4,
help="number of images to sample in a tracklet")
parser.add_argument('--sample_stride', type=int, default=8,
help="stride of images to sample in a tracklet")
# Optimization options
parser.add_argument('--max_epoch', default=240, type=int)
parser.add_argument('--start_epoch', default=0, type=int)
parser.add_argument('--train_batch', default=32, type=int)
parser.add_argument('--test_batch', default=32, type=int)
parser.add_argument('--lr', default=0.0001, type=float)
parser.add_argument('--stepsize', default=[14100, 28200, 42300], nargs='+', type=int,
help="stepsize to decay learning rate")
parser.add_argument('--gamma', default=0.1, type=float,
help="learning rate decay")
parser.add_argument('--weight_decay', default=5e-04, type=float)
parser.add_argument('--margin', type=float, default=0.3,
help="margin for triplet loss")
parser.add_argument('--distance', type=str, default='cosine',
help="euclidean or cosine")
parser.add_argument('--num_instances', type=int, default=4,
help="number of instances per identity")
# Architecture
parser.add_argument('-a', '--arch', type=str, default='ap3dres50',
help="ap3dres50, ap3dnlres50")
# Miscs
parser.add_argument('--eval_step', type=int, default=10)
parser.add_argument('--start_eval', type=int, default=0,
help="start to evaluate after specific epoch")
parser.add_argument('--save_dir', type=str, default='log-mars-ap3d')
parser.add_argument('--gpu', default='0', type=str,
help='gpu device ids for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
test_image = tp.Numpy.Placeholder((args.test_batch , 3, args.seq_len, args.height,args.width))
input_pid = tp.Numpy.Placeholder((args.train_batch,))
func_config = flow.FunctionConfig()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
func_config.default_data_type(flow.float)
dataset = data_manager.init_dataset(name=args.dataset, root=args.root)
@flow.global_function(function_config=func_config)
def gallery_job(
image:test_image
)->tp.Numpy:
model = models.init_model(name=args.arch, num_classes=dataset.num_gallery_pids,training=False,resnetblock=getresnet)
feat=model.build_network(image)
feat=math.reduce_mean(feat,1)
feat=flow.layers.batch_normalization(inputs=feat,
axis=1,
momentum=0.997,
epsilon=1.001e-5,
center=True,
scale=True,
trainable=False,
name= "gallery_feature_bn")
return feat
@flow.global_function(function_config=func_config)
def query_job(
    image: test_image
) -> tp.Numpy:
    model = models.init_model(name=args.arch, num_classes=dataset.num_query_pids,
                              training=False, resnetblock=getresnet)
    feat = model.build_network(image)
    feat = math.reduce_mean(feat, 1)
    feat = flow.layers.batch_normalization(inputs=feat,
                                           axis=1,
                                           momentum=0.997,
                                           epsilon=1.001e-5,
                                           center=True,
                                           scale=True,
                                           trainable=False,
                                           name="query_feature_bn")
    return feat
def getDataSets(dataset):
spatial_transform_test = ST.Compose([
ST.Scale((args.height, args.width), interpolation=3),
ST.ToNumpy(),
ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
temporal_transform_test = TT.TemporalBeginCrop()
    queryset = VideoDataset(dataset.query, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test)
    galleryset = VideoDataset(dataset.gallery, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test)
    return queryset, galleryset
def addmm(mat, mat1, mat2, beta=1, alpha=1):
    """NumPy stand-in for torch.addmm: beta * mat + alpha * (mat1 @ mat2)."""
    # note: the original multiplied `mat` instead of `mat1`, which is a bug
    return beta * mat + alpha * np.matmul(mat1, mat2)
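# Example: addmm(np.zeros((1, 3)), np.ones((1, 2)), np.ones((2, 3)), beta=1, alpha=-2)
# -> [[-4., -4., -4.]], i.e. beta * mat + alpha * (mat1 @ mat2), mirroring
# torch.addmm's beta/alpha semantics.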
def main():
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))
    assert os.path.isdir(args.model_load_dir)
    print("Restoring model from {}.".format(args.model_load_dir))
    checkpoint = flow.train.CheckPoint()
    checkpoint.load(args.model_load_dir)
    queryset, galleryset = getDataSets(dataset)
    print("==> Test")
    test(queryset, galleryset, dataset)
def test(queryset, galleryset, dataset, ranks=[1, 5, 10, 20]):
    since = time.time()
    qf, q_pids, q_camids = [], [], []
    batch_size = args.test_batch
    query_img, query_id, query_cam_id = map(list, zip(*dataset.query))
    indices = np.arange(len(query_id))
    for i in range(len(indices) // batch_size):
        try:
            test_batch = queryset.__getbatch__(indices[i * batch_size:(i + 1) * batch_size])
        except Exception:
            # fall back to the last full window if a batch fetch fails
            test_batch = queryset.__getbatch__(indices[-batch_size:])
        feat = query_job(test_batch[0])
        qf.append(feat)
        q_pids.extend(test_batch[1].astype(np.float32))
        q_camids.extend(test_batch[2])
    qf = np.concatenate(qf, 0)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    print("Extracted features for query set, obtained {} matrix".format(qf.shape))
    gf, g_pids, g_camids = [], [], []
    gallery_img, gallery_id, gallery_cam_id = map(list, zip(*dataset.gallery))
    indices = np.arange(len(gallery_id))
    for i in range(len(indices) // batch_size):
        try:
            gallery_batch = galleryset.__getbatch__(indices[i * batch_size:(i + 1) * batch_size])
        except Exception:
            gallery_batch = galleryset.__getbatch__(indices[-batch_size:])
        # use gallery_job here (the original called query_job, which routes
        # gallery features through the query branch's batch norm)
        feat = gallery_job(gallery_batch[0])
        gf.append(feat)
        g_pids.extend(gallery_batch[1].astype(np.float32))
        g_camids.extend(gallery_batch[2])
    gf = np.concatenate(gf, 0)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)
    if args.dataset == 'mars':
        # the MARS protocol evaluates queries against gallery + query tracklets
        gf = np.concatenate((qf, gf), 0)
        g_pids = np.append(q_pids, g_pids)
        g_camids = np.append(q_camids, g_camids)
    print("Extracted features for gallery set, obtained {} matrix".format(gf.shape))
time_elapsed = time.time() - since
print('Extracting features complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print("Computing distance matrix")
    m, n = qf.shape[0], gf.shape[0]
    distmat = np.zeros((m, n))
    if args.distance == 'euclidean':
        # squared Euclidean distance via ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q.g
        distmat1 = np.sum(np.power(qf, 2), axis=1, keepdims=True)
        distmat2 = np.sum(np.power(gf, 2), axis=1, keepdims=True)
        distmat1 = np.broadcast_to(distmat1, (m, n))
        distmat2 = np.transpose(np.broadcast_to(distmat2, (n, m)))
        distmat = distmat2 + distmat1
        tempgf = np.transpose(gf)
        for i in range(m):
            distmat[i:i + 1] = addmm(distmat[i:i + 1], qf[i:i + 1], tempgf, 1, -2)
    else:
        # cosine distance: L2-normalize the rows, then use the negative dot
        # product so that smaller values mean more similar
        qf = qf / np.linalg.norm(qf, ord=2, axis=1, keepdims=True)
        gf = gf / np.linalg.norm(gf, ord=2, axis=1, keepdims=True)
        tempgf = np.transpose(gf)
        for i in range(m):
            distmat[i] = -np.matmul(qf[i:i + 1], tempgf)
print("Computing CMC and mAP")
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
print("Results ----------")
    print('top1:{:.1%} top5:{:.1%} top10:{:.1%} mAP:{:.1%}'.format(cmc[0], cmc[4], cmc[9], mAP))
print("------------------")
return cmc[0]
if __name__ == '__main__':
main()
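# Example invocation (script name is hypothetical; adjust paths to your setup):
#   python test_all_frames.py --dataset mars --root /content/mars/ \
#       --model_load_dir /content/resnet_v15_of_best_model_val_top1_77318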
|
import ast
import numpy as np
import pandas as pd
import torch
from sklearn.metrics.pairwise import cosine_similarity
def similarity(dataset, number_of_recommendations=3):
    IRI = extractIRI(dataset)
    dataset_id = extractIDs(IRI)
    all_datasets_ids = [3392, 3403, 3407, 3405, 3388, 3390, 3386, 3512, 3510, 3508, 3176, 3172, 3171, 974]
    modelRotatE = torch.load('EmbeddingModels/results/resultsRotatE/trained_model.pkl')
    entity_embeddings = modelRotatE.entity_representations[0]
    original = entity_embeddings(torch.as_tensor(dataset_id)).detach().numpy()
    d = dict.fromkeys(all_datasets_ids)
    for candidate_id in all_datasets_ids:
        embedding = entity_embeddings(torch.as_tensor(candidate_id)).detach().numpy()
        print("Is embedding complex (real and imaginary) in nature?", np.iscomplexobj(embedding))  # -> False
        cos_sim = cosine_similarity(original.reshape(1, -1), embedding.reshape(1, -1))
        d[candidate_id] = float(cos_sim[0, 0])
    recommended_ids = sorted(d, key=d.get, reverse=True)[:number_of_recommendations]
    print(recommended_ids)
    return recommended_ids
def extractIRI(name):
    file_path = "/home/cjain/PycharmProjects/RS_2021/data/Datasets/" + name + ".csv"
    df = pd.read_csv(file_path)
    IRI = df.iat[0, 0]
    return IRI
def extractIDs(IRI):
    with open("EmbeddingModels/results/resultsRotatE/entities_ids.txt", "r") as entity_ids:
        entity = ast.literal_eval(entity_ids.read())
    return entity[IRI]
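# Assumed on-disk format of entities_ids.txt: a Python dict literal mapping
# entity IRIs to integer embedding indices, e.g.
#   {'http://example.org/dataset/974': 974, ...}
# (the example IRI is illustrative, not taken from the real file).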
if __name__ == "__main__":
dataset = "Grundwasserkörper, NGP 2009, Österreich"
similarity(dataset, 4)
|
import select
import socket
import sys
import argparse
import threading
from utils import *
SERVER_HOST = 'localhost'
stop_thread = False
def get_and_send(client):
while not stop_thread:
data = sys.stdin.readline().strip()
if data:
send(client.sock, data)
sys.stdout.write(client.prompt)
sys.stdout.flush()
class ChatClient():
""" A command line chat client using select """
def __init__(self, name, port, host=SERVER_HOST):
self.name = name
self.connected = False
self.host = host
self.port = port
# Initial prompt
self.prompt = f'[{name}@{socket.gethostname()}]> '
# Connect to server at port
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, self.port))
print(f'Now connected to chat server@ port {self.port}')
self.connected = True
# Send my name...
send(self.sock, 'NAME: ' + self.name)
data = receive(self.sock)
# Contains client address, set it
addr = data.split('CLIENT: ')[1]
self.prompt = '[' + '@'.join((self.name, addr)) + ']> '
threading.Thread(target=get_and_send, args=(self,)).start()
        except socket.error as e:
            print(f'Failed to connect to chat server @ port {self.port}: {e}')
            sys.exit(1)
    def cleanup(self):
        """Signal the input thread to stop and close the connection."""
        global stop_thread
        stop_thread = True
        self.sock.close()
def run(self):
""" Chat client main loop """
while self.connected:
try:
sys.stdout.write(self.prompt)
sys.stdout.flush()
                # Wait for data from the server socket (stdin is handled by
                # the get_and_send thread)
                readable, _, _ = select.select([self.sock], [], [])
                for sock in readable:
                    if sock == self.sock:
data = receive(self.sock)
if not data:
print('Client shutting down.')
self.connected = False
break
                        else:
                            if isinstance(data, list):
                                # the only list payload here is the roster of
                                # connected clients, so decode it as such
                                data = receieve_list_clients(data)
sys.stdout.write(data + '\n')
sys.stdout.flush()
            except KeyboardInterrupt:
                print('Client interrupted.')
                # cleanup() sets the global stop_thread flag; assigning it
                # here, as the original did, only created an unused local
                self.cleanup()
                break
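# Example invocation (script name is assumed; a compatible chat server from
# the same project must already be listening on the chosen port):
#   python chat_client.py --name alice --port 9988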
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--name', action="store", dest="name", required=True)
parser.add_argument('--port', action="store",
dest="port", type=int, required=True)
given_args = parser.parse_args()
port = given_args.port
name = given_args.name
client = ChatClient(name=name, port=port)
client.run()
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/master/pokemon/encounter_attributes.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.enums import pokemon_movement_type_pb2 as pogoprotos_dot_enums_dot_pokemon__movement__type__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/master/pokemon/encounter_attributes.proto',
package='pogoprotos.settings.master.pokemon',
syntax='proto3',
serialized_pb=_b('\n=pogoprotos/settings/master/pokemon/encounter_attributes.proto\x12\"pogoprotos.settings.master.pokemon\x1a,pogoprotos/enums/pokemon_movement_type.proto\"\xc3\x04\n\x13\x45ncounterAttributes\x12\x19\n\x11\x62\x61se_capture_rate\x18\x01 \x01(\x02\x12\x16\n\x0e\x62\x61se_flee_rate\x18\x02 \x01(\x02\x12\x1a\n\x12\x63ollision_radius_m\x18\x03 \x01(\x02\x12\x1a\n\x12\x63ollision_height_m\x18\x04 \x01(\x02\x12\x1f\n\x17\x63ollision_head_radius_m\x18\x05 \x01(\x02\x12<\n\rmovement_type\x18\x06 \x01(\x0e\x32%.pogoprotos.enums.PokemonMovementType\x12\x18\n\x10movement_timer_s\x18\x07 \x01(\x02\x12\x13\n\x0bjump_time_s\x18\x08 \x01(\x02\x12\x16\n\x0e\x61ttack_timer_s\x18\t \x01(\x02\x12\"\n\x1a\x62onus_candy_capture_reward\x18\n \x01(\x05\x12%\n\x1d\x62onus_stardust_capture_reward\x18\x0b \x01(\x05\x12\x1a\n\x12\x61ttack_probability\x18\x0c \x01(\x02\x12\x19\n\x11\x64odge_probability\x18\r \x01(\x02\x12\x18\n\x10\x64odge_duration_s\x18\x0e \x01(\x02\x12\x16\n\x0e\x64odge_distance\x18\x0f \x01(\x02\x12\x17\n\x0f\x63\x61mera_distance\x18\x10 \x01(\x02\x12&\n\x1emin_pokemon_action_frequency_s\x18\x11 \x01(\x02\x12&\n\x1emax_pokemon_action_frequency_s\x18\x12 \x01(\x02\x62\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_pokemon__movement__type__pb2.DESCRIPTOR,])
_ENCOUNTERATTRIBUTES = _descriptor.Descriptor(
name='EncounterAttributes',
full_name='pogoprotos.settings.master.pokemon.EncounterAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base_capture_rate', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.base_capture_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='base_flee_rate', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.base_flee_rate', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='collision_radius_m', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.collision_radius_m', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='collision_height_m', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.collision_height_m', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='collision_head_radius_m', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.collision_head_radius_m', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='movement_type', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.movement_type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='movement_timer_s', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.movement_timer_s', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jump_time_s', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.jump_time_s', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attack_timer_s', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.attack_timer_s', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bonus_candy_capture_reward', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.bonus_candy_capture_reward', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bonus_stardust_capture_reward', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.bonus_stardust_capture_reward', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attack_probability', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.attack_probability', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dodge_probability', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.dodge_probability', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dodge_duration_s', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.dodge_duration_s', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dodge_distance', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.dodge_distance', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_distance', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.camera_distance', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_pokemon_action_frequency_s', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.min_pokemon_action_frequency_s', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_pokemon_action_frequency_s', full_name='pogoprotos.settings.master.pokemon.EncounterAttributes.max_pokemon_action_frequency_s', index=17,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=148,
serialized_end=727,
)
_ENCOUNTERATTRIBUTES.fields_by_name['movement_type'].enum_type = pogoprotos_dot_enums_dot_pokemon__movement__type__pb2._POKEMONMOVEMENTTYPE
DESCRIPTOR.message_types_by_name['EncounterAttributes'] = _ENCOUNTERATTRIBUTES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EncounterAttributes = _reflection.GeneratedProtocolMessageType('EncounterAttributes', (_message.Message,), dict(
DESCRIPTOR = _ENCOUNTERATTRIBUTES,
__module__ = 'pogoprotos.settings.master.pokemon.encounter_attributes_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.master.pokemon.EncounterAttributes)
))
_sym_db.RegisterMessage(EncounterAttributes)
# @@protoc_insertion_point(module_scope)
|
from itertools import islice, chain
def iter_chunks(iterable, size):
    """Lazily split an iterable into chunks of at most `size` items.
    Each yielded chunk must be consumed before advancing to the next.
    """
    iterator = iter(iterable)
    for first in iterator:
        yield chain([first], islice(iterator, size - 1))
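# Example: [list(c) for c in iter_chunks(range(7), 3)] -> [[0, 1, 2], [3, 4, 5], [6]]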
def utax_format(ranks, lineage, empty_patt):
return ','.join('{}:{}'.format(r[0], name.replace(',', '_'))
for r, name in zip(ranks, lineage)
if len(clean_name(name, empty_patt)) > 0)
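# Illustrative example: with ranks ('kingdom', 'phylum'), lineage
# ('Bacteria', 'Proteobacteria') and an empty_patt matching only placeholder
# names, this returns 'k:Bacteria,p:Proteobacteria' (UTAX lineage syntax).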
def clean_name(name, patt):
    """Return '' when `name` matches the placeholder pattern, else `name`."""
    if patt.search(name) is not None:
        return ''
    return name
|
import unittest
from compress import compress
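# For reference, a minimal sketch of the imported `compress` (an assumption
# inferred from the tests below: run-length encode, but return the original
# string unless the encoding is strictly shorter):
#
#     def compress(s):
#         runs, count = [], 1
#         for prev, cur in zip(s, s[1:] + '\0'):
#             if cur == prev:
#                 count += 1
#             else:
#                 runs.append(prev + str(count))
#                 count = 1
#         encoded = ''.join(runs)
#         return encoded if len(encoded) < len(s) else s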
class Test_Case_Compress(unittest.TestCase):
def test_compress(self):
self.assertEqual(compress('aabcccccaaa'), 'a2b1c5a3')
self.assertEqual(compress('abc'), 'abc')
self.assertEqual(compress('AbC'), 'AbC')
self.assertEqual(compress('aAbBcC'), 'aAbBcC')
self.assertEqual(compress('AAaaBBCC'), 'AAaaBBCC')
if __name__ == '__main__':
    unittest.main()
|
# Copyright 2016
# Drewan Tech, LLC
# ALL RIGHTS RESERVED
from flask import (Flask,
render_template,
request,
flash)
from os import urandom
app = Flask(__name__)
app.secret_key = urandom(32)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
if request.method == 'POST':
        try:
            try:
                int(request.form['matrix_rows'])
                int(request.form['matrix_columns'])
            except Exception:
                raise RuntimeError('Please provide an integer '
                                   'for the rows and columns values.')
            try:
                float(request.form['max_random_value'])
            except Exception:
                raise RuntimeError('Please provide a number '
                                   'for the max random value.')
flash('Matrix parameters: Rows = {}, '
'Columns = {}, '
'Max random value = {}'
.format(request.form['matrix_rows'],
request.form['matrix_columns'],
request.form['max_random_value']))
matrix = create_random_matrix(int(request.form['matrix_rows']),
int(request.form['matrix_columns']),
float(request.form['max_random_value']))
flash('')
flash('Matrix generated:')
for row in matrix:
row_string = ''
for column in row:
row_string = row_string + str(column) + ' '
flash(row_string)
return render_template('index.html')
        except Exception as e:
            flash(str(e))
            return render_template('index.html')
def create_random_matrix(rows, columns, max_random_value):
import random
return [[(random.random() * max_random_value)
for column in range(columns)]
for row in range(rows)]
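# Hedged addition: a conventional local entry point; the original deployment
# may instead serve the app via a WSGI server.
if __name__ == '__main__':
    app.run(debug=True)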
|
import os
import uvicorn
from fastapi import FastAPI
from app.api.v1.api_router import api_router
app = FastAPI()
app.include_router(api_router)
if __name__ == "__main__":
    # Note: uvicorn's reload mode needs the app as an import string, and the
    # `debug` kwarg has been removed from recent uvicorn releases. The module
    # path "main:app" is an assumption; adjust it to this file's actual name.
    debug: bool = os.getenv("ENV", "dev") == "dev"
    port: int = int(os.getenv("PORT", "8000"))
    uvicorn.run("main:app", host="0.0.0.0", port=port, reload=debug)
|