# coding: utf-8
"""
SpaCy deep_learning_keras.py solution to Kaggle Toxic Comment challenge
"""
import os
from utils import xprint_init, xprint, load_json, save_json
from framework import (SUMMARY_DIR, Evaluator, set_random_seed, show_auc, set_n_samples,
get_n_samples_str, auc_score_list, show_results)
from clf_spacy import ClfSpacy, PREDICT_METHODS
submission_name = 'spacy_lstm15'
epochs = 40
set_n_samples(10000)
run_summary_path = os.path.join(SUMMARY_DIR,
'%s.%s.run_summary.json' % (submission_name, get_n_samples_str()))
def get_clfx():
return ClfSpacy(n_hidden=16, max_length=10, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf0():
return ClfSpacy(n_hidden=64, max_length=100, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf1():
return ClfSpacy(n_hidden=128, max_length=100, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf2():
return ClfSpacy(n_hidden=64, max_length=150, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf3():
return ClfSpacy(n_hidden=64, max_length=100, # Shape
dropout=0.2, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf4():
return ClfSpacy(n_hidden=64, max_length=100, # Shape
dropout=0.5, learn_rate=0.002, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf5():
return ClfSpacy(n_hidden=64, max_length=100, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=50, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf6():
return ClfSpacy(n_hidden=64, max_length=50, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=50, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf12():
return ClfSpacy(n_hidden=256, max_length=100, # Shape
dropout=0.5, learn_rate=0.001, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf13():
return ClfSpacy(n_hidden=128, max_length=100, # Shape
dropout=0.5, learn_rate=0.002, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf14():
return ClfSpacy(n_hidden=128, max_length=100, # Shape
dropout=0.5, learn_rate=0.005, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
def get_clf15():
return ClfSpacy(n_hidden=128, max_length=100, # Shape
dropout=0.5, learn_rate=0.010, # General NN config
epochs=epochs, batch_size=150, frozen=frozen,
lstm_type=lstm_type, predict_method=predict_method)
# spacy_lstm15.log instance3 has
# RESULTS SUMMARY: 68
# auc=0.9920 46: get_clf12 ClfSpacy(batch_size=150, dropout=0.5, epochs=40, epochs2=2, frozen=True, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=LINEAR3)
# auc=0.9905 38: get_clf12 ClfSpacy(batch_size=150, dropout=0.5, epochs=40, epochs2=2, frozen=True, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=MEAN)
# auc=0.9894 22: get_clf12 ClfSpacy(batch_size=150, dropout=0.5, epochs=40, epochs2=2, frozen=True, learn_rate=0.001, lstm_type=6, max_length=100, n_hidden=256, predict_method=LINEAR3)
# spacy_lstm15.log instance3 has
# RESULTS SUMMARY: 63
# auc=0.9919 46: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=LINEAR)
# auc=0.9899 49: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=LINEAR4)
# auc=0.9895 22: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=6, max_length=100, n_hidden=256, predict_method=LINEAR)
# auc=0.9885 34: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=5, max_length=100, n_hidden=256, predict_method=LINEAR)
# auc=0.9885 42: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=MEAN_MAX)
# spacy_lstm15.log instance4 has
# RESULTS SUMMARY: 65
# auc=0.9900 34: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=6, max_length=100, n_hidden=128, predict_method=LINEAR2)
# auc=0.9897 60: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=128, predict_method=LINEAR2)
# auc=0.9877 62: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=128, predict_method=LINEAR4)
# spacy_lstm12.log instance4 has
# RESULTS SUMMARY: 37
# auc=0.9899 34: get_clf13 ClfSpacy(dropout=0.5, learn_rate=0.002, lstm_type=6, max_length=100, n_hidden=128, predict_method=LINEAR2)
# auc=0.9866 36: get_clf13 ClfSpacy(dropout=0.5, learn_rate=0.002, lstm_type=6, max_length=100, n_hidden=128, predict_method=LINEAR4)
# auc=0.9861 35: get_clf13 ClfSpacy(dropout=0.5, learn_rate=0.002, lstm_type=6, max_length=100, n_hidden=128, predict_method=LINEAR3)
# spacy_lstm14.log instance4 has
# RESULTS SUMMARY: 61
# auc=0.9815 7: get_clf13 ClfSpacy(dropout=0.5, learn_rate=0.002, lstm_type=8, max_length=100, n_hidden=128, predict_method=LINEAR)
# auc=0.9786 8: get_clf13 ClfSpacy(dropout=0.5, learn_rate=0.002, lstm_type=8, max_length=100, n_hidden=128, predict_method=LINEAR2)
# auc=0.9779 1: get_clf13 ClfSpacy(dropout=0.5, learn_rate=0.002, lstm_type=8, max_length=100, n_hidden=128, predict_method=MEAN)
# spacy_lstm12.log instance3 has
# RESULTS SUMMARY: 61
# auc=0.9896 34: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=6, max_length=100, n_hidden=256, predict_method=LINEAR2)
# auc=0.9891 60: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=LINEAR2)
# auc=0.9885 55: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_hidden=256, predict_method=MEAN_MAX)
# auc=0.9873 33: get_clf12 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=6, max_length=100, n_hidden=256, predict_method=LINEAR)
# spacy_lstm11.log has
# RESULTS SUMMARY: 59
# auc=0.9837 25: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=6, max_length=100, n_hidden=128, predict_method=LINEAR2)
# auc=0.9833 14: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=7, max_length=100, n_hidden=128, predict_method=LINEAR2)
# auc=0.9831 21: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=7, max_length=100, n_hidden=128, predict_method=PC90)
# clf=0.9837 6: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_examples=-1, n_hidden=128)
# clf=0.9815 2: get_clf0 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_examples=-1, n_hidden=64)
# clf=0.9804 3: get_clf0 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=1, max_length=100, n_examples=-1, n_hidden=64)
# clf=0.9741 4: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=4, max_length=100, n_examples=-1, n_hidden=128)
# clf=0.9733 1: get_clf0 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=3, max_length=100, n_examples=-1, n_hidden=64)
# clf=0.9721 0: get_clf0 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=4, max_length=100, n_examples=-1, n_hidden=64)
# clf=0.9708 5: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=3, max_length=100, n_examples=-1, n_hidden=128)
# RESULTS SUMMARY: 4
# auc=0.9760 0: get_clf1 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_examples=-1, n_hidden=128)
# auc=0.9730 1: get_clf0 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=100, n_examples=-1, n_hidden=64)
# auc=0.9725 2: get_clf2 ClfSpacy(dropout=0.5, learn_rate=0.001, lstm_type=2, max_length=150, n_examples=-1, n_hidden=64)
# auc=0.9670 3: get_clf3 ClfSpacy(dropout=0.2, learn_rate=0.001, lstm_type=2, max_length=100, n_examples=-1, n_hidden=64)
xprint_init(submission_name, False)
clf_list = [get_clf12, get_clf13, get_clf14, get_clf15,
# get_clf0, get_clf2, get_clf3,
get_clf4, get_clf5,
get_clf1]
auc_list = []
frozen = True
completed_tests = load_json(run_summary_path, {})
n_completed0 = len(completed_tests)
n_runs0 = 0 # min(len(v) for v in (list(completed_tests.values()) + [[]]))
print('n_completed0=%d n_runs0=%d' % (n_completed0, n_runs0))
for get_clf in clf_list:
for lstm_type in (8, 7, 6, 5, 2): # , 3, 4, 1):
for frozen in [True]: # (False, True, False):
for predict_method in PREDICT_METHODS:
xprint('#' * 80)
clf_str = str(get_clf())
xprint(clf_str)
runs = completed_tests.get(clf_str, [])
if len(runs) > n_runs0:
xprint('skipping')
continue
set_random_seed(1234)
evaluator = Evaluator(n=1)
ok, auc = evaluator.evaluate(get_clf)
auc_list.append((auc, get_clf.__name__, str(get_clf())))
show_results(auc_list)
runs.append(auc_score_list(auc))
completed_tests[clf_str] = runs
save_json(run_summary_path, completed_tests)
xprint('n_completed=%d = %d + %d' % (len(completed_tests), n_completed0,
len(completed_tests) - n_completed0))
xprint('&' * 100)
xprint('$' * 100)
|
#!/usr/bin/python3
import os
import subprocess
import docker
import utils
FNULL = open(os.devnull, 'w')
def setup(coin):
os.chdir("../docker-compose")
print("Starting " + f"{coin}_api node...")
sp = subprocess.Popen(["docker-compose", "-f", f"{coin}.yml", "-p", f"{coin}_api", "up", "--build", "-d"],
stdin=FNULL, stdout=FNULL, stderr=subprocess.PIPE)
err = sp.communicate()
if sp.returncode == 0:
print(f"{coin}_api node started")
else:
print(f"An error occurred while trying to start {coin}_api:")
print("\n")
print(err[1].decode("ascii"))
def stop(coin):
os.chdir("../docker-compose")
print("Stopping " + f"{coin}_api node...")
sp = subprocess.Popen(["docker-compose", "-f", f"{coin}.yml", "-p", f"{coin}_api", "down"],
stdin=FNULL, stdout=FNULL, stderr=subprocess.PIPE)
err = sp.communicate()
if sp.returncode == 0:
print(f"{coin}_api node stopped")
else:
print(f"An error occurred while trying to start {coin}_api:")
print("\n")
print(err[1].decode("ascii"))
def exitSetup():
print("Exiting...")
raise SystemExit
def invalid():
print("INVALID CHOICE!")
def checkIfRunning(coin):
    for container in client.containers.list():
        labels = container.attrs["Config"]["Labels"]
        if labels.get("com.docker.compose.project") == f"{coin}_api":
            return True
    return False
def listRunningContainers():
    running = []
    for container in client.containers.list():
        labels = container.attrs["Config"]["Labels"]
        project = labels.get("com.docker.compose.project")
        if project and project[:-4] not in running:
            running.append(project[:-4])
    return running
def listApis():
composes = os.listdir("../docker-compose")
return [f[:-4] for f in composes]
def getUsedPort(coin):
    for container in client.containers.list():
        labels = container.attrs["Config"]["Labels"]
        if labels.get("com.docker.compose.project") == f"{coin}_api":
            bindings = container.attrs["HostConfig"]["PortBindings"]
            if "80/tcp" in bindings:
                os.environ["PORT"] = bindings["80/tcp"][0]["HostPort"]
            if "443/tcp" in bindings:
                os.environ["SSL_PORT"] = bindings["443/tcp"][0]["HostPort"]
            return
def checkStatus():
if checkIfRunning(os.environ["COIN"].lower()):
getUsedPort(os.environ["COIN"].lower())
stop(os.environ["COIN"].lower())
else:
os.environ["PORT"] = utils.queryPort("Port to start: ")
os.environ["BLOCKCHAIN_PATH"] = utils.queryPath(os.environ["COIN"].lower())
os.environ["SSL_PORT"] = utils.queryPort("Port to start (SSL): ")
utils.askSSL()
os.environ["STAGE"] = "PRO"
setup(os.environ["COIN"].lower())
def apiChoice(coin):
os.environ["COIN"] = coin.upper()
checkStatus()
client = docker.from_env()
menu = {}
counter = 1
for coinContainer in listApis():
menu[str(counter)] = (coinContainer, apiChoice)
counter += 1
menu[str(len(listApis()) + 1)] = ("Exit", exitSetup)
for key in sorted(menu.keys()):
if menu[key][0] in listRunningContainers():
print("[RUNNING]"+"\t" + key + "." + menu[key][0])
else:
print("[OFF]"+"\t" + key + "." + menu[key][0])
ans = input("Please pick a server to start/stop (1-{options}): ".format(options=(len(listApis()) + 1)))
choice = menu.get(ans)
if choice is None:
    invalid()
elif choice[0] == "Exit":
    exitSetup()
else:
    choice[1](choice[0].upper())
|
#!/usr/bin/env python
"""Test classes for clients-related testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import codecs
import collections
import os
import platform
import subprocess
import tempfile
import types
from grr_response_client import actions
from grr_response_client.client_actions import standard
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.util import context
from grr_response_server import action_registry
from grr_response_server import server_stubs
from grr.test_lib import test_lib
from grr.test_lib import worker_mocks
class EmptyActionTest(test_lib.GRRBaseTest):
"""Test the client Actions."""
def tearDown(self):
# Reset the global last progress time to prevent order-dependent tests.
actions.ActionPlugin.last_progress_time = (
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
def RunAction(self, action_cls, arg=None, grr_worker=None):
if arg is None:
arg = rdf_flows.GrrMessage()
self.results = []
action = self._GetActionInstance(action_cls, grr_worker=grr_worker)
action.status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.OK)
action.Run(arg)
return self.results
def ExecuteAction(self,
action_cls,
arg=None,
grr_worker=None,
session_id=None):
message = rdf_flows.GrrMessage(
name=action_cls.__name__,
payload=arg,
auth_state="AUTHENTICATED",
session_id=session_id)
self.results = []
action = self._GetActionInstance(action_cls, grr_worker=grr_worker)
action.Execute(message)
return self.results
def _GetActionInstance(self, action_cls, grr_worker=None):
"""Run an action and generate responses.
This basically emulates GRRClientWorker.HandleMessage().
Args:
action_cls: The action class to run.
grr_worker: The GRRClientWorker instance to use. If not provided we make
a new one.
Returns:
The action instance, with SendReply() mocked to append replies to self.results.
"""
# A mock SendReply() method to collect replies.
def mock_send_reply(mock_self, reply=None, **kwargs):
if reply is None:
reply = mock_self.out_rdfvalues[0](**kwargs)
self.results.append(reply)
if grr_worker is None:
grr_worker = worker_mocks.FakeClientWorker()
action = action_cls(grr_worker=grr_worker)
action.SendReply = types.MethodType(mock_send_reply, action)
return action
class OSSpecificClientTests(EmptyActionTest):
"""OS-specific client action tests.
We need to temporarily disable the actionplugin class registry to avoid
registering actions for other OSes.
"""
def setUp(self):
super().setUp()
action_reg_stubber = utils.Stubber(actions.ActionPlugin, "classes", {})
action_reg_stubber.Start()
self.addCleanup(action_reg_stubber.Stop)
binary_command_stubber = utils.Stubber(standard.ExecuteBinaryCommand,
"classes", {})
binary_command_stubber.Start()
self.addCleanup(binary_command_stubber.Stop)
# pylint: disable=g-bad-name
class MockWindowsProcess(object):
"""A mock windows process."""
def __init__(self, name="cmd", pid=10, ppid=1):
self._name = name
self.pid = pid
self._ppid = ppid
def name(self):
return self._name
def ppid(self):
return self._ppid
def exe(self):
return "cmd.exe"
def username(self):
return "test"
def cmdline(self):
return ["c:\\Windows\\cmd.exe", "/?"]
def create_time(self):
return 1217061982.375000
def status(self):
return "running"
def cwd(self):
return b"X:\\RECEP\xc3\x87\xc3\x95ES"
def num_threads(self):
return 1
def cpu_times(self):
cpu_times = collections.namedtuple(
"CPUTimes", ["user", "system", "children_user", "children_system"])
return cpu_times(
user=1.0, system=1.0, children_user=1.0, children_system=1.0)
def cpu_percent(self):
return 10.0
def memory_info(self):
meminfo = collections.namedtuple("Meminfo", ["rss", "vms"])
return meminfo(rss=100000, vms=150000)
def memory_percent(self):
return 10.0
def open_files(self):
return []
def connections(self):
return []
def nice(self):
return 10
def as_dict(self, attrs=None):
"""Return mock process as dict."""
dic = {}
if attrs is None:
return dic
for name in attrs:
if hasattr(self, name):
attr = getattr(self, name)
if callable(attr):
dic[name] = attr()
else:
dic[name] = attr
else:
dic[name] = None
return dic
def oneshot(self):
return context.NullContext(None)
# pylint: enable=g-bad-name
# pylint: disable=invalid-name
class WMIWin32NetworkAdapterConfigurationMock(object):
"""Mock netadapter."""
class UnSerializable(object):
pass
Caption = "[000005] Intel Gigabit Network Connection"
DatabasePath = "%SystemRoot%\\System32\\drivers\\etc"
DefaultIPGateway = ["192.168.1.254", "fe80::211:5eaa:fe00:222"]
Description = "Intel Gigabit Network Connection"
DHCPEnabled = True
DHCPLeaseExpires = "20140825162259.123456-420"
DHCPLeaseObtained = "20140825122259.123456-420"
DHCPServer = "192.168.1.1"
DNSDomain = "internal.example.com"
DNSDomainSuffixSearchOrder = [
"blah.example.com", "ad.example.com", "internal.example.com",
"example.com"
]
DNSEnabledForWINSResolution = False
DNSHostName = "MYHOST-WIN"
DNSServerSearchOrder = ["192.168.1.1", "192.168.255.81", "192.168.128.88"]
DomainDNSRegistrationEnabled = False
FullDNSRegistrationEnabled = True
GatewayCostMetric = [0, 256]
Index = 7
InterfaceIndex = 11
IPAddress = [
"192.168.1.20", "ffff::ffff:aaaa:1111:aaaa",
"dddd:0:8888:6666:bbbb:aaaa:eeee:bbbb",
"dddd:0:8888:6666:bbbb:aaaa:ffff:bbbb"
]
IPConnectionMetric = 10
IPEnabled = True
IPFilterSecurityEnabled = False
IPSecPermitIPProtocols = []
IPSecPermitTCPPorts = []
IPSecPermitUDPPorts = []
IPSubnet = ["255.255.254.0", "192", "168", "1"]
MACAddress = "BB:AA:EE:CC:DD:CC"
ServiceName = "e1e"
SettingID = "{AAAAAAAA-EEEE-DDDD-AAAA-CCCCCCCCCCCC}"
TcpipNetbiosOptions = 0
WINSEnableLMHostsLookup = True
WINSScopeID = ""
NestingTest = {
"one": {
"two": [3, 4],
"broken": UnSerializable(),
"three": {}
},
"four": [],
"five": "astring",
"six": [None, None, ""],
"seven": None,
"rdfvalue": rdf_protodict.Dict(a="asdf")
}
OpaqueObject = UnSerializable()
# pylint: enable=invalid-name
class Popen(object):
"""A mock object for subprocess.Popen."""
def __init__(self, run, stdout, stderr, stdin, env=None, cwd=None):
del env, cwd # Unused.
Popen.running_args = run
Popen.stdout = stdout
Popen.stderr = stderr
Popen.stdin = stdin
Popen.returncode = 0
try:
# Store the content of the executable file.
Popen.binary = open(run[0], "rb").read()
except IOError:
Popen.binary = None
def communicate(self): # pylint: disable=g-bad-name
return b"stdout here", b"stderr here"
class Test(server_stubs.ClientActionStub):
"""A test action which can be used in mocks."""
in_rdfvalue = rdf_protodict.DataBlob
out_rdfvalues = [rdf_protodict.DataBlob]
action_registry.RegisterAdditionalTestClientAction(Test)
def import_to_registry(data):
r"""Imports the given data to the Registry.
Example:
Windows Registry Editor Version 5.00
[HKEY_LOCAL_MACHINE\SOFTWARE\GRR_TEST]
"foobar"=dword:0
"bar":"baz"
Args:
data: a unicode string of Registry data, following the .REG-format.
See https://en.wikipedia.org/wiki/Windows_Registry#.REG_files
Raises:
RuntimeError: if the function is not called under Windows.
"""
if platform.system() != "Windows":
raise RuntimeError("import_to_registry can only be called under Windows.")
# NamedTemporaryFile cannot be used on Win, because the file is locked while
# open for writes and deleted as soon as it is closed.
filehandle, filename = tempfile.mkstemp(suffix=".reg")
try:
with os.fdopen(filehandle, "wb") as fd:
fd.write(codecs.BOM_UTF16_LE)
fd.write(data.encode("utf_16_le"))
except:
raise
else:
subprocess.check_call(["REG", "IMPORT", filename])
finally:
os.unlink(filename)
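# Usage sketch (illustrative, not part of the original module): building a
# minimal .REG payload and importing it on a Windows test machine. The key
# path and values below mirror the docstring example and are assumptions.
#
#     reg_data = u"\r\n".join([
#         u"Windows Registry Editor Version 5.00",
#         u"",
#         u"[HKEY_LOCAL_MACHINE\\SOFTWARE\\GRR_TEST]",
#         u'"foobar"=dword:00000000',
#         u'"bar"="baz"',
#         u"",
#     ])
#     import_to_registry(reg_data)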
|
/anaconda/lib/python3.6/tempfile.py
|
# vim: fileencoding=utf8
import logging
import re
import amf, amf.utils
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.db.models.query import QuerySet
from django.core import exceptions, urlresolvers
class AMFMiddleware(object):
CONTENT_TYPE = 'application/x-amf'
AUTO_MAPPING_VIEW_NAME = 'amf.django.views'
def __init__(self):
"""Initialize AMFMiddleware."""
self.init_logger()
self.init_class_mapper()
self.init_timezone()
self.gateway_path = getattr(settings, 'AMF_GATEWAY_PATH', '/gateway/')
if not self.gateway_path:
msg = "'AMF_GATEWAY_PATH' is not set in 'settings.py'"
self.logger.fatal(msg)
raise AttributeError, msg
self.matcher = re.compile(r"^%s.+" % (self.gateway_path))
self.logger.debug("AMFMiddleware initialization was done.")
def init_timezone(self):
amf.utils.timeoffset = getattr(settings, 'AMF_TIME_OFFSET', None)
def init_class_mapper(self):
mapper_name = getattr(settings, 'AMF_CLASS_MAPPER', None)
if mapper_name:
self.logger.debug("Init AMF class mappings.")
try:
mapper = amf.utils.get_module(mapper_name)
except ImportError:
msg = "AMF_CLASS_MAPPER module is not found. [module='%s']" % (mapper_name,)
self.logger.fatal(msg)
raise ImportError(msg)
if mapper and getattr(mapper, 'amf_class_mappings', False):
mappings = mapper.amf_class_mappings()
if isinstance(mappings, dict):
amf.utils.class_mappings = mappings
else:
msg = "The return value of amf_class_mappings() is not a dictionary type. [type='%s']" % (type(mappings),)
self.logger.fatal(msg)
raise TypeError(msg)
else:
msg = "'amf_class_mappings' function is not defined in AMF_CLASS_MAPPER module. [module='%s']" % (mapper_name,)
self.logger.fatal(msg)
raise AttributeError(msg)
def init_logger(self):
"""Build custom logger instance for AMF handling."""
# TODO: Logging time is not a local time
file = getattr(settings, 'AMF_LOG_FILE', None)
mode = getattr(settings, 'AMF_LOG_FILE_MODE', 'a')
encoding = getattr(settings, 'AMF_LOG_FILE_ENCODING', 'utf8')
loglevel = getattr(settings, 'AMF_LOG_LEVEL', 'INFO')
logger = logging.getLogger('AMF')
logger.setLevel(logging.__dict__[str(loglevel).upper()])
if file:
handler = logging.FileHandler(filename=file, mode=mode, encoding=encoding)
formatter = logging.Formatter('%(asctime)s - %(name)s %(filename)s(%(lineno)d) [%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
#amf.set_logger(logger)
self.logger = logger
def process_request(self, request):
if self.matcher.match(request.path): # Invalid access
return HttpResponseForbidden()
elif request.method == 'POST' and request.path == self.gateway_path and (request.META.get('HTTP_CONTENT_TYPE') == AMFMiddleware.CONTENT_TYPE or request.META.get('CONTENT_TYPE') == AMFMiddleware.CONTENT_TYPE):
request_message = amf.read(request.raw_post_data)
#if request_message.use_cache: # Get cached data
# key = request_message._get_cache_key()
# if key:
# from django.core.cache import cache
# cached_response = cache.get(key)
# if cached_response: # If cached data found, return it
# return cached_response
response_message = amf.AMFMessage()
response_message.version = request_message.version
self.set_credentials(request_message, request)
for request_body in request_message.bodies:
res_body = self.process_request_message_body(request, request_body)
response_message.add_body(res_body)
response_data = amf.write(response_message)
response = HttpResponse(response_data, AMFMiddleware.CONTENT_TYPE)
#if request_message.use_cache: # Cache response data
# key = request_message._get_cache_key()
# if key:
# from django.core.cache import cache
# cache.set(key, response, request_message.cache_timeout)
return response
def process_request_message_body(self, request, request_body):
path = request.path + request_body.service_method_path
resolver = urlresolvers.RegexURLResolver(r'^/', settings.ROOT_URLCONF)
try:
callback, callback_args, callback_kwargs = resolver.resolve(path)
# Find callback method for auto method mapping.
if callback.__module__ + '.' + callback.__name__ == self.AUTO_MAPPING_VIEW_NAME:
callback = self.find_callback_method(callback_args, callback_kwargs)
except Exception, e:
result = {'description':"Cannot find a view for the path ['%s'], %s" % (path, e),
'details':"Cannot find a view for the path ['%s'], %s" % (path, e),
'type':'AMFRuntimeException',
'code':0}
res_target = request_body.response + amf.RESPONSE_STATUS
else:
try:
result = callback(request, *request_body.args)
# Convert Django's QuerySet object into a list of its contained objects.
if isinstance(result, QuerySet): result = list(result)
res_target = request_body.response + amf.RESPONSE_RESULT
except amf.AMFAuthenticationError, e:
msg = "Exception was thrown when executing remoting method.(%s) [method=%s, args=%s]" % (e, callback.__module__ + "." + callback.__name__, repr(request_body.args))
result = {'description':str(e), 'details':msg, 'type':e.__class__.__name__, 'code':401,}
res_target = request_body.response + amf.RESPONSE_STATUS
except Exception, e:
msg = "Exception was thrown when executing remoting method.(%s) [method=%s, args=%s]" % (e, callback.__module__ + "." + callback.__name__, repr(request_body.args))
# TODO: This can bring about UnicodeDecodeError in logging module
#amf.logger.error(msg)
result = {'description':str(e), 'details':msg, 'type':'AMFRuntimeException', 'code':500,}
res_target = request_body.response + amf.RESPONSE_STATUS
return amf.AMFMessageBody(res_target, '', result)
def find_callback_method(self, callback_args, callback_kwargs):
mod_name = callback_kwargs['views']
method_name = callback_args[0]
self.logger.debug("Using auto method mapping. [module='%s', method='%s']", mod_name, method_name)
mod = amf.utils.get_module(mod_name)
callback = getattr(mod, method_name)
return callback
def set_credentials(self, request_message, request):
"""
If the request amf message has headers for credentials, set them to
the given request object.
The request object has an attribute named 'amfcredentials' which is a
dict holding two keys, 'username' and 'password'.
request.amfcredentials.get('username')
request.amfcredentials.get('password')
"""
username = request_message.get_header("credentialsUsername")
password = request_message.get_header("credentialsPassword")
if username is not None and password is not None:
request.amfcredentials = {'username':username, 'password':password}
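# Configuration sketch (illustrative, not part of this module): the settings
# read above via getattr(settings, ...) would typically be declared in the
# project's settings.py. The concrete values shown here are assumptions.
#
#     AMF_GATEWAY_PATH = '/gateway/'                 # URL the Flash client posts AMF messages to
#     AMF_TIME_OFFSET = None                         # optional offset applied to AMF date values
#     AMF_CLASS_MAPPER = 'myproject.amf_mappings'    # module defining amf_class_mappings()
#     AMF_LOG_FILE = '/var/log/django/amf.log'
#     AMF_LOG_FILE_MODE = 'a'
#     AMF_LOG_FILE_ENCODING = 'utf8'
#     AMF_LOG_LEVEL = 'DEBUG'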
|
import os
from geckordp.settings import *
import geckordp.settings
# pylint: disable=invalid-name
# check if environment variables are set and override it
VAR_ID = "_Settings__X"
for name, value in GECKORDP.__dict__.items():
# check if correct variable
if (not name.startswith(VAR_ID)):
continue
# get required variables
func_name = name.replace(VAR_ID, "")
env_name = name.replace(VAR_ID, "GECKORDP_")
env_value = os.environ.get(env_name, None)
func = getattr(Settings, func_name)
# check if environment variable was set
if (env_value is None):
continue
# try to convert value to class variable type
try:
env_value = type(value)(env_value)
except Exception as ex:
print(f"invalid type for environment variable '{env_name}':\n{ex}")
continue
# change value by calling setter property
func.fset(GECKORDP, env_value)
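# Usage sketch (illustrative): any Settings property exposed on GECKORDP can be
# overridden by exporting "GECKORDP_<PROPERTY>" before this module is imported;
# the loop above converts the environment string to the property's type and
# calls its setter. The DEBUG property name used below is an assumption.
#
#     os.environ["GECKORDP_DEBUG"] = "1"   # set before importing this module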
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""PID fetchers for Invenio-OpenDefinition."""
from __future__ import absolute_import, print_function
from invenio_pidstore.fetchers import FetchedPID
def license_fetcher(record_uuid, data):
"""Fetch PID from license record."""
return FetchedPID(
provider=None,
pid_type='od_lic',
pid_value=str(data['id'])
)
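# Usage sketch (illustrative, not part of the original module): the fetcher only
# needs a record-like mapping with an 'id' key; the record UUID is not used to
# build the PID value. The license id below is an assumption.
#
#     pid = license_fetcher(record_uuid=None, data={'id': 'CC-BY-4.0'})
#     assert pid.pid_type == 'od_lic' and pid.pid_value == 'CC-BY-4.0'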
|
"""models.py - This file contains the class definitions for the Datastore
entities used by the Game."""
from protorpc import messages
from google.appengine.ext import ndb
class User(ndb.Model):
"""User profile"""
name = ndb.StringProperty(required=True)
email = ndb.StringProperty()
wins = ndb.IntegerProperty(default=0)
total_games = ndb.IntegerProperty(default=0)
@property
def win_percentage(self):
"""Return user win percentage"""
if self.total_games > 0:
return float(self.wins)/float(self.total_games)
else:
return 0.0
def to_form(self):
"""Returns a UserForm representation of the User"""
form = UserForm()
form.user_name = self.name
form.email = self.email
form.wins = self.wins
form.total_games = self.total_games
form.win_percentage = self.win_percentage
return form
def add_win(self):
"""Add a win"""
self.wins += 1
self.total_games += 1
self.put()
def add_loss(self):
"""Add a loss"""
self.total_games += 1
self.put()
class Game(ndb.Model):
"""Game object"""
game_over = ndb.BooleanProperty(required=True, default=False)
user = ndb.KeyProperty(required=True, kind='User')
history = ndb.StringProperty(repeated=True)
@classmethod
def new_game(cls, user):
"""Creates and returns a new game"""
game = Game(user=user, game_over=False, history=[])
game.put()
return game
def to_form(self, message):
"""Returns a GameForm representation of the Game"""
form = GameForm()
form.urlsafe_key = self.key.urlsafe()
form.user_name = self.user.get().name
form.game_over = self.game_over
form.message = message
return form
def end_game(self, game='', message='',
player_hand='', computer_hand='', won=False):
"""Ends the game - if won is True, the player won. - if won is False,
the player lost."""
if player_hand != computer_hand:
self.game_over = True
self.put()
if won:
self.user.get().add_win()
else:
self.user.get().add_loss()
# Add the game to the score 'board'
score = Score(user=self.user, game=game, message=message, won=won,
player_hand=player_hand, computer_hand=computer_hand)
score.put()
class Score(ndb.Model):
"""Score object"""
user = ndb.KeyProperty(required=True, kind='User')
game = ndb.StringProperty(required=True)
message = ndb.StringProperty(required=True)
player_hand = ndb.StringProperty(required=True)
computer_hand = ndb.StringProperty(required=True)
won = ndb.BooleanProperty(required=True)
def to_form(self):
return ScoreForm(user_name=self.user.get().name,
game=self.game,
message=self.message,
player_hand=self.player_hand,
computer_hand=self.computer_hand,
won=self.won)
class GameForm(messages.Message):
"""GameForm for outbound game state information"""
urlsafe_key = messages.StringField(1, required=True)
game_over = messages.BooleanField(2, required=True)
message = messages.StringField(3, required=True)
user_name = messages.StringField(4, required=True)
class GameForms(messages.Message):
"""Return multiple GameForms"""
items = messages.MessageField(GameForm, 1, repeated=True)
class NewGameForm(messages.Message):
"""Used to create a new game"""
user_name = messages.StringField(1, required=True)
class MakeMoveForm(messages.Message):
"""Used to make a move in an existing game"""
hand = messages.StringField(1, required=True)
class ScoreForm(messages.Message):
"""ScoreForm for outbound Score information"""
user_name = messages.StringField(1, required=True)
game = messages.StringField(2, required=True)
message = messages.StringField(3, required=True)
player_hand = messages.StringField(4, required=True)
computer_hand = messages.StringField(5, required=True)
won = messages.BooleanField(6, required=True)
class ScoreForms(messages.Message):
"""Return multiple ScoreForms"""
items = messages.MessageField(ScoreForm, 1, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
message = messages.StringField(1, required=True)
class UserForm(messages.Message):
"""UserForm for outbound User information"""
user_name = messages.StringField(1, required=True)
email = messages.StringField(2)
wins = messages.IntegerField(3, required=True)
total_games = messages.IntegerField(4, required=True)
win_percentage = messages.FloatField(5, required=True)
class UserForms(messages.Message):
"""Return multiple UserForms"""
items = messages.MessageField(UserForm, 1, repeated=True)
class HistoryForm(messages.Message):
"""HistoryForm for outbound History information"""
items = messages.StringField(1, repeated=True)
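# Usage sketch (illustrative, assumes an active App Engine ndb context): create
# a user and a game, then record a completed round. The hand values are
# assumptions; end_game() only closes the game when the hands differ.
#
#     user_key = User(name='alice', email='alice@example.com').put()
#     game = Game.new_game(user_key)
#     game.end_game(game='rps', message='Computer wins', player_hand='rock',
#                   computer_hand='paper', won=False)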
|
from django.shortcuts import render, get_object_or_404
from .models import Job
# Create your views here.
def home(request):
    jobs = Job.objects
    return render(request, 'jobs/home.html', {'jobs': jobs})
def detail(request, job_id):
    job_detail = get_object_or_404(Job, pk=job_id)
    return render(request, 'jobs/detail.html', {'job': job_detail})
|
import numpy as np
import scipy.stats as stat
from utils import dichotomic_search
""" Implementation of last particle variant """
def ImportanceSplittingLp(gen,kernel,h,tau=0,N=100,s=0.1,decay=0.9,T = 20, accept_ratio = 0.9,
alpha_est = 0.95, alpha_test=0.99,verbose=1, gain_thresh=0.01, check_every=3, p_c = 10**(-20),n_max = int(10**6),
reject_forget_rate =0, gain_forget_rate=0, reject_thresh=0.005):
"""
Importance splitting last particle estimator, i.e. the importance splitting algorithm with K=N-1
Args:
gen: generator of iid samples X_i [fun]
kernel: mixing kernel invariant to f_X [fun]
h: score function from gaussian vector [fun]
tau: threshold. The rare events are defined as h(X)>tau_j [tx1]
N: number of samples [1x1] (100)
s: strength of the kernel [1x1] (0.1)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (10**6)
check_every: print progress every check_every iterations [1x1] (3)
decay: decay rate of the strength [1x1] (0.9)
accept_ratio: lower bound of accept ratio [1x1] (0.5)
alpha_est: level of the confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (0)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
-s_out['CI_est']: estimated confidence interval
-s_out['Xrare']: Examples of the rare event
-s_out['result']: Result of the estimation/hypothesis testing process
"""
# Internals
q = -stat.norm.ppf((1-alpha_est)/2) # gaussian quantile
#d =gen(1).shape[-1] # dimension of the random vectors
k = 1 # Number of iterations
p = (N-1)/N
confidence_level_m = lambda y :stat.gamma.sf(-np.log(p_c),a=y, scale =1/N)
m, _ = dichotomic_search(f = confidence_level_m, a=100, b=n_max, thresh=alpha_test)
m = int(m)+1
if verbose:
print(f"Starting Last Particle algorithm with {m}, to certify p<p_c={p_c}, with confidence level alpha ={1-alpha_test}.")
if m>=n_max:
raise AssertionError(f"Confidence level requires more than n_max={n_max} iterations... increase n_max ?")
tau_j = -np.inf
P_est = 0
Var_est = 0
CI_est = np.zeros((2))
kernel_pass=0
Count_accept = 0
check=0
## Init
# step A0: generate & compute scores
X = gen(N) # generate N samples
SX = h(X) # compute their scores
Count_h = N # Number of calls to function h
reject_rate = 0
avg_gain=0
#step B: find new threshold
## While
while (k<=m):
#find new threshold
i_dead = np.argmin(SX,axis = None) # index of the lowest-scoring ("last") particle
#print(SX[i_dead], tau_j )
if tau_j!=-np.inf:
gain = np.abs((SX[i_dead]-tau_j)/tau_j)
else:
gain=0
gamma = 1+gain_forget_rate*(k-1)
avg_gain = (1-gamma/k)*avg_gain + (gamma/k)*gain
if k>1 and avg_gain<gain_thresh and reject_rate<reject_thresh:
s = s/decay
if verbose>=1 and check%check_every==0:
print('Strength of kernel increased!')
print(f's={s}')
tau_j = SX[i_dead] # set the threshold to the last particle's score
if tau_j>tau:
P_est= p**(k-1)
break #it is useless to compute new minimum if desired level has already been reached
if verbose>=1 and check%check_every==0:
print('Iter = ',k, ' tau_j = ', tau_j, " Calls = ", Count_h)
check+=1
# Refresh samples
i_new = np.random.choice(list(set(range(N))-set([i_dead])))
z0 = X[i_new,:]
sz0 = SX[i_new]
for t in range(T):
w = kernel(z0,s)
sw = h(w)
if sw>=tau_j:
z0 = w
sz0 = sw
Count_accept+=1
X[i_dead,:] = z0
SX[i_dead] = sz0
Count_h+=T
gamma = T+reject_forget_rate*kernel_pass
reject_rate = (1-gamma/(kernel_pass+T))*reject_rate + gamma*(1-Count_accept/T)/(kernel_pass+T)
if check%check_every==0 and verbose>=1:
print(f'Accept ratio:{Count_accept/T}')
print(f'Reject rate:{reject_rate}')
kernel_pass+=T
if reject_rate > (1-accept_ratio):
s = s*decay
if verbose>=1 and check%check_every==0:
print('Strength of kernel diminished!')
print(f's={s}')
Count_accept = 0
k += 1 # increase iteration number
if tau_j>tau:
Var_est = P_est**2*(P_est**(-1/N)-1)
CI_est[0] = P_est*np.exp(-q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)
CI_est[1] = P_est*np.exp(q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)
s_out = {'Var_est':Var_est,'CI_est':CI_est,'Iter':k,'Calls':Count_h,'Sample size':N}
s_out['Cert']=False
s_out['Xrare'] = X
else:
s_out = {'Var_est':None, 'CI_est':[0,p_c],'Iter':k,'Calls':Count_h,'Sample size':N}
P_est = p_c
s_out['Cert']=True
s_out['Xrare']= None
return P_est, s_out
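# Usage sketch (illustrative, not from the original code): estimating
# P(X_1 > tau) for X ~ N(0, I_d) with the last-particle estimator above. The
# preconditioned Crank-Nicolson style kernel below leaves the standard Gaussian
# invariant, as the algorithm requires; it assumes the strength s stays in (0, 1).
#
#     d = 10
#     gen = lambda n: np.random.randn(n, d)                 # iid N(0, I_d) samples
#     kernel = lambda z, s: np.sqrt(1.0 - s**2) * z + s * np.random.randn(*np.shape(z))
#     h = lambda x: x[..., 0]                               # score = first coordinate
#     P_est, s_out = ImportanceSplittingLp(gen, kernel, h, tau=4.0, N=100, p_c=1e-12)
#     print(P_est, s_out['CI_est'])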
def ImportanceSplittingLpBatch(gen,kernel_b,h,h_big,nb_system=5,d=784,tau=0,N=100,s=0.1,decay=0.92,T = 20, accept_ratio = 0.9,
alpha_est = 0.95, alpha_test=0.99,verbose=1, gain_thresh=0.01, check_every=3, p_c = 10**(-20),n_max = int(10**6),
reject_forget_rate =0, gain_forget_rate=0, reject_thresh=0.005,fast_decay=True, fast_d=1):
"""
Importance splitting last particle estimator, i.e. the importance splitting algorithm with K=N-1
with several particle systems.
Args:
gen: generator of iid samples X_i [fun]
kernel_b: mixing kernel invariant to f_X, applied batch-wise across systems [fun]
h: score function from gaussian vector [fun]
tau: threshold. The rare events are defined as h(X)>tau_j [tx1]
N: number of samples [1x1] (100)
s: strength of the kernel [1x1] (0.1)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (10**6)
check_every: print progress every check_every iterations [1x1] (3)
decay: decay rate of the strength [1x1] (0.9)
accept_ratio: lower bound of accept ratio [1x1] (0.5)
alpha_est: level of the confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (0)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
-s_out['CI_est']: estimated confidence interval
-s_out['Xrare']: Examples of the rare event
-s_out['result']: Result of the estimation/hypothesis testing process
"""
q = -stat.norm.ppf((1-alpha_est)/2) # gaussian quantile
s_b = s*np.ones(nb_system)
k = 1 # Number of iterations
p = (N-1)/N
confidence_level_m = lambda y :stat.gamma.sf(-np.log(p_c),a=y, scale =1/N)
m, _ = dichotomic_search(f = confidence_level_m, a=100, b=n_max, thresh=alpha_test)
m = int(m)+1
if verbose:
print(f"Starting Last Particle algorithm with {m}, to certify p<p_c={p_c}, with confidence level alpha ={1-alpha_test}.")
if m>=n_max:
raise AssertionError(f"Confidence level requires more than n_max={n_max} iterations... increase n_max ?")
tau_j = np.array(nb_system*[-np.inf])
is_done = np.zeros(nb_system)
done_k = -np.ones(nb_system)
kernel_pass= 0
Count_accept = np.zeros(nb_system)
check=0
X = gen(nb_system*N).reshape((nb_system,N,d)) # generate N*nb_system samples
SX = h_big(X.reshape((nb_system*N,d))).reshape((nb_system,N)) # compute their scores
Count_h = nb_system*N # Number of calls to function h
reject_rate = np.zeros(nb_system)
avg_gain= np.zeros(nb_system)
Xrare = -np.ones((nb_system,N,d))
nb_system_c = nb_system #current number, as systems can get deleted as algorithm goes
real_indices = np.arange(nb_system) #keeping track of initial systems indices as systems gets deleted
local_indices = np.arange(nb_system_c)
while (k<=m):
#find new threshold
i_deads = np.argmin(SX,axis = 1) # index of the lowest-scoring particle in each system
#we switch the 'last' particle in terms of score and the first particle as indices go, for simplicity
tempXs, tempSs = np.array(X[:,0],copy=True), np.array(SX[:,0],copy=True)
X[:,0], SX[:,0] = X[local_indices,i_deads],SX[local_indices,i_deads]
X[local_indices,i_deads],SX[local_indices,i_deads] = tempXs, tempSs
del tempSs, tempXs
#print(SX[i_dead], tau_j )
if k>1:
gain = np.abs((SX[local_indices, i_deads]-tau_j[None])/tau_j[None])
else:
gain=np.zeros(nb_system_c)
gamma = 1+gain_forget_rate*(k-1)
avg_gain = (1-gamma/k)*avg_gain + (gamma/k)*gain
if k>1:
is_too_low = (avg_gain<gain_thresh) * (reject_rate<reject_thresh)
if is_too_low.sum()>0:
s_b = s_b/decay*is_too_low+s_b*(1-is_too_low)
s_b = s_b.reshape(-1)
if verbose>=1 and check%check_every==0:
print('Strengths of kernels updated!')
print(f's_b={s_b}')
tau_j = SX[:,0] # set the thresholds to the last particles' scores
if (tau_j>tau).sum()>0:
is_over = np.where(tau_j>tau)[0]
if verbose:
print(f"System(s):{is_over} reached required level.")
#we need to kill systems that have reached required level, while taking this into account for the real systems indices
is_done[real_indices[is_over]],done_k[real_indices[is_over]]=1,k
if is_done.sum()==nb_system:
break #if all the systems have reached the final level we can stop the iterations there
nb_system_c-=len(is_over)
local_indices = np.arange(nb_system_c)
Xrare[is_over] = X[is_over]
X,SX = np.delete(X,is_over, axis=0),np.delete(SX,is_over, axis=0)
gain, avg_gain,tau_j = np.delete(gain,is_over), np.delete(avg_gain,is_over), np.delete(tau_j,is_over)
reject_rate, Count_accept = np.delete(reject_rate,is_over), np.delete(Count_accept,is_over)
real_indices = np.delete(real_indices,is_over)
s_b = np.delete(s_b ,is_over)
if verbose>=1 and check%check_every==0:
print('Iter = ',k, ' tau_j = ', tau_j, " Calls = ", Count_h)
check+=1
# Refresh samples
i_news = np.random.choice(range(1,N),size=nb_system_c)
z0s = X[local_indices,i_news]
sz0s = SX[local_indices,i_news]
for _ in range(T):
w = kernel_b(z0s,s_b) #kernel_b must take into account the number of systems and different strengths
sw = h(w, real_indices)
is_good_move = sw>=tau_j
z0s,sz0s = z0s*(1-is_good_move)[:,None] + is_good_move[:,None]*w, sz0s *(1-is_good_move) + is_good_move*sw
Count_accept = Count_accept + is_good_move
X[:,0] = z0s
SX[:,0] = sz0s
del z0s, sz0s
Count_h+=T*nb_system_c
gamma = T+reject_forget_rate*kernel_pass
reject_rate = (1-gamma/(kernel_pass+T))*reject_rate + gamma*(1-Count_accept/T)/(kernel_pass+T)
if check%check_every==0 and verbose>=1:
print(f'Accept ratios (local averages):{Count_accept/T}')
print(f'Reject rates (moving averages):{reject_rate}')
kernel_pass+=T
is_zero_accept = Count_accept==0
is_too_high = reject_rate > (1-accept_ratio)
if is_too_high.sum()>0:
s_b = s_b*decay*is_too_high+s_b*(1-is_too_high)
s_b = s_b.reshape(-1)
if fast_decay:
s_b = s_b*decay**fast_d*is_zero_accept+(1-is_zero_accept)*s_b
if verbose>=1 and check%check_every==0:
print('Strengths of kernel updated!')
print(f's_b={s_b}')
Count_accept = np.zeros(nb_system_c)
k += 1 # increase iteration number
if is_done.sum()>0:
P_est = p**(done_k-1)*is_done+(1-is_done)*p_c
Var_est = is_done*P_est**2*(P_est**(-1/N)-1)-(1-is_done)
CI_est = np.zeros((nb_system,2))
CI_est[:,0] = is_done*(P_est*np.exp(-q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N))
CI_est[:,1] = is_done*(P_est*np.exp(q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)) + (1-is_done)*p_c
cert_ = 1-is_done
s_out ={'Var_est':Var_est,'CI_est':CI_est,'Iter':k,'Calls':Count_h,'Sample size':N,'Cert':cert_}
s_out['Xrare'] = Xrare
else:
s_out = {'Var_est': -np.ones(nb_system), 'CI_est':np.array(nb_system*[[0,p_c]]),'Iter':k,'Calls':Count_h,'Sample size':N} # one [0, p_c] interval per system
s_out['Cert']= np.array([True]*nb_system)
s_out['Xrare']= None
P_est = np.array(nb_system*[p_c])
return P_est, s_out
def ImportanceSplitting(gen,kernel,h,tau,N=2000,K=1000,s=1,decay=0.99,T = 30,n_max = 300, alpha = 0.95,
verbose=1, track_rejection=False, rejection_ctrl = False, rej_threshold=0.9, gain_rate = 1.0001,
prog_thresh=0.01):
"""
Importance splitting estimator
Args:
gen: generator of iid samples X_i [fun]
kernel: mixing kernel invariant to f_X [fun]
h: score function [fun]
tau: threshold. The rare event is defined as h(X)>tau [1x1]
N: number of samples [1x1] (2000)
K: number of survivors [1x1] (1000)
s: strength of the mixing kernel [1x1] (1)
decay: decay rate of the strength of the kernel [1x1] (0.9)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (200)
alpha: level of confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (1)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
-s_out['CI_est']: estimated confidence interval
-s_out['Xrare']: Examples of the rare event
"""
# Internals
q = -stat.norm.ppf((1-alpha)/2) # gaussian quantile
d =gen(1).shape[-1] # dimension of the random vectors
n = 1 # Number of iterations
## Init
# step A0: generate & compute scores
X = gen(N) # generate N samples
SX = h(X) # compute their scores
Count_h = N # Number of calls to function h
#step B: find new threshold
ind = np.argsort(SX,axis=None)[::-1] # sort in descending order
S_sort= SX[ind]
tau_j = S_sort[K] # set the threshold to (K+1)-th
h_mean = SX.mean()
if verbose>=1:
print('Iter = ',n, ' tau_j = ', tau_j, "h_mean",h_mean, " Calls = ", Count_h)
rejection_rate=0
kernel_pass=0
rejection_rates=[0]
## While
while (n<n_max) and (tau_j<tau):
n += 1 # increase iteration number
if n >=n_max:
raise RuntimeError('The estimator failed. Increase n_max?')
# step C: Keep K highest scores samples in Y
Y = X[ind[0:K],:]
SY = SX[ind[0:K]] # Keep their scores in SY
# step D: refresh samples
Z = np.zeros((N-K,d))
SZ = np.zeros((N-K,1))
for k in range(N-K):
u = np.random.choice(range(K),size=1,replace=False) # pick a sample at random in Y
z0 = Y[u,:]
sz0 = SY[u] # initialize the score so it is defined even if no proposal is accepted
accept_flag = False
for t in range(T):
w = kernel(z0,s) # propose a refreshed sample
kernel_pass+=1
sw = h(w) # compute its score
Count_h = Count_h + 1
if sw>tau_j: # accept if true
z0 = w
sz0 = sw
accept_flag = True # monitor if accepted
elif track_rejection:
rejection_rate=((kernel_pass-1.)/kernel_pass)*rejection_rate+(1/kernel_pass)
Z[k,:] = z0 # a fresh sample
SZ[k] = sz0 # its score
if rejection_ctrl and rejection_rate>=rej_threshold:
print('Strength of kernel diminished!')
s = s*decay
print(f's={s}')
if not accept_flag:
s = s * decay # decrease the strength of the mixing kernel
# step A: update set X and the scores
X[:K,:] = Y # copy paste the old samples of Y into X
SX[:K] = SY
X[K:N,:] = Z # copy paste the new samples of Z into X
SX[K:N] = SZ
# step B: Find new threshold
ind = np.argsort(SX,axis=None)[::-1] # sort in descending order
S_sort= SX[ind]
new_tau = S_sort[K]
if (new_tau-tau_j)/tau_j<prog_thresh:
s = s*gain_rate
print('Strength of kernel increased!')
print(f's={s}')
tau_j = S_sort[K] # set the threshold to (K+1)-th
h_mean = SX.mean()
if verbose>=1:
print('Iter = ',n, ' tau_j = ', tau_j, "h_mean",h_mean, " Calls = ", Count_h)
if track_rejection:
print(f'Rejection rate: {rejection_rate}')
rejection_rates+=[rejection_rate]
# step E: Last round
K_last = (SX>=tau).sum() # count the nb of score above the target threshold
#Estimation
p = K/N
p_last = K_last/N
P_est = (p**(n-1))*p_last
Var_est = (P_est**2)*((n-1)*(1-p)/p + (1-p_last)/p_last)/N
P_bias = P_est*n*(1-p)/p/N
CI_est = P_est*np.array([1,1]) + q*np.sqrt(Var_est)*np.array([-1,1])
Xrare = X[(SX>=tau).reshape(-1),:]
s_out = {"Var_est":Var_est,"CI_est": CI_est,"N":N,"K":K,"s":s,"decay":decay,"T":T,"Count_h":Count_h,
"P_bias":P_bias,"n":n,"Xrare":Xrare}
if track_rejection:
s_out["rejection_rates"]=np.array(rejection_rates)
s_out["Avg. rejection rate"]=rejection_rate
return P_est,s_out
|
from __future__ import division
import pymol
from pymol import cmd, stored
from pymol.cgo import *
import numpy as np
import time
from utils import get_glyco_bonds_within_chain_and_model, writer
def elapsed(starting, s):
e = time.time() - starting
print(s, e)
return e
def find_rings(resn_list, chain, model):
"""determine wich atoms define the sugar rings"""
matrix_rings = []
start = time.time()
for resi in resn_list:
ring = []
# identify the oxygens that belong to the ring
o_atm = cmd.get_model(f'model {model} and chain {chain} and resi {resi} and name C1 extend 1 and (resi {resi} and name O* and not name O1*)')
try:
ring.append(o_atm.atom[0].name)
except IndexError:
continue
for carbon in range(1, 10):
if cmd.select(
'tmp', 'not hydrogen and (neighbor (model %s and chain %s and resi %s and name c%s ))' %
(model, chain, resi, carbon)) > 2:
ring.append('C%s' % carbon)
while True:
if cmd.select('tmp', 'model %s and chain %s and resi %s and name %s extend 1 and name %s' % (
model, chain, resi, ring[0], ring[-1])) == 0:
ring.pop()
else:
break
matrix_rings.append(ring)
return matrix_rings
def get_ring_coords(resn_list, matrix, chain, model):
"""obtain coordinates of sugar rings"""
matrix_coords = []
for state in range(1, cmd.count_states() + 1):
coords = []
for i, resi in enumerate(resn_list):
stored.coords = []
try:
cmd.iterate_state(state, 'model %s and chain %s and resi %s and name %s' % (
model, chain, resi, '+'.join(matrix[i])), 'stored.coords.append([x,y,z])')
coords.append(stored.coords)
except IndexError:
continue
matrix_coords.append(coords)
return matrix_coords
def get_bonds_coords(resn_list, matrix, chain, model):
"""obtain coordinates of the atoms in the glycosidic bond"""
matrix_coords = []
for state in range(1, cmd.count_states() + 1):
coords = []
for bond in matrix:
stored.pos = []
#Ex bond:
# (3101, 'NAG', 3100, 'NAG', 1, 4)
# (475, 'MAN', 471, 'MAN', 1, 6) Exocyclic
if bond[4] == 6:
cmd.iterate_state(
state, 'model %s and chain %s and resi %s and name C%s or model %s and chain %s and resi %s and name C%s' %
(model, chain, bond[0], 5, model, chain, bond[2], bond[5]), 'stored.pos.append((x,y,z))')
elif bond[5] == 6:
cmd.iterate_state(
state, 'model %s and chain %s and resi %s and name C%s or model %s and chain %s and resi %s and name C%s' %
(model, chain, bond[0], bond[4], model, chain, bond[2], 5), 'stored.pos.append((x,y,z))')
else:
cmd.iterate_state(
state, 'model %s and chain %s and resi %s and name C%s or model %s and chain %s and resi %s and name C%s' %
(model, chain, bond[0], bond[4], model, chain, bond[2], bond[5]), 'stored.pos.append((x,y,z))')
x1, y1, z1 = stored.pos[0]
x2, y2, z2 = stored.pos[1]
coords.append((x1, y1, z1, x2, y2, z2))
matrix_coords.append(coords)
return matrix_coords
def get_colors_c1(resn_list, color, chain, model):
"""obtain colors of c1 atoms"""
matrix_colors = []
if color == 'auto':
for state in range(1, cmd.count_states() + 1):
colors = []
for i, resi in enumerate(resn_list):
stored.colors = []
cmd.iterate_state(
state, 'model %s and chain %s and resi %s and name C1' %
(model, chain, resi), 'stored.colors.append(color)')
colors.extend(stored.colors)
matrix_colors.append(colors)
else:
for state in range(1, cmd.count_states() + 1):
matrix_colors.append([color] * len(resn_list))
return matrix_colors
def get_bonds_colors(resn_list, matrix, color, chain, model):
"""obtain colors for the bonds"""
matrix_colors = []
if color == 'auto':
for state in range(1, cmd.count_states() + 1):
colors = []
for bond in matrix:
stored.colors = []
cmd.iterate_state(
state, 'model %s and chain %s and resi %s and name C1 or model %s and chain %s and resi %s and name C1' %
(model, chain, bond[0], model, chain, bond[2]), 'stored.colors.append(color)')
colors.append((stored.colors[0], stored.colors[1]))
matrix_colors.append(colors)
else:
for state in range(1, cmd.count_states() + 1):
matrix_colors.append([(color, color)] * (len(resn_list) - 1))
return matrix_colors
def hexagon(obj, coords, colors, rep, radius):
"""draw the rings"""
for idx, coord in enumerate(coords):
r, g, b = cmd.get_color_tuple(colors[idx])
coord.append(coord[0])
coord = np.array(coord)
mean = np.mean(coord[:-1], axis=0)
x3, y3, z3 = mean
# average the normals of two neighbouring triangles
normals = [0.5 * (
np.cross((coord[i] - coord[i - 1]), (mean - coord[i])) +
np.cross((coord[i + 1] - coord[i]), (mean - coord[i])))
for i in range(len(coord) - 1)]
centernormal = np.mean(normals, axis=0).tolist()
# add first value to be able to cycle through the triangles
normals.append(normals[0])
for i in range(len(coord) - 1):
x1, y1, z1 = coord[i]
x2, y2, z2 = coord[i + 1]
# Triangles
if rep == 'cartoon':
tri = [
BEGIN, TRIANGLES,
COLOR, r, g, b,
NORMAL, normals[i][0], normals[i][1], normals[i][2],
NORMAL, normals[i + 1][0], normals[i + 1][1], normals[i + 1][2],
NORMAL, centernormal[0], centernormal[1], centernormal[2],
VERTEX, x1, y1, z1,
VERTEX, x2, y2, z2,
VERTEX, x3, y3, z3,
END
]
obj.extend(tri)
obj.extend([CYLINDER, x1, y1, z1, x2, y2,
z2, radius, r, g, b, r, g, b])
obj.extend([COLOR, r, g, b, SPHERE, x1, y1, z1, radius])
return obj
def beads(obj, coords, colors, radius):
"""draw beads"""
for idx, coord in enumerate(coords):
r, g, b = cmd.get_color_tuple(colors[idx])
x1, y1, z1 = np.mean(coord, axis=0)
obj.extend([COLOR, r, g, b, SPHERE, x1, y1, z1, radius])
return obj
def cylinder(obj, coords, colors, radius):
"""draw the bonds between rings"""
for idx, coord in enumerate(coords):
r1, g1, b1 = cmd.get_color_tuple(colors[idx][0])
r2, g2, b2 = cmd.get_color_tuple(colors[idx][1])
x1, y1, z1, x2, y2, z2 = coord
obj.extend([CYLINDER, x1, y1, z1, x2, y2, z2,
radius, r1, g1, b1, r2, g2, b2])
return obj
@cmd.extend
def cartoonize(chains, color='auto', rep='cartoon', show_bonds=False, model = "all"):
"""draw a cartoon representation of glycans, iterate over models and all chains."""
if not chains:
    print('Please specify a chain')
    return
obj_names = []
stored.all_models = []
cmd.iterate('(all)', 'stored.all_models.append((model))')
models = set(stored.all_models)
if model.lower() == "all":
for model in models:
for chain in chains:
obj_names.extend(cartoonize_model_and_chain(model, chain, color, rep, show_bonds))
elif model in models:
for chain in chains:
obj_names.extend(cartoonize_model_and_chain(model, chain, color, rep, show_bonds))
else:
print("model", model, "not in list of models:",models)
return
obj_names.sort()
for obj_name in obj_names:
cmd.group("azahar", obj_name)
@cmd.extend
def cartoonize_model_and_chain(model, chain, color='auto', rep='cartoon', show_bonds=False):
"""draw a cartoon representation of glycans"""
reps = ['cartoon', 'wire', 'beads']
colors = ['auto', 'green', 'red', 'blue', 'yellow', 'cyan', 'magenta',
'orange', 'grey', 'black', 'white']
if rep not in reps:
print("Rep not understood. Available: ", reps)
if color not in colors:
print("color not understood. Available: ", colors)
start = time.time()
stored.ResiduesNumber = []
atoms = cmd.get_model(f'model {model} and chain {chain} and name C1')
resn_list = [int(at.resi) for at in atoms.atom]
#con_matrix = writer2(bonds)
rings = find_rings(resn_list, chain, model)
rings_coords = get_ring_coords(resn_list, rings, chain, model)
colors = get_colors_c1(resn_list, color, chain, model)
cmd.set('suspend_updates', 'on')
obj = []
#Set the radius
if rep == 'beads':
radius = 0.18
elif rep == "cartoon":
radius = 0.075
else:
radius = 0.035
#Show bonds is currently the slowest step.
## It doesn't work well with branching (1-6 is too short)
## It doesn't work with Glycan-protein connections.
obj = []
names = []
if show_bonds:
bonds = get_glyco_bonds_within_chain_and_model(resn_list[0], resn_list[-1] + 1, chain, model)
con_matrix = writer(bonds)
bonds_colors = get_bonds_colors(resn_list, con_matrix, color, chain, model)
bonds_coords = get_bonds_coords(resn_list, con_matrix, chain, model)
for state, coords in enumerate(rings_coords):
obj = cylinder(
obj,
bonds_coords[state],
bonds_colors[state],
radius)
name = 'cbb' + chain + "_" + model
names.append(name)
cmd.load_cgo(obj, name ,state + 1)
#Independent objects to toggle either one on/off.
obj = []
for state, coords in enumerate(rings_coords):
if rep == 'beads':
obj = beads(obj, coords, colors[state], 1.8)
else:
obj = hexagon(obj, coords, colors[state], rep, radius)
name = 'cbr' + chain + "_" + model
names.append(name)
cmd.load_cgo(obj, name, state + 1)
cmd.select('glycan', 'byres name C1')
cmd.delete('glycan')
cmd.delete('tmp')
cmd.set('two_sided_lighting', 1)
cmd.set('suspend_updates', 'off')
return names
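# Usage sketch (illustrative): with a glycoprotein loaded in PyMOL, the extended
# commands above can be run from the PyMOL prompt. The chain identifiers are
# assumptions; `chains` is iterated character by character, so "AB" means
# chains A and B.
#
#     cartoonize A
#     cartoonize AB, color=green, rep=beads, show_bonds=True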
|
from intercom_test import http_best_matches as subject
from base64 import b64encode
from io import StringIO
import json
from should_dsl import should, should_not
JSON_STR = """[{
"id": 1,
"first_name": "Jeanette",
"last_name": "Penddreth",
"email": "jpenddreth0@census.gov",
"gender": "Female",
"ip_address": "26.58.193.2"
}, {
"id": 2,
"first_name": "Giavani",
"last_name": "Frediani",
"email": "gfrediani1@senate.gov",
"gender": "Male",
"ip_address": "229.179.4.212"
}, {
"id": 3,
"first_name": "Noell",
"last_name": "Bea",
"email": "nbea2@imageshack.us",
"gender": "Female",
"ip_address": "180.66.162.255"
}, {
"id": 4,
"first_name": "Willard",
"last_name": "Valek",
"email": "wvalek3@vk.com",
"gender": "Male",
"ip_address": "67.76.188.26"
}]"""
def new_json_data(mod=None):
data = json.loads(JSON_STR)
if mod is not None:
mod(data)
return data
def make_case(method, url, body=None):
result = {'method': method, 'url': url}
if body is not None:
result['request body'] = body
return result
def json_data_pair(mod):
return (new_json_data(), new_json_data(mod))
def remove_index_2(data):
del data[2]
def swap_at_indexes(a, b):
def swapper(data):
data[a], data[b] = data[b], data[a]
swapper.swaps = (a, b)
return swapper
JsonType = subject.JsonType
class JsonDescStrings:
CASE_DESCRIPTION = 'case description'
JSON_BODY_DELTAS = 'minimal JSON request body deltas'
SCALAR_BODY_DELTAS = 'closest request bodies'
ALTER_SUBSTRUCT = 'alter substructures'
REARRANGE_SUBSTRUCT = 'rearrange substructures'
ALTER_SCALARS = 'alter scalar values'
KNOWN_METHODS = 'available HTTP methods'
QSTRING_DELTAS = 'minimal query string deltas'
TARGET_QSPARAMS = 'params with differing value sequences'
GOOD_PATHS = 'closest URL paths'
ADDNL_FIELDS_SETS = 'available additional test case field value sets'
################################# TESTS #################################
def test_report_incorrect_scalar_value():
def alter_request(request):
request[0]['first_name'] = 'Bob'
case, request = (
make_case('post', '/foo', body)
for body in json_data_pair(alter_request)
)
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
suggestions.diff_case_pairs |should| have(1).item
diff, case = suggestions.diff_case_pairs[0]
diff.structure_diffs |should| be_empty
diff.structure_location_diffs |should| be_empty
diff.scalar_diffs |should_not| be_empty
diff.scalar_diffs |should| equal_to(({'set': (0, 'first_name'), 'to': 'Jeanette'},))
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.JSON_BODY_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.ALTER_SCALARS: [
{'set': (0, 'first_name'), 'to': 'Jeanette'},
]
},
}
]
})
def test_report_incorrect_scalar_type():
def alter_request(request):
request[0]['first_name'] = 7
case, request = (
make_case('post', '/foo', body)
for body in json_data_pair(alter_request)
)
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
suggestions.diff_case_pairs |should| have(1).item
diff, case = suggestions.diff_case_pairs[0]
diff.structure_diffs |should| have(2).items
diff.structure_diffs[0] |should| equal_to({'del': (0,)})
d = diff.structure_diffs[1]
d['add'][0] |should| equal_to(JsonType.dict)
d['add'][1] |should| equal_to({
('first_name', JsonType.str),
('last_name', JsonType.str),
('id', JsonType.int),
('gender', JsonType.str),
('ip_address', JsonType.str),
('email', JsonType.str),
})
del request['request body'][0]
request['request body'].append(dict(
(fname, t.construct())
for fname, t in d['add'][1]
))
suggestions2 = db.best_matches(request)
suggestions2.diff_case_pairs |should| have(1).item
diff, case = suggestions2.diff_case_pairs[0]
diff.structure_diffs |should| have(0).items
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.JSON_BODY_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.ALTER_SUBSTRUCT: [
{'del': (0,)},
{'add': {
'email': '',
'ip_address': '',
'first_name': '',
'last_name': '',
'gender': '',
'id': 0,
}},
]
},
}
]
})
def test_report_misplaced_substructure():
def alter_request(request):
request[2]['oops'] = request[3]
del request[3]
case, request = (
make_case('post', '/foo', body)
for body in json_data_pair(alter_request)
)
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
suggestions.diff_case_pairs |should| have(1).item
diff, case = suggestions.diff_case_pairs[0]
diff.structure_diffs |should| have(2).items
d = diff.structure_diffs[0]
d |should| contain('alt')
d['alt'] |should| equal_to(())
d['to'][0] |should| be(JsonType.list)
d['to'][1] |should| equal_to((JsonType.dict,) * 4)
d = diff.structure_diffs[1]
d |should| contain('alt')
d['alt'] |should| equal_to((2,))
d['to'][0] |should| be(JsonType.dict)
d['to'][1] |should| equal_to({
('first_name', JsonType.str),
('last_name', JsonType.str),
('id', JsonType.int),
('gender', JsonType.str),
('ip_address', JsonType.str),
('email', JsonType.str),
})
set(request['request body'][2]).difference(k for k, _ in d['to'][1]) |should| equal_to({'oops'})
# In particular, note that there is no 'add' key in any of
# diff.structure_diffs; this indicates that the difference at key_path ()
# must come from something in request['request body'][2] (which also wants
# a structural change).
request['request body'].append(request['request body'][2]['oops'])
del request['request body'][2]['oops']
suggestions2 = db.best_matches(request)
suggestions2.diff_case_pairs |should| have(1).item
diff, case = suggestions2.diff_case_pairs[0]
diff.structure_diffs |should| have(0).items
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.JSON_BODY_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.ALTER_SUBSTRUCT: [
{'alt': (), 'to': [{}, {}, {}, {}]},
{'alt': (2,), 'to': {
'email': '',
'id': 0,
'ip_address': '',
'last_name': '',
'first_name': '',
'gender': ''
}}
]
},
},
]
})
def test_swapped_substructure():
case, request = (
make_case('post', '/foo', body)
for body in json_data_pair(swap_at_indexes(0, 2))
)
case['request body'][0]['foo'] = request['request body'][2]['foo'] = 42
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
suggestions.diff_case_pairs |should| have(1).item
diff, case = suggestions.diff_case_pairs[0]
diff.structure_diffs |should| have(0).items
diff.structure_location_diffs |should| have(2).items
d = diff.structure_location_diffs[0]
d |should| contain('alt')
d['alt'] |should| equal_to((0,))
d['to'][0] |should| be(JsonType.dict)
d['to'][1] |should| equal_to({
('first_name', JsonType.str),
('last_name', JsonType.str),
('id', JsonType.int),
('gender', JsonType.str),
('ip_address', JsonType.str),
('email', JsonType.str),
('foo', JsonType.int),
})
d = diff.structure_location_diffs[1]
d |should| contain('alt')
d['alt'] |should| equal_to((2,))
d['to'][0] |should| be(JsonType.dict)
d['to'][1] |should| equal_to({
('first_name', JsonType.str),
('last_name', JsonType.str),
('id', JsonType.int),
('gender', JsonType.str),
('ip_address', JsonType.str),
('email', JsonType.str),
})
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.JSON_BODY_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.REARRANGE_SUBSTRUCT: [
{'alt': (0,), 'to': {
'id': 0,
'foo': 0,
'gender': '',
'first_name': '',
'last_name': '',
'ip_address': '',
'email': ''
}},
{'alt': (2,), 'to': {
'id': 0,
'gender': '',
'first_name': '',
'last_name': '',
'ip_address': '',
'email': ''
}},
]
}
}
]
})
def test_body_string_diff():
case, request = (
make_case('post', '/get_bar_info', "name={}".format(name))
for name in ('Cheers', 'Cheers!')
)
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableScalarRequestBodiesReport)
suggestions.test_cases |should| have(1).item
suggestions.test_cases[0]['request body'] |should| equal_to(case['request body'])
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.SCALAR_BODY_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'request body': case['request body'],
}
]
})
def test_body_binary_diff():
case, request = (
make_case('post', '/fingerprint', data)
for data in (b'123456789', b'123654789')
)
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableScalarRequestBodiesReport)
suggestions.test_cases |should| have(1).item
suggestions.test_cases[0]['request body'] |should| equal_to(case['request body'])
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.SCALAR_BODY_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'request body': b64encode(case['request body']).decode('ASCII'),
'isBase64Encoded': True,
}
]
})
def test_http_method_suggestion():
case = make_case('post', '/foo')
request = make_case('get', '/foo')
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableHttpMethodsReport)
suggestions.methods |should| equal_to({'post'})
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.KNOWN_METHODS: ['post']
})
def test_missing_query_param():
case = make_case('get', '/foo?bar=BQ')
request = make_case('get', '/foo')
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
suggestions.deltas |should| have(1).item
d = suggestions.deltas[0]
d[0] |should| respond_to('params')
d[0] |should| respond_to('mods')
d[0].params |should| equal_to({'bar': ['BQ']})
d[0].mods |should| equal_to(({'field': 'bar', 'add': 'BQ'},))
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.QSTRING_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.TARGET_QSPARAMS: {
'bar': ['BQ'],
},
'mods': (
{'field': 'bar', 'add': 'BQ'},
)
}
}
]
})
def test_wrong_query_param_value():
case = make_case('get', '/foo?bar=BQ')
request = make_case('get', '/foo?bar=Cheers')
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
suggestions.deltas |should| have(1).item
d = suggestions.deltas[0]
d[0] |should| respond_to('params')
d[0] |should| respond_to('mods')
d[0].params |should| equal_to({'bar': ['BQ']})
d[0].mods |should| equal_to(({'field': 'bar', 'chg': 'Cheers', 'to': 'BQ'},))
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.QSTRING_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.TARGET_QSPARAMS: {
'bar': ['BQ'],
},
'mods': (
{'field': 'bar', 'chg': 'Cheers', 'to': 'BQ'},
)
}
}
]
})
def test_extra_query_param():
case = make_case('get', '/foo?bar=BQ')
request = make_case('get', '/foo?bar=BQ&bar=Cheers')
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
suggestions.deltas |should| have(1).item
d = suggestions.deltas[0]
d[0] |should| respond_to('params')
d[0] |should| respond_to('mods')
d[0].params |should| equal_to({'bar': ['BQ']})
d[0].mods |should| equal_to(({'field': 'bar', 'del': 'Cheers'},))
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.QSTRING_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.TARGET_QSPARAMS: {
'bar': ['BQ'],
},
'mods': (
{'field': 'bar', 'del': 'Cheers'},
)
}
}
]
})
def test_misordered_query_params():
case = make_case('get', '/foo?bar=BQ&bar=Cheers')
request = make_case('get', '/foo?bar=Cheers&bar=BQ')
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
suggestions.deltas |should| have(1).item
d = suggestions.deltas[0]
d[0] |should| respond_to('params')
d[0] |should| respond_to('mods')
d[0].params |should| equal_to({'bar': ['BQ', 'Cheers']})
d[0].mods |should| equal_to(({'field': 'bar', 'add': 'BQ'}, {'field': 'bar', 'del': 'BQ'}))
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.QSTRING_DELTAS: [
{
JsonDescStrings.CASE_DESCRIPTION: None,
'diff': {
JsonDescStrings.TARGET_QSPARAMS: {
'bar': ['BQ', 'Cheers'],
},
'mods': (
{'field': 'bar', 'add': 'BQ'},
{'field': 'bar', 'del': 'BQ'},
)
}
}
]
})
def test_ignores_order_between_query_params():
case = make_case('get', '/foo?bar=BQ&baz=Cheers&zapf=1')
request = make_case('get', '/foo?baz=Cheers&bar=BQ&zapf=2')
db = subject.Database([case])
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
suggestions.deltas |should| have(1).item
d = suggestions.deltas[0]
d[0] |should| respond_to('params')
d[0] |should| respond_to('mods')
d[0].params |should| equal_to({'zapf': ['1']})
d[0].mods |should| equal_to(({'field': 'zapf', 'chg': '2', 'to': '1'},))
# The as_jsonic_data is covered in test_wrong_query_param_value
pass
def test_wrong_path():
cases = [
make_case('get', '/food/hippopatamus'),
make_case('get', '/food'),
make_case('get', '/food/cat'),
make_case('get', '/food/goat'),
make_case('get', '/food/dog'),
make_case('get', '/food/pig'),
make_case('get', '/food/brachiosaurus'),
]
request = make_case('get', '/foo')
db = subject.Database(cases)
suggestions = db.best_matches(request)
suggestions |should| be_instance_of(subject.AvailablePathsReport)
suggestions.test_case_groups |should| have(5).items
tcgs = suggestions.test_case_groups
list(g[0] for g in tcgs) |should| include_all_of(c['url'] for c in cases[1:6])
suggestions.as_jsonic_data() |should| equal_to({
JsonDescStrings.GOOD_PATHS: [
('/food', []),
('/food/cat', []),
('/food/dog', []),
('/food/pig', []),
('/food/goat', []), # Note this is moved later in the list because of higher edit distance
]
})
def test_json_exchange_get_case():
case = {
'method': 'get',
'url': '/pet_name',
'response body': 'Fluffy',
}
db = subject.Database([case])
output = StringIO()
db.json_exchange(json.dumps(make_case('get', '/pet_name')), output)
output.tell() |should_not| equal_to(0)
output.seek(0)
result = json.load(output)
result |should| contain('response status')
list(result.items()) |should| include_all_of(case.items())
def test_json_exchange_miss_case():
db = subject.Database([
{
'method': 'post',
'url': '/pet_name',
'response body': 'Fluffy',
}
])
output = StringIO()
db.json_exchange(json.dumps(make_case('get', '/pet_name')), output)
output.tell() |should_not| equal_to(0)
output.seek(0)
result = json.load(output)
result |should_not| contain('response status')
def test_json_exchange_differentiate_on_addnl_field():
cases = [
{
'story': "Alice's pet",
'description': "Getting Alice's pet's name",
'method': 'get',
'url': '/pet_name',
'response body': 'Fluffy',
},
{
'story': "Bob's pet",
'description': "Getting Bob's pet's name",
'method': 'get',
'url': '/pet_name',
'response body': 'Max',
},
]
db = subject.Database(cases, add_request_keys=('story',))
base_request = make_case('get', '/pet_name')
def exchange_for_story(story):
output = StringIO()
db.json_exchange(
json.dumps(dict(base_request, story=story)),
output
)
output.tell() |should_not| equal_to(0)
output.seek(0)
return json.load(output)
result = exchange_for_story("Alice's pet")
result |should| contain('response status')
result['response body'] |should| equal_to('Fluffy')
result = exchange_for_story("Bob's pet")
result |should| contain('response status')
result['response body'] |should| equal_to('Max')
result = exchange_for_story("Charlie's pet")
result |should_not| contain('response status')
result |should| contain(JsonDescStrings.ADDNL_FIELDS_SETS)
result[JsonDescStrings.ADDNL_FIELDS_SETS] |should| include_all_of({'story': case['story']} for case in cases)
|
import esgfpid.rabbit.rabbit
import logging
import time
import datetime
TESTVALUES_REAL = dict(
url_messaging_service='handle-esgf.dkrz.de',
messaging_exchange='rabbitsender_integration_tests',
exchange_no_queue='rabbitsender_integration_tests_no_queue',
exchange_name='rabbitsender_integration_tests', # TODO JUST ONE NAME!
rabbit_username='sendertests',
rabbit_password='testmebaby',
routing_key='test_routing_key',
)
#logging.basicConfig(level=logging.DEBUG)
logdate = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
FORMAT = "%(asctime)-15s %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=logging.DEBUG, filename='test_log_'+logdate+'.log', filemode='w', format=FORMAT)
logging.getLogger('esgfpid.rabbit.asynchronous.thread_builder').setLevel(logging.DEBUG)
logging.getLogger('pika').setLevel(logging.DEBUG)
# Test variables:
routing_key = TESTVALUES_REAL['routing_key']
num_messages = 15
# Make connection:
#testrabbit = self.make_rabbit_with_access()
testrabbit = esgfpid.rabbit.rabbit.RabbitMessageSender(
exchange_name=TESTVALUES_REAL['exchange_name'],
urls_fallback=TESTVALUES_REAL['url_messaging_service'],
url_preferred=None,
username=TESTVALUES_REAL['rabbit_username'],
password=TESTVALUES_REAL['rabbit_password'],
)
testrabbit.start()
# Run code to be tested:
for i in range(num_messages):
print("Publishing message %i..." % i)
testrabbit.send_message_to_queue({"stuffee":"foo"+str(i+1),"ROUTING_KEY":routing_key})
print("Publishing message %i... done." % i)
if i==2:
print('Please reboot server now.')
time.sleep(3)
# Close connection after some time:
#print('Wait to close... 1')
#time.sleep(5)
print('Try to close... 1')
testrabbit.finish()
# Check result:
leftovers = testrabbit.get_leftovers()
if len(leftovers) != 0:
    print('There are leftovers: ' + str(len(leftovers)))
if not testrabbit.is_finished():
print('Thread is still alive.')
# Close connection after some time:
print('Wait to close... 2')
time.sleep(5)
print('Try to close... 2')
testrabbit.finish()
# Check result:
leftovers = testrabbit.get_leftovers()
if len(leftovers) != 0:
    print('There are leftovers: ' + str(len(leftovers)))
if not testrabbit.is_finished():
print('Thread is still alive.')
# Close connection after some time:
print('Wait to close... 2')
time.sleep(5)
print('Try to force-close... 2')
testrabbit.force_finish()
print('Force-close done')
# Check result:
leftovers = testrabbit.get_leftovers()
if len(leftovers) != 0:
    print('There are leftovers: ' + str(len(leftovers)))
if not testrabbit.is_finished():
print('Thread is still alive.')
|
"""
The challenge: find the first non repeating character in a string
"""
s = "abacabad"
def firstNotRepeatingCharacter(s):
for i in s:
if s.index(i) == s.rindex(i):
return i
return '_'
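# Minimal check of the helper above (worked out by hand: in "abacabad" the characters
# 'a' and 'b' repeat, so 'c' is the first non-repeating character).
if __name__ == '__main__':
    print(firstNotRepeatingCharacter(s))           # -> 'c'
    print(firstNotRepeatingCharacter('aabbcc'))    # no unique character -> '_'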
|
"""
********************************************************************************
* Name: setup.py
* Author: Nathan Swain
* Created On: 2014
* Copyright: (c) Brigham Young University 2014
* License: BSD 2-Clause
********************************************************************************
"""
import os
from setuptools import setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
setup_requires=['pbr'],
pbr=True,
)
|
from .base import CRDT
from .base import Tuple
from .base import mutator
class LWWReg(CRDT):
@classmethod
def initial(cls):
return Tuple((0, None))
@classmethod
def join(cls, s1, s2):
if s1[0] == s2[0] and s2[1] > s1[1]:
return Tuple((s2[0], s2[1]))
return Tuple((s2[0], s2[1])) if s2[0] > s1[0] else Tuple((s1[0], s1[1]))
@classmethod
def value(cls, state):
return state[1]
@mutator
def write(self, ts, value):
return Tuple((ts, value))
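# Illustrative sketch (assumes Tuple behaves like a built-in 2-tuple, as initial() suggests;
# run as part of the package, since the relative imports above prevent direct execution):
# two replicas write concurrently and join() keeps the write with the higher timestamp.
if __name__ == '__main__':
    a = Tuple((1, 'alice'))   # state after a write at timestamp 1
    b = Tuple((2, 'bob'))     # concurrent state written at timestamp 2
    print(LWWReg.value(LWWReg.join(a, b)))  # -> 'bob'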
|
import classyjson # classy-json
import ffmpeg # ffmpeg-python
import numpy # numpy
import time
import math
import os
with open('config.json', 'r') as c:
config = classyjson.load(c)
# reverse gradients
config.gradients[0] = ''.join(reversed([c for c in config.gradients[0]]))
config.gradients[1] = ''.join(reversed([c for c in config.gradients[1]]))
# for test1
"""
h = 540
w = 960
aspect_ratio = w/h
sh = 175
sw = math.ceil(aspect_ratio*sh)
"""
h = 320
w = 320
aspect_ratio = w/h
sh = 100
sw = math.floor(math.ceil(aspect_ratio*sh)*2) # 2x to account for font size
vid_inp = ffmpeg.input('test2.mp4')
vid_inp = vid_inp.video.filter('scale', sw, sh)
process = vid_inp.output('pipe:', format='rawvideo', pix_fmt='rgb24').run_async(pipe_stdout=True)
frames = [] # will be list of asciified frames
def get_ascii_pixel(p): # takes [r, g, b]
avg = (int(p[0]) + int(p[1]) + int(p[2])) / 3
grad = config.gradients[1]
return grad[int((avg*(len(grad)-1))/255)]
while True:
bytes_in = process.stdout.read(sh * sw * 3)
if not bytes_in:
break
# frame is essentially a list of rgb [[r, g, b], [r, g, b], [r, g, b],...]
#frames.append(numpy.frombuffer(bytes_in, numpy.uint8).reshape([h, w, 3]))
frame = numpy.frombuffer(bytes_in, numpy.uint8).reshape([sh, sw, 3]).copy()
# frame[0][0] is [r, g, b], frame is 2d array / matrix duh
frame_new = []
for i, row in enumerate(frame):
frame_new.append([])
for col in row:
frame_new[i].append(get_ascii_pixel(col))
frames.append(frame_new) # append asciified frame
# test
for frame in frames:
#print('\n'*50)
os.system('cls')
body = ''
for row in frame:
body += '\n' + ''.join(row)
print(body)
time.sleep(.025)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from ..utils.data import bytes_for_humans
class BytesForHumans(TestCase):
def test_bytes(self):
self.assertEqual("132B", bytes_for_humans(132))
def test_kilobytes(self):
self.assertEqual("242.10KB", bytes_for_humans(242.1 * 1024))
def test_megabytes(self):
self.assertEqual("377.10MB", bytes_for_humans(377.1 * 1024 * 1024))
def test_gigabytes(self):
self.assertEqual("421.50GB", bytes_for_humans(421.5 * 1024 * 1024 * 1024))
def test_terabytes(self):
self.assertEqual("555.00TB", bytes_for_humans(555 * 1024 * 1024 * 1024 * 1024))
def test_petabytes(self):
self.assertEqual(
"611.77PB", bytes_for_humans(611.77 * 1024 * 1024 * 1024 * 1024 * 1024)
)
|
from intcode import IntcodeVM
from collections import defaultdict
file = open('input.txt', 'r')
for line in file:
memry = line.split(',')
def addPos(one, two):
return one[0] + two[0], one[1] + two[1]
def positionQueried(direction):
global rob
offset = (0, 0)
if direction == 1:
offset = (0, 1)
if direction == 2:
offset = (0, -1)
if direction == 3:
offset = (-1, 0)
if direction == 4:
offset = (1, 0)
return addPos(rob, offset)
def getLeft():
global facing
if facing == 1:
return 3
if facing == 2:
return 4
if facing == 3:
return 2
if facing == 4:
return 1
def getRight():
global facing
if facing == 1:
return 4
if facing == 2:
return 3
if facing == 3:
return 1
if facing == 4:
return 2
def getNeighborRooms(thisRoom):
rooms = []
rooms.append((thisRoom[0], thisRoom[1] + 1))
rooms.append((thisRoom[0], thisRoom[1] - 1))
rooms.append((thisRoom[0] + 1, thisRoom[1]))
rooms.append((thisRoom[0] - 1, thisRoom[1]))
return rooms
def drawMap(special):
global grid
smallestx = 9999
smallesty = 9999
biggestx = -9999
biggesty = -9999
for space in grid:
if grid[space] != -1:
if space[0] < smallestx:
smallestx = space[0]
if space[0] > biggestx:
biggestx = space[0]
if space[1] < smallesty:
smallesty = space[1]
if space[1] > biggesty:
biggesty = space[1]
for i in range(smallestx-5, biggestx+5):
for j in range(smallesty-5, biggesty+5):
if (i,j) in special:
print("@",end="")
elif grid[(i, j)] == -1:
print(" ", end="")
elif grid[(i, j)] == 0:
print(" ", end="")
elif grid[(i, j)] == 2:
print("@", end="")
else:
print("▓", end="")
print("")
grid = defaultdict(lambda: -1)
robot = IntcodeVM(memry)
startPos = (0,0)
grid[startPos] = 1
rob = startPos
OXYGEN = (1234,1234)
facing = 1
while True:
robot.run(getLeft())
if robot.prgOutput == 2:
rob = positionQueried(getLeft())
facing = getLeft()
OXYGEN = rob
grid[OXYGEN] = 1
elif robot.prgOutput == 0:
grid[positionQueried(getLeft())] = 0
robot.run(facing)
if robot.prgOutput == 2:
rob = positionQueried(facing)
OXYGEN = rob
grid[OXYGEN] = 1
elif robot.prgOutput == 0:
grid[positionQueried(facing)] = 0
facing = getRight()
elif robot.prgOutput == 1:
grid[positionQueried(facing)] = 1
rob = positionQueried(facing)
if rob == startPos:
break
elif robot.prgOutput == 1:
grid[positionQueried(getLeft())] = 1
rob = positionQueried(getLeft())
facing = getLeft()
if rob == startPos:
break
howManyRooms = 0
for place in grid:
if grid[place] == 1:
howManyRooms += 1
roomsWithOxygen = [OXYGEN]
minutes = 0
while len(roomsWithOxygen) < howManyRooms:
roomsToBeAdded = []
for oxygenatedRoom in roomsWithOxygen:
for neighborRoom in getNeighborRooms(oxygenatedRoom):
if grid[neighborRoom] == 1 and neighborRoom not in roomsWithOxygen:
roomsToBeAdded.append(neighborRoom)
for roomToBeAdded in roomsToBeAdded:
roomsWithOxygen.append(roomToBeAdded)
minutes += 1
print(minutes)
|
from blaze import dshape
from blaze.expr.nodes import Node
from blaze.expr.viz import dump
from blaze.table import NDArray
from blaze.datashape.coretypes import float64, dynamic
from blaze.expr.graph import IntNode, FloatNode, App, StringNode
from unittest import skip
# Print out graphviz to the screen
DEBUG = False
def test_walk():
e = Node([])
d = Node([])
b = Node([d])
c = Node([e])
a = Node([b,c])
assert len([n for n in a]) == 4
if DEBUG:
dump(a, filename='walk', tree=True)
@skip
def test_dynamic_arguments():
a = NDArray([])
b = NDArray([a])
children = b.children
assert len(children) == 1
def test_binary_ops():
a = IntNode(1)
b = IntNode(2)
x = a+b
if DEBUG:
dump(x, filename='binary')
def test_binary_mixed():
a = IntNode(1)
x = a+2
if DEBUG:
dump(x, filename='binarymixed')
def test_unary_ops():
a = IntNode(1)
x = abs(a)
if DEBUG:
dump(x, filename='unary')
def test_indexing():
a = NDArray([])
x = a[0]
if DEBUG:
dump(x, filename='indexer')
def test_slice():
a = NDArray([])
x = a[0:1]
if DEBUG:
dump(x, filename='slice')
def test_scalars():
a = IntNode(1)
b = IntNode(1)
c = IntNode(2)
x = abs((a + b + c + 3) * 4)
if DEBUG:
dump(x, filename='scalars')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from fil3s.classes.exceptions import Exceptions
from fil3s.classes.files import *
# pip imports.
from django.http import JsonResponse
import json as pypi_json
import traceback as _traceback_
# stdlib modules used further down (serialize uses ast.literal_eval, crash uses sys.exit);
# added here on the assumption that the star import above does not already provide them.
import ast
import sys
# the response manager class.
class __Response__(object):
def __init__(self):
# set log file.
# assign new values with self.log_file.assign(...) to keep it shared across other objects.
self.log_file = String(str(None))
# set log level.
# assign new values with self.log_level.assign(...) to keep it shared across other objects.
self.log_level = Integer(0)
# objects.
self.parameters = Parameters()
# for imports.
self.ResponseObject = ResponseObject
# response functions.
def success(self,
# the message (must be param #1).
message,
		# additional returnable variables (must be param #2).
		variables={},
		# the log level of the message (int).
		log_level=None,
		# the required log level for when printed to console (leave None to use self.log_level).
		required_log_level=None,
		# save the message to the log file.
save=False,
# return as a django JsonResponse.
django=False,
):
response = self.response({
"success":True,
"message":message,
"error":None,
})
for key, value in variables.items():
response[key] = value
self.log(message=response["message"], log_level=log_level, save=save, required_log_level=required_log_level)
if django:
try:
response = JsonResponse(response.dict(), safe=False)
except AttributeError:
response = JsonResponse(response)
return response
def error(self,
# the error message.
error="",
		# the log level of the message (int).
		log_level=None,
		# the required log level for when printed to console (leave None to use self.log_level).
		required_log_level=None,
		# save the error to the errors file.
save=False,
# return as a django JsonResponse.
django=False,
# raise error for developer traceback.
traceback=ERROR_TRACEBACK,
):
response = self.response({
"success":False,
"message":None,
"error":error,
})
self.log(error=response["error"], log_level=log_level, save=save, required_log_level=required_log_level)
if traceback:
raise ValueError(response["error"])
if django:
try:
response = JsonResponse(response.dict(), safe=False)
except AttributeError:
response = JsonResponse(response)
return response
#
# log functions.
def log(self,
# option 1:
# the message (#1 param).
message=None,
# option 2:
# the error.
error=None,
# option 3:
# the response dict (leave message None to use).
response={},
# print the response as json.
json=False,
# optionals:
# the active log level.
log_level=0,
# the required log level for when printed to console (leave None to use self.log_level).
required_log_level=None,
# save to log file.
save=False,
# save errors always (for options 2 & 3 only).
save_errors=None,
# the log mode (leave None for default).
mode=None,
):
if mode != None: mode = str(mode).lower().replace("-","").replace("_","")
if mode not in [None, "warning", "alert"]:
raise Exceptions.InvalidUsage(f"{self.__traceback__(function='log', parameter='mode')}: Selected an invalid mode [{mode}], options: [None, warning, alert].")
def fill_message(msg, error=False):
if mode == None:
if error:
msg = f"Error: {msg}"
else:
msg = msg
elif mode == "warning":
msg = f"&RED&:Warning&END&: {msg}"
elif mode == "alert":
msg = f"&ORANGE&:Alert&END&: {msg}"
return msg
#
msg, _error_ = None, False
if [message,error,response] == [None,None,{}]:
raise Exceptions.InvalidUsage(f"{self.__traceback__(function='log')}: Define either parameter [message:str], [error:str] or [response:dict].")
if response != {}:
if response["error"] != None:
_error_ = True
msg = f"Error: {response['error']}"
else:
if response.__class__.__name__ in ["Output"]:
msg = response.output
else:
msg = response["message"]
elif isinstance(error, (str, String)):
msg = fill_message(error, error=True)
else:
msg = fill_message(message, error=False)
if required_log_level == None: required_log_level = self.log_level
try:
required_log_level = int(required_log_level)
except:
required_log_level = 0
try:
comparison = log_level != None and log_level >= required_log_level
except TypeError as e:
if "not supported between instances of 'dict' and 'int'" in f"{e}":
raise TypeError(f"You most likely returned a Response.error when you meant a Response.success, error: {e}")
else:
raise TypeError(e)
if comparison:
#print(f"{Date().seconds_timestamp} - {color.fill(msg)}")
if json:
if response != {}:
print(response.json())
elif error != None:
print(self.error(error))
else:
print(self.success(message))
else:
print(f"{color.fill(msg)}")
if save:
self.__log_to_file__(msg)
elif save_errors and _error_:
self.__log_to_file__(msg)
#
def load_logs(self, format="webserver", options=["webserver", "cli", "array", "string"]):
try:
logs = File(str(self.log_file), load=True, blank="").data
except:
return self.error("Failed to load the logs.")
if format == "webserver":
logs = logs.replace("\n", "<br>")
elif format == "cli":
a=1
elif format == "array" or format == list:
logs = logs.split("\n")
elif format == "string" or format == str:
logs = str(logs)
else:
return self.error(f"Invalid format parameter [{format}], valid options: {options}.")
return self.success("Succesfully loaded the logs.", {"logs":logs})
def reset_logs(self):
		Formats.File(str(self.log_file)).save(f"Reset the log file.\n")
#
# init a response blank object.
def serialize(self,
# the response (#1) (dict) (str repr of dict) (ResponseObject) (generator) .
response={},
# init to response object.
init=True,
):
# check async generator.
if response.__class__.__name__ in ["generator"]:
raise ValueError("Not supported yet.")
return self.serialize(response=response.value, init=init)
#print("Converting generator.")
#print(f"Generator value ({response.value.__class__.__name__}) [{response.value}].")
#try:
# value = next(response)
#except StopIteration as e:
# return self.serialize(response=str(e), init=True)
# #print(f"Generator output: [{e}].")
# #response = self.serialize(response=str(e), init=True)
# #print(f"Serialized generator output instance: [{response.__class__.__name__}].")
# #return response
#except Eception as e:
# return Response.error(f"An error occured during the execution of generator [{response}], error: {e}")
# check ResponseObject.
elif response.__class__.__name__ in ["ResponseObject"]:
return response
# check syst3m.console.Output.
elif response.__class__.__name__ in ["Output"]:
try:
return response.response()
except AttributeError:
return response
# dict / str.
elif response.__class__.__name__ in ["str", "String", "dict", "Dictionary"]:
if response.__class__.__name__ in ["str", "String"]:
try:
try:
response = ast.literal_eval(response)
except:
response = json.loads(response)
except Exception as e:
raise Exceptions.JsonDecodeError(f"Unable to parse a dictionary from [{response}], {e}.")
for key in list(response.keys()):
value = response[key]
no_dict = False
if isinstance(value, dict):
value = self.serialize(value)
elif value in [None, "None", "none", "null"]: value = None
elif value in [True, "True", "true", "TRUE"]: value = True
elif value in [False, "False", "false", "FALSE"]: value = False
elif isinstance(value, str):
if "." in value:
try: value = float(value)
except: a=1
else:
try: value = int(value)
except: a=1
response[key] = value
# invalid.
else:
raise Exceptions.InvalidUsage(f"The parameter [response] must be [str, String, dict, Dictionary, generator] not [{response.__class__.__name__}].")
# return dict.
if init:
return ResponseObject(response)
else:
return response
#
def response(self,
# the blank response (dict, str, generator) (#1).
response={
"success":False,
"message":None,
"error":None,
},
):
# serialize shortcut.
return self.serialize(response, init=True)
#
# system functions.
def __log_to_file__(self, message):
# init.
try:
with open(self.log_file, "a") as file:
file.write(f'{Date().seconds_timestamp} - {message}\n')
response = self.response()
response["success"] = True
response["message"] = "Succesfully logged the message."
# check file size.
size = FilePath(self.log_file).size(mode="mb", format="integer")
if size >= 100: self.reset_logs()
# handler.
return response
except Exception as e:
response = self.response()
response["error"] = f"Failed to log the message, error: {e}."
return response
#
#
# the parameters manager object class.
class Parameters(object):
def __init__(self):
a=1
#
# get request parameters.
def get(self,
# the django request (1).
request=None,
# the identifiers (#2).
# str instance: return the parameters value.
# list instance: return a parameters object & return an error response when a parameter is undefined.
# dict instance: return a parameters object & return the parameter's value from the dict as a default when undefined.
parameters=[],
# default return value (dict instance of parameters overwrites the default parameter).
default=None,
# traceback id.
traceback=None,
):
if request == None:
raise Exceptions.InvalidUsage("<Response.paramters.get>: Define parameter: [request].")
# single parameter.
if isinstance(parameters, (str,String)):
parameters = str(parameters)
response = Response.response()
format = None
if ":" in parameters:
parameters,format = parameters.split(":")
while True:
if " " in format: format = format.replace(" ","")
else: break
if request.method in ["post", "POST"]:
variable = request.POST.get(parameters)
else:
variable = request.GET.get(parameters)
if variable in ["", None]:
if traceback != None:
return variable, Response.error(f"{traceback}: Define parameter: [{parameters}].")
else:
return variable, Response.error(f"Define parameter: [{parameters}].")
elif format != None:
if format in ["str", "string"]: variable = str(variable)
elif format in ["int", "integer"]: variable = int(variable)
elif format in ["bool", "boolean"]:
if variable in ["true", "True", "TRUE", True]: variable = True
else: variable = False
elif format in ["float", "double"]: variable = float(variable)
elif format in ["array", "list"]: variable = variable.split(",")
else:
raise ValueError(f"Unrecognized <Response.parameters.get> format: {format}.")
# handler.
return variable, Response.success(f"Succesfully retrieved request parameter [{parameters}].", {
"key":parameters,
"value":variable,
})
# list recursive.
elif isinstance(parameters, (list, Array)):
optional = False
params = ResponseObject()
for param in parameters:
param_value, response = self.get(request, param, traceback=traceback)
param = param.split(":")[0]
if response["error"] != None:
if optional:
params[param] = default
else:
return params, response
else:
params[param] = param_value
if optional:
for key in parameters:
try: params[key]
except: params[key] = default
return params, Response.success(f"Succesfully retrieved {len(params)} request parameter(s).")
# dict recursive.
elif isinstance(parameters, (dict, Dictionary, ResponseObject)):
if isinstance(parameters, (ResponseObject)): parameters = parameters.clean()
optional = True
params = ResponseObject()
for param,default in parameters.items():
param_value, response = self.get(request, param, traceback=traceback)
param = param.split(":")[0]
if response["error"] != None:
if optional:
params[param] = default
else:
return params, response
else:
params[param] = param_value
if optional:
for key,default in parameters.items():
try: params[key]
except: params[key] = default
return params, Response.success(f"Succesfully retrieved {len(params)} request parameter(s).")
# invalid.
else:
raise Exceptions.InvalidUsage(f"The parameters parameter must be [str, String, list, Array, dict, Dictionary] not [{dictionary.__class__.__name__}].")
#
# check parameter's values.
def check(self,
# the parameters (dict) (#1).
parameters={"parameter":None},
		# the default value that marks a parameter as empty / not provided.
default=None,
# the traceback id.
traceback=None,
):
# single.
if isinstance(parameters, tuple):
name,parameter = parameters
if parameter == default:
if traceback != None:
return Response.error(f"{traceback}: Define parameter [{name}].")
else:
return Response.error(f"Define parameter [{name}].")
if ":" in name:
name,formats = name.split(":")
while True:
if " " in formats: formats = formats.replace(" ","")
else: break
formats = formats.split(",")
param_format = Formats.get(parameter, serialize=True)
if param_format not in formats:
return Response.error(f"Incorrect parameter [{name}:{param_format}] format, correct format(s): {Array(path=False, array=formats).string(joiner=', ')}.")
return Response.success(f"Succesfully checked parameter [{name}].")
# recursive.
elif isinstance(parameters, (dict,Dictionary,ResponseObject)):
for id, value in parameters.items():
response = self.check(parameters=(id, value), default=default, traceback=traceback)
if response["error"] != None: return response
return Response.success(f"Succesfully checked {len(parameters)} parameter(s).")
# invalid.
else:
raise Exceptions.InstanceError(f"Parameter [parameters] requires to be a [dict, Dictionary, tuple] not [{parameters.__class__.__name__}].")
#
#
# the response & parameters object class.
class ResponseObject(object):
def __init__(self,
# the response attributes.
attributes={
"success":False,
"message":None,
"error":None,
},
# import a dumped json response (str) (ignores attributes).
json=None,
):
# check self instance.
if isinstance(attributes, ResponseObject):
#attributes = attributes.dict()
#self = attributes
attributes = attributes.dict()
elif isinstance(json, ResponseObject):
json = json.dict()
# assign attributes.
if json != None:
if isinstance(json, str):
try:
self.assign(pypi_json.loads(json))
except:
try:
self.assign(ast.literal_eval(json))
except:
self.assign(pypi_json.loads(String(json).slice_dict()))
elif isinstance(json, dict):
self.assign(json)
elif json != None:
raise Exceptions.InvalidUsage("The ResponseObject.json parameter must be str / dict format.")
else:
self.assign(attributes)
# clean message & error.
try:
if self.message in ["None", "null", "", "nan"]: self.message = None
if self.message != None:
self.message = String(self.message).capitalized_word()
if self.error in ["None", "null", "", "nan"]: self.error = None
if self.error != None:
self.error = String(self.error).capitalized_word()
while True:
if self.message != None and len(self.message) >= 1 and self.message[len(self.message)-1] in [" ", ".", ","]:
self.message = self.message[:-1]
elif self.error != None and len(self.error) >= 1 and self.error[len(self.error)-1] in [" ", ".", ","]:
self.error = self.error[:-1]
elif self.error != None and len(self.error) >= len("Error: ") and self.error[:len("Error: ")] in ["Error: "]:
self.error = String(self.error[len("Error: "):]).capitalized_word()
else: break
# add dot.
if self.message != None and len(self.message) > 0 and self.message[len(self.message)-1] not in ["!", "?"]:
self.message += "."
if self.error != None and len(self.error) > 0 and self.error[len(self.error)-1] not in ["!", "?"]:
self.error += "."
			# check for an error passed as a success response, and vice versa.
			if self.message != None and len(self.message) >= len("Failed") and self.message[:len("Failed")] == "Failed":
				#_traceback_.print_exc()
				raise ValueError("A success response may not start with (failed ...). You most likely returned Response.success when you meant Response.error.")
			if self.error != None and len(self.error) >= len("Success") and self.error[:len("Success")] == "Success":
				#_traceback_.print_exc()
				raise ValueError("An error response may not start with (success ...). You most likely returned Response.error when you meant Response.success.")
# except if not present.
except AttributeError: a=1
# clean default values.
def clean(self,
# the clean options, select * for all, options: [traceback].
options=["*"],
# serialize to ResponseObject (with serialize False the ResponseObject's values are not updated).
serialize=True,
):
# options.
if options in ["*", "all"]: options = ["*"]
_options = []
for i in options: _options.append(i.lower())
options = _options
# clean.
attributes = self.dict(json=False)
remove = ["error", "message", "success"]
if "*" in options or "traceback" in options: remove += ["_traceback_", "__traceback__", "raw_traceback", "_raw_traceback_", "__raw_traceback__"]
for i in remove:
try:
if serialize:
del self[str(i)]
else:
del attributes[str(i)]
except KeyError: a=1
if serialize:
return self
else:
return attributes
#
# assign dict.
def assign(self, dictionary):
if isinstance(dictionary, (dict, Dictionary)):
for key,value in dictionary.items():
self[key] = value
elif isinstance(dictionary, (tuple, list, Array)):
for key,value in dictionary:
self[key] = value
else:
raise Exceptions.InvalidUsage("The dictionary parameter must be a dict or tuple.")
return self
# crash the error message.
def crash(self, error="ValueError", traceback=True, json=False):
if json:
self.log(error=self["error"], json=json)
sys.exit(1)
else:
#if not traceback:
# sys.tracebacklimit = 0
#else:
# sys.tracebacklimit = 1
if error.lower() in ["valueerror", "value_error"]: raise ValueError(self["error"])
else: raise Exception(self["error"])
def unpack(self,
# the key / keys / defaults parameter (#1).
# str instance:
# unpack the str key
# list instance:
# unpack all keys in the list.
# dict instance:
# unpack all keys from the dict & when not present return the key's value as default.
keys,
):
defaults = {}
if isinstance(keys, (dict, Files.Dictionary, ResponseObject)):
if isinstance(keys, dict):
defaults = dict(keys)
keys = list(keys.keys())
else:
defaults = keys.dict()
keys = keys.keys()
elif isinstance(keys, str):
keys = [keys]
unpacked = []
for key in keys:
value, set = None, True
try:
value = self[key]
except KeyError:
try:
value = defaults[key]
except KeyError:
set = False
if not set:
raise Exceptions.UnpackError(f"Dictionary does not contain attribute [{key}].")
unpacked.append(value)
return unpacked
def remove(self, keys=[], values=[], save=False):
for i in keys:
try: del self[i]
except: a=1
if values != []:
new = {}
for k,v in self.iterate():
if v not in values: new[k] = v
self.assign(new)
if save: self.save()
return self
# iterations.
def iterate(self, sorted=False, reversed=False):
return self.items(reversed=reversed, sorted=sorted)
def items(self, sorted=False, reversed=False, dictionary=None):
if dictionary == None:
dictionary = self.dict()
if sorted: dictionary = self.sort()
if reversed: return self.reversed().items()
else: return dictionary.items()
def keys(self, sorted=False, reversed=False):
if sorted:
return self.sort(self.keys(sorted=False, reversed=reversed))
if reversed:
keys = list(vars(self))
reversed_keys = []
c = len(keys)-1
for _ in range(len(keys)):
reversed_keys.append(keys[c])
c -= 1
return reversed_keys
else: return list(vars(self))
def values(self, sorted=False, reversed=False, dictionary=None):
if dictionary == None: dictionary = self.dict()
if sorted:
return self.sort(self.values(sorted=False, reversed=reversed, dictionary=dictionary))
values = []
for key, value in self.items(reversed=reversed, dictionary=dictionary):
values.append(value)
return values
	def reversed(self, dictionary=None):
		if dictionary == None: dictionary = self.dict()
		# collect into a dict (not a list) so each key can be mapped to its value.
		reversed_dict = {}
		for key in self.keys(reversed=True):
			reversed_dict[key] = dictionary[key]
		return reversed_dict
def sort(self, alphabetical=True, ascending=False, reversed=False, dictionary=None):
if dictionary == None: dictionary = self.dict()
new = {}
if alphabetical or ascending:
_sorted_ = Array(path=False, array=list(dictionary.keys())).sort(alphabetical=alphabetical, ascending=ascending, reversed=reversed)
else: raise ValueError("Unknown behaviour, alphabetical=False.")
for key in _sorted_:
new[Formats.denitialize(key)] = dictionary[Formats.denitialize(key)]
return new
# return self as dict.
def dict(self, sorted=False, reversed=False, json=False):
dictionary = {}
for key in self.keys():
dictionary[Formats.denitialize(key)] = self[Formats.denitialize(key)]
return self.serialize(json=json, sorted=sorted, reversed=reversed, dictionary=dictionary)
# dump json string.
def json(self, sorted=False, reversed=False, indent=4, dictionary=None, ):
if dictionary == None: dictionary = self.dict()
return json.dumps(self.serialize(json=False, sorted=sorted, reversed=reversed, dictionary=dictionary), indent=indent).replace(': "False"', ': false').replace(': "True"', ': true').replace(': "None"', ': null')
# serialize dict.
def serialize(self, sorted=False, reversed=False, json=False, dictionary=None):
if dictionary == None: dictionary = self.dict()
if isinstance(dictionary, Dictionary):
dictionary = dictionary.dictionary
if sorted:
items = self.items(reversed=reversed, dictionary=self.sort(alphabetical=True, dictionary=dictionary))
else:
items = self.items(reversed=reversed, dictionary=dictionary)
dictionary = {}
for key, value in items:
value = Formats.denitialize(value)
if isinstance(value, (dict, Dictionary)):
value = self.serialize(json=json, sorted=sorted, reversed=reversed, dictionary=value)
elif isinstance(value, (list, Array)):
value = Array(value).serialize(json=json, sorted=sorted, reversed=reversed)
			# stringify only non-primitive objects; primitives fall through to the normalization below.
			elif not (isinstance(value, (str, bool, int, float)) or value == None):
value = str(value)
elif isinstance(value, str) or isinstance(value, bool) or value == None:
if value in [True, "True", "True".lower()]:
if json:
value = "true"
else:
value = True
elif value in [False, "False", "False".lower()]:
if json:
value = "false"
else:
value = False
elif value in [None, "None", "None".lower()]:
if json:
value = "null"
else:
value = None
dictionary[key] = value
return dictionary
# support default iteration.
def __iter__(self):
return iter(self.keys())
# support '>=' & '>' operator.
def __gt__(self, response):
		if not isinstance(response, self.__class__):
raise Exceptions.InstanceError(f"Can not compare object {self.__class__} & {response.__class__}.")
return len(self) > len(response)
def __ge__(self, response):
if not isinstance(response, self.__class__):
raise Exceptions.InstanceError(f"Can not compare object {self.__class__} & {response.__class__}.")
return len(self) >= len(response)
# support '<=' & '<' operator.
def __lt__(self, response):
if not isinstance(response, self.__class__):
raise Exceptions.InstanceError(f"Can not compare object {self.__class__} & {response.__class__}.")
return len(self) < len(response)
def __le__(self, response):
if not isinstance(response, self.__class__):
raise Exceptions.InstanceError(f"Can not compare object {self.__class__} & {response.__class__}.")
return len(self) <= len(response)
# support '==' & '!=' operator.
def __eq__(self, dictionary):
if isinstance(dictionary, dict):
return str(self.sort()) == str(Dictionary(dictionary).sort())
elif isinstance(dictionary, Dictionary):
return str(self.sort()) == str(dictionary.sort())
else:
try:
return str(self.sort()) == str(dictionary.sort())
except:
return False
def __ne__(self, dictionary):
if isinstance(dictionary, dict):
return str(self.sort()) != str(Dictionary(dictionary).sort())
elif isinstance(dictionary, Dictionary):
return str(self.sort()) != str(dictionary.sort())
else:
try:
return str(self.sort()) != str(dictionary.sort())
except:
return False
# support 'in' operator.
def __contains__(self, response):
keys = self.keys()
if isinstance(response, (list, Array)):
			for i in response:
				if i in keys:
return True
return False
else:
return response in keys
# support item assignment.
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __delitem__(self, key):
delattr(self, key)
# str representable.
def __str__(self):
return self.json(indent=4)
# bool representable.
def __bool__(self):
return self.success
#
# content count.
def __len__(self):
return len(self.keys())
# object id.
def __id__(self):
return f"({self.instance()}:{str(self)})"
# object instance.
def instance(self):
return "ResponseObject"
@property
def __name__(self):
return self.instance()
# return raw data.
def raw(self):
return self.dict()
# return response self for syst3m.console.Output and other objects that init ResponseObject as self and want it to be converted to response.
def response(self):
return self
#
# initialized objects.
# must be initialized as Response since object class Parameters requires it.
Response = __Response__()
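# Illustrative usage (a sketch with hypothetical values, not part of this module):
#
#   response = Response.success("Saved the item.", {"id": 42})
#   if response.success: print(response["id"])
#
#   response = Response.error("Item not found.", traceback=False)
#   if response["error"] != None: print(response.error)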
|
import os
from fast_align.generate_alignments import generate_word_alignments_fast_align
from mgiza.generate_alignments import generate_word_alignments_mgiza
from SimAlign.generate_alignments import generate_word_alignments_simalign
from awesome.generate_alignments import generate_word_alignments_awesome
from typing import Optional, List
from tokenization.conll2text import conll2text
from tokenization.utils import count_lines
from projection.annotation_proyection import dataset_projection
import argparse
def generate_alignments(
source_train: Optional[str],
source_dev: Optional[str],
source_test: Optional[str],
target_train: Optional[str],
target_dev: Optional[str],
target_test: Optional[str],
source_augmentation: Optional[str],
target_augmentation: Optional[str],
output_dir: str,
output_name: str,
do_fastalign: bool = False,
do_mgiza: bool = False,
do_simalign: bool = True,
do_awesome: bool = False,
remove_awesome_model: bool = True,
awesome_model_path: str = None,
):
"""
Generate word alignments for the given datasets.
:param str source_train: Path to the source language training dataset. A txt file, one sentence per line.
:param str source_dev: Path to the source language development dataset. A txt file, one sentence per line.
:param str source_test: Path to the source language test dataset. A txt file, one sentence per line.
:param str target_train: Path to the target language training dataset. A txt file, one sentence per line.
:param str target_dev: Path to the target language development dataset. A txt file, one sentence per line.
:param str target_test: Path to the target language test dataset. A txt file, one sentence per line.
:param str source_augmentation: Path to the source language augmentation dataset. A txt file, one sentence per line.
:param str target_augmentation: Path to the target language augmentation dataset. A txt file, one sentence per line.
:param str output_dir: Path to the output directory.
:param str output_name: Name of the output files
:param bool do_fastalign: Whether to generate word alignments with fastalign.
:param bool do_mgiza: Whether to generate word alignments with mgiza.
:param bool do_simalign: Whether to generate word alignments with simalign.
:param bool do_awesome: Whether to generate word alignments with awesome.
:param bool remove_awesome_model: Whether to remove the trained awesome model after the alignment generation.
:param str awesome_model_path: Path to a pretrained awesome model.
"""
# 1) Sanity checks
assert source_train or source_dev or source_test, f"Nothing to do"
assert target_train or target_dev or target_test, f"Nothing to do"
assert (source_train is not None and target_train is not None) or (
source_train is None and target_train is None
), f"Source train: {source_train}. Target train: {target_train}"
assert (source_dev is not None and target_dev is not None) or (
source_dev is None and target_dev is None
), f"Source dev: {source_dev}. Target dev: {target_dev}"
assert (source_test is not None and target_test is not None) or (
source_test is None and target_test is None
), f"Source test: {source_test}. Target test: {target_test}"
assert (source_augmentation is not None and target_augmentation is not None) or (
source_augmentation is None and target_augmentation is None
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Projection
source_paths: List[str] = []
target_paths: List[str] = []
if source_train:
source_paths.append(source_train)
target_paths.append(target_train)
if source_dev:
source_paths.append(source_dev)
target_paths.append(target_dev)
if source_test:
source_paths.append(source_test)
target_paths.append(target_test)
if do_mgiza:
output_names = []
if source_train:
output_names.append(output_name + ".mgiza.train")
if source_dev:
output_names.append(output_name + ".mgiza.dev")
if source_test:
output_names.append(output_name + ".mgiza.test")
print(
f"Generate word alignments Mgiza.\n"
f"Source paths: {source_paths}.\n"
f"Target paths: {target_paths}.\n"
f"source_parallel_corpus: {source_augmentation}.\n"
f"target_parallel_corpus: {target_augmentation}.\n"
f"Output names: {output_names}.\n"
f"Output_dir: {output_dir}.\n"
)
generate_word_alignments_mgiza(
source_paths=source_paths,
target_paths=target_paths,
source_parallel_corpus=[source_augmentation]
if source_augmentation
else None,
target_parallel_corpus=[target_augmentation]
if target_augmentation
else None,
output_names=output_names,
output_dir=output_dir,
)
if do_fastalign:
output_names = []
if source_train:
output_names.append(output_name + ".fast_align.train")
if source_dev:
output_names.append(output_name + ".fast_align.dev")
if source_test:
output_names.append(output_name + ".fast_align.test")
print(
f"Generate word alignments Fast Align.\n"
f"Source paths: {source_paths}.\n"
f"Target paths: {target_paths}.\n"
f"source_parallel_corpus: {source_augmentation}.\n"
f"target_parallel_corpus: {target_augmentation}.\n"
f"Output names: {output_names}.\n"
f"Output_dir: {output_dir}.\n"
)
generate_word_alignments_fast_align(
source_paths=source_paths,
target_paths=target_paths,
source_parallel_corpus=[source_augmentation]
if source_augmentation
else None,
target_parallel_corpus=[target_augmentation]
if target_augmentation
else None,
output_names=output_names,
output_dir=output_dir,
)
if do_simalign:
if source_train and target_train:
print(
f"Generate word alignments SimAlign. "
f"source_file: {source_train}. "
f"target_file: {target_train}. "
f"output: {os.path.join(output_dir, f'{output_name}.simalign.train')}"
)
generate_word_alignments_simalign(
source_file=source_train,
target_file=target_train,
output=os.path.join(output_dir, f"{output_name}.simalign.train"),
)
if source_dev and target_dev:
print(
f"Generate word alignments SimAlign. "
f"source_file: {source_dev}. "
f"target_file: {target_dev}. "
f"output: {os.path.join(output_dir, f'{output_name}.simalign.dev')}"
)
generate_word_alignments_simalign(
source_file=source_dev,
target_file=target_dev,
output=os.path.join(output_dir, f"{output_name}.simalign.dev"),
)
if source_test and target_test:
print(
f"Generate word alignments SimAlign. "
f"source_file: {source_test}. "
f"target_file: {target_test}. "
f"output: {os.path.join(output_dir, f'{output_name}.simalign.test')}"
)
generate_word_alignments_simalign(
source_file=source_test,
target_file=target_test,
output=os.path.join(output_dir, f"{output_name}.simalign.test"),
)
if do_awesome:
output_names = []
if source_train:
output_names.append(output_name + ".awesome.train.talp")
if source_dev:
output_names.append(output_name + ".awesome.dev.talp")
if source_test:
output_names.append(output_name + ".awesome.test.talp")
print(
f"Generate word alignments awesome.\n"
f"Source paths: {source_paths}.\n"
f"Target paths: {target_paths}.\n"
f"source_parallel_corpus: {source_augmentation}.\n"
f"target_parallel_corpus: {target_augmentation}.\n"
f"Output names: {output_names}.\n"
f"Output_dir: {output_dir}.\n"
)
generate_word_alignments_awesome(
source_paths=source_paths,
target_paths=target_paths,
source_parallel_corpus=[source_augmentation]
if source_augmentation
else None,
target_parallel_corpus=[target_augmentation]
if target_augmentation
else None,
output_names=output_names,
output_dir=output_dir,
remove_tmp_dir=False if awesome_model_path else remove_awesome_model,
tmp_dir=awesome_model_path,
)
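# Note on file naming (derived from run_projection() below): the projection step
# expects mgiza/fast_align output with a ".grow_diag_final-and.talp" suffix,
# simalign output with an ".itermax.talp" suffix, and awesome output with a plain
# ".talp" suffix, all written to output_dir under the given output_name.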
def run_projection(
source_train: Optional[str],
source_dev: Optional[str],
source_test: Optional[str],
target_train: Optional[str],
target_dev: Optional[str],
target_test: Optional[str],
source_augmentation: Optional[str],
target_augmentation: Optional[str],
output_dir: str,
output_name: str,
do_fastalign: bool = False,
do_mgiza: bool = False,
do_simalign: bool = True,
do_awesome: bool = False,
remove_awesome_model: bool = True,
    awesome_model_path: Optional[str] = None,
):
"""
Perform annotation projection for the given datasets.
:param str source_train: Path to the source language training dataset. A tsv file.
:param str source_dev: Path to the source language development dataset. A tsv file.
:param str source_test: Path to the source language test dataset. A tsv file.
:param str target_train: Path to the target language training dataset. A txt file, one sentence per line.
:param str target_dev: Path to the target language development dataset. A txt file, one sentence per line.
:param str target_test: Path to the target language test dataset. A txt file, one sentence per line.
:param str source_augmentation: Path to the source language augmentation dataset. A txt file, one sentence per line.
:param str target_augmentation: Path to the target language augmentation dataset. A txt file, one sentence per line.
:param str output_dir: Path to the output directory.
:param str output_name: Name of the output files
:param bool do_fastalign: Whether to generate word alignments with fastalign.
:param bool do_mgiza: Whether to generate word alignments with mgiza.
:param bool do_simalign: Whether to generate word alignments with simalign.
:param bool do_awesome: Whether to generate word alignments with awesome.
:param bool remove_awesome_model: Whether to remove the trained awesome model after the alignment generation.
:param str awesome_model_path: Path to a pretrained awesome model.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    assert source_train or source_dev or source_test, "No source dataset provided, nothing to do"
    assert target_train or target_dev or target_test, "No target dataset provided, nothing to do"
assert (source_train is not None and target_train is not None) or (
source_train is None and target_train is None
), f"Source train: {source_train}. Target train: {target_train}"
assert (source_dev is not None and target_dev is not None) or (
source_dev is None and target_dev is None
), f"Source dev: {source_dev}. Target dev: {target_dev}"
assert (source_test is not None and target_test is not None) or (
source_test is None and target_test is None
), f"Source test: {source_test}. Target test: {target_test}"
assert (source_augmentation is not None and target_augmentation is not None) or (
source_augmentation is None and target_augmentation is None
)
if source_train:
source_train_txt = os.path.join(
output_dir, os.path.basename(os.path.splitext(source_train)[0]) + ".txt"
)
conll2text(input_path=source_train, sentences_output_path=source_train_txt)
lines_source = count_lines(input_path=source_train_txt)
lines_target = count_lines(input_path=target_train)
assert lines_source == lines_target, (
f"The number of lines in the source and target files are different.\n"
f"Source ({source_train_txt}): {lines_source}\n"
f"Target ({target_train}): {lines_target}"
)
else:
source_train_txt = None
if source_dev:
source_dev_txt = os.path.join(
output_dir, os.path.basename(os.path.splitext(source_dev)[0]) + ".txt"
)
conll2text(input_path=source_dev, sentences_output_path=source_dev_txt)
lines_source = count_lines(input_path=source_dev_txt)
lines_target = count_lines(input_path=target_dev)
assert lines_source == lines_target, (
f"The number of lines in the source and target files are different.\n"
f"Source ({source_dev_txt}): {lines_source}\n"
f"Target ({target_dev}): {lines_target}"
)
else:
source_dev_txt = None
if source_test:
source_test_txt = os.path.join(
output_dir, os.path.basename(os.path.splitext(source_test)[0]) + ".txt"
)
conll2text(input_path=source_test, sentences_output_path=source_test_txt)
lines_source = count_lines(input_path=source_test_txt)
lines_target = count_lines(input_path=target_test)
assert lines_source == lines_target, (
f"The number of lines in the source and target files are different.\n"
f"Source ({source_test_txt}): {lines_source}\n"
f"Target ({target_test}): {lines_target}"
)
else:
source_test_txt = None
if source_augmentation:
lines_source = count_lines(input_path=source_augmentation)
lines_target = count_lines(input_path=target_augmentation)
assert lines_source == lines_target, (
f"The number of lines in the source and target files are different.\n"
f"Source ({source_augmentation}): {lines_source}\n"
f"Target ({target_augmentation}): {lines_target}"
)
generate_alignments(
source_train=source_train_txt,
target_train=target_train,
source_dev=source_dev_txt,
target_dev=target_dev,
source_test=source_test_txt,
target_test=target_test,
source_augmentation=source_augmentation,
target_augmentation=target_augmentation,
output_dir=output_dir,
output_name=output_name,
do_fastalign=do_fastalign,
do_mgiza=do_mgiza,
do_simalign=do_simalign,
do_awesome=do_awesome,
remove_awesome_model=remove_awesome_model,
awesome_model_path=awesome_model_path,
)
alignment_list = []
if do_mgiza:
alignment_list.append("mgiza")
if do_fastalign:
alignment_list.append("fastalign")
if do_simalign:
alignment_list.append("simalign")
if do_awesome:
alignment_list.append("awesome")
dataset_list = []
if source_train:
dataset_list.append("train")
if source_dev:
dataset_list.append("dev")
if source_test:
dataset_list.append("test")
output_files: List[str] = []
for alignment_method in alignment_list:
for dataset_split in dataset_list:
if alignment_method == "mgiza" or alignment_method == "fast_align":
alignments_path = os.path.join(
output_dir,
f"{output_name}.{alignment_method}.{dataset_split}.grow_diag_final-and.talp",
)
elif alignment_method == "simalign":
alignments_path = os.path.join(
output_dir,
f"{output_name}.{alignment_method}.{dataset_split}.itermax.talp",
)
elif alignment_method == "awesome":
alignments_path = os.path.join(
output_dir,
f"{output_name}.{alignment_method}.{dataset_split}.talp",
)
else:
raise ValueError(f"{alignment_method} not supported")
if dataset_split == "train":
source_dataset = source_train
target_dataset = target_train
elif dataset_split == "dev":
source_dataset = source_dev
target_dataset = target_dev
elif dataset_split == "test":
source_dataset = source_test
target_dataset = target_test
else:
raise ValueError(f"{dataset_split} dataset split not supported")
dataset_projection(
source_dataset=source_dataset,
target_sentences=target_dataset,
alignments_path=alignments_path,
batch_size=10000,
output_path=os.path.join(
output_dir, f"{output_name}.{alignment_method}.{dataset_split}.tsv"
),
)
output_files.append(
os.path.join(
output_dir, f"{output_name}.{alignment_method}.{dataset_split}.tsv"
)
)
if source_train_txt:
os.remove(source_train_txt)
if source_dev_txt:
os.remove(source_dev_txt)
if source_test_txt:
os.remove(source_test_txt)
print("Done!")
print("Output files:")
print("\n".join(output_files))
print("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate alignments for a given dataset."
)
parser.add_argument(
"--source_train",
default=None,
type=str,
help="Path to the source training file. TSV format",
)
parser.add_argument(
"--target_train",
default=None,
type=str,
help="Path to the target training file. A txt file with one sentence per line",
)
parser.add_argument(
"--source_dev",
default=None,
type=str,
help="Path to the source development file. TSV format",
)
parser.add_argument(
"--target_dev",
default=None,
type=str,
help="Path to the target development file. A txt file with one sentence per line",
)
parser.add_argument(
"--source_test",
default=None,
type=str,
help="Path to the source test file. TSV format",
)
parser.add_argument(
"--target_test",
default=None,
type=str,
help="Path to the target test file. A txt file with one sentence per line",
)
parser.add_argument(
"--source_augmentation",
default=None,
type=str,
help="Path to the source augmentation file. A txt file with one sentence per line",
)
parser.add_argument(
"--target_augmentation",
default=None,
type=str,
help="Path to the target augmentation file. A txt file with one sentence per line",
)
parser.add_argument(
"--output_dir",
type=str,
help="Path to the output directory",
)
parser.add_argument(
"--output_name",
type=str,
help="Name of the output file",
)
parser.add_argument(
"--do_mgiza",
action="store_true",
help="Whether to generate alignments using mgiza",
)
parser.add_argument(
"--do_fastalign",
action="store_true",
help="Whether to generate alignments using fast_align",
)
parser.add_argument(
"--do_simalign",
action="store_true",
help="Whether to generate alignments using simalign",
)
parser.add_argument(
"--do_awesome",
action="store_true",
help="Whether to generate alignments using awesome",
)
parser.add_argument(
"--remove_awesome_model",
action="store_true",
help="Whether to remove the trained awesome model after the alignment is generated",
)
parser.add_argument(
"--awesome_model_path",
default=None,
type=str,
help="If provided, the path to a pretrained awesome model",
)
args = parser.parse_args()
run_projection(
source_train=args.source_train,
target_train=args.target_train,
source_dev=args.source_dev,
target_dev=args.target_dev,
source_test=args.source_test,
target_test=args.target_test,
source_augmentation=args.source_augmentation,
target_augmentation=args.target_augmentation,
output_dir=args.output_dir,
output_name=args.output_name,
do_mgiza=args.do_mgiza,
do_fastalign=args.do_fastalign,
do_simalign=args.do_simalign,
do_awesome=args.do_awesome,
remove_awesome_model=args.remove_awesome_model,
awesome_model_path=args.awesome_model_path,
)
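# Example invocation (illustrative only; the script name and dataset paths below are hypothetical):
#   python projection.py \
#       --source_train en.train.tsv --target_train es.train.txt \
#       --source_dev en.dev.tsv --target_dev es.dev.txt \
#       --output_dir runs/en-es --output_name en2es \
#       --do_simalign --do_awesome --awesome_model_path models/awesome-align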
|
#!/usr/bin/env python3
import pfp
from pfp.utils import binary
from pfp.fuzz import Changer
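# The tests below exercise pfp's Changer: it layers field-level edits on top of the
# original serialized data. push_changes() applies a set of changed fields as a new
# layer and pop_changes() reverts the most recent layer, so edits behave like a
# stack over the untouched original bytes.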
def test_changeset():
template = """
struct {
ushort a;
ushort b;
ushort c;
ushort d;
uint e;
} data;
"""
data = "aabbccddeeee"
dom = pfp.parse(template=template, data=data)
orig_data = dom._pfp__build()
assert orig_data == binary(data)
dom.data.a = 0x4141
dom.data.b = 0x4242
dom.data.c = 0x4343
dom.data.d = 0x4444
dom.data.e = 0x45454545
changer = Changer(orig_data)
changer.push_changes([dom.data.a])
assert changer.build() == bytearray(b"AAbbccddeeee")
changer.pop_changes()
assert changer.build() == bytearray(binary(data))
changer.push_changes([dom.data.a, dom.data.d])
assert changer.build() == bytearray(b"AAbbccDDeeee")
changer.push_changes([dom.data.b, dom.data.c])
assert changer.build() == bytearray(b"AABBCCDDeeee")
changer.push_changes([dom.data.e])
assert changer.build() == bytearray(b"AABBCCDDEEEE")
changer.pop_changes()
assert changer.build() == bytearray(b"AABBCCDDeeee")
changer.pop_changes()
assert changer.build() == bytearray(b"AAbbccDDeeee")
changer.pop_changes()
assert changer.build() == bytearray(binary(data))
def test_changeset_with_bitfields():
template = """
BigEndian();
struct {
char a:2; // 11
char b:2; // 00
char c:3; // 111
char d:1; // 0
uint e;
} data;
"""
    # 0xce = 0b11001110
data = "\xceeeee"
dom = pfp.parse(template=template, data=data)
orig_data = dom._pfp__build()
assert orig_data == binary(data)
dom.data.a = 0
changer = Changer(orig_data)
with changer.change([dom.data.a]) as changed:
assert changed == binary("\x0eeeee") # 0x0e = 0b00001110
assert changer.build() == binary(data)
dom._pfp__snapshot()
dom.data.a = 0
dom.data.d = 1
with changer.change([dom.data.a, dom.data.d]) as changed:
assert changed == binary("\x0feeee") # 0x0f = 0b00001111
dom._pfp__snapshot()
dom.data.b = 3
dom.data.c = 0
with changer.change([dom.data.b, dom.data.c]) as changed:
assert changed == binary("\x31eeee") # 0x31 = 0b00110001
dom._pfp__snapshot()
dom.data.e = 0x45454545
with changer.change([dom.data.e]) as changed:
assert changed == binary("\x31EEEE") # 0x31 = 0b00110001
dom._pfp__restore_snapshot()
assert changer.build() == binary("\x31eeee") # 0x31 = 0b00110001
dom._pfp__restore_snapshot()
assert changer.build() == binary("\x0feeee") # 0x0f = 0b00001111
dom._pfp__restore_snapshot()
assert changer.build() == binary(data)
|
from Crypto.Cipher import AES
from Crypto.Hash import SHA3_256
from Crypto.Random import get_random_bytes as rand
import pathlib
from pwinput import pwinput as getpass
from sys import argv as args
import argparse
from zipfile import ZipFile
from zipfile import Path as ZPath
from io import BytesIO
from tempfile import NamedTemporaryFile
import subprocess
import re
import curses
from curses import wrapper
from curses.textpad import rectangle
files = {}
key = None
fn = None
reinit = True
draw_help = False
def main():
global files, key, fn, reinit
parser = argparse.ArgumentParser(description="Encrypt files with AES-GCM.")
parser.add_argument("file", type=pathlib.Path)
parser.add_argument("-n", "--new", dest="new", action="store_true", help="Create a new archive.")
parser.add_argument("-t", "--test", dest="test", action="store_true", help="Create a new test archive with password 'lol'")
args = vars(parser.parse_args())
fn = args.get("file").absolute()
if args.get("test"):
d = BytesIO()
z = ZipFile(d, mode="w")
z.writestr("one/two/test.txt", b"Hello, world!")
z.writestr("three/test.txt", b"Hello, world!")
z.writestr("one/two/another/lol.txt", b"Hello, world!")
z.writestr("one/two/another/lol2.txt", b"Hello, world!")
z.close()
d.seek(0)
d = d.read()
d = encrypt(SHA3_256.new(b"lol").digest(), d)
f = open(fn, "wb")
f.write(d)
f.close()
return
pwd = getpass().encode("utf-8")
key = SHA3_256.new(pwd).digest()
if args.get("new"):
if getpass(prompt="Retype: ").encode("utf-8") != pwd:
print("Passwords don't match!")
return
d = BytesIO()
z = ZipFile(d, mode="w")
z.close()
d.seek(0)
d = d.read()
d = encrypt(key, d)
f = open(fn, "wb")
f.write(d)
f.close()
return
f = open(fn, "rb")
d = f.read()
f.close()
d = decrypt(key, d)
if not d:
print("Invalid password!")
return
d = BytesIO(d)
z = ZipFile(d, mode="r")
for n in z.namelist():
files[n] = z.read(n)
z.close()
nfiles = {}
for f in files:
nfiles["./" + f] = files[f]
files = nfiles
while reinit:
reinit = False
wrapper(gui)
changes = False
def gui(stdscr):
global files, reinit, changes, draw_help
stdscr.clear()
stdscr.refresh()
k = 0
s = 0
notif_w = 40
notif_h = 7
notif = curses.newwin(notif_h, notif_w, 0, 0)
curses.curs_set(0)
copy = {}
while True:
stdscr.clear()
height, width = stdscr.getmaxyx()
notif.mvwin(height // 2 - (notif_h // 2), width // 2 - (notif_w // 2))
stdscr.addstr(0, 0, "SecretZIP", curses.A_BOLD)
def draw_helpstr(s, y=0):
stdscr.addstr(height - 1 - y, width - len(s) - 1, s)
if not draw_help:
draw_helpstr("Press 'h' for help!")
else:
helpstrs = [
"Press 'h' to hide this screen!",
"Press 'q' or ESC to quit!",
"Press the arrow up and down keys to navigate!",
"Press 'd' to delete a file or a folder!",
"Press 'n' to create a new file!",
"Press 'c' to copy a file or folder!",
"Press 'p' to paste!",
"Press 'v' to edit a file!"
]
for n, h in enumerate(helpstrs):
draw_helpstr(h, len(helpstrs) - 1 - n)
r = rec()
for n, f in enumerate(r):
stdscr.addstr(2 + n, f["i"] * 2, f["x"], (curses.A_UNDERLINE if n == s else curses.A_NORMAL))
stdscr.refresh()
k = stdscr.getch()
if k == 27 or k == ord("q"):
if changes:
draw_notif("(w)rite/(r)evert/(C)ancel", notif, notif_w, notif_h)
k2 = stdscr.getch()
if k2 == ord("w"):
changes = False
save()
break
elif k2 == ord("r"):
break
else:
pass
else:
break
elif k == curses.KEY_DOWN:
s += 1
if s >= len(r):
s = len(r) - 1
elif k == curses.KEY_UP:
s -= 1
if s <= 0:
s = 0
elif k == ord("h"):
draw_help = not draw_help
elif k == 127 or k == ord("d"):
if r[s]["fn"] in files:
draw_notif(f'Delete file "{r[s]["x"]}" y/N', notif, notif_w, notif_h)
if stdscr.getch() == ord("y"):
del files[r[s]["fn"]]
changes = True
r = rec()
l = len(r)
if s >= l:
s = l - 1
if s <= 0:
s = 0
else:
draw_notif(f'Delete ENTIRE folder "{r[s]["x"]}" y/N', notif, notif_w, notif_h)
if stdscr.getch() == ord("y"):
d = []
for f in files:
if f.startswith(r[s]["fn"]):
d.append(f)
for f in d:
del files[f]
changes = True
r = rec()
l = len(r)
if s >= l:
s = l - 1
if s <= 0:
s = 0
elif k == curses.KEY_F2 or k == ord("r"):
oname = r[s]["x"]
fname = r[s]["fn"]
if fname != ".":
name = take_input(stdscr, r[s]["i"] * 2, 2 + s, len(r[s]["x"]), oname)
if (not name == oname) and name != "":
fname_p = pathlib.Path(fname)
if fname in files:
files["./" + str(fname_p.parent / name)] = files[fname]
del files[fname]
changes = True
else:
rname = {}
for f in files:
if f.startswith(fname):
fp = pathlib.Path(f)
rname[f] = "./" + str(fname_p.parent / name / fp.relative_to(fname_p))
for f in rname:
files[rname[f]] = files[f]
del files[f]
changes = True
elif k == ord("n"):
if not r[s]["fn"] in files:
name = draw_notif_input("File name:", notif, notif_w, notif_h, "")
files["./" + str(pathlib.Path(r[s]["fn"][2:]) / name)] = b""
changes = True
elif k == ord("c"):
if r[s]["fn"] in files:
copy = {r[s]["x"]: files[r[s]["fn"]]}
else:
copy = {}
for f in files:
if f.startswith(r[s]["fn"]):
copy[str(pathlib.Path(f).relative_to(pathlib.Path(r[s]["fn"]).parent))] = files[f]
elif k == ord("p"):
if not r[s]["fn"] in files:
for c in copy:
files["./" + str(pathlib.Path(r[s]["fn"]) / c)] = copy[c]
if len(copy) > 0:
changes = True
elif k == ord("w"):
save()
draw_notif("Wrote to file.", notif, notif_w, notif_h)
changes = False
stdscr.getch()
elif k == ord("v"):
if r[s]["fn"] in files:
reinit = True
h = hash(files[r[s]["fn"]])
if write_file(r[s]["fn"]) != h:
changes = True
break
def save():
d = BytesIO()
z = ZipFile(d, mode="w")
for f in files:
z.writestr(f[2:], files[f])
z.close()
d.seek(0)
d = d.read()
d = encrypt(key, d)
f = open(fn, "wb")
f.write(d)
f.close()
def write_file(f):
d = None
with NamedTemporaryFile() as tmp:
tmp.write(files[f])
tmp.flush()
subprocess.run(["nano", tmp.name])
tmp.seek(0)
d = tmp.read()
files[f] = d
return hash(d) if d else None
def draw_notif(m, f, fw, fh):
if len(m) >= fw - 3:
return
f.clear()
f.addstr(fh // 2, 1 + (fw // 2) - (len(m) // 2) - 1, m)
rectangle(f, 0, 0, fh - 1, fw - 2)
f.refresh()
def draw_notif_input(m, f, fw, fh, start_text):
if len(m) >= fw - 3:
return
x = 1 + (fw // 2) - (len(m) // 2) - 1
y = fh // 2 + 1
curses.curs_set(1)
escdelay = curses.get_escdelay()
curses.set_escdelay(25)
k2 = None
text = ""
f.clear()
f.addstr(fh // 2, 1 + (fw // 2) - (len(m) // 2) - 1, m)
rectangle(f, 0, 0, fh - 1, fw - 2)
f.refresh()
f.move(y, x)
while (k2 := f.getch()) != ord("\n"):
if k2 == 127:
if len(text) > 0:
text = text[:-1]
elif k2 == 27:
text = ""
break
else:
text += chr(k2)
if not re.match(r"[a-z0-9_.]+", text):
text = text[:-1]
f.clear()
f.addstr(fh // 2, 1 + (fw // 2) - (len(m) // 2) - 1, m)
rectangle(f, 0, 0, fh - 1, fw - 2)
f.addstr(y, x, text)
f.refresh()
curses.curs_set(0)
curses.noecho()
curses.set_escdelay(escdelay)
return text
def take_input(stdscr, x, y, lclear, start_text):
curses.curs_set(1)
escdelay = curses.get_escdelay()
curses.set_escdelay(25)
stdscr.move(y, x)
for i in range(lclear):
stdscr.delch(y, x)
k2 = None
text = start_text
stdscr.addstr(text)
stdscr.refresh()
while (k2 := stdscr.getch()) != ord("\n"):
if k2 == 127:
if len(text) > 0:
text = text[:-1]
stdscr.delch(stdscr.getyx()[0], stdscr.getyx()[1] - 1)
elif k2 == 27:
text = start_text
break
else:
text += chr(k2)
try:
stdscr.echochar(chr(k2))
except OverflowError: #Really lazy solution, but it works well enough
text = text[:-1]
curses.curs_set(0)
curses.noecho()
curses.set_escdelay(escdelay)
return text
def rec():
r = {}
def gRec(dic, fs):
if len(fs) == 1:
return dic[fs[0]]
return gRec(dic[fs[0]], fs[1:])
for f in files:
fs = f.split("/")
t = []
for x in fs:
if len(t) == 0:
if not x in r:
r[x] = {}
t.append(x)
continue
if not x in gRec(r, t):
gRec(r, t)[x] = {}
t.append(x)
l = []
def it(d, i, fn):
for x in sorted(d):
y = (fn + "/" + x)
l.append({"i": i, "x": x, "fn": y[1:]}) #indent, number, name, full name
it(d[x], i+1, y)
it(r, 0, "")
if len(l) == 0:
l.append({"i": 0, "x": ".", "fn": "."})
return l
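# On-disk layout produced by encrypt() below: 12-byte GCM nonce || 16-byte auth tag
# || AES-GCM ciphertext. decrypt() parses the same layout and returns None when the
# authentication tag does not verify (wrong password or corrupted file).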
def encrypt(key, d):
d_out = b""
nonce = rand(12)
d_out += nonce
cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
ct, tag = cipher.encrypt_and_digest(d)
d_out += tag
d_out += ct
return d_out
def decrypt(key, d):
nonce = d[:12]
tag = d[12:28]
ct = d[28:]
cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
try:
pt = cipher.decrypt_and_verify(ct, tag)
except ValueError:
return None
return pt
if __name__ == "__main__":
main()
|
from primitives import GameObject
import constants as c
import pygame
import random
class Note(GameObject):
def __init__(self, game, note_key, scene, beat):
super().__init__(game)
self.scene = scene
self.beat = beat
self.note_key = note_key
self.y = self.get_y()
self.x = self.get_x()
self.color = c.NOTE_COLORS[note_key]
self.destroyed = False
self.radius = 15
self.path = random.choice(["", "_2", "_3","_4"])
self.sprite = game.load_image(f"note{self.path}.png").convert()
self.flair = game.load_image("note_flair.png")
#self.color_sprite()
self.color_flair()
self.sprite.set_colorkey(self.sprite.get_at((0, 0)))
self.missed = False
def color_flair(self):
self.flair = self.flair.copy()
for x in range(self.flair.get_width()):
for y in range(self.flair.get_height()):
r, g, b, a = self.flair.get_at((x, y))
r, g, b = self.color
self.flair.set_at((x, y), (r, g, b, a))
def color_sprite(self):
self.sprite = self.game.load_image(f"note{self.path}.png").convert()
surf = self.sprite.copy()
surf.fill(self.color)
self.sprite.blit(surf, (0, 0), special_flags=pygame.BLEND_MULT)
self.sprite.set_colorkey(self.sprite.get_at((0, 0)))
def destroy(self):
self.destroyed = True
def get_y(self):
self.y = (self.beat - self.scene.current_beat())*c.BEAT_HEIGHT/self.scene.subdivision + c.NOTE_TARGET_Y
return self.y
def get_x(self):
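        # Lanes are mirrored around the window centre: assuming note keys run 1-8,
        # keys 1-4 are laid out from the left margin, while keys 5-8 map to the same
        # lanes measured from the right edge (key 5 pairs with lane 4, key 8 with lane 1).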
newline = self.note_key
if self.note_key >= 5:
newline = 9 - self.note_key
x = c.TARGET_MARGIN + (newline - 1) * c.TARGET_SPACING
if self.note_key >= 5:
x = c.WINDOW_WIDTH - x
self.x = x
return x
def update(self, dt, events):
if self.missed and self.y < -100:
self.destroy()
pass
def draw(self, surf, offset=(0, 0)):
x = self.x + offset[0]
y = self.get_y() + offset[1]
if not self.missed:
surf.blit(self.flair, (x - self.flair.get_width()//2, y - self.flair.get_height()//2))
#pygame.draw.circle(surf, self.color, (x, y), self.radius)
#pygame.draw.circle(surf, (0, 0, 0), (x, y), self.radius, 1)
surf.blit(self.sprite, (x - self.sprite.get_width()//2, y - self.sprite.get_height()//2))
pass
def miss(self):
if self.missed:
return
self.scene.shake(15)
self.color = (200, 50, 50)
self.color_sprite()
self.missed = True
|
from multiprocessing import Process
from proxypool.tester import Tester
from proxypool.db import RedisClient
from proxypool.crawl import Crawler
from proxypool.api import start_api
from time import sleep
from proxypool.settings import *
class Manager(object):
def handle_getter(self):
"""
        Crawl proxies and add them to the pool.
"""
crawler = Crawler()
client = RedisClient()
while True:
for proxy in crawler.start_crawl():
client.add(proxy)
sleep(20)
def handle_tester(self):
"""
        Test the proxies in the pool.
"""
tester = Tester()
while True:
tester.run()
sleep(20)
def handle_api(self):
"""
        Start the API.
"""
start_api()
def start_pool(self):
if ENABLE_CRAWL:
process = Process(target=self.handle_getter)
process.start()
if ENABLE_TEST:
process = Process(target=self.handle_tester)
process.start()
if ENABLE_API:
process = Process(target=self.handle_api)
process.start()
if __name__ == '__main__':
m = Manager()
m.start_pool()
|
#!/usr/bin/env python3
# source: youtube.com/watch?v=jBxRGcDmfWA
# source: youtube.com/watch?v=-zd1UI2JTuk
import pyautogui, time
from pynput import keyboard
import sys
text = "Pog"
def on_press(key):
    if str(key) == "Key.esc":
        sys.exit()
    elif str(key) == "Key.enter":
# for word in f:
pyautogui.typewrite(text)
pyautogui.press("enter")
time.sleep(0)
with keyboard.Listener(on_press=on_press) as listener:
    listener.join()
|
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy as np
import sys
import os
import seaborn as sns
input_file = sys.argv[1]
plot_path = input_file + "-distribution.png"
weights = []
with open(input_file, "r") as f:
    for line in f:
        weights.append(int(line))
plt.hist(weights, bins=20)
plt.title('distribution of weights')
plt.yscale('log')
plt.savefig(plot_path)
|
__all__ = ['split']
def split(splittable, splits=None, index=None):
"""Splits a list into :arg:`jobs` chunks
Args:
splittable (Sequence[T]): A list of any T to be split into
jobs chunks
splits (Union[int, str]): The number of parallel jobs. Default: 1
index (Union[int, str]): If this is a specified agent of a
parallel job, this is the split index to return. 0 indexed.
Default: None, which means return all splits.
Returns:
List[T]: list of T split into jobs chunks or the chunk
specified by index.
"""
splits = _default_int(splits, 1)
index = _default_int(index)
if splits == 1:
return splittable
splits = split_consistently(splittable, splits)
    if index is not None:
return splits[index]
else:
return splits
def split_consistently(splittable, splits):
_splits = [[] for _ in range(0, splits)]
for i, item in enumerate(splittable):
_, j = divmod(i, splits)
_splits[j].append(item)
return _splits
def _default_int(value, default=None):
    if value is None or value == "":
        return default
    else:
        return int(value)
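# Example of the round-robin split (illustrative):
#   split(list(range(7)), splits=3)           -> [[0, 3, 6], [1, 4], [2, 5]]
#   split(list(range(7)), splits=3, index=1)  -> [1, 4]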
|
"""Example code for MNIST. A fully-connected network and a convolutional neural network were implemented."""
import runtime_path # isort:skip
import argparse
import gzip
import os
import pickle
import sys
import time
import numpy as np
from core.evaluator import AccEvaluator
from core.layers import Dense
from core.layers import ReLU
from core.losses import SoftmaxCrossEntropyLoss
from core.model import Model
from core.nn import Net
from core.optimizer import Adam
from core.tensor import Tensor
from utils.data_iterator import BatchIterator
from utils.downloader import download_url
from utils.seeder import random_seed
def get_one_hot(targets, nb_classes):
return np.eye(nb_classes)[np.array(targets).reshape(-1)]
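# e.g. get_one_hot([1, 3], 4) -> [[0., 1., 0., 0.], [0., 0., 0., 1.]]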
def prepare_dataset(data_dir):
url = "https://raw.githubusercontent.com/mnielsen/neural-networks-and-deep-learning/master/data/mnist.pkl.gz"
save_path = os.path.join(data_dir, url.split("/")[-1])
print("Preparing MNIST dataset ...")
try:
download_url(url, save_path)
except Exception as e:
print('Error downloading dataset: %s' % str(e))
sys.exit(1)
# load the dataset
with gzip.open(save_path, "rb") as f:
return pickle.load(f, encoding="latin1")
def main(args):
if args.seed >= 0:
        random_seed(args.seed)
train_set, valid_set, test_set = prepare_dataset(args.data_dir)
train_x, train_y = train_set
test_x, test_y = test_set
train_y = get_one_hot(train_y, 10)
train_x = Tensor(train_x)
train_y = Tensor(train_y)
test_x = Tensor(test_x)
test_y = Tensor(test_y)
net = Net([
Dense(200),
ReLU(),
Dense(100),
ReLU(),
Dense(70),
ReLU(),
Dense(30),
ReLU(),
Dense(10)
])
model = Model(net=net, loss=SoftmaxCrossEntropyLoss(), optimizer=Adam(lr=args.lr))
loss_layer = SoftmaxCrossEntropyLoss()
iterator = BatchIterator(batch_size=args.batch_size)
evaluator = AccEvaluator()
loss_list = list()
for epoch in range(args.num_ep):
t_start = time.time()
for batch in iterator(train_x, train_y):
model.zero_grad()
pred = model.forward(batch.inputs)
loss = loss_layer.loss(pred, batch.targets)
loss.backward()
model.step()
loss_list.append(loss.values)
print("Epoch %d tim cost: %.4f" % (epoch, time.time() - t_start))
# evaluate
model.set_phase("TEST")
test_pred = model.forward(test_x)
test_pred_idx = np.argmax(test_pred, axis=1)
test_y_idx = test_y.values
res = evaluator.evaluate(test_pred_idx, test_y_idx)
print(res)
model.set_phase("TRAIN")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num_ep", default=50, type=int)
parser.add_argument("--data_dir", default="./examples/mnist/data", type=str)
parser.add_argument("--lr", default=1e-3, type=float)
parser.add_argument("--batch_size", default=128, type=int)
parser.add_argument("--seed", default=-1, type=int)
args = parser.parse_args()
main(args)
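# Example run (illustrative; adjust the script path to wherever this file lives):
#   python mnist_example.py --num_ep 10 --batch_size 128 --lr 1e-3 --seed 31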
|
from hikcamerabot.services.stream.dvr.service import DvrStreamService
__all__ = [
'DvrStreamService',
]
|
from pathlib import Path
import numpy as np
import nltk
import re
from parse_json import tokenize
from tqdm import tqdm
import constants
import json
def basic_tokenizer(sentence):
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(" ", space_separated_fragment))
return [w for w in words if w]
def create_vocabulary(vocabulary_path, data_paths, tokenizer=None):
file = Path(vocabulary_path)
if not file.exists():
print("Creating vocabulary %s from data %s" % (vocabulary_path, str(data_paths)))
vocab = {}
for path in data_paths:
with open(path, mode="r") as f:
counter = 0
for line in f:
counter += 1
if counter % 10000 == 0:
print("processing line %d" % counter)
tokens = tokenize(
nltk.word_tokenize(line)) # tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
if w in vocab:
vocab[w] += 1
else:
vocab[w] = 1
vocab_list = constants.START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
print("Vocabulary size: %d" % len(vocab_list))
with open(vocabulary_path, "w") as vocab_file:
for w in vocab_list:
vocab_file.write(w + "\n")
# Returns:
# vocab: dictionary of form <token>, <token_id>
# rev_vocab: list of all words in vocabulary without the new line character
def initialize_vocabulary(vocabulary_path):
file = Path(vocabulary_path)
if file.exists():
rev_vocab = []
with open(vocabulary_path, "r") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip('\n') for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
# Create a subset of the glove file having only the tokens in our vocabulary
def process_glove(glove_file_path, vocab_list, save_path, size=4e5, random_init=True, glove_dim=300):
file = Path(save_path)
if not file.exists():
if random_init:
glove = np.random.randn(len(vocab_list), glove_dim)
else:
glove = np.zeros((len(vocab_list), glove_dim))
found = 0
#Fix the padding to zero
glove[constants.PAD_ID, :] = np.zeros((1, glove_dim))
with open(glove_file_path, 'r') as fh:
for line in tqdm(fh, total=size):
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in vocab_list:
idx = vocab_list.index(word)
glove[idx, :] = vector
found += 1
if word.capitalize() in vocab_list:
idx = vocab_list.index(word.capitalize())
glove[idx, :] = vector
found += 1
if word.upper() in vocab_list:
idx = vocab_list.index(word.upper())
glove[idx, :] = vector
found += 1
print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab_list), glove_file_path))
np.savez_compressed(save_path, glove=glove)
print("saved trimmed glove matrix at: {}".format(save_path))
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None):
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
return [vocabulary.get(w, constants.UNK_ID) for w in words]
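# e.g. with vocabulary {"hello": 4, "world": 7}, sentence_to_token_ids("hello there world", vocabulary)
# returns [4, constants.UNK_ID, 7].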
def data_to_token_ids(data_path, target_path, vocabulary_path,
tokenizer=None):
target_file = Path(target_path)
data_file = Path(data_path)
if not target_file.exists():
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with open(data_path, "r") as data_file:
with open(target_path, "w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 5000 == 0:
print("tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(line, vocab, tokenizer)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
def word2charix(word, char2ix, norm_word_length=16):
"""
Converting a word to a list of indices representing its character
We truncate/pad the word to be of size 'norm_word_length'
We assume each word directly fed as a string
"""
# splitting into list of chars
word = [char for char in word]
# padding / truncating each word to word_length
if len(word) > norm_word_length:
word = word[:norm_word_length]
elif len(word) < norm_word_length:
word = word + (norm_word_length - len(word)) * ['<pad>']
# converting characters to int in word list
tmp = []
for i in range(len(word)):
if word[i] in char2ix:
char = word[i]
else:
char = '<unk>'
tmp.append(int(char2ix[char]))
word = tmp
return word
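# Example (assuming char2ix maps '<pad>', '<unk>' and single characters to integer ids):
#   word2charix("cat", char2ix, norm_word_length=5)
#   -> [char2ix['c'], char2ix['a'], char2ix['t'], char2ix['<pad>'], char2ix['<pad>']]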
def create_vocab2charix_dict(vocab_file, vocab2charix_file, char2ix):
vocab2charix = {}
with open(vocab_file) as f:
for line in f:
line = line.strip()
if line in ['<pad>', '<sos>', '<unk>']:
continue
vocab2charix[line] = word2charix(line, char2ix)
with open(vocab2charix_file, 'w') as f:
json.dump(vocab2charix, f)
|
# Copyright 2022 University of New South Wales, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
# Python 2
import Tkinter as tk
import ttk
import tkFileDialog as filedialog
import tkMessageBox as messagedialog
import tkSimpleDialog as simpledialog
except ImportError:
    # Python 3
    import tkinter as tk
    from tkinter import ttk
    from tkinter import filedialog
    from tkinter import messagebox as messagedialog
    from tkinter import simpledialog
from datastore import get_datastore, set_datastore
from tools import PerformActionTask, is_xvi_running
import os, subprocess, datetime
try:
    # Python 2
    import Queue
except ImportError:
    # Python 3
    import queue as Queue
import yaml
# Dialog to configure the XVI Paths to scan
class XVIPathsDialog:
def __init__(self, parent):
self.top = tk.Toplevel(parent)
self.top.title('Configure XVI Paths')
self.top.geometry('480x360')
self.top.update()
self.top.minsize(self.top.winfo_width(), self.top.winfo_height())
self.top.resizable(False, False)
self.top.focus_set()
self.top.grab_set()
tk.Label(self.top,text='XVI directory locations').grid(row=0, columnspan=2, padx=5, pady=5)
self.listbox_paths = tk.Listbox(self.top)
self.listbox_paths.grid(row=1, columnspan=2, padx=(5,0), pady=5, sticky='news')
vsb = ttk.Scrollbar(self.top, orient="vertical", command=self.listbox_paths.yview)
vsb.grid(row=1, column=2, sticky=("N", "S", "E", "W"), padx=(0,10), pady=(5, 5))
self.listbox_paths.configure(yscrollcommand=vsb.set)
datastore = get_datastore()
for p in datastore['xvi_paths']:
self.listbox_paths.insert(tk.END, p)
tk.Button(self.top,text='Add Path',command=self.add_xvi_path, width=20).grid(row=2, column=0, padx=5, pady=5)
tk.Button(self.top,text='Remove Selected Path',command=self.remove_xvi_path, width=20).grid(row=2, column=1, padx=5, pady=5)
self.top.columnconfigure(0, weight=1)
self.top.columnconfigure(1, weight=1)
self.top.rowconfigure(1, weight=1)
self.top.attributes("-topmost", True)
# Add a new path to the list and datastore
def add_xvi_path(self):
self.directory = os.path.normpath(filedialog.askdirectory(parent=self.top))
datastore = get_datastore()
datastore['xvi_paths'].append(self.directory)
set_datastore(datastore)
self.listbox_paths.insert(tk.END, self.directory)
# Remove a path from the list and datastore
def remove_xvi_path(self):
datastore = get_datastore()
selected_indexes = self.listbox_paths.curselection()
for ind in selected_indexes:
datastore['xvi_paths'].pop(int(ind))
self.listbox_paths.delete(int(ind))
set_datastore(datastore)
# Dialog to configure the OIS connection parameters
class OISConnectionDialog:
def __init__(self, parent):
self.top = tk.Toplevel(parent)
self.top.title('Configure OIS')
self.top.geometry('450x300')
self.top.update()
self.top.focus_set()
self.top.grab_set()
self.top.columnconfigure(0, weight=1)
self.top.columnconfigure(1, weight=3)
self.top.attributes("-topmost", True)
tk.Label(self.top, text='Configure the connection parameters for OIS').grid(row=0, columnspan=2, padx=5, pady=5, sticky='EW')
tk.Label(self.top, text='Host:').grid(row=1, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='Username:').grid(row=2, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='Password:').grid(row=3, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='Database:').grid(row=4, sticky='EW', padx=5, pady=5)
self.txt_host = tk.Entry(self.top)
self.txt_user = tk.Entry(self.top)
self.txt_pass = tk.Entry(self.top)
self.txt_db = tk.Entry(self.top)
datastore = get_datastore()
try:
self.txt_host.insert(0,datastore['ois_config']['host'])
self.txt_user.insert(0,datastore['ois_config']['user'])
self.txt_pass.insert(0,datastore['ois_config']['pass'])
self.txt_db.insert(0,datastore['ois_config']['db'])
except Exception as e:
# No ois_config in datastore yet
pass
self.txt_host.grid(row=1, column=1, padx=5, sticky='EW')
self.txt_user.grid(row=2, column=1, padx=5, sticky='EW')
self.txt_pass.grid(row=3, column=1, padx=5, sticky='EW')
self.txt_db.grid(row=4, column=1, padx=5, sticky='EW')
tk.Button(self.top,text='Save',command=self.save_configuration, width=15).grid(row=5, columnspan=2, padx=5, pady=5)
# Save parameters to the datastore
def save_configuration(self):
datastore = get_datastore()
ois_config = {}
ois_config['host'] = self.txt_host.get()
ois_config['user'] = self.txt_user.get()
ois_config['pass'] = self.txt_pass.get()
ois_config['db'] = self.txt_db.get()
datastore['ois_config'] = ois_config
set_datastore(datastore)
self.top.destroy()
# Dialog to configure MRNs to ignore
class IgnoreMRNsDialog:
def __init__(self, parent):
self.top = tk.Toplevel(parent)
self.top.title('Ignore MRNs')
self.top.geometry('480x360')
self.top.update()
self.top.minsize(self.top.winfo_width(), self.top.winfo_height())
self.top.resizable(False, False)
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
tk.Label(self.top,text='Directories matching these MRNs will be ignored (from next scan)', wraplength=480).grid(row=0, columnspan=2, padx=5, pady=5)
self.listbox_mrns = tk.Listbox(self.top)
self.listbox_mrns.grid(row=1, columnspan=2, padx=(5,0), pady=5, sticky='news')
vsb = ttk.Scrollbar(self.top, orient="vertical", command=self.listbox_mrns.yview)
vsb.grid(row=1, column=2, sticky=("N", "S", "E", "W"), padx=(0,10), pady=(5, 5))
self.listbox_mrns.configure(yscrollcommand=vsb.set)
datastore = get_datastore()
for p in datastore['ignore_mrns']:
self.listbox_mrns.insert(tk.END, p)
tk.Button(self.top,text='Add MRN',command=self.add_mrn, width=15).grid(row=2, column=0, padx=5, pady=5)
tk.Button(self.top,text='Remove MRN',command=self.remove_mrn, width=15).grid(row=2, column=1, padx=5, pady=5)
self.top.columnconfigure(0, weight=1)
self.top.columnconfigure(1, weight=1)
self.top.rowconfigure(1, weight=1)
# Add a new MRN to the list and datastore
def add_mrn(self):
mrn = simpledialog.askstring('MRN', 'Enter MRN to ignore', parent=self.top)
        if mrn is not None and len(mrn) > 0:
datastore = get_datastore()
datastore['ignore_mrns'].append(mrn)
set_datastore(datastore)
self.listbox_mrns.insert(tk.END, mrn)
# Remove an MRN from the list and datastore
def remove_mrn(self):
datastore = get_datastore()
selected_indexes = self.listbox_mrns.curselection()
for ind in selected_indexes:
datastore['ignore_mrns'].pop(ind)
self.listbox_mrns.delete(ind)
set_datastore(datastore)
# Dialog to configure sending of Email Reports
class EmailReportsDialog:
def __init__(self, parent):
self.top = tk.Toplevel(parent)
self.top.title('Email Reports')
self.top.geometry('580x520')
self.top.update()
self.top.minsize(self.top.winfo_width(), self.top.winfo_height())
self.top.resizable(False, False)
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
tk.Label(self.top,text='Configure Email reports to send for schedule runs (command line)', wraplength=480).grid(row=0, columnspan=2, padx=5, pady=5)
tk.Label(self.top, text='This Machine Name:').grid(row=1, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='SMTP Server Host:').grid(row=2, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='SMTP Server Port:').grid(row=3, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='SMTP User (blank if none):').grid(row=4, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='SMTP Password (blank if none):').grid(row=5, sticky='EW', padx=5, pady=5)
tk.Label(self.top, text='Send Emails From:').grid(row=6, sticky='EW', padx=5, pady=5)
self.txt_name = tk.Entry(self.top)
self.txt_host = tk.Entry(self.top)
self.txt_port = tk.Entry(self.top)
self.txt_user = tk.Entry(self.top)
self.txt_password = tk.Entry(self.top)
self.txt_from = tk.Entry(self.top)
datastore = get_datastore()
try:
self.txt_name.insert(0,datastore['email_reports_config']['name'])
self.txt_host.insert(0,datastore['email_reports_config']['host'])
self.txt_port.insert(0,datastore['email_reports_config']['port'])
self.txt_user.insert(0,datastore['email_reports_config']['user'])
self.txt_password.insert(0,datastore['email_reports_config']['password'])
self.txt_from.insert(0,datastore['email_reports_config']['from'])
except Exception as e:
# No email_reports_config in datastore yet
pass
self.txt_name.grid(row=1, column=1, padx=5, sticky='EW')
self.txt_host.grid(row=2, column=1, padx=5, sticky='EW')
self.txt_port.grid(row=3, column=1, padx=5, sticky='EW')
self.txt_user.grid(row=4, column=1, padx=5, sticky='EW')
self.txt_password.grid(row=5, column=1, padx=5, sticky='EW')
self.txt_from.grid(row=6, column=1, padx=5, sticky='EW')
self.listbox_emails = tk.Listbox(self.top)
self.listbox_emails.grid(row=7, columnspan=2, padx=(5,0), pady=5, sticky='news')
vsb = ttk.Scrollbar(self.top, orient="vertical", command=self.listbox_emails.yview)
vsb.grid(row=7, column=2, sticky=("N", "S", "E", "W"), padx=(0,10), pady=(5, 5))
self.listbox_emails.configure(yscrollcommand=vsb.set)
datastore = get_datastore()
try:
for p in datastore['email_reports_config']['email_addresses']:
self.listbox_emails.insert(tk.END, p)
except Exception as e:
# No email_reports_config in datastore yet
pass
tk.Button(self.top,text='Add Email Address',command=self.add_email, width=15).grid(row=8, column=0, padx=5, pady=5)
tk.Button(self.top,text='Remove Email Address',command=self.remove_email, width=15).grid(row=8, column=1, padx=5, pady=5)
tk.Button(self.top,text='Save',command=self.save_configuration, width=15).grid(row=9, column=0, columnspan=2, padx=5, pady=5)
self.top.columnconfigure(0, weight=1)
self.top.columnconfigure(1, weight=1)
self.top.rowconfigure(7, weight=1)
# Add a new Email to the list
def add_email(self):
email = simpledialog.askstring('Email', 'Enter Email Address', parent=self.top)
        if email is not None and len(email) > 0:
self.listbox_emails.insert(tk.END, email)
# Remove an Email from the list
def remove_email(self):
selected_indexes = self.listbox_emails.curselection()
for ind in selected_indexes:
self.listbox_emails.delete(ind)
# Save parameters to the datastore
def save_configuration(self):
datastore = get_datastore()
email_reports_config = {}
email_reports_config['name'] = self.txt_name.get()
email_reports_config['host'] = self.txt_host.get()
email_reports_config['port'] = self.txt_port.get()
email_reports_config['user'] = self.txt_user.get()
email_reports_config['password'] = self.txt_password.get()
email_reports_config['from'] = self.txt_from.get()
email_reports_config['email_addresses'] = self.listbox_emails.get(0, tk.END)
datastore['email_reports_config'] = email_reports_config
set_datastore(datastore)
self.top.destroy()
# Dialog to configure the archive path
class ArchivePathDialog:
def __init__(self, parent):
self.top = tk.Toplevel(parent)
self.top.title('Configure Archive Path')
self.top.geometry('400x200')
self.top.update()
self.top.minsize(self.top.winfo_width(), self.top.winfo_height())
self.top.resizable(True, False)
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
self.top.columnconfigure(0, weight=10)
self.top.columnconfigure(1, weight=1)
tk.Label(self.top, text='Location to archive XVI data to:').grid(row=0, columnspan=2, padx=5, pady=5)
self.txt_path = tk.Entry(self.top)
datastore = get_datastore()
try:
self.txt_path.insert(0,datastore['archive_path'])
except Exception as e:
# No archive path in datastore yet
pass
self.txt_path.grid(row=1, column=0, padx=5, pady=5, sticky='NEWS')
tk.Button(self.top,text='...',command=self.select_path).grid(row=1, column=1, padx=5, pady=5)
tk.Button(self.top,text='Save',command=self.save_configuration, width=15).grid(row=3, columnspan=2, padx=5, pady=5)
# Select a path and enter it in the text box
def select_path(self):
self.directory = filedialog.askdirectory(parent=self.top)
self.txt_path.delete(0,tk.END)
self.txt_path.insert(0,self.directory)
# Save parameters to the datastore
def save_configuration(self):
datastore = get_datastore()
datastore['archive_path'] = self.txt_path.get()
set_datastore(datastore)
self.top.destroy()
# Dialog to configure the log file path
class LogPathDialog:
def __init__(self, parent):
self.top = tk.Toplevel(parent)
self.top.title('Configure Log File Path')
self.top.geometry('400x200')
self.top.update()
self.top.minsize(self.top.winfo_width(), self.top.winfo_height())
self.top.resizable(True, False)
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
self.top.columnconfigure(0, weight=10)
self.top.columnconfigure(1, weight=1)
tk.Label(self.top, text='Location to store log files (restart required):').grid(row=0, columnspan=2, padx=5, pady=5)
self.txt_path = tk.Entry(self.top)
datastore = get_datastore()
try:
self.txt_path.insert(0,datastore['log_path'])
except Exception as e:
# No archive path in datastore yet
pass
self.txt_path.grid(row=1, column=0, padx=5, pady=5, sticky='NEWS')
tk.Button(self.top,text='...',command=self.select_path).grid(row=1, column=1, padx=5, pady=5)
tk.Button(self.top,text='Save',command=self.save_configuration, width=15).grid(row=3, columnspan=2, padx=5, pady=5)
# Select a path and enter it in the text box
def select_path(self):
self.directory = filedialog.askdirectory(parent=self.top)
self.txt_path.delete(0,tk.END)
self.txt_path.insert(0,self.directory)
# Save parameters to the datastore
def save_configuration(self):
datastore = get_datastore()
datastore['log_path'] = self.txt_path.get()
set_datastore(datastore)
self.top.destroy()
# About Dialog
class AboutDialog:
def __init__(self, parent):
# Load the release info to display authors and version
try:
with open('release.yaml', 'r') as f:
                release_info = yaml.safe_load(f)
except IOError as e:
release_info = {}
self.parent = parent
self.top = tk.Toplevel(parent)
self.top.title('About XVI Archive Tool')
self.top.geometry('640x480')
self.top.update()
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
authors = ""
version = ""
date = ""
try:
for a in release_info['authors']:
authors += a['name'] + " (" + a['email'] + ")\n"
version = release_info['version']
date = release_info['date']
except KeyError as e:
pass
txt = "Use this tool to archive and delete old patient data from XVI. If a " \
+ "patient has 4D data or was in a clinical trial their data will be archived, " \
+ "otherwise it will be deleted. Patients still under treatment will be kept.\n\n" \
+ "The data is only moved and deleted from the disk, no changes are made within " \
+ "XVI. See actioned.yaml for a list of patients removed by this tool, but still in " \
+ "XVI.\n\n" \
+ "This tool is developed by the Medical Physics Department at Liverpool and Macarthur " \
+ "CTCs. It is intended for internal use only.\n\n" \
+ "Authors:\n" + authors + "\n" \
+ "Version: " + version + " (" + date + ")"
lbl = tk.Label(self.top,text=txt)
lbl.pack(expand=True, fill=tk.X)
lbl.bind("<Configure>", self.resize)
def resize(self, event):
lbl = event.widget
pad = 0
pad += int(str(lbl['bd']))
pad += int(str(lbl['padx']))
pad *= 2
lbl.configure(wraplength = event.width - pad)
# Report Issue Dialog
class ReportDialog:
def __init__(self, parent):
# Load the release info to display authors
try:
with open('release.yaml', 'r') as f:
                release_info = yaml.safe_load(f)
except IOError as e:
release_info = {}
self.parent = parent
self.top = tk.Toplevel(parent)
self.top.title('Report Issue')
self.top.geometry('640x480')
self.top.update()
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
authors = ""
try:
for a in release_info['authors']:
authors += a['name'] + " (" + a['email'] + ")\n"
except KeyError as e:
pass
txt = "Please report any problems or feature requests to:\n" + authors + "\n\n" \
+ "or create an issue directly at: https://bitbucket.org/swscsmedphys/xviarchivetool/issues/new\n\n" \
+ "Provide a full description of the problem along with the date and time and machine where the problem occured."
lbl = tk.Label(self.top,text=txt)
lbl.pack(expand=True, fill=tk.X)
lbl.bind("<Configure>", self.resize)
def resize(self, event):
lbl = event.widget
pad = 0
pad += int(str(lbl['bd']))
pad += int(str(lbl['padx']))
pad *= 2
lbl.configure(wraplength = event.width - pad)
# Dialog to prompt the user to perform some actions in XVI
class ScanningDialog:
def __init__(self, parent):
self.parent = parent
self.top = tk.Toplevel(parent)
self.top.title('Scanning Directories')
self.top.geometry('300x100')
self.top.minsize(self.top.winfo_width(), self.top.winfo_height())
self.top.resizable(True, False)
self.top.update()
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
self.str_status = tk.StringVar()
self.str_status.set("Scanning...")
self.lbl_status = tk.Label(self.top,textvariable=self.str_status)
self.lbl_status.grid(row=0, padx=5, pady=5)
self.progress = ttk.Progressbar(self.top, orient="horizontal", mode="indeterminate")
self.progress.grid(row=1, padx=5, pady=5, sticky='news')
self.progress.start(50)
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(0, weight=1)
self.top.protocol("WM_DELETE_WINDOW", self.cancel)
def cancel(self):
self.str_status.set("Stopping Scan...")
        self.parent.scan_task.stop()
# Dialog to prompt the user to perform some actions in XVI
class ActionDialog:
def __init__(self, parent):
self.parent = parent
self.top = tk.Toplevel(parent)
self.top.title('XVI Action')
self.top.geometry('840x600')
self.top.update()
self.top.focus_set()
self.top.grab_set()
self.top.attributes("-topmost", True)
self.str_action_instructions = tk.StringVar()
self.lbl_action_instructions = tk.Label(self.top,textvariable=self.str_action_instructions, wraplength=480)
self.lbl_action_instructions.grid(row=0, padx=5, pady=5)
self.progress = ttk.Progressbar(self.top, orient="horizontal", mode="determinate")
self.listbox_patients = tk.Listbox(self.top)
self.listbox_patients.grid(row=2, padx=(5,0), pady=5, sticky='news')
vsb = ttk.Scrollbar(self.top, orient="vertical", command=self.listbox_patients.yview)
vsb.grid(row=2, column=2, sticky=("N", "S", "E", "W"), padx=(0,10), pady=(5, 5))
self.listbox_patients.configure(yscrollcommand=vsb.set)
self.str_action_button = tk.StringVar()
self.btn_perform_action = tk.Button(self.top,textvariable=self.str_action_button,command=self.perform_action, width=25)
self.btn_perform_action.grid(row=3, padx=5, pady=5)
self.btn_cancel_action = tk.Button(self.top,text='Cancel',command=self.cancel_action, width=15)
self.btn_close = tk.Button(self.top,text='Close',command=self.close_dialog, width=15)
self.action_running = False
self.top.protocol("WM_DELETE_WINDOW", self.cancel_action)
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(2, weight=1)
self.action = ""
# Set list of patients
def set_patient_list(self, list, action):
self.patients = list
self.action = action
datastore = get_datastore()
# First check that the XVI process isn't running,
# If it is alert the user and abort the action
if is_xvi_running():
messagedialog.showwarning(
"XVI Running",
"Please close the XVI application before performing " + action.lower() + ".",
parent=self.top
)
self.top.destroy()
return
# Also make sure a valid archive path has been setup (in case of archive)
if action == 'ARCHIVE' and (not 'archive_path' in datastore or not os.path.exists(datastore['archive_path'])):
messagedialog.showwarning(
"Archive Path",
"The archive path cannot be found. Make sure the directory exists and the network location is available.",
parent=self.top
)
self.top.destroy()
return
for p in list:
self.listbox_patients.insert(tk.END, p["mrn"] + " - " + p['name'])
# Make sure the list contains some patients
if len(list) > 0:
if list[0]["action"] == "ARCHIVE":
self.str_action_instructions.set("All data for the following patients will be copied to:\n\n" \
+ datastore['archive_path'] + \
"\n\nOnce successfully copied the data will be deleted.")
self.str_action_button.set("ARCHIVE PATIENT DATA")
else:
self.action = "DELETE"
self.str_action_instructions.set("All data for the following patients will be deleted.")
self.str_action_button.set("DELETE PATIENT DATA")
else:
# If there are no patients, alert the user and close the dialog
messagedialog.showwarning(
"No Patient Locations",
"There are no patients for this action",
parent=self.top
)
self.top.destroy()
# Perform either the Archive or Delete Action
def perform_action(self):
# Hide the perform action button and show the cancel button
self.btn_perform_action.grid_forget()
self.btn_cancel_action.grid(row=3, padx=5, pady=5)
# Remove the top label from the dialog
self.lbl_action_instructions.grid_forget()
# Clear the list box to show log of actions
self.listbox_patients.delete(0, tk.END) # clear
# Show the progress bar
self.progress.grid(row=1, padx=5, pady=5, sticky='news')
self.progress['maximum']=len(self.patients)
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Start the action task, allowing it to report back its progress to the queue
self.listbox_patients.insert(tk.END, now + " - " + self.action.capitalize() + " Action Start")
self.queue = Queue.Queue()
self.action_task = PerformActionTask(self.queue, self.patients, self.action)
self.action_task.start()
self.action_running = True
self.parent.after(100, self.process_queue)
def process_queue(self):
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
actioned_dirs = None
# Loop over each item in queue
for q in range(self.queue.qsize()):
            msg = self.queue.get(False)  # non-blocking get; items come out in FIFO order
# If the msg is a list, we know this was the last item
if type(msg) == list:
actioned_dirs = msg
else:
# Otherwise it was a progress update
self.listbox_patients.insert(tk.END, now + " - " + msg)
self.progress['value'] += 1
# Scroll to end of listbox to see new message
self.listbox_patients.yview(tk.END)
# If actioned_dirs is not None then the action is complete
        if actioned_dirs is not None:
self.action_running = False
action_complete = "Complete"
if not self.progress['value'] == self.progress['maximum']:
action_complete = "Cancelled"
self.listbox_patients.insert(tk.END, now + " - " + self.action.capitalize() + " Action " + action_complete)
# Update the directories in the parent window
for d in actioned_dirs:
self.parent.directories[:] = [dir for dir in self.parent.directories if not os.path.join(dir["path"],dir["dir_name"]) == os.path.join(d["path"],d["dir_name"])]
self.parent.update_gui()
self.parent.update_list()
# Hide the cancel button and show the close button
self.btn_cancel_action.grid_forget()
self.btn_close.grid(row=3, padx=5, pady=5)
# Alert the user
messagedialog.showinfo(action_complete, self.action.capitalize() + " " + action_complete, parent=self.top)
else:
# Run again in 100 ms
self.parent.after(100, self.process_queue)
# Cancel the current action
def cancel_action(self):
# Confirm that the user would like to cancel
if self.action_running:
if messagedialog.askyesno("Cancel " + self.action.capitalize() + " Action", "Are you sure you wish to cancel?", parent=self.top):
self.btn_cancel_action['state'] = 'disabled'
self.action_task.stop()
else:
self.top.destroy()
# Close the window
def close_dialog(self):
self.top.destroy()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-17 17:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ingredient_name', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='MethodStep',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('step', models.TextField(max_length=500)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chef', models.CharField(blank=True, max_length=200)),
('source', models.CharField(blank=True, max_length=200)),
('title', models.CharField(max_length=200)),
('description', models.TextField(blank=True)),
('recipe_picture_url', models.CharField(blank=True, max_length=500)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RecipePicture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('picture', models.ImageField(blank=True, upload_to=b'')),
('recipe', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='recipes.Recipe')),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('picture', models.ImageField(blank=True, upload_to=b'profile_images')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='methodstep',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.Recipe'),
),
migrations.AddField(
model_name='ingredient',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.Recipe'),
),
]
|
# -*- coding: utf-8 -*-
import logging
import datetime
from sqlalchemy import Table, Column, ForeignKey, types
from sqlalchemy.orm import relation
from ckan.model.meta import metadata, mapper
from ckan.model.domain_object import DomainObject
from ckan.model import group
saha_organization_table = None
__all__ = [
'SahaOrganization', 'saha_organization_table',
]
log = logging.getLogger('ckanext_saha')
class SahaOrganization(DomainObject):
pass
def setup():
if not group.group_table.exists():
log.warning('Group tables not defined?')
return
if saha_organization_table is None:
define_tables()
log.debug('SAHA tables defined in memory')
if not saha_organization_table.exists():
saha_organization_table.create()
log.debug('SAHA tables created')
else:
log.debug('SAHA tables already exist')
def define_tables():
global saha_organization_table
if saha_organization_table is not None:
return
saha_organization_columns = (
Column('id', types.UnicodeText, primary_key=True),
Column('groupId', types.UnicodeText, ForeignKey('group.id'), nullable=True),
Column('modifiedDate', types.DateTime, default=datetime.datetime.utcnow),
Column('organizationName', types.UnicodeText),
Column('organizationUnit', types.UnicodeText),
Column('businessId', types.UnicodeText),
)
saha_organization_table = Table('saha_organization', metadata,
*saha_organization_columns)
mapper(SahaOrganization, saha_organization_table,
properties={
'group': relation(group.Group),
}
)
|
from django.test import TestCase
from django.apps import apps
from iee_contact.apps import IEEContactConfig
class IEEContactConfigTestCase(TestCase):
"""
Test app config
"""
def test_apps(self):
self.assertEqual(IEEContactConfig.name, "iee_contact")
self.assertEqual(apps.get_app_config("iee_contact").name, "iee_contact")
|
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
lastYear = dt.date(2017, 8, 23) - dt.timedelta(days=365)
@app.route("/")
def welcome():
return (
f"Welcome to the Hawaii Climate Analysis API!<br/>"
f"These are all the routes that are available:<br/>"
f"/api/v1.0/precipitation<br/>/api/v1.0/stations<br/>/api/v1.0/tobs<br/>/api/v1.0/temp/start/end"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
precipitation = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= lastYear).all()
precip = []
for p in precipitation:
day = {p.date: p.prcp}
precip.append(day)
return jsonify(precip)
@app.route("/api/v1.0/stations")
def stations():
names = session.query(Station.station).all()
station_names = list(np.ravel(names))
return jsonify(stations=station_names)
@app.route("/api/v1.0/tobs")
def temp():
results = session.query(Measurement.tobs).\
filter(Measurement.date >= lastYear).\
filter(Measurement.station =='USC00519281').all()
temperature = list(np.ravel(results))
return jsonify(temps=temperature)
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start, end=None):
    # Return min/avg/max temperature between start and end (end optional)
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    query = session.query(*sel).filter(Measurement.date >= start)
    if end is not None:
        query = query.filter(Measurement.date <= end)
    return jsonify(temps=list(np.ravel(query.all())))
if __name__ == '__main__':
app.run()
|
## 3. Class Syntax ##
class Car():
def __init__(self):
self.color = "black"
self.make = "honda"
self.model = "accord"
black_honda_accord = Car()
print(black_honda_accord.color)
class Team():
def __init__(self):
self.name = "Tampa Bay Buccaneers"
bucs = Team()
print(bucs.name)
## 4. Instance Methods and __init__ ##
class Team():
def __init__(self,name):
self.name = name
bucs = Team("Tampa Bay Buccaneers")
giants = Team("New York Giants")
## 6. More Instance Methods ##
import csv
f = open("nfl.csv", 'r')
nfl = list(csv.reader(f))
# The NFL data is loaded into the nfl variable.
class Team():
def __init__(self, name):
self.name = name
def print_name(self):
print(self.name)
# Your method goes here
def count_total_wins(self):
wins = 0
for item in nfl:
if item[3]==self.name:
wins +=1
return(wins)
bucs = Team("Tampa Bay Buccaneers")
bucs.print_name()
broncos_wins = Team("Denver Broncos").count_total_wins()
chiefs_wins = Team("Kansas City Chiefs").count_total_wins()
## 7. Adding to the init Function ##
import csv
class Team():
def __init__(self, name,filename):
self.name = name
self.nfl = list(csv.reader(open(filename,'r')))
def count_total_wins(self):
count = 0
for row in self.nfl:
if row[2] == self.name:
count = count + 1
return count
jaguars_wins = Team("Jacksonville Jaguars", "nfl.csv").count_total_wins()
## 8. Wins in a Year ##
import csv
class Team():
def __init__(self, name,year):
self.name = name
self.nfl = list(csv.reader(open('nfl.csv','r')))
self.year = year
def count_total_wins(self):
count = 0
for row in self.nfl:
if row[2] == self.name:
count = count + 1
return count
def count_wins_in_year(self):
wins = 0
for item in self.nfl:
if(item[2]==self.name and item[0] == self.year):
wins +=1
return(wins)
niners_wins_2013 = Team("San Francisco 49ers","2013").count_wins_in_year()
|
def task241(string):
res = ''
for i in range(len(string)):
res += str(ord(string[i]))
return res
def task241_main():
print(
"""
Task 2 "Strings and lists"
4. A string is given:
Variant 1. Based on the given string, build a new one containing only digits. Print the new string.
"""
)
print(task241('qwerty'))
# task241_main()
|
"""
Calculate the fuel based on the given mass
"""
def calculate_fuel(mass):
return (mass // 3) - 2
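# Worked examples: calculate_fuel(12) == 2, calculate_fuel(1969) == 654,
# calculate_fuel(100756) == 33583; integer division rounds down before
# subtracting 2.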
f = open("input.txt", 'r')
total_fuel = 0
for number in f:
total_fuel += calculate_fuel(int(number))
print(total_fuel)
f.close()
|
#!/usr/bin/env python
import numpy
import rospy
import time
import collections
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Imu
from sensor_msgs.msg import NavSatFix
from std_srvs.srv import Trigger
from std_srvs.srv import TriggerResponse
from covariance_calculator import CovarianceCalculator
class CovarianceCalculatorNode():
def __init__(self):
self.odom_topics = []
self.odom_calculators = []
self.imu_topics = []
self.imu_calculators = []
self.gps_topics = []
self.gps_calculators = []
count = 0
while rospy.has_param("odom"+str(count)):
self.odom_topics.append(rospy.get_param("odom" + str(count)))
self.odom_calculators.append(CovarianceCalculator())
rospy.Subscriber(self.odom_topics[count], Odometry, self.odom_calculators[count].insert_odom)
count = count + 1
count = 0
while rospy.has_param("imu"+str(count)):
self.imu_topics.append(rospy.get_param("imu" + str(count)))
self.imu_calculators.append(CovarianceCalculator())
rospy.Subscriber(self.imu_topics[count], Imu, self.imu_calculators[count].insert_imu)
count = count +1
count = 0
while rospy.has_param("imu"+str(count)):
self.gps_topics.append(rospy.get_param("gps" + str(count)))
self.gps_calculators.append(CovarianceCalculator())
rospy.Subscriber(self.gps_topics[count], NavSatFix, self.gps_calculators[count].insert_gps)
count = count + 1
self.output_file_name = rospy.get_param("output_file_name", "covariance_out.txt")
def compute_cov(self):
self.odom_msgs_w_covariance = []
self.imu_msgs_w_covariance = []
self.gps_msgs_w_covariance = []
print ("Collecting data. Ctrl-C to stop collection and compute covariances")
rospy.spin()
fileout = open("/opt/robot/covariances/" + str(str(time.time()) + '__' + self.output_file_name), 'w+')
for calculators in self.odom_calculators:
self.odom_msgs_w_covariance.append(calculators.get_odom_w_covariance())
for calculators in self.imu_calculators:
self.imu_msgs_w_covariance.append(calculators.get_imu_w_covariance())
for calculators in self.gps_calculators:
self.gps_msgs_w_covariance.append(calculators.get_gps_w_covariance())
count = 0
for msg in self.odom_msgs_w_covariance:
fileout.write('\n\n'+ self.odom_topics[count] +':')
self.print_formated(msg.twist.covariance, fileout)
count = count + 1
count = 0
for msg in self.imu_msgs_w_covariance:
fileout.write("\n\n" + self.imu_topics[count] + ' (linear)' +':')
self.print_formated(msg.linear_acceleration_covariance, fileout)
count = count + 1
count = 0
for msg in self.imu_msgs_w_covariance:
fileout.write("\n\n" + self.imu_topics[count] + ' (angular)' +':')
self.print_formated(msg.angular_velocity_covariance, fileout)
count = count + 1
count = 0
for msg in self.gps_msgs_w_covariance:
fileout.write("\n\n" + self.gps_topics[count] + ':')
self.print_formated(msg.position_covariance, fileout)
count = count + 1
def print_formated(self, covariances, file_in):
for cov in covariances:
file_in.write(format(cov, '.20f')+',',)
if __name__ == '__main__':
rospy.init_node("covariance_calculator_node")
node = CovarianceCalculatorNode()
node.compute_cov()
|
"""CLI for DRSSMS package."""
from drssms import NeverAPI
import click
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@click.group()
def main():
"""Enter main script."""
# napi = NeverAPI()
# TODO: Move napi up here and use context
pass
@main.command()
@click.argument('number')
@click.argument('text')
@click.option('--ani')
def push(number, text, ani):
"""Send push SMS to number without service."""
napi = NeverAPI()
napi.login()
napi.send_push_sms(number, text, ani)
@main.command()
@click.argument('number')
@click.argument('serviceid')
@click.option('--text', help='SMS text to overwrite service. \
Remember quotation marks')
def service(serviceid, number, text):
"""Send service SMS to number, optionally overwrite text."""
napi = NeverAPI()
napi.login()
napi.send_service_sms(number, serviceid, text)
@main.command()
@click.argument('number')
def stop(number):
"""Stop an active SMS dialog."""
napi = NeverAPI()
napi.login()
napi.stop_dialog(number)
@main.command()
@click.option('--start', help='startdate in isoformat 2018-01-31 (inclusive)')
@click.option('--end', help='enddate in isoformat 2018-01-31 (exclusive)')
@click.option('--filename', help='filename without extension. \
Default: "sms_dialoger_start-[startdate]-end-[enddate].csv"')
def download(start, end, filename, name='download-sms'):
"""Download sms dialog file.
No options: get yesterday.
Only start: 24 hours from start.
Start and end: start (inclusive) to end (exclusive).
"""
napi = NeverAPI()
napi.login()
napi.download_sms_file(start, end, filename)
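# Illustrative invocations (a sketch; assumes this group is exposed as a
# console script named `drssms` -- the real entry-point name, phone numbers
# and service id below are placeholders):
#   drssms push 4512345678 "Hello there" --ani 1245
#   drssms service 4512345678 42 --text "Overridden service text"
#   drssms stop 4512345678
#   drssms download --start 2018-01-01 --end 2018-01-08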
|
import json
import logging
from datetime import date
from typing import Final, List
import requests
from bs4 import BeautifulSoup
from . import utils
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
DAY_INTERVAL: Final = "d"
HOUR_INTERVAL: Final = "h"
FIFTEEN_MINUTE_INTERVAL: Final = "mi"
day_before_yesterday = utils.get_past_date(2)
yesterday = utils.get_past_date(1)
today = date.today()
class Evergy:
def __init__(self, username, password):
self.logged_in = False
self.session = None
self.username = username
self.password = password
self.account_number = None
self.premise_id = None
self.login_url = "https://www.evergy.com/log-in"
self.logout_url = "https://www.evergy.com/logout"
self.account_summary_url = (
"https://www.evergy.com/ma/my-account/account-summary"
)
self.account_dashboard_url = (
"https://www.evergy.com/api/account/{accountNum}/dashboard/current"
)
self.usageDataUrl = "https://www.evergy.com/api/report/usage/{premise_id}?interval={interval}&from={start}&to={end}"
def login(self):
self.session = requests.Session()
logging.info("Logging in with username: " + self.username)
login_form = self.session.get(self.login_url)
login_form_soup = BeautifulSoup(login_form.text, "html.parser")
csrf_token = login_form_soup.select(".login-form > input")[0]["value"]
csrf_token_name = login_form_soup.select(".login-form > input")[0]["name"]
login_payload = {
"Username": str(self.username),
"Password": str(self.password),
csrf_token_name: csrf_token,
}
r = self.session.post(
url=self.login_url, data=login_payload, allow_redirects=False
)
logging.debug("Login response: " + str(r.status_code))
r = self.session.get(self.account_summary_url)
soup = BeautifulSoup(r.text, "html.parser")
account_data = soup.find_all("script", id="account-landing-data")
if len(account_data) == 0:
self.logged_in = False
else:
self.account_number = json.loads(account_data[0].contents[0])[
"accountNumber"
]
dashboard_data = self.session.get(
self.account_dashboard_url.format(accountNum=self.account_number)
).json()
self.premise_id = dashboard_data["addresses"][0]["premiseId"]
self.logged_in = (
self.account_number is not None and self.premise_id is not None
)
def logout(self):
logging.info("Logging out")
self.session.get(url=self.logout_url)
self.session = None
self.logged_in = False
def get_usage(self, days: int = 1, interval: str = DAY_INTERVAL) -> List[dict]:
"""
Gets the energy usage for previous days up until today. Useful for getting the most recent data.
:rtype: [dict]
:param days: The number of days back to get data for.
:param interval: The time period between each data element in the returned data. Default is days.
:return: A list of usage elements. The number of elements will depend on the `interval` argument.
"""
return self.get_usage_range(utils.get_past_date(days_back=days - 1), utils.get_past_date(0), interval=interval)
def get_usage_range(self, start: date = utils.get_past_date(0), end: date = utils.get_past_date(0),
interval: str = DAY_INTERVAL) -> List[dict]:
"""
Gets a specific range of historical usage. Could be useful for reporting.
:param start: The date to begin getting data for (inclusive)
:param end: The last date to get data for (inclusive)
:param interval: The time period between each data element in the returned data. Default is days.
:return: A list of usage elements. The number of elements will depend on the `interval` argument.
"""
if not self.logged_in:
self.login()
if start > end:
logging.error("'start' date can't be after 'end' date")
raise Exception("'start' date can't be after 'end' date")
url = self.usageDataUrl.format(
premise_id=self.premise_id, interval=interval, start=start, end=end
)
logging.info("Fetching {}".format(url))
usage_response = self.session.get(url)
# A 403 is returned if the user got logged out due to inactivity
if self.logged_in and usage_response.status_code == 403:
logging.info("Received HTTP 403, logging in again")
self.login()
usage_response = self.session.get(url)
if usage_response.status_code != 200:
raise Exception("Invalid login credentials")
return usage_response.json()["data"]
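# Illustrative usage (a sketch only; the credentials are placeholders and the
# function is never called by this module):
def _example_usage():
    client = Evergy("my_username", "my_password")
    client.login()
    for entry in client.get_usage(days=7, interval=DAY_INTERVAL):
        print(entry)
    client.logout()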
|
numero1 = 10
numero2 = 5
numero3 = 9
promedio = (numero1 + numero2 + numero3)/3
print('The average of the 3 numbers is: ', promedio)
|
# Generated by Django 2.1.4 on 2018-12-24 16:46
from django.db import migrations, models
import django.db.models.deletion
import photoslib.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('photoslib', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TestModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', photoslib.fields.PhotoField(on_delete=django.db.models.deletion.PROTECT, to='photoslib.Photo')),
],
),
migrations.CreateModel(
name='MultiplyPhotosModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photos', photoslib.fields.ManyPhotosField(to='photoslib.Photo')),
],
),
migrations.CreateModel(
name='SortableMultiplyPhotosModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photos', photoslib.fields.SortableManyPhotosField(help_text=None, to='photoslib.Photo')),
],
),
]
|
#!/usr/bin/python3
# Project : pyzrtp
# Copyright (C) 2017 Orange
# All rights reserved.
# This software is distributed under the terms and conditions of the 'BSD 3-Clause'
# license which can be found in the file 'LICENSE' in this package distribution.
from Crypto.Cipher import AES
from collections import namedtuple
class UnsupportedCipherType(Exception): pass
# TODO: implement TwoFish CFB
Cipher = namedtuple('Cipher', 'name constructor keybits')
class Cipher_AES:
def __init__(self, key, iv, key_bits):
assert(key_bits % 8 == 0)
self.key_bytes = key_bits>>3
assert(len(key) == self.key_bytes)
self.c = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
def encrypt(self, data):
padded_data = data
while len(padded_data) % AES.block_size != 0:
padded_data += b'\x00'
cipher = self.c.encrypt(padded_data)
return cipher[:len(data)]
def decrypt(self, data):
padded_data = data
while len(padded_data) % AES.block_size != 0:
padded_data += b'\x00'
plain = self.c.decrypt(padded_data)
return plain[:len(data)]
CIPHERS = (
Cipher(b'AES3', lambda k, iv: Cipher_AES(k, iv, 256), 256), # AES-CFB with 256 bits key
Cipher(b'AES1', lambda k, iv: Cipher_AES(k, iv, 128), 128), # AES-CFB with 128 bits key
)
def get(name):
for c in CIPHERS:
if c.name == name: return c
raise UnsupportedCipherType(name)
if __name__ == '__main__':
from binascii import hexlify
from binascii import unhexlify as ux
def cipher(name, key, iv, data):
c = get(name)
assert(c.keybits >> 3 == len(key))  # key length is given in bytes
instance = c.constructor(key, iv)
return hexlify(instance.encrypt(data))
|
#!/usr/bin/env python3
"""
An example of testing the cache to prove that it's not making more requests than expected.
"""
import asyncio
from contextlib import asynccontextmanager
from logging import basicConfig, getLogger
from unittest.mock import patch
from aiohttp import ClientSession
from aiohttp_client_cache import CachedResponse, CachedSession, SQLiteBackend
basicConfig(level='INFO')
logger = getLogger('aiohttp_client_cache.examples')
# Uncomment for more verbose debug output
# getLogger('aiohttp_client_cache').setLevel('DEBUG')
@asynccontextmanager
async def log_requests():
"""Context manager that mocks and logs all non-cached requests"""
async def mock_response(*args, **kwargs):
return CachedResponse(method='GET', reason='OK', status=200, url='url', version='1.1')
with patch.object(ClientSession, '_request', side_effect=mock_response) as mock_request:
async with CachedSession(cache=SQLiteBackend('cache-test.sqlite')) as session:
await session.cache.clear()
yield session
cached_responses = [v async for v in session.cache.responses.values()]
logger.debug('All calls to ClientSession._request():')
logger.debug(mock_request.mock_calls)
logger.info(f'Responses cached: {len(cached_responses)}')
logger.info(f'Requests sent: {mock_request.call_count}')
async def main():
"""Example usage; replace with any other requests you want to test"""
async with log_requests() as session:
for i in range(10):
response = await session.get('http://httpbin.org/get')
logger.debug(f'Response {i}: {type(response).__name__}')
if __name__ == '__main__':
asyncio.run(main())
|
import unittest
from hundo import parse_from_json as parser
class TestParsingFromJson(unittest.TestCase):
def test_smoke(self):
strings = (
'РЭУ, Факультет маркетинга, Менеджмент (38.03.02), ОП [Б], №: 3, №*: 1, №**: 2',
'ВШЭ, ФСН, Политология (41.03.04), ОП [БК], №: 3, №*: 1, №**: 3',
'МИЭТ, Прикладная математика (01.03.04), ОП [Б], №: 2, №*: 1, №**: 2',
'ЛЭТИ, ФКТИ, Информационные системы и технологии (09.03.02), ОП [Б], №: 22, №*: 1, №**: 1359',
'СПбГУАП, Информационная безопасность (10.03.01), Комплексная защита объектов информатизации, ОК [Б], №: 547, №*: 15, №**: 493'
)
answers = (
('РЭУ, Факультет маркетинга, Менеджмент (38.03.02)', 'ОП [Б]', False),
('ВШЭ, ФСН, Политология (41.03.04)', 'ОП [БК]', False),
('МИЭТ, Прикладная математика (01.03.04)', 'ОП [Б]', False),
('ЛЭТИ, ФКТИ, Информационные системы и технологии (09.03.02)', 'ОП [Б]', False),
('СПбГУАП, Информационная безопасность (10.03.01), Комплексная защита объектов информатизации', 'ОК [Б]', False)
)
for arg, expected in zip(strings, answers):
self.assertTupleEqual(parser(arg), expected)
def test_agreement(self):
strings = (
'<b>ВШЭ, ФМ, Математика (01.03.01), Совместный бакалавриат НИУ ВШЭ и ЦПМ, БВИ [Б], №: 15, №*: 14, №**: 15</b>',
'ВШЭ, ФМ, Математика (01.03.01), Совместный бакалавриат НИУ ВШЭ и ЦПМ, БВИ [Б], №: 15, №*: 14, №**: 15'
)
answers = (
True,
False
)
for arg, expected in zip(strings, answers):
self.assertEqual(parser(arg)[2], expected)
|
import numpy as np
import libsvm.svmutil as libSVM
class LibSVM(object):
def __init__(self,paramsLst):
self.paramsLst = paramsLst
self.svmModel = None
# Whether or not to flip the classes.
# This is a strange issue: the SVM does not seem to remember the order of the classes.
# In fit(), the code predicts the training data, compares where the positive examples
# rank in those predictions versus in the flipped predictions, and decides whether the
# class order has been flipped, setting this flag so later predictions can be corrected.
# If the root cause is ever found, this workaround can be removed.
self.flipClasses = False
def fit(self,features,classes):
if len(classes.shape) > 1:
classes = classes.flatten()
self.svmModel = libSVM.svm_train(classes,features,self.paramsLst)
#weird class flipping check
d = self.predict_proba(features)
classData = classes[np.argsort(d[:,0])] #sort from most positive (least negative) to most negative
z = np.arange(1,classData.shape[0]+1)
correct = np.sum(classData/z) #score classes as they are
z = np.arange(classData.shape[0],0,-1)
incorrect = np.sum(classData/z) #score classes in reverse order
print(correct,incorrect)
if correct<incorrect:
#Average Precision is greater in reverse order than in the current direction
#(similar to AUC <0.5)
#somehow, the classes got flipped
self.flipClasses = True
def predict_proba(self,features):
results = libSVM.svm_predict([],features,self.svmModel,'-b 1')
classifications = results[0]
accuracy = results[1]
probabilities = results[2]
probabilities = np.asarray(probabilities)
if self.flipClasses:
probabilities = np.hstack((np.expand_dims(probabilities[:,1],axis=1),np.expand_dims(probabilities[:,0],axis=1)))
return probabilities
def save(self,fname):
libSVM.svm_save_model(fname,self.svmModel)
def load(self,fname):
self.svmModel = libSVM.svm_load_model(fname)
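# Illustrative usage (a sketch; the option string and toy data are made up,
# '-b 1' is required at training time so that predict_proba() can request
# probability estimates, and a libsvm build that accepts numpy arrays is assumed):
def _libsvm_example():
    X = np.array([[0.0, 0.0], [0.1, 0.2], [0.9, 1.0], [1.0, 1.1]])
    y = np.array([0, 0, 1, 1])
    clf = LibSVM('-t 0 -c 1 -b 1')  # linear kernel, probability estimates on
    clf.fit(X, y)
    return clf.predict_proba(X)  # (n_samples, 2) array of class probabilities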
|
from alpaca_trade_api.entity import Quote
from alpaca_trade_api.rest import Positions
import config
import alpaca_trade_api as tradeapi
import risk
api = tradeapi.REST(config.API_KEY, config.SECRET_KEY, base_url=config.URL)
symbols = ["SPY" , "IWM", "DIA"]
for symbol in symbols:
quote = api.get_last_quote(symbol)
api.submit_order(
symbol=symbol,
side='buy',
type='market',
qty=risk.calculate_quantity(quote.bidprice, 5),
time_in_force='day'
)
positions = api.list_positions()
print(positions)
api.submit_order(
symbol='IWM',
side='sell',
qty=75,
type='trailing_stop',
trail_percent=2,
time_in_force='day'
)
|
from sensor_msgs.msg import Imu
from std_msgs.msg import String
from robotis_controller_msgs.msg import SyncWriteItem
import rospy
class OpenCR(object):
def __init__(self, ns):
self._sub_imu = rospy.Subscriber(ns + "/open_cr/imu", Imu, self._cb_imu, queue_size=10)
self._sub_button = rospy.Subscriber(ns + "/open_cr/button", String, self._cb_button, queue_size=10)
# Publisher used by set_led(); the "/sync_write_item" topic name is an assumption,
# adjust it to match the robotis_controller configuration in use.
self._pub_sync_write = rospy.Publisher(ns + "/sync_write_item", SyncWriteItem, queue_size=10)
def _cb_imu(self, msg):
# print msg
pass
def _cb_button(self, msg):
"""
custom callback
:data: "mode", "start", "user"
(when long-press, data=data+"_long")
:return:
"""
# print msg
pass
def set_led(self, led):
"""
:param led: 1, 2, 4, 7(ALL)
:return:
"""
msg = SyncWriteItem()
msg.item_name = "LED"
msg.joint_name = ["open-cr"]
msg.value = [led]
self._pub_sync_write.publish(msg)
|
import abc
import math
import numpy as np
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtCore import QObject, pyqtSignal, QPointF, QRectF
from PyQt5.QtGui import QColor, QPalette
from PyQt5.QtWidgets import QGraphicsRectItem, QMenu, QGraphicsSceneContextMenuEvent, QAction, QGraphicsItem, \
QGraphicsEllipseItem, QAbstractGraphicsShapeItem, QGraphicsSceneMouseEvent, QApplication
from cvstudio.dao import LabelDao
from cvstudio.vo import LabelVO
class EditableItemSignals(QObject):
deleted = pyqtSignal(QGraphicsItem)
moved = pyqtSignal(QGraphicsItem, QPointF)
doubleClicked = pyqtSignal(QGraphicsItem)
clicked = pyqtSignal(QGraphicsItem)
class EditableItem(QAbstractGraphicsShapeItem):
def __init__(self, *args, **kwargs):
super(EditableItem, self).__init__(*args, **kwargs)
self.setZValue(10)
self.setAcceptHoverEvents(True)
self.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtWidgets.QGraphicsItem.ItemSendsGeometryChanges, True)
self.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.setOpacity(0.5)
self.signals = EditableItemSignals()
self._labels_dao = LabelDao()
self._label = LabelVO()
self._tag = None
self._shape_type = None
app = QApplication.instance()
color = app.palette().color(QPalette.Highlight)
self._pen_color = color
self._pen_width = 2
self._brush_color = color
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
@property
def pen_color(self):
return self._pen_color
@pen_color.setter
def pen_color(self, value):
self._pen_color = value
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
@property
def brush_color(self):
return self._brush_color
@brush_color.setter
def brush_color(self, value):
self._brush_color = value
self.setBrush(self._brush_color)
@property
def pen_width(self):
return self._pen_width
@pen_width.setter
def pen_width(self, value):
self._pen_width = value
# Set a new pen: pen() returns a copy, so mutating its width would have no effect
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
@property
def shape_type(self):
return self._shape_type
@shape_type.setter
def shape_type(self, value):
self._shape_type = value
@property
def tag(self):
return self._tag
@tag.setter
def tag(self, value):
self._tag = value
@property
def label(self):
return self._label
@label.setter
def label(self, value: LabelVO):
self._label = value
if self._label:
self._pen_color = QColor(self._label.color)
self.setPen(QtGui.QPen(self._pen_color, 1))
self._brush_color = QColor(self._label.color)
self.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
def hoverEnterEvent(self, event):
self.setBrush(self._brush_color)
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
super(EditableItem, self).hoverEnterEvent(event)
def hoverLeaveEvent(self, event):
self.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
super(EditableItem, self).hoverLeaveEvent(event)
def contextMenuEvent(self, evt: QGraphicsSceneContextMenuEvent) -> None:
menu = QMenu()
action_delete: QAction = menu.addAction("Delete")
if self.tag:
dataset = self.tag
result = self._labels_dao \
.fetch_all(dataset)
if result:
labels_menu = menu.addMenu("labels")
for vo in result:
action = labels_menu.addAction(vo.name)
action.setData(vo)
action = menu.exec_(evt.screenPos())
if action == action_delete:
self.delete_item()
self.signals.deleted.emit(self)
elif action and isinstance(action.data(), LabelVO):
self.label = action.data()
def delete_item(self):
self.scene().removeItem(self)
@abc.abstractmethod
def coordinates(self, offset=QPointF(0, 0)):
raise NotImplementedError
class EditableBox(QGraphicsRectItem, EditableItem):
def __init__(self, parent=None):
super(EditableBox, self).__init__(parent)
self.shape_type = "box"
def coordinates(self, offset=QPointF(0, 0)):
item_box: QRectF = self.sceneBoundingRect()
x1 = math.floor(item_box.topLeft().x() + offset.x())
y1 = math.floor(item_box.topRight().y() + offset.y())
x2 = math.floor(item_box.bottomRight().x() + offset.x())
y2 = math.floor(item_box.bottomRight().y() + offset.y())
return ",".join(map(str, [x1, y1, x2, y2]))
class EditableEllipse(QGraphicsEllipseItem, EditableItem):
def __init__(self, parent=None):
super(EditableEllipse, self).__init__(parent)
self.shape_type = "ellipse"
def coordinates(self, offset=QPointF(0, 0)):
item_box: QRectF = self.sceneBoundingRect()
x1 = math.floor(item_box.topLeft().x() + offset.x())
y1 = math.floor(item_box.topRight().y() + offset.y())
x2 = math.floor(item_box.bottomRight().x() + offset.x())
y2 = math.floor(item_box.bottomRight().y() + offset.y())
return ",".join(map(str, [x1, y1, x2, y2]))
class EditablePolygonPointSignals(QObject):
deleted = pyqtSignal(int)
moved = pyqtSignal(QGraphicsItem, QPointF)
doubleClicked = pyqtSignal(QGraphicsItem)
class EditablePolygonPoint(QtWidgets.QGraphicsPathItem):
circle = QtGui.QPainterPath()
circle.addRect(QtCore.QRectF(-1, -1, 2, 2))
square = QtGui.QPainterPath()
square.addRect(QtCore.QRectF(-2.5, -2.5, 5, 5))
def __init__(self, index=None):
super(EditablePolygonPoint, self).__init__()
self.setPath(EditablePolygonPoint.circle)
self.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtWidgets.QGraphicsItem.ItemSendsGeometryChanges, True)
self.setAcceptHoverEvents(True)
self.index = index
self.setZValue(11)
self.signals = EditablePolygonPointSignals()
self.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
app = QApplication.instance()
color = app.palette().color(QPalette.Highlight)
self._pen_color = color
self._pen_width = 2
self._brush_color = color
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
self.setBrush(self._brush_color)
@property
def pen_color(self):
return self._pen_color
@pen_color.setter
def pen_color(self, value):
self._pen_color = value
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
@property
def brush_color(self):
return self._brush_color
@brush_color.setter
def brush_color(self, value):
self._brush_color = value
self.setBrush(self._brush_color)
@property
def pen_width(self):
return self._pen_width
@pen_width.setter
def pen_width(self, value):
self._pen_width = value
# Set a new pen: pen() returns a copy, so mutating its width would have no effect
self.setPen(QtGui.QPen(self._pen_color, self._pen_width))
def mouseDoubleClickEvent(self, event: QGraphicsSceneMouseEvent) -> None:
self.signals.doubleClicked.emit(self)
def contextMenuEvent(self, evt: QGraphicsSceneContextMenuEvent) -> None:
menu = QMenu()
action: QAction = menu.addAction("Delete point")
delete_point = menu.exec_(evt.screenPos())
if action == delete_point:
self.scene().removeItem(self)
self.signals.deleted.emit(self.index)
def hoverEnterEvent(self, event):
self.setPath(EditablePolygonPoint.square)
self.setBrush(QtGui.QColor("white"))
# self.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
super(EditablePolygonPoint, self).hoverEnterEvent(event)
def hoverLeaveEvent(self, event):
self.setPath(EditablePolygonPoint.circle)
self.setBrush(self._brush_color)
super(EditablePolygonPoint, self).hoverLeaveEvent(event)
def mouseReleaseEvent(self, event):
self.setSelected(False)
super(EditablePolygonPoint, self).mouseReleaseEvent(event)
def itemChange(self, change, value):
if change == QtWidgets.QGraphicsItem.ItemPositionChange and self.isEnabled():
self.signals.moved.emit(self, value)
return super(EditablePolygonPoint, self).itemChange(change, value)
class EditablePolygon(QtWidgets.QGraphicsPolygonItem, EditableItem):
def __init__(self, parent=None):
super(EditablePolygon, self).__init__(parent)
self.shape_type = "polygon"
self._points = []
self._controls = []
@property
def points(self):
return self._points
@points.setter
def points(self, value):
self._points = value
@property
def controls(self):
return self._controls
@controls.setter
def controls(self, value):
self._controls = value
@property
def last_point(self):
return self._points[-1]
@property
def count(self):
return len(self._points)
@EditableItem.label.setter
def label(self, value: LabelVO):
super(EditablePolygon, EditablePolygon).label.__set__(self, value)
if len(self._controls) > 0:
for point_control in self._controls:
point_control.brush_color = self._brush_color
point_control.pen_color = self._pen_color
def delete_polygon(self):
while self.points:
self.points.pop()
it = self.controls.pop()
self.scene().removeItem(it)
del it
self.scene().removeItem(self)
def addPoint(self, p):
self._points.append(p)
item = EditablePolygonPoint(len(self._points) - 1)
item.brush_color = self._brush_color
item.pen_color = self._pen_color
item.pen_width = self._pen_width
item.signals.moved.connect(self.point_moved_slot)
item.signals.deleted.connect(self.point_deleted_slot)
item.signals.doubleClicked.connect(self.point_double_clicked)
self.scene().addItem(item)
item.setPos(p)
self._controls.append(item)
self.setPolygon(QtGui.QPolygonF(self.points))
def insertPoint(self, index, p):
self.points.insert(index, p)
item = EditablePolygonPoint(index)
item.brush_color = self._brush_color
item.pen_color = self._pen_color
item.signals.moved.connect(self.point_moved_slot)
item.signals.deleted.connect(self.point_deleted_slot)
item.signals.doubleClicked.connect(self.point_double_clicked)
self.scene().addItem(item)
item.setPos(p)
self.controls.insert(index, item)
self.setPolygon(QtGui.QPolygonF(self.points))
def update_indexes(self):
for idx in range(len(self.points)):
self.controls[idx].index = idx
def point_moved_slot(self, item: EditablePolygonPoint, pos: QPointF):
self.points[item.index] = self.mapFromScene(pos)
self.setPolygon(QtGui.QPolygonF(self.points))
def point_deleted_slot(self, index: int):
del self.points[index]
del self.controls[index]
self.setPolygon(QtGui.QPolygonF(self.points))
self.update_indexes()
def point_double_clicked(self, item: EditablePolygonPoint):
pos = self.points[item.index]
self.insertPoint(item.index + 1, pos)
self.update_indexes()
def move_item(self, index, pos):
if 0 <= index < len(self.controls):
item = self.controls[index]
item.setEnabled(False)
item.setPos(pos)
item.setEnabled(True)
def itemChange(self, change, value):
if change == QtWidgets.QGraphicsItem.ItemPositionHasChanged:
for i, point in enumerate(self.points):
self.move_item(i, self.mapToScene(point))
return super(EditablePolygon, self).itemChange(change, value)
def delete_item(self):
self.delete_polygon()
def coordinates(self, offset=QPointF(0, 0)):
points = [[math.floor(pt.x() + offset.x()), math.floor(pt.y() + offset.y())] for pt in self.points]
points = np.asarray(points).flatten().tolist()
return ",".join(map(str, points))
|
"""
A query on a dictionary.
:Author: Maded Batara III
:Author: Jose Enrico Salinas
:Version: v20181020
"""
from collections import Counter, defaultdict
from functools import reduce
from operator import or_ as union
import random
class DictionaryQuery:
"""A query from a dictionary.
The DictionaryQuery class is a general-purpose class supporting
relevant queries from a list of words.
"""
def __init__(self, words):
"""
Initializes a DictionaryQuery.
Args:
words (list of str): A list of words to add to the query.
"""
self.words = {
word: Counter(word)
for word in words
}
def __contains__(self, word):
"""Implements word in DictionaryQuery."""
return word in self.words
def __eq__(self, other):
"""Implements DictionaryQuery == other."""
if isinstance(other, list):
return other == list(self)
elif isinstance(other, DictionaryQuery):
return list(other) == list(self)
else:
return False
def __iter__(self):
"""Implements iter(DictionaryQuery)."""
return iter(sorted(self.words))
def __len__(self):
"""Implements len(DictionaryQuery)."""
return len(self.words)
def __repr__(self):
"""Implements repr(DictionaryQuery)."""
return "DictionaryQuery(words={0})".format(len(self))
def __str__(self):
"""Implements str(DictionaryQuery)."""
return "Query from dictionary: [{0}]".format(
", ".join(word for word in self.words))
def choice(self):
"""
Selects a random word.
Returns:
str: A random word from the query.
"""
return random.choice(list(self.words))
def filter_by_anagram(self, term):
"""
Generates all anagrams of a certain word.
>>> dq = DictionaryQuery(["rat", "tar", "art", "tarot", "carrot"])
>>> for word in dq.filter_by_anagram("art"):
...     print(word)
art
rat
tar
Args:
term(str): Term to find the anagrams of.
Returns:
DictionaryQuery: List of words which are anagrams of term.
"""
result = []
freq_term = Counter(term)
for word, freq_word in self.words.items():
if freq_term == freq_word:
result.append(word)
return DictionaryQuery(result)
def filter_by_length(self, start, end=None):
"""
Generates a list of words with a certain length.
Args:
start(int): Minimum length of a word in the new list.
end(int, optional): Maximum length of a word in the new list.
Returns:
DictionaryQuery: A list of words with length at least equal to
start. If end is specified, the list does not include words
with lengths greater than end.
"""
if end is None:
end = start
result = []
for word in self.words:
if start <= len(word) <= end:
result.append(word)
return DictionaryQuery(result)
def filter_from_string(self, string):
"""
Generates a list of words contained in a string.
Args:
string(str): String to find words in.
Returns:
DictionaryQuery: A list of words whose letters are contained
in the string.
"""
result = []
freq_string = Counter(string)
for word, freq_word in self.words.items():
if all(freq_string[x] >= freq_word[x] for x in freq_word):
result.append(word)
return DictionaryQuery(result)
def group_by_length(self):
"""
Groups a list of words by length.
Returns:
dict: The words in the query, grouped by length.
"""
grouping = defaultdict(list)
for word in self.words:
key = len(word)
grouping[key].append(word)
for key in grouping:
grouping[key].sort()
return dict(grouping)
def minimal_string(self):
"""
Generates the minimal string that contains all words in the list.
>>> dq = DictionaryQuery(["apple", "banana"])
>>> dq.minimal_string()
'aaabelnnpp'
Returns:
str: The minimal-length string that satisfies the property that
the letters of all words in the query are in it.
"""
minimal_freq = reduce(union, self.words.values())
return ''.join(sorted(minimal_freq.elements()))
def random(self, n=1):
"""
Generates a random list of words.
Args:
n(int, optional): Number of words to select from the query.
Returns:
DictionaryQuery: A random selection of n words already in the query.
"""
return DictionaryQuery([random.choice(list(self.words)) for _ in range(n)])
def random_string(self, n=1):
"""Generates the minimal string from a list of random words."""
return self.random(n=n).minimal_string()
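# Illustrative usage (a sketch with a tiny made-up word list; not executed here):
def _dictionary_query_demo():
    dq = DictionaryQuery(["rat", "tar", "art", "tarot", "carrot"])
    print(dq.filter_by_anagram("art") == ["art", "rat", "tar"])  # True
    print(list(dq.filter_from_string("tarots")))  # words buildable from the letters of "tarots"
    print(dq.group_by_length())  # words grouped by length, e.g. {3: [...], 5: [...], 6: [...]}
    print(dq.minimal_string())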
|
# --
# Copyright (c) 2008-2021 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
"""Sessions managed in memory
These sessions managers keep:
- the last recently used ``DEFAULT_NB_SESSIONS`` sessions
- for each session, the last recently used ``DEFAULT_NB_STATES`` states
"""
from nagare.sessions import common, lru_dict
from nagare.sessions.exceptions import ExpirationError
DEFAULT_NB_SESSIONS = 10000
DEFAULT_NB_STATES = 20
class Sessions(common.Sessions):
"""Sessions manager for states kept in memory
"""
CONFIG_SPEC = dict(
common.Sessions.CONFIG_SPEC,
nb_sessions='integer(default=%d)' % DEFAULT_NB_SESSIONS,
nb_states='integer(default=%d)' % DEFAULT_NB_STATES
)
def __init__(
self,
name, dist,
local_service, services_service,
nb_sessions=DEFAULT_NB_SESSIONS, nb_states=DEFAULT_NB_STATES,
**config
):
"""Initialization
In:
- ``nb_sessions`` -- maximum number of sessions kept in memory
- ``nb_states`` -- maximum number of states, for each sessions, kept in memory
"""
services_service(super(Sessions, self).__init__, name, dist, **config)
self.local = local_service
self.nb_states = nb_states
self._sessions = lru_dict.ThreadSafeLRUDict(nb_sessions)
def check_concurrence(self, multi_processes, multi_threads):
if multi_processes:
raise TypeError("this <%s> sessions manager can't run in multi-processes" % self.name)
def check_session_id(self, session_id):
"""Test if a session exist
In:
- ``session_id`` -- id of a session
Return:
- is ``session_id`` the id of an existing session?
"""
return session_id in self._sessions
def get_lock(self, session_id):
"""Retrieve the lock of a session
In:
- ``session_id`` -- session id
Return:
- the lock
"""
try:
return self._sessions[session_id][1]
except KeyError:
raise ExpirationError('lock not found for session {}'.format(session_id))
def _create(self, session_id, secure_token):
"""Create a new session
In:
- ``session_id`` -- id of the session
- ``secure_token`` -- the secure number associated to the session
"""
lock = self.local.worker.create_lock()
self._sessions[session_id] = [0, lock, secure_token, None, lru_dict.LRUDict(self.nb_states)]
return session_id, 0, secure_token, lock
def delete(self, session_id):
"""Delete a session
In:
- ``session_id`` -- id of the session to delete
"""
del self._sessions[session_id]
def _fetch(self, session_id, state_id):
"""Retrieve a state with its associated objects graph
In:
- ``session_id`` -- session id of this state
- ``state_id`` -- id of this state
Return:
- id of the latest state
- secure number associated to the session
- data kept into the session
- data kept into the state
"""
try:
last_state_id, _, secure_token, session_data, states = self._sessions[session_id]
state_data = states[state_id]
except KeyError:
raise ExpirationError('invalid session structure')
return last_state_id, secure_token, session_data, state_data
def _store(self, session_id, state_id, secure_token, use_same_state, session_data, state_data):
"""Store a state and its associated objects graph
In:
- ``session_id`` -- session id of this state
- ``state_id`` -- id of this state
- ``secure_token`` -- the secure number associated to the session
- ``use_same_state`` -- is this state to be stored in the previous snapshot?
- ``session_data`` -- data to keep into the session
- ``state_data`` -- data to keep into the state
"""
session = self._sessions[session_id]
if not use_same_state:
session[0] += 1
session[2] = secure_token
session[3] = session_data
session[4][state_id] = state_data
class SessionsWithPickledStates(Sessions):
"""Sessions manager for states pickled / unpickled in memory
"""
CONFIG_SPEC = dict(
Sessions.CONFIG_SPEC,
serializer='string(default="nagare.sessions.serializer:Pickle")'
)
|
#!/usr/bin/env python
"""
Example:
$ ./main.py --choices 10 --list host1 host2 host3 host4 host5 --number 4
['host1', 'host2', 'host3', 'host4']
['host1', 'host2', 'host3', 'host5']
['host1', 'host2', 'host4', 'host5']
['host1', 'host3', 'host4', 'host5']
['host2', 'host3', 'host4', 'host5']
"""
import argparse
import textwrap
parser = argparse.ArgumentParser(
prog="main.py",
description=textwrap.dedent("""\
Produce the top N combinations, sorted by LUM. Normally combinations
would come out as 1234, 1235, 1236, ... however this program wants to
minimise the maximum. Therefore the program produces 1234, 1235, 1245,
1345, 2345, and 1236... Take two adjacent combinations and find the unique
maximum; you'll see it is strictly increasing across the whole set.
"""))
parser.add_argument(
'-c', '--choices',
dest='choices',
type=int,
required=True)
parser.add_argument(
'-n', '--number',
dest='number',
type=int,
required=True)
parser.add_argument(
'-l', '--list',
nargs='+',
dest='list',
required=True)
def generator(ordered, n, choices):
"""Generates the next LUM combination starting with ordered[:n]
ordered - Ascending list of elements
n - The number of elements per combination
choices - The number of choices
run_time = O(choices)
"""
if len(ordered) == n:
yield ordered
return
if len(ordered) < n:
print(
"Can't create combinations of length {} with only {} elements"
.format(n, len(ordered)))
return
y = n
pool = []
results = []
curr = ordered[:n]
counted = 0
while True:
yield list(curr)
counted += 1
if counted >= choices:
return
if not pool or (max(pool) < min(curr)):
if y >= len(ordered):
return
pool.append(ordered[y])
y += 1
# TODO replace pool with ordered set or something
pool = sorted(pool)
index = None
index_smallest = 0
while index is None:
smallest = pool[index_smallest]
for i in range(len(curr)-1, -1, -1):
if curr[i] < smallest:
index = i
break
if index is None:
index_smallest += 1
do_swap(curr, index, pool, index_smallest)
pool = sorted(pool)
for i in range(0, index):
if pool[0] < curr[i]:
do_swap(curr, i, pool, 0)
pool = sorted(pool)
def do_swap(curr, curr_index, pool, pool_index):
a = pool[pool_index]
pool.remove(pool[pool_index])
pool.append(curr[curr_index])
curr[curr_index] = a
if __name__ == '__main__':
args = parser.parse_args()
for result in generator(args.list, args.number, args.choices):
print(result)
|
import json
import logging
import os
import pickle
from collections import namedtuple
import torch
from consts import SPEAKER_START, SPEAKER_END, NULL_ID_FOR_COREF
from utils import flatten_list_of_lists
from torch.utils.data import Dataset
# CorefExample = namedtuple("CorefExample", ["token_ids", "clusters"])
BartCorefExample = namedtuple("BartCorefExample", ["sentence_len", "input_ids", "attention_mask", "label_ids"])
logger = logging.getLogger(__name__)
# class CorefDataset1(Dataset):
# def __init__(self, file_path, tokenizer, max_seq_length=-1):
# self.tokenizer = tokenizer
# logger.info(f"Reading dataset from {file_path}")
# examples, self.max_mention_num, self.max_cluster_size, self.max_num_clusters = self._parse_jsonlines(file_path)
# self.max_seq_length = max_seq_length
# self.examples, self.lengths, self.num_examples_filtered = self._tokenize(examples)
# logger.info(
# f"Finished preprocessing Coref dataset. {len(self.examples)} examples were extracted, {self.num_examples_filtered} were filtered due to sequence length.")
#
# def _parse_jsonlines(self, file_path):
# examples = []
# max_mention_num = -1
# max_cluster_size = -1
# max_num_clusters = -1
# with open(file_path, 'r') as f:
# for line in f:
# d = json.loads(line.strip())
# doc_key = d["doc_key"]
# input_words = flatten_list_of_lists(d["sentences"])
# clusters = d["clusters"]
# max_mention_num = max(max_mention_num, len(flatten_list_of_lists(clusters)))
# max_cluster_size = max(max_cluster_size, max(len(cluster) for cluster in clusters) if clusters else 0)
# max_num_clusters = max(max_num_clusters, len(clusters) if clusters else 0)
# speakers = flatten_list_of_lists(d["speakers"])
# examples.append((doc_key, input_words, clusters, speakers))
# return examples, max_mention_num, max_cluster_size, max_num_clusters
#
# def _tokenize(self, examples):
# coref_examples = []
# lengths = []
# num_examples_filtered = 0
# for doc_key, words, clusters, speakers in examples:
# word_idx_to_start_token_idx = dict()
# word_idx_to_end_token_idx = dict()
# end_token_idx_to_word_idx = [0] # for <s>
#
# token_ids = []
# last_speaker = None
# for idx, (word, speaker) in enumerate(zip(words, speakers)):
# if last_speaker != speaker:
# speaker_prefix = [SPEAKER_START] + self.tokenizer.encode(" " + speaker,
# add_special_tokens=False) + [SPEAKER_END]
# last_speaker = speaker
# else:
# speaker_prefix = []
# for _ in range(len(speaker_prefix)):
# end_token_idx_to_word_idx.append(idx)
# token_ids.extend(speaker_prefix)
# word_idx_to_start_token_idx[idx] = len(token_ids) + 1 # +1 for <s>
# tokenized = self.tokenizer.encode(" " + word, add_special_tokens=False)
# for _ in range(len(tokenized)):
# end_token_idx_to_word_idx.append(idx)
# token_ids.extend(tokenized)
# word_idx_to_end_token_idx[idx] = len(token_ids) # old_seq_len + 1 (for <s>) + len(tokenized_word) - 1 (we start counting from zero) = len(token_ids)
#
# if 0 < self.max_seq_length < len(token_ids):
# num_examples_filtered += 1
# continue
#
# new_clusters = [
# [(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for start, end in cluster] for
# cluster in clusters]
# lengths.append(len(token_ids))
#
# coref_examples.append(((doc_key, end_token_idx_to_word_idx), CorefExample(token_ids=token_ids, clusters=new_clusters)))
# return coref_examples, lengths, num_examples_filtered
#
# def __len__(self):
# return len(self.examples)
#
# def __getitem__(self, item):
# return self.examples[item]
#
# def pad_clusters_inside(self, clusters):
# return [cluster + [(NULL_ID_FOR_COREF, NULL_ID_FOR_COREF)] * (self.max_cluster_size - len(cluster)) for cluster
# in clusters]
#
# def pad_clusters_outside(self, clusters):
# return clusters + [[]] * (self.max_num_clusters - len(clusters))
#
# def pad_clusters(self, clusters):
# clusters = self.pad_clusters_outside(clusters)
# clusters = self.pad_clusters_inside(clusters)
# return clusters
#
# def pad_batch(self, batch, max_length):
# max_length += 2 # we have additional two special tokens <s>, </s>
# padded_batch = []
# for example in batch:
# encoded_dict = self.tokenizer.encode_plus(example[0],
# add_special_tokens=True,
# pad_to_max_length=True,
# max_length=max_length,
# return_attention_mask=True,
# return_tensors='pt')
# clusters = self.pad_clusters(example.clusters)
# example = (encoded_dict["input_ids"], encoded_dict["attention_mask"]) + (torch.tensor(clusters),)
# padded_batch.append(example)
# tensored_batch = tuple(torch.stack([example[i].squeeze() for example in padded_batch], dim=0) for i in range(len(example)))
# return tensored_batch
# class CorefDataset(Dataset):
# def __init__(self, file_path, tokenizer, max_seq_length=-1):
# self.tokenizer = tokenizer
# logger.info(f"Reading dataset from {file_path}")
# examples = self._parse_jsonlines(file_path)
# self.max_seq_length = max_seq_length
# self.examples, self.num_examples_filtered = self._tokenize(examples)
# logger.info(
# f"Finished preprocessing Coref dataset. {len(self.examples)} examples were extracted, {self.num_examples_filtered} were filtered due to sequence length.")
#
# def _parse_jsonlines(self, file_path):
# examples = []
# with open(file_path, 'r') as f:
# for line in f:
# d = json.loads(line.strip())
# input_words = d["sentences"]
# clusters = d["target"]
# examples.append((input_words, clusters))
# return examples
#
# def _tokenize(self, examples):
# coref_examples = []
# num_examples_filtered = 0
# for words, clusters in examples:
# sentence = ' '.join(words)
# target = ' '.join(clusters)
#
# input_ids_no_pad = self.tokenizer.encode_plus(sentence, return_tensors="pt").input_ids
# if 0 < input_ids_no_pad.shape[1] < self.max_seq_length:
# # input_ids = self.tokenizer.encode_plus(sentence, return_tensors="pt", pad_to_max_length=True,
# # max_length=self.max_seq_length, truncation=True)
# # labels = self.tokenizer.encode_plus(target, return_tensors="pt", pad_to_max_length=True,
# # max_length=self.max_seq_length, truncation=True).input_ids
#
# input_ids = self.tokenizer.encode_plus(sentence, return_tensors="pt", max_length=self.max_seq_length, truncation=True)
# labels = self.tokenizer.encode_plus(target, return_tensors="pt", max_length=self.max_seq_length, truncation=True).input_ids
# coref_examples.append(BartCorefExample(sentence_len=len(words), input_ids=input_ids.input_ids.flatten(),
# attention_mask=input_ids.attention_mask.flatten(),
# label_ids=labels.flatten()))
# else:
# num_examples_filtered += 1
# return coref_examples, num_examples_filtered
#
# def __len__(self):
# return len(self.examples)
#
# def __getitem__(self, item):
# return self.examples[item]
#
# def get_dataset(args, tokenizer, evaluate=False):
# read_from_cache, file_path = False, ''
# if evaluate and os.path.exists(args.predict_file_cache):
# file_path = args.predict_file_cache
# read_from_cache = True
# elif (not evaluate) and os.path.exists(args.train_file_cache):
# file_path = args.train_file_cache
# read_from_cache = True
#
# if read_from_cache:
# logger.info(f"Reading dataset from {file_path}")
# with open(file_path, 'rb') as f:
# return pickle.load(f)
#
# file_path, cache_path = (args.predict_file, args.predict_file_cache) if evaluate else (args.train_file, args.train_file_cache)
#
# coref_dataset = CorefDataset(file_path, tokenizer, max_seq_length=args.max_seq_length)
# with open(cache_path, 'wb') as f:
# pickle.dump(coref_dataset, f)
#
# return coref_dataset
BartCorefGenerate = namedtuple("BartCorefGenerate", ["sentence_len", "input_ids", "sentences", "doc_id"])
class CorefDatasetGenerate(Dataset):
def __init__(self, file_path, tokenizer, max_seq_length=-1):
self.tokenizer = tokenizer
logger.info(f"Reading dataset from {file_path}")
examples = self._parse_jsonlines(file_path)
self.max_seq_length = max_seq_length
self.examples, self.num_examples_filtered = self._tokenize(examples)
logger.info(
f"Finished preprocessing Coref dataset. {len(self.examples)} examples were extracted, {self.num_examples_filtered} were filtered due to sequence length.")
def _parse_jsonlines(self, file_path):
examples = []
with open(file_path, 'r') as f:
for line in f:
d = json.loads(line.strip())
input_words = d["sentences"]
clusters = []
doc_id = d["doc_id"]
examples.append((input_words, clusters, doc_id))
return examples
def _tokenize(self, examples):
print("max length:", self.max_seq_length)
coref_examples = []
num_examples_filtered = 0
for words, clusters, doc_id in examples:
sentence = ' '.join(words)
input_ids = self.tokenizer([sentence], max_length=self.max_seq_length, return_tensors="pt", padding='max_length', truncation=True).input_ids
if 0 < input_ids.shape[1] <= self.max_seq_length:
coref_examples.append(BartCorefGenerate(sentence_len=len(words), input_ids=input_ids.flatten(),
sentences=sentence, doc_id=doc_id))
else:
num_examples_filtered += 1
return coref_examples, num_examples_filtered
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return self.examples[item]
def get_dataset_generate(args, tokenizer, evaluate=False):
read_from_cache, file_path = False, ''
if evaluate and os.path.exists(args.predict_file_cache):
file_path = args.predict_file_cache
read_from_cache = True
elif (not evaluate) and os.path.exists(args.train_file_cache):
file_path = args.train_file_cache
read_from_cache = True
if read_from_cache:
logger.info(f"Reading dataset from {file_path}")
with open(file_path, 'rb') as f:
return pickle.load(f)
file_path, cache_path = (args.predict_file, args.predict_file_cache) if evaluate else (args.train_file, args.train_file_cache)
coref_dataset = CorefDatasetGenerate(file_path, tokenizer, max_seq_length=args.max_seq_length)
with open(cache_path, 'wb') as f:
pickle.dump(coref_dataset, f)
return coref_dataset
|
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from tf_agents.trajectories.time_step import StepType
from bellman.distributions.utils import create_uniform_distribution_from_spec
from bellman.environments.environment_model import EnvironmentModel
from bellman.environments.initial_state_distribution_model import (
DeterministicInitialStateModel,
)
from bellman.environments.termination_model import ConstantFalseTermination
from bellman.environments.tf_wrappers import TFTimeLimit
from bellman.environments.transition_model.keras_model.keras import KerasTransitionModel
from tests.tools.bellman.environments.reward_model import ConstantReward
from tests.tools.bellman.environments.transition_model.keras_models.dummy_ensemble import (
DummyEnsembleTransitionNetwork,
)
@pytest.fixture(name="wrapped_environment_and_action")
def _wrapped_environment_fixture(observation_space, action_space, batch_size):
observation = create_uniform_distribution_from_spec(observation_space).sample()
network = DummyEnsembleTransitionNetwork(observation_space)
model = KerasTransitionModel([network], observation_space, action_space)
env_model = EnvironmentModel(
transition_model=model,
reward_model=ConstantReward(observation_space, action_space, -1.0),
termination_model=ConstantFalseTermination(observation_space),
initial_state_distribution_model=DeterministicInitialStateModel(observation),
batch_size=batch_size,
)
wrapped_environment_model = TFTimeLimit(env_model, 2)
action = create_uniform_distribution_from_spec(action_space).sample((batch_size,))
return wrapped_environment_model, action
def test_tf_time_limit_reset_num_steps(wrapped_environment_and_action):
"""
Ensure that the number of steps after a termination are reset to 0.
"""
wrapped_environment_model, action = wrapped_environment_and_action
time_step = wrapped_environment_model.reset()
np.testing.assert_array_equal(time_step.step_type, StepType.FIRST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.MID)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.LAST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.FIRST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.MID)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.LAST)
def test_tf_wrapper_reset_method_resets_num_steps(wrapped_environment_and_action):
"""
Ensure that the number of steps after a reset are reset to 0.
"""
wrapped_environment_model, action = wrapped_environment_and_action
time_step = wrapped_environment_model.reset()
np.testing.assert_array_equal(time_step.step_type, StepType.FIRST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.MID)
next_time_step = wrapped_environment_model.reset()
np.testing.assert_array_equal(next_time_step.step_type, StepType.FIRST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.MID)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.LAST)
def test_tf_wrapper_set_initial_observation_resets_num_steps(wrapped_environment_and_action):
"""
Ensure that the number of steps after setting the initial observation are reset to 0.
"""
wrapped_environment_model, action = wrapped_environment_and_action
time_step = wrapped_environment_model.reset()
np.testing.assert_array_equal(time_step.step_type, StepType.FIRST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.MID)
next_time_step = wrapped_environment_model.set_initial_observation(
next_time_step.observation
)
np.testing.assert_array_equal(next_time_step.step_type, StepType.FIRST)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.MID)
next_time_step = wrapped_environment_model.step(action)
np.testing.assert_array_equal(next_time_step.step_type, StepType.LAST)
|
import pstats
import cProfile
import pyximport
# Compile Cython modules on the fly so the Cython-backed app module can be imported below.
pyximport.install()
import app

# Profile a single analysis run and dump the raw stats to Profile.prof.
cProfile.runctx("app.analyze_movie('movie/testa.mp4')", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
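
# Editor-added follow-up (assumes the profile above was written successfully):
# also show the 20 most expensive call sites sorted by cumulative time.
s.sort_stats("cumulative").print_stats(20)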
|
"""position.py
This script extracts atom positions from the OUTCAR file of a VASP calculation.
Example of the information we need to extract (from Ni100_Clean_rel):
POSITION TOTAL-FORCE (eV/Angst)
-----------------------------------------------------------------------------------
0.00000 0.00000 0.00000 0.000000 0.000000 0.398756
0.00000 1.76500 1.76500 0.000000 0.000000 -0.393048
0.00000 0.00000 3.53000 0.000000 0.000000 -0.006319
0.00000 1.76500 5.30196 0.000000 0.000000 -0.000021
0.00000 0.00000 6.98746 0.000000 0.000000 0.000632
-----------------------------------------------------------------------------------
total drift: -0.000038 -0.000038 0.004794
Output that we need:
0.00000 0.00000 0.00000
0.00000 1.76500 1.76500
0.00000 0.00000 3.53000
0.00000 1.76500 5.30196
0.00000 0.00000 6.98746
"""
import sys
def position(outcar, output):
pos = []
separator = ' -----------------------------------------------------------------------------------\n'
read = False
for line in reversed(open(outcar).readlines()):
if line.startswith(' total drift:'):
read = True
continue
if read:
if line.startswith(' POSITION'):
read = False
break
else:
pos.append(line.split())
pos = pos[1:-1]
for i in range(0, len(pos)):
pos[i] = [float(pos[i][j]) for j in range(0, 3)]
with open(output, 'w') as f:
for p in reversed(pos):
f.write(' '.join(str(x) for x in p))
f.write('\n')
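
# Minimal command-line entry point (an editor-added sketch; the original script only
# defined the function): python position.py <OUTCAR> <output file>
if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('usage: python position.py <OUTCAR> <output file>')
        sys.exit(1)
    position(sys.argv[1], sys.argv[2])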
|
import PySimpleGUI as sg
# Very basic form. Return values as a list
layout = [
[sg.Text('Please enter the folder path')],
[sg.Text('Folder Path', size=(15, 1))],
[sg.InputText()],
[sg.Checkbox('Split pdf_pages into half')],
[sg.Submit(), sg.Cancel()]
]
window = sg.Window('OCR Unicode', layout)
button, values = window.Read()
# print(values[0], values[1])
ocr_folder_path = values[0]
half_split = values[1]
half_split = 'yes' if half_split else 'no'
print(ocr_folder_path)
print(half_split)
# window = sg.Window("OCR Unicode",layout)
# events, values = window.read()
window.close()
# ocr_folder_path = values[0]
# sg.popup('You entered', values[0])
# print(values[0])
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
class BodyMap:
def __init__(self):
im_link = 'https://raw.githubusercontent.com/MuteJester/MediPlot/master/MediPlot/Body_Sil.png'
self.body_sil = plt.imread(im_link)
self.bmap = {'head': self.set_head, 'legs': self.set_legs, 'les': self.set_right_leg,
'left leg': self.set_left_leg, 'right leg': self.set_right_leg, 'right arm': self.set_right_arm,
'left arm': self.set_left_arm, 'torso': self.set_torso, 'arms': self.set_arms,
'waist': self.set_waist,
'neck': self.set_neck, 'left hand': self.set_left_hand, 'right hand': self.set_right_hand,
'left foot': self.set_left_foot, 'right foot': self.set_right_foot,
'upper right arm':self.set_upper_right_arm,'upper left arm':self.set_upper_left_arm,
'right forearm':self.set_right_forearm,'left forearm':self.set_left_forearm,
'right thigh':self.set_right_thigh,'left thigh':self.set_left_thigh,
'right lower leg':self.set_right_lower_leg,'left lower leg':self.set_left_lower_leg}
def list_of_targets(self):
return ['head', 'legs', 'right leg', 'left leg', 'right arm', 'left arm', 'torso', 'arms', 'waist', 'neck',
'left hand', 'right hand','upper right arm','upper left arm','right forearm','left forearm',
'right thigh','left thigh','right lower leg','left lower leg']
def set_head(self, value):
head = self.body_sil[0:270, 300:580]
self.body_sil[0:270, 300:580] = np.where(head == [1, 1, 1], head, value)
def set_neck(self, value):
neck = self.body_sil[270:330, 300:580]
self.body_sil[270:330, 300:580] = np.where(neck == [1, 1, 1], neck, value)
def set_torso(self, value):
torso = self.body_sil[330:965, 265:640]
self.body_sil[330:965, 265:640] = np.where(torso == [1, 1, 1], torso, value)
def set_waist(self, value):
waist = self.body_sil[965:1155, 235:670]
self.body_sil[965:1155, 235:670] = np.where(waist == [1, 1, 1], waist, value)
def set_left_leg(self, value):
right_leg = self.body_sil[1155:2290, 230:455]
self.body_sil[1155:2290, 230:455] = np.where(right_leg == [1, 1, 1], right_leg, value)
def set_right_leg(self, value):
left_leg = self.body_sil[1155:2290, 455:685]
self.body_sil[1155:2290, 455:685] = np.where(left_leg == [1, 1, 1], left_leg, value)
def set_legs(self, value):
self.set_right_leg(value)
self.set_left_leg(value)
def set_left_arm(self, value):
right_arm = self.body_sil[380:1350, 0:235]
self.body_sil[380:1350, 0:235] = np.where(right_arm == [1, 1, 1], right_arm, value)
self.body_sil[380:1000, 0:265] = np.where(self.body_sil[380:1000, 0:265] == [1, 1, 1],
self.body_sil[380:1000, 0:265], value)
def set_right_arm(self, value):
right_arm = self.body_sil[870:1370, 682:1299]
self.body_sil[870:1370, 682:1299] = np.where(right_arm == [1, 1, 1], right_arm, value)
self.body_sil[385:940, 640:1289] = np.where(self.body_sil[385:940, 640:1289] == [1, 1, 1],
self.body_sil[385:940, 640:1289], value)
def set_arms(self, value):
self.set_left_arm(value)
self.set_right_arm(value)
def set_left_hand(self, value):
left_hand = self.body_sil[1135:1400, 0:180]
self.body_sil[1135:1400, 0:180] = np.where(left_hand == [1, 1, 1], left_hand, value)
def set_right_hand(self, value):
right_hand = self.body_sil[1135:1400, 760:907]
self.body_sil[1135:1400, 760:907] = np.where(right_hand == [1, 1, 1], right_hand, value)
def set_left_foot(self, value):
left_foot = self.body_sil[2195:2289, 225:361]
self.body_sil[2195:2289, 225:361] = np.where(left_foot == [1, 1, 1], left_foot, value)
def set_right_foot(self, value):
right_foot = self.body_sil[2195:2289, 540:690]
self.body_sil[2195:2289, 540:690] = np.where(right_foot == [1, 1, 1], right_foot, value)
def set_upper_right_arm(self,value):
right_arm = self.body_sil[870:1200, 682:1110]
self.body_sil[385:810, 640:1110] = np.where(self.body_sil[385:810, 640:1110] == [1, 1, 1],
self.body_sil[385:810, 640:1110], value)
def set_upper_left_arm(self,value):
self.body_sil[380:800, 0:265] = np.where(self.body_sil[380:800, 0:265] == [1, 1, 1],
self.body_sil[380:800, 0:265], value)
def set_right_forearm(self,value):
right_arm = self.body_sil[870:1150, 682:1299]
self.body_sil[870:1150, 682:1299] = np.where(right_arm == [1, 1, 1], right_arm, value)
right_arm = self.body_sil[870:950, 642:1299]
self.body_sil[870:950, 642:1299] = np.where(right_arm == [1, 1, 1], right_arm, value)
def set_left_forearm(self,value):
right_arm = self.body_sil[830:1110, 0:245]
self.body_sil[830:1110, 0:245] = np.where(right_arm == [1, 1, 1], right_arm, value)
def set_right_thigh(self,value):
left_leg = self.body_sil[1270:1620, 455:685]
self.body_sil[1270:1620, 455:685] = np.where(left_leg == [1, 1, 1], left_leg, value)
def set_left_thigh(self, value):
right_leg = self.body_sil[1270:1620, 230:455]
self.body_sil[1270:1620, 230:455] = np.where(right_leg == [1, 1, 1], right_leg, value)
def set_right_lower_leg(self, value):
left_leg = self.body_sil[1720:2150, 455:685]
self.body_sil[1720:2150, 455:685] = np.where(left_leg == [1, 1, 1], left_leg, value)
def set_left_lower_leg(self, value):
right_leg = self.body_sil[1720:2150, 230:455]
self.body_sil[1720:2150, 230:455] = np.where(right_leg == [1, 1, 1], right_leg, value)
def generate(self, areas, values, figsize=(9, 15), cmap='coolwarm', background='white'):
self.f, self.ax = plt.subplots(figsize=figsize)
self.ax.axis('off')
norm = plt.Normalize(np.min(values), np.max(values))
col_map = cm.get_cmap(cmap)
for area, value in zip(areas, values):
self.bmap[area.lower()](col_map(norm(value))[:3])
if background == 'black':
self.body_sil = np.where(self.body_sil == [1, 1, 1], [0, 0, 0], self.body_sil)
divider = make_axes_locatable(self.ax)
cax = divider.append_axes('right', size='10%', pad=0.5)
x = self.ax.imshow(self.body_sil, cmap=cmap, vmin=np.min(values), vmax=np.max(values))
self.f.colorbar(x, cax=cax, orientation='vertical')
return self.ax
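
# Usage sketch (added for illustration; not part of the original module).
# Colours a few regions with their values and shows the figure; note that
# __init__ fetches the silhouette image from GitHub, so network access is required.
if __name__ == '__main__':
    body_map = BodyMap()
    body_map.generate(areas=['head', 'torso', 'left arm'], values=[1.0, 5.0, 3.0], cmap='coolwarm')
    plt.show()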
|
#!/usr/bin/env python3
import sys
import json
import random
import termcolor
import requests
from graphqlclient import GraphQLClient
from collections import namedtuple
from urllib.error import URLError
__server_description_fields = [
'uri', 'alias', 'status',
'uuid', 'message', 'replicaset'
]
ServerDescription = namedtuple(
'ServerDescription',
__server_description_fields
)
def find(predicate, seq, default=None):
for s in seq:
if predicate(s):
return s
return default
def parse_server_description(kwdict):
return ServerDescription(*[
(kwdict[prop] if prop in kwdict else None)
for prop in __server_description_fields
])
def get_servers(url):
client = GraphQLClient('http://{}/admin/api'.format(url))
result = client.execute('''
query {
serverList: servers {
uuid
alias
uri
status
message
replicaset {
uuid
}
}
}
''')
data = json.loads(result)
servers = list(map(
parse_server_description,
data['data']['serverList']
))
return servers
def get_cluster_info(url):
client = GraphQLClient('http://{}/admin/api'.format(url))
result = client.execute('''
query {
cluster {
clusterSelf: self {
uri: uri
uuid: uuid
}
failover
knownRoles: known_roles
can_bootstrap_vshard
vshard_bucket_count
}
}
''')
data = json.loads(result)
return data['data']['cluster']
def assign_roles(url, server_description, roles):
assert roles
client = GraphQLClient('http://{}/admin/api'.format(url))
result = client.execute('''
mutation( $uri: String!, $roles: [String!] ) {
createReplicasetResponse: join_server(
uri: $uri
roles: $roles
)
}
''',
variables={
**server_description._asdict(),
'roles': roles
}
)
data = json.loads(result)
print(data)
return (
'data' in data
and 'createReplicasetResponse' in data['data']
and 'errors' not in data,
data
)
if __name__ == "__main__":
url = '127.0.0.1:8081'
ping = requests.get('http://{}/'.format(url), timeout=10)
assert ping.status_code == 200
servers = get_servers(url)
# TODO prettify this output
print('\n'.join(map(str, servers)))
print('-' * 72)
assert servers
cluster_info = get_cluster_info(url)
    bootstrapped_uri = cluster_info['clusterSelf']['uri']
    key_value_server = find(
        lambda server: server.uri == bootstrapped_uri,
        servers,
        servers[0]
    )
print(key_value_server)
assert key_value_server
assert 'key-value' in cluster_info['knownRoles']
    success, response = assign_roles(url, key_value_server, ['key-value'])
    if success:
        print(termcolor.colored('success', 'green'))
    else:
        print(termcolor.colored('fail', 'red'))
        print(response)
        sys.exit(1)
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import six
def mask_value(value, s_percent=0.125):
"""Obfuscate a given string to show only a percentage of leading
and trailing characters.
    :param value: the value to obfuscate; non-string values are returned unchanged
    :param s_percent: the fraction of characters to keep visible at each end
        (capped at 32 characters per side)
"""
if isinstance(value, six.string_types):
visible = (32 if int(round(len(value) * s_percent)) > 32
else int(round(len(value) * s_percent)))
return value[:visible] + " xxxxxxxx " + value[-visible:]
return value
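
# Example (added for illustration; not part of the original module): with the default
# s_percent of 0.125, a 32-character token keeps round(32 * 0.125) = 4 characters at
# each end and hides the middle.
if __name__ == '__main__':
    print(mask_value('0123456789abcdef0123456789abcdef'))  # -> '0123 xxxxxxxx cdef'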
|
import os, sys, numpy as np, argparse, imp, datetime, time, pickle as pkl, random, json, collections
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch, torch.nn as nn
#Custom Libraries
import datasets as data
import netlib as netlib
import auxiliaries as aux
"""==============================="""
#Name of folder containing the network checkpoint.
network = 'CUB_PADS_R50'
#Path to above folder - this setup assumes that the full folder is stored in the same directory as this script.
netfolder = 'CVPR2020_TrainingResults/CUB/R50'
#Load network and setup parameters, which are stored in a Namespace.
opt = pkl.load(open(netfolder+'/'+network+'/'+'hypa.pkl','rb'))
#Load the network based on the respective parameters and initialize it with the trained weights.
model = netlib.networkselect(opt)
model.load_state_dict(torch.load(netfolder+'/'+network+'/checkpoint_Class.pth.tar')['state_dict'])
"""================================"""
os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= "1"
"""==============================="""
#Get dataloaders, primarily the one for the test set. For that, point to the folder that contains the datasets:
opt.source_path = '<path_to_dataset>/'+opt.dataset
dataloaders = data.give_dataloaders(opt.dataset, opt)
opt.num_classes = len(dataloaders['training'].dataset.avail_classes)
opt.device = torch.device('cuda')
"""================================"""
#Compute test metrics - note that weights were stored at optimal R@1 performance.
_ = model.to(opt.device)
_ = model.eval()
start = time.time()
image_paths = np.array(dataloaders['testing'].dataset.image_list)
with torch.no_grad():
evaltypes = ['Class']
metrics, labels = aux.eval_metrics_one_dataset(model, dataloaders['testing'], device=opt.device, k_vals=opt.k_vals, opt=opt, evaltypes=evaltypes)
###
full_result_str = ''
for evaltype in evaltypes:
result_str = ', '.join('@{0}: {1:.4f}'.format(k,rec) for k,rec in zip(opt.k_vals, metrics[evaltype]['Recall@k']))
result_str = '{0}-embed: NMI [{1:.4f}] | F1 [{2:.4f}] | Recall [{3}]'.format(evaltype, metrics[evaltype]['NMI'], metrics[evaltype]['F1'], result_str)
full_result_str += result_str+'\n'
print(full_result_str)
|
import yaml
import os
import numpy as np
import gym
import matplotlib.pyplot as plt
from quanser_robots.cartpole.ctrl import SwingUpCtrl, MouseCtrl
from quanser_robots.common import GentlyTerminating, Logger
def load_config(config_path="config.yml"):
    '''Load the configuration settings from a given path'''
    if os.path.isfile(config_path):
        with open(config_path) as f:
            return yaml.safe_load(f)
    else:
        raise Exception("Configuration file is not found in the path: "+config_path)
def print_config(config_path="config.yml"):
'''Print the configuration setting from a given path'''
    if os.path.isfile(config_path):
        with open(config_path) as f:
            config = yaml.safe_load(f)
print("************************")
print("*** model configuration ***")
print(yaml.dump(config["model_config"], default_flow_style=False, default_style=''))
print("*** train configuration ***")
print(yaml.dump(config["training_config"], default_flow_style=False, default_style=''))
print("************************")
else:
raise Exception("Configuration file is not found in the path: "+config_path)
def anylize_env(env, test_episodes = 100,max_episode_step = 500, render = False):
'''Analyze the environment through random sampled episodes data'''
print("state space shape: ", env.observation_space.shape)
print("state space lower bound: ", env.observation_space.low)
print("state space upper bound: ", env.observation_space.high)
print("action space shape: ", env.action_space.shape)
print("action space lower bound: ", env.action_space.low)
print("action space upper bound: ", env.action_space.high)
print("reward range: ", env.reward_range)
rewards = []
steps = []
for episode in range(test_episodes):
env.reset()
step = 0
episode_reward = 0
for _ in range(max_episode_step):
if render:
env.render()
step += 1
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
episode_reward += reward
if done:
# print("done with step: %s " % (step))
break
steps.append(step)
rewards.append(episode_reward)
env.close()
print("Randomly sample actions for %s episodes, with maximum %s steps per episodes"
% (test_episodes, max_episode_step))
print(" average reward per episode: %s, std: %s " % (np.mean(rewards), np.std(rewards) ))
print(" average steps per episode: ", np.mean(steps))
print(" average reward per step: ", np.sum(rewards)/np.sum(steps))
def plot_fig(episode, all_rewards,avg_rewards, losses):
plt.figure(figsize=(12, 5))
plt.subplot(121)
plt.title('Reward Trend with %s Episodes' % (episode))
plt.xlabel("episode")
plt.ylabel("reward")
plt.plot(all_rewards, 'b')
plt.plot(avg_rewards, 'r')
plt.subplot(122)
plt.title('Loss Trend with %s Episodes' % (episode))
plt.plot(losses)
plt.show()
def plot(frame_idx, rewards, losses):
plt.clf()
plt.close()
plt.ion()
plt.figure(figsize=(12 ,5))
plt.subplot(131)
plt.title('episode %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))
plt.plot(rewards)
plt.subplot(132)
plt.title('loss')
plt.plot(losses)
plt.pause(0.0001)
def save_fig(episode, all_rewards, avg_rewards, losses, epsilon, number = 0):
'''Save the experiment results in the ./storage folder'''
plt.clf()
plt.close("all")
plt.figure(figsize=(8 ,5))
plt.title('Reward Trend with %s Episodes' % (episode))
plt.xlabel("episode")
plt.ylabel("reward")
plt.plot(all_rewards,'b')
plt.plot(avg_rewards,'r')
plt.savefig("storage/reward-"+str(number)+".png")
plt.figure(figsize=(12, 5))
plt.subplot(121)
plt.title('Loss Trend with Latest %s Steps' % (1200))
plt.plot(losses[-1200:])
plt.subplot(122)
plt.title('Epsilon with %s Episodes' % (episode))
plt.plot(epsilon)
plt.savefig("storage/loss-"+str(number)+".png")
def get_angles(sin_theta, cos_theta):
theta = np.arctan2(sin_theta, cos_theta)
if theta > 0:
alpha = (-np.pi + theta)
else:
alpha = (np.pi + theta)
return alpha, theta
class PlotSignal:
def __init__(self, window=10000):
self.window = window
self.values = {}
def update(self, **argv):
for k in argv:
if k not in self.values:
self.values[k] = [argv[k]]
else:
self.values[k].append(argv[k])
self.values[k] = self.values[k][-self.window:]
def plot_signal(self):
N = len(self.values)
plt.clf()
for i, k in enumerate(self.values):
plt.subplot(N, 1, i + 1)
plt.title(k)
plt.plot(self.values[k])
plt.pause(0.0000001)
def last_plot(self):
N = len(self.values)
plt.clf()
plt.ioff()
for i, k in enumerate(self.values):
plt.subplot(N, 1, i + 1)
plt.title(k)
plt.plot(self.values[k])
plt.show()
def do_trajectory(env, ctrl, plot, time_steps=10000, use_plot=True,
collect_fr=10, plot_fr=10, render=True, render_fr=10):
obs = env.reset()
for n in range(time_steps):
act = ctrl(obs)
obs, _, done, _ = env.step(np.array(act[0]))
if done:
print("Physical Limits or End of Time reached")
break
if render:
if n % render_fr == 0:
env.render()
if use_plot:
if n % collect_fr == 0:
alpha, theta = get_angles(obs[1], obs[2])
plot.update(theta=theta, alpha=alpha, theta_dt=obs[4], volt=act[0], u=act[1], x=obs[0])
env.render()
if n % plot_fr == 0:
plot.plot_signal()
def get_env_and_controller(long_pendulum=True, simulation=True, swinging=True, mouse_control=False):
pendulum_str = {True:"Long", False:"Short"}
simulation_str = {True:"", False:"RR"}
task_str = {True:"Swing", False:"Stab"}
if not simulation:
pendulum_str = {True: "", False: ""}
mu = 7.5 if long_pendulum else 19.
env_name = "Cartpole%s%s%s-v0" % (task_str[swinging], pendulum_str[long_pendulum], simulation_str[simulation])
if not mouse_control:
return Logger(GentlyTerminating(gym.make(env_name))), SwingUpCtrl(long=long_pendulum, mu=mu)
else:
return Logger(GentlyTerminating(gym.make(env_name))), MouseCtrl()
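
# Usage sketch (added for illustration; not part of the original module).
# Assumes the quanser_robots simulation environments are installed; runs the
# analytic swing-up controller and plots the collected signals afterwards.
if __name__ == '__main__':
    env, ctrl = get_env_and_controller(long_pendulum=True, simulation=True, swinging=True)
    signal_plot = PlotSignal(window=2000)
    do_trajectory(env, ctrl, signal_plot, time_steps=1000, render=False)
    signal_plot.last_plot()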
|
import time
class Timer(object):
def __init__(self, print_fnc=print):
self._last_ts = time.time()
self._print = print_fnc
def step(self, message, reset=True):
time_spent = time.time() - self._last_ts
self._print('[{:5}] {}'.format(time_spent, message))
if reset:
self._last_ts = time.time()
def start(self):
self._last_ts = time.time()
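
# Usage sketch (added for illustration; not part of the original module):
if __name__ == '__main__':
    timer = Timer()
    time.sleep(0.1)
    timer.step('after sleeping')          # prints the elapsed seconds, then resets
    timer.step('immediately afterwards')  # elapsed time is close to zero again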
|
import pytest
from llckbdm.min_rmse_kbdm import min_rmse_kbdm
def test_min_rmse_kbdm(data_brain_sim, dwell):
    # Because of the number of points used to compute KBDM, only the third element (m=180)
    # is capable of reproducing a good result.
m_range = [30, 31, 180, 32, 33, 34]
l = 30
min_rmse_results = min_rmse_kbdm(
data=data_brain_sim,
dwell=dwell,
m_range=m_range,
l=l
)
assert len(min_rmse_results.samples) == len(m_range)
    # it should compute an RMSE of 0 for noiseless data
    assert min_rmse_results.min_rmse == pytest.approx(0)
    # assert that the third element (m=180) produced the best result
assert min_rmse_results.min_index == 2
|
import unittest
from zserio.bitbuffer import BitBuffer
from zserio.bitreader import BitStreamReader
from zserio.bitsizeof import INT64_MIN
from zserio.exception import PythonRuntimeException
class BitStreamReaderTest(unittest.TestCase):
def testFromBitBuffer(self):
bitBuffer = BitBuffer(bytes([0x0B, 0xAB, 0xE0]), 19)
reader = BitStreamReader.fromBitBuffer(bitBuffer)
self.assertEqual(BitBuffer(bytes([0xAB, 0x07]), 11), reader.readBitBuffer())
def testReadBits(self):
data = [0, 1, 255, 128, 127]
reader = BitStreamReader(bytes(data))
for byte in data:
self.assertEqual(byte, reader.readBits(8))
with self.assertRaises(PythonRuntimeException):
reader.readBits(-1)
self.assertEqual(0, reader.readBits(0)) # read 0 bits
with self.assertRaises(PythonRuntimeException):
reader.readBits(1) # no more bits available
def testReadSignedBits(self):
data = [0, 0xff, 1, 127, 0x80]
reader = BitStreamReader(bytes(data))
self.assertEqual(0, reader.readSignedBits(8))
self.assertEqual(-1, reader.readSignedBits(8)) # 0xff == -1
self.assertEqual(1, reader.readSignedBits(8))
self.assertEqual(127, reader.readSignedBits(8))
self.assertEqual(-128, reader.readSignedBits(8)) # 0x80 == -128
self.assertEqual(0, reader.readSignedBits(0)) # read 0 bits
with self.assertRaises(PythonRuntimeException):
reader.readSignedBits(1) # no more bits available
with self.assertRaises(PythonRuntimeException):
reader.readSignedBits(-1)
def testReadVarInt16(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarInt16())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarInt16()
def testReadVarInt32(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarInt32())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarInt32()
def testReadVarInt64(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarInt64())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarInt64()
def testReadVarInt(self):
reader = BitStreamReader(b'\x00\x80')
self.assertEqual(0, reader.readVarInt())
self.assertEqual(8, reader.getBitPosition())
self.assertEqual(INT64_MIN, reader.readVarInt())
self.assertEqual(16, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarInt()
def testReadVarUInt16(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarUInt16())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarUInt16()
def testReadVarUInt32(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarUInt32())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarUInt32()
def testReadVarUInt64(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarUInt64())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarUInt64()
def testReadVarUInt(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.readVarUInt())
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readVarUInt()
def testReadFloat16(self):
reader = BitStreamReader(bytes(2))
self.assertEqual(0.0, reader.readFloat16())
self.assertEqual(16, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readFloat16()
def testReadFloat32(self):
reader = BitStreamReader(bytes(4))
self.assertEqual(0.0, reader.readFloat32())
self.assertEqual(32, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readFloat32()
def testReadFloat64(self):
reader = BitStreamReader(bytes(8))
self.assertEqual(0.0, reader.readFloat64())
self.assertEqual(64, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readFloat64()
def testReadString(self):
reader = BitStreamReader(bytes(b'\x01\x41'))
self.assertEqual("A", reader.readString())
self.assertEqual(16, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.readString()
def testReadBool(self):
reader = BitStreamReader(bytes(b'\xA8'))
self.assertEqual(True, reader.readBool())
self.assertEqual(False, reader.readBool())
self.assertEqual(True, reader.readBool())
self.assertEqual(False, reader.readBool())
self.assertEqual(True, reader.readBool())
self.assertEqual(False, reader.readBool())
self.assertEqual(False, reader.readBool())
self.assertEqual(False, reader.readBool())
with self.assertRaises(PythonRuntimeException):
reader.readBool()
def testReadBitBuffer(self):
reader = BitStreamReader(bytes(b'\x0B\xAB\xE1\xE0\x1F\xC0'))
self.assertEqual(BitBuffer(bytes([0xAB, 0x07]), 11), reader.readBitBuffer())
self.assertEqual(BitBuffer(bytes([0x00, 0x7F]), 15), reader.readBitBuffer())
with self.assertRaises(PythonRuntimeException):
reader.readBitBuffer()
def testGetBitPosition(self):
reader = BitStreamReader(bytes(1))
self.assertEqual(0, reader.getBitPosition())
reader.readBits(4)
self.assertEqual(4, reader.getBitPosition())
def testSetBitPosition(self):
reader = BitStreamReader(bytes(1))
reader.setBitPosition(0)
self.assertEqual(0, reader.getBitPosition())
reader.setBitPosition(7)
self.assertEqual(7, reader.getBitPosition())
reader.setBitPosition(8)
self.assertEqual(8, reader.getBitPosition())
with self.assertRaises(PythonRuntimeException):
reader.setBitPosition(9)
with self.assertRaises(PythonRuntimeException):
reader.setBitPosition(-1)
reader.setBitPosition(0)
self.assertEqual(0, reader.getBitPosition())
def testAlignTo(self):
reader = BitStreamReader(bytes(1))
reader.alignTo(1)
self.assertEqual(0, reader.getBitPosition())
reader.readBits(1)
self.assertEqual(1, reader.getBitPosition())
reader.alignTo(1)
self.assertEqual(1, reader.getBitPosition())
reader.alignTo(4)
self.assertEqual(4, reader.getBitPosition())
|
import time
import csv
import os
from threading import Timer
from NavigationCenter.RobotStatus import RobotStatus
from datetime import datetime
from collections import defaultdict
from heapq import *
from Queue import *
class NaviCenter:
def __init__(self,naviFilePath):
filePath = naviFilePath + '/src/NavigationCenter/navi_path.csv'
pathfile = open(filePath,'rb')
pathreader = csv.reader(pathfile,dialect = 'excel')
self.pathDic = defaultdict(list)
for l,r,c in pathreader:
self.pathDic[l].append((int(c),r))
print self.pathDic
self.robotstatus = RobotStatus()
self.goalList = []
self.setPosition('Lobby')
self.robotstatus.setAvailable()
def register(self,pubGoalFunc,callEVFunc,evCheckFunc,enterEvFunc,alightEvFunc,changeMap):
self.pubRosGoal = pubGoalFunc
self.callEV = callEVFunc
self.evCheck = evCheckFunc
self.evEnter = enterEvFunc
self.evAlight = alightEvFunc
self.changeMap = changeMap
def pubGoal(self,goal):
self.pubRosGoal(goal)
def goNextGoal(self):
self.robotstatus.setMoving()
goal = self.goalList[0]
print 'Next Goal', goal
self.pubGoal(goal)
return
def hasNextGoal(self):
if self.goalList != []:
return True
else:
return False
def GoalReachCB(self):
if self.hasNextGoal():
self.setPosition(self.goalList.pop(0))
print 'reach ',self.robotstatus.getPosition()
else:
self.naviGoalReached()
if self.hasNextGoal():
if self.goalList[0] == 'EVin':
self.robotstatus.setWaitEV()
self.callElevator(self.robotstatus.getPosition())
else:
self.goNextGoal()
else:
self.naviGoalReached()
return
def setPosition(self,position):
self.robotstatus.setPosition(position)
def getNaviGoal(self,goal):
if self.robotstatus.isAvailable():
if self.setRoute(goal):
self.goNextGoal()
else:
return
else:
print 'robot is busy'
return
def naviGoalReached(self):
print 'navi goal reached'
self.robotstatus.setAvailable()
return
def setRoute(self,destination):
if self.robotstatus.isAvailable():
path = self.calculatePath(self.robotstatus.position,destination)
if path != None:
for stop in path:
self.goalList.append(stop)
return True
else:
print 'plane failed'
return False
else:
return False
def cleanRoute(self):
self.goalList = []
def calculatePath(self,start,destination):
print 'dijkstra ',start,destination
try:
(cost,path) = self.dijkstra(self.pathDic,start,destination)
print 'cost = ',cost
print 'path = ',path
except:
print 'path error'
return None
return path
def dijkstra(self,pathdic, f, t):
pathBuf = list()
q, seen = [(0,f,pathBuf)], set()
while q:
(cost,v1,path) = heappop(q)
if v1 not in seen:
seen.add(v1)
path.append(v1)
if v1 == t: return (cost, path)
for c, v2 in pathdic.get(v1, ()):
if v2 not in seen:
path2 = path[0:]
heappush(q, (cost+c, v2, path2))
path2 = []
return float("inf")
def callElevator(self,destination):
EVdic = {'EVW1':1,'EVW2':2,'EVW3':3,'EVW4':4}
print 'call elevator to ',EVdic[destination]
self.callEV(EVdic[destination])
return
def elevatorCB(self):
print 'robot status',self.robotstatus.status
if self.robotstatus.isWaitEV():
self.enterEVcheck()
elif self.robotstatus.isInEV():
self.alightEV()
else:
print 'elevator callback error'
return
def enterEVcheck(self):
print 'checking elevator'
self.evCheck()
return
def enterEVcheckCB(self,available):
if available:
print 'elevator is available'
self.enterEV()
else:
print 'wait for next one'
evGoal = self.robotstatus.getPosition()
t = Timer(1,self.callElevator,[evGoal])
t.daemon = True
t.start()
return
def enterEV(self):
self.evEnter()
print 'entering elevator'
return
def enterEVcallback(self,success):
if success:
position = self.goalList.pop(0)
self.robotstatus.setPosition(position)
self.robotstatus.setInEV()
self.callElevator(self.goalList[0])
self.changeNaviMap(self.goalList[0])
print 'in the elevator'
else:
print 'enter elevator failed'
return
def alightEV(self):
self.evAlight()
print 'alighting elevator'
return
def alightEVcallback(self,success):
if success:
position = self.goalList.pop(0)
self.robotstatus.setPosition(position)
self.robotstatus.setMoving()
print 'reach ',self.robotstatus.getPosition()
if self.hasNextGoal():
self.goNextGoal()
return
else:
self.naviGoalReached()
return
def changeNaviMap(self,destination):
EVdic = {'EVW1':1,'EVW2':2,'EVW3':3,'EVW4':4}
print 'change map to ',EVdic[destination]
self.changeMap(EVdic[destination])
return
|
from typing import List
class CountingBits:
    """
    https://leetcode-cn.com/problems/counting-bits/
    """
    def countBits(self, num: int) -> List[int]:
        bits = [0] * (num + 1)
        for i in range(1, num + 1):
            bits[i] = bits[i >> 1] + (i & 1)
        return bits
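
# Usage sketch (added for illustration): the recurrence bits[i] = bits[i >> 1] + (i & 1)
# reuses the already-computed popcount of i with its lowest bit dropped.
if __name__ == '__main__':
    assert CountingBits().countBits(5) == [0, 1, 1, 2, 1, 2]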
|
"""Unit tests for the scene-optimizer class.
Authors: Ayush Baid
"""
import unittest
from pathlib import Path
import dask
import hydra
import numpy as np
from dask.distributed import LocalCluster, Client
from gtsam import EssentialMatrix, Rot3, Unit3
from hydra.utils import instantiate
import gtsfm.utils.geometry_comparisons as comp_utils
from gtsfm.common.gtsfm_data import GtsfmData
from gtsfm.loader.olsson_loader import OlssonLoader
from gtsfm.scene_optimizer import SceneOptimizer
DATA_ROOT_PATH = Path(__file__).resolve().parent / "data"
class TestSceneOptimizer(unittest.TestCase):
"""Unit test for SceneOptimizer, which runs SfM for a scene."""
def setUp(self) -> None:
self.loader = OlssonLoader(str(DATA_ROOT_PATH / "set1_lund_door"), image_extension="JPG")
assert len(self.loader)
def test_create_computation_graph(self):
"""Will test Dask multi-processing capabilities and ability to serialize all objects."""
self.loader = OlssonLoader(str(DATA_ROOT_PATH / "set1_lund_door"), image_extension="JPG")
with hydra.initialize_config_module(config_module="gtsfm.configs"):
# config is relative to the gtsfm module
cfg = hydra.compose(config_name="scene_optimizer_unit_test_config.yaml")
obj: SceneOptimizer = instantiate(cfg.SceneOptimizer)
# generate the dask computation graph
sfm_result_graph = obj.create_computation_graph(
num_images=len(self.loader),
image_pair_indices=self.loader.get_valid_pairs(),
image_graph=self.loader.create_computation_graph_for_images(),
camera_intrinsics_graph=self.loader.create_computation_graph_for_intrinsics(),
image_shape_graph=self.loader.create_computation_graph_for_image_shapes(),
gt_pose_graph=self.loader.create_computation_graph_for_poses(),
)
# create dask client
cluster = LocalCluster(n_workers=1, threads_per_worker=4)
with Client(cluster):
sfm_result = dask.compute(sfm_result_graph)[0]
self.assertIsInstance(sfm_result, GtsfmData)
# compare the camera poses
computed_poses = sfm_result.get_camera_poses()
computed_rotations = [x.rotation() for x in computed_poses]
computed_translations = [x.translation() for x in computed_poses]
# get active cameras from largest connected component, may be <len(self.loader)
connected_camera_idxs = sfm_result.get_valid_camera_indices()
expected_poses = [self.loader.get_camera_pose(i) for i in connected_camera_idxs]
self.assertTrue(comp_utils.compare_global_poses(expected_poses, expected_poses))
def generate_random_essential_matrix() -> EssentialMatrix:
rotation_angles = np.random.uniform(low=0.0, high=2 * np.pi, size=(3,))
R = Rot3.RzRyRx(rotation_angles[0], rotation_angles[1], rotation_angles[2])
t = np.random.uniform(low=-1.0, high=1.0, size=(3,))
return EssentialMatrix(R, Unit3(t))
if __name__ == "__main__":
unittest.main()
|
'''
A quick edit to text_classifier.py.
bootstrap_classifier is used for the paragraph bootstrap project.
Added the function: run_model_on_train_test_split
input:
    df, X_train, X_test, y_train, y_test, output_filename, user_id, project_id,
    C=1, label_id=None, method='bow', run_on_entire_dataset=False
return:
    result, clf
'''
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from src.models.base_classifier import BaseClassifier
from src.models.text_pipeline import TextPipeline
from sklearn.linear_model import LogisticRegression
import logging
import matplotlib.pyplot as plt
from src.utils.analyze_model import plot_precision_recall_curve, plot_roc_curve, plot_confidence_performance
logger = logging.getLogger('text classifier')
ngram_range = (1, 2)
# ngram_range = (1, 2)
RUN_FASTTEXT = False
class BootstrapClassifier(BaseClassifier):
@classmethod
def load(cls, filename):
# super loads only the model
classifier, offset = super().load(filename)
# inherited class loads the pipeline
try:
processing_pipeline = TextPipeline.load(filename, offset)
except EOFError:
logger.warning('EOF reached when trying to load pipeline')
processing_pipeline = None
classifier.processing_pipeline = processing_pipeline
return classifier
@property
def important_features(self, k=None):
column_order = ['feature_name', 'importance', 'class']
try:
importances = self._model.feature_importances_
feature_names = self.features
feature_importance_df = pd.DataFrame({'feature_name': feature_names,
'importance': importances,
'class': np.nan}).sort_values(by='importance', ascending=False)
if isinstance(k, int):
feature_importance_df = feature_importance_df.head(k)
except:
feature_names = self.features
if len(self._model.classes_) > 2:
feature_importances_per_class = []
for i, c in enumerate(self._model.classes_):
importances = self._model.coef_[i]
per_class_df = pd.DataFrame({'feature_name': feature_names[importances > 0],
'importance': importances[importances > 0],
'class': c}).sort_values(by='importance', ascending=False)
if isinstance(k, int):
per_class_df = per_class_df.head(k)
feature_importances_per_class.append(per_class_df)
feature_importance_df = pd.concat(feature_importances_per_class, sort=True)
else:
importances = self._model.coef_[0]
feature_importance_df = pd.DataFrame({'feature_name': feature_names,
'importance': abs(importances),
'class': [
self._model.classes_[0] if imp < 0 else self._model.classes_[
1] for imp in importances]})
return feature_importance_df.sort_values(by=['class', 'importance'], ascending=False)[column_order]
def set_preprocessor(self, pipeline):
self.processing_pipeline = TextPipeline(pipeline)
def run_on_train_test(self, df, X_train_in, X_test_in, y_train_in, y_test_in, output_filename, user_id, project_id,
label_id=None, pipeline=None, bootstrap_iterations=0, bootstrap_threshold=0.9,
run_on_entire_dataset=False):
output_filename = os.path.abspath(output_filename)
output_folder = os.path.join(os.path.dirname(output_filename), 'results')
os.makedirs(output_folder, exist_ok=True)
print('Running text classification model on train test split. Results will be saved to {}...'.format(output_filename))
print('Reading input file...')
label_field = 'label_id'
self.set_preprocessor(pipeline)
# # concat train test split to single dataframe
# X_train_in.insert(X_train_in.shape[1], label_field, y_train_in)
# X_test_in.insert(X_test_in.shape[1], label_field, y_test_in)
# df = pd.concat((X_train_in, X_test_in))
# print("X_train_in {}".format(X_train_in.shape))
# print("X_test_in {}".format(X_test_in.shape))
# print("df {}".format(df.shape))
#
# if 'label_id' in df.columns:
# df['label'] = df['label_id']
#
# elif 'label' not in df.columns:
# raise ValueError("no columns 'label' or 'label_id' exist in input dataframe")
#
# df = df[~pd.isnull(df['text'])]
# print("df {}".format(df.shape))
#
# df.loc[:, label_field] = df[label_field].apply(lambda x: str(x) if not pd.isnull(x) else x)
# df.loc[df[label_field] == ' ', label_field] = None
#
# if label_id:
# df_labeled = df[df[label_field] == label_id]
# df_labeled = pd.concat([df_labeled, df[df[label_field] != label_id].sample(df_labeled.shape[0])])
# df_labeled.loc[df_labeled[label_field] != label_id, label_field] = 0
# df_labeled = df_labeled[(~pd.isnull(df_labeled[label_field])) & (df_labeled[label_field] != ' ')]
# else:
# df_labeled = df[(~pd.isnull(df[label_field]))]
# print("df_labeled {}".format(df_labeled.shape))
# print('Pre-processing text and extracting features...')
# self.set_preprocessor(pipeline)
# train_set = df_labeled.loc[df_labeled['document_id'].isin(X_train_in['document_id'])]
# test_set = df_labeled.loc[df_labeled['document_id'].isin(X_test_in['document_id'])]
train_preprocessed = self.pre_process(X_train_in, fit=True)
test_preprocessed = self.pre_process(X_test_in, fit=False)
# if label_field not in X_train_in.columns:
# raise RuntimeError("column '{}' not found".format(label_field))
# else:
# y_train = X_train_in[label_field].values
# y_test = X_test_in[label_field].values
X_train = train_preprocessed.copy()
X_test = test_preprocessed.copy()
print('Training the model...')
self.fit(X_train, y_train_in)
print('Performance on train set:')
_, evaluation_text = self.evaluate(X_train, y_train_in)
result = 'Performance on train set: \n' + evaluation_text
print('Performance on test set:')
_, evaluation_text = self.evaluate(X_test, y_test_in)
result = result + '\nPerformance on test set: \n' + evaluation_text
df_gold_labels = df[df['user_id'] == 'gold_label']
y_gold_labels = df_gold_labels[label_field].values
if len(y_gold_labels)>0:
X_gold_labels = self.pre_process(df_gold_labels, fit=False)
print('Performance on gold labels set:')
_, evaluation_text = self.evaluate(X_gold_labels, y_gold_labels)
result = result + '\nPerformance on gold labels set: \n' + evaluation_text
else:
print('Gold labels do not exist - skipping the evaluation of model performance on them.')
if run_on_entire_dataset:
print('Running the model on the entire dataset...')
columns = ['document_id', label_field, 'user_id', 'prob']
if bootstrap_iterations > 0:
print('Bootstrapping...')
y_aug = df[label_field].copy()
for i in range(bootstrap_iterations+1):
# fitting on labeled examples
has_label = ~pd.isna(y_aug)
X_labeled = self.pre_process(df.loc[has_label], fit=False)
self.fit(X_labeled, y_aug[has_label])
# predict in chunks and (optionally) add bootstrapped labels
chunk_size = 10000
n_samples = df.shape[0]
for chunk_start in tqdm(range(0, n_samples, chunk_size)):
chunk_end = min(n_samples, chunk_start + chunk_size)
chunk_df = df.iloc[chunk_start:chunk_end]
chunk_df.loc[:, label_field] = None
y_chunk = df.iloc[chunk_start:chunk_end][label_field]
X_chunk = self.pre_process(chunk_df, fit=False)
if i < bootstrap_iterations:
print('bootstrap iteration ', i, '/', bootstrap_iterations, ' ',
[x for x in zip(np.unique(y_aug[has_label], return_counts=True))])
# no need to re-fit the model, only predict
y_chunk_aug = self.bootstrap(X_chunk, y=y_chunk, th=bootstrap_threshold, fit=False)
y_aug.iloc[chunk_start:chunk_end] = y_chunk_aug
# write to file only in last iteration
if i == bootstrap_iterations:
chunk_prediction_df = self.get_prediction_df(X_chunk, y=y_chunk)
chunk_prediction_df['document_id'] = df['document_id']
chunk_prediction_df['user_id'] = user_id
chunk_prediction_df = chunk_prediction_df.rename({'confidence': 'prob'}, axis=1)
chunk_prediction_df[label_field] = chunk_prediction_df['prediction']
chunk_prediction_df[columns].to_csv(output_filename, index=False, header=True)
# output_df = pd.DataFrame(columns=columns)
# output_df.to_csv(output_filename, index=False, header=True, index_label=False)
print('Saving model weights to file...')
class_weights = self.important_features
class_weights_filename = os.path.join(output_folder,
'ml_logistic_regression_weights_{project_id}.csv'.format(project_id=project_id))
class_weights.to_csv(class_weights_filename, header=True, index=False)
print('Saving model to a pickle file...')
model_save_filename = os.path.join(output_folder, 'ml_model_{project_id}.pickle'.format(project_id=project_id))
self.save(model_save_filename)
print('Saving model results to a text file...')
ml_model_results_filename = os.path.join(output_folder, 'ml_model_results_{}.txt'.format(project_id))
with open(ml_model_results_filename, 'wt') as f:
f.write(result)
y_test_pred = self.predict(X_test)
y_test_pred_proba = self.predict_proba(X_test)
# # Showing examples of large errors
# df_labeled.loc[:, 'y_pred'] = self.predict(X)
# df_labeled.loc[:, 'is_error'] = df_labeled['y_pred']!=df_labeled[label_field]
# df_labeled.loc[:, 'y_pred_proba'] = np.max(self.predict_proba(X), axis=1)
# df_labeled.to_csv(output_filename, index=False, header=True, index_label=False)
# Confusion matrix
print('Generating confusion matrix...')
from src.utils.analyze_model import plot_confusion_matrix
fig = plot_confusion_matrix(y_test_in, y_test_pred, classes=None, normalize=True, title='Normalized confusion matrix - test')
filename = os.path.join(output_folder, 'confusion_matrix_test_{}.png'.format(project_id))
fig.savefig(filename)
plt.clf()
fig = plot_confusion_matrix(y_train_in, self.predict(X_train), classes=None, normalize=True, title='Normalized confusion matrix - train')
filename = os.path.join(output_folder, 'confusion_matrix_train_{}.png'.format(project_id))
fig.savefig(filename)
plt.clf()
# Precision-recall curve
print('Generating the Precision-Recall graph...')
try:
fig = plot_precision_recall_curve(y_test_pred_proba, y_test_in)
filename = os.path.join(output_folder, 'precision_recall_curve_{}.png'.format(project_id))
fig.savefig(filename)
plt.clf()
except ValueError as e:
print(e)
# ROC curve
print('Generating ROC curve...')
try:
fig = plot_roc_curve(y_test_pred_proba, y_test_in)
filename = os.path.join(output_folder, 'roc_curve_{}.png'.format(project_id))
fig.savefig(filename)
plt.clf()
except ValueError as e:
print(e)
# Confidence-accuracy graph
print('Generating the Confidence-Accuracy graph...')
try:
fig = plot_confidence_performance(y_test_pred, y_test_pred_proba, y_test_in)
filename = os.path.join(output_folder, 'confidence_accuracy_graph_{}.png'.format(project_id))
fig.savefig(filename)
plt.clf()
except ValueError as e:
print(e)
# Confidence Distribution
print('Computing distribution of confidence...')
try:
ax = pd.Series(np.max(y_test_pred_proba, axis=1)).hist(bins=50)
plt.xlabel('Confidence'); plt.ylabel('Counts')
filename = os.path.join(output_folder, 'confidence_distribution_{}.png'.format(project_id))
plt.gcf().savefig(filename)
plt.clf()
except ValueError as e:
print(e)
# Generating learning curve
print('Generating the learning curve...')
from src.utils.analyze_model import plot_learning_curve_cv
fig = plot_learning_curve_cv(X_train, y_train_in, estimator=self._model)
filename = os.path.join(output_folder, 'learning_curve_{}.png'.format(project_id))
fig.savefig(filename)
plt.clf()
# # Run FastText for text classification
df_labeled_train = df.loc[X_train.index, :]
df_labeled_test = df.loc[X_test.index, :]
if RUN_FASTTEXT:
try:
print('Running FastText model...')
import fasttext
def write_as_fasttext_format(df, filename):
with open(filename, 'wt', encoding='utf-8') as f:
_ = [f.write('{} __label__{}\n'.format( r['text'].lower().replace('\n', ' '), r['label_id'].replace(' ', '_'))) for i,r in df.iterrows()]
write_as_fasttext_format(df_labeled_train, output_folder+'/fasttext_train.txt')
write_as_fasttext_format(df_labeled_test, output_folder+'/fasttext_test.txt')
classifier = fasttext.train_supervised(output_folder+'/fasttext_train.txt', )
fasttext_result = classifier.test(output_folder+'/fasttext_test.txt')
fasttext_pred = classifier.predict([r['text'].lower().replace('\n', ' ') for i, r in df_labeled_test.iterrows()])
fasttext_pred = [x[0] for x in fasttext_pred]
# fasttext_pred = ['1' if x == ['__label__1'] else '0' for x in fasttext_pred[0]]
_, evaluation_text = self.evaluate(X=None, y=df_labeled_test['label_id'].str.replace(' ', '_').values, y_pred=fasttext_pred)
result += '\nFastText performance on gold labels set: \n' + evaluation_text
except Exception as e:
print(e)
print('Done running the model!')
return result
def run_model_on_file(input_filename, output_filename, user_id, project_id, label_id=None, method='bow', run_on_entire_dataset=False):
model = LogisticRegression(verbose=False, class_weight='balanced', random_state=0, penalty='l1', C= 1, solver='liblinear', multi_class='ovr')
clf = BootstrapClassifier(model=model)
# pipeline functions are applied sequentially by order of appearance
pipeline = [('base processing', {'col': 'text', 'new_col': 'processed_text'}),
('bag of words', {'col': 'processed_text',
'use_idf': True, 'smooth_idf': True,
'min_df': 2, 'max_df': .9, 'binary': True, 'ngram_range': ngram_range,
'stop_words': 'english', 'strip_accents': 'ascii', 'max_features': 5000}),
('drop columns', {'drop_cols': ['label_id', 'text', 'processed_text', 'index', 'source']})]
result = clf.run_on_file(input_filename, output_filename, user_id, project_id, label_id,
pipeline=pipeline, run_on_entire_dataset=run_on_entire_dataset)
return result
def run_model_on_train_test_split(df, X_train, X_test, y_train, y_test, output_filename, user_id, project_id,
C= 1, label_id=None, method='bow', run_on_entire_dataset=False):
model = LogisticRegression(verbose=False, class_weight=None, random_state=0, penalty='l1', C=C, solver='liblinear', multi_class='ovr')
clf = BootstrapClassifier(model=model)
# pipeline functions are applied sequentially by order of appearance
pipeline = [('base processing', {'col': 'text', 'new_col': 'processed_text'}),
('bag of words', {'col': 'processed_text',
'use_idf': True, 'smooth_idf': True,
'min_df': 2, 'max_df': .9, 'binary': True, 'ngram_range': ngram_range,
'stop_words': 'english', 'strip_accents': 'ascii', 'max_features': 5000}),
('drop columns', {'drop_cols': ['label_id', 'text', 'processed_text', 'index', 'source']})]
result = clf.run_on_train_test(df, X_train, X_test, y_train, y_test, output_filename, user_id, project_id, label_id,
pipeline=pipeline, run_on_entire_dataset=run_on_entire_dataset)
return result, clf
if __name__ == '__main__':
data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, 'data'))
input_file = os.path.join(data_path,'new_data_0030_roy.csv')
output_file = os.path.join(data_path, 'try_output.csv')
data = pd.read_csv(input_file)
label_column_name = "label_id"
data_column_names = list(data.columns.values)
data_column_names.remove(label_column_name)
X_train, X_test, y_train, y_test = train_test_split(data[data_column_names], data[label_column_name], test_size=0.25)
y_train = y_train.astype(str)
y_test = y_test.astype(str)
    result, bootstrap_clf = run_model_on_train_test_split(
        data, X_train, X_test, y_train, y_test,
        output_filename=output_file,
        user_id=2,
        project_id=998,
        label_id=None,
        method='bow',
        run_on_entire_dataset=False)
|
# This code is referenced from https://github.com/VainF/pytorch-msssim/blob/master/pytorch_msssim/ssim.py
import torch
import torch.nn.functional as F
def _fspecial_gauss_1d(size, sigma):
r"""Create 1-D gauss kernel
Args:
size (int): the size of gauss kernel
sigma (float): sigma of normal distribution
Returns:
torch.Tensor: 1D kernel (1 x 1 x size)
"""
coords = torch.arange(size).to(dtype=torch.float)
coords -= size//2
g = torch.exp(-(coords**2) / (2*sigma**2))
g /= g.sum()
return g.unsqueeze(0).unsqueeze(0)
def gaussian_filter(input, win):
r""" Blur input with 1-D kernel
Args:
input (torch.Tensor): a batch of tensors to be blured
        win (torch.Tensor): 1-D gauss kernel
Returns:
torch.Tensor: blured tensors
"""
N, C, H, W = input.shape
out = F.conv2d(input, win, stride=1, padding=0, groups=C)
out = F.conv2d(out, win.transpose(2, 3), stride=1, padding=0, groups=C)
return out
def _ssim(X, Y,
data_range,
win,
size_average=True,
K=(0.01,0.03)):
r""" Calculate ssim index for X and Y
Args:
X (torch.Tensor): images
Y (torch.Tensor): images
win (torch.Tensor): 1-D gauss kernel
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
Returns:
torch.Tensor: ssim results.
"""
K1, K2 = K
batch, channel, height, width = X.shape
compensation = 1.0
C1 = (K1 * data_range)**2
C2 = (K2 * data_range)**2
win = win.to(X.device, dtype=X.dtype)
mu1 = gaussian_filter(X, win)
mu2 = gaussian_filter(Y, win)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = compensation * ( gaussian_filter(X * X, win) - mu1_sq )
sigma2_sq = compensation * ( gaussian_filter(Y * Y, win) - mu2_sq )
sigma12 = compensation * ( gaussian_filter(X * Y, win) - mu1_mu2 )
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2) # set alpha=beta=gamma=1
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
ssim_per_channel = torch.flatten(ssim_map, 2).mean(-1)
cs = torch.flatten( cs_map, 2 ).mean(-1)
return ssim_per_channel, cs
def ssim(X, Y,
data_range=255,
size_average=True,
win_size=11,
win_sigma=1.5,
win=None,
K=(0.01, 0.03),
nonnegative_ssim=False):
r""" interface of ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
        K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu
Returns:
torch.Tensor: ssim results
"""
if len(X.shape) != 4:
raise ValueError('Input images should be 4-d tensors.')
if not X.type() == Y.type():
raise ValueError('Input images should have the same dtype.')
if not X.shape == Y.shape:
raise ValueError('Input images should have the same shape.')
if win is not None: # set win_size
win_size = win.shape[-1]
if not (win_size % 2 == 1):
raise ValueError('Window size should be odd.')
if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat(X.shape[1], 1, 1, 1)
ssim_per_channel, cs = _ssim(X, Y,
data_range=data_range,
win=win,
size_average=False,
K=K)
if nonnegative_ssim:
ssim_per_channel = torch.relu(ssim_per_channel)
if size_average:
return ssim_per_channel.mean()
else:
return ssim_per_channel.mean(1)
def ms_ssim(X, Y,
data_range=255,
size_average=True,
win_size=11,
win_sigma=1.5,
win=None,
weights=None,
K=(0.01, 0.03)):
r""" interface of ms-ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
weights (list, optional): weights for different levels
        K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
Returns:
torch.Tensor: ms-ssim results
"""
if len(X.shape) != 4:
raise ValueError('Input images should be 4-d tensors.')
if not X.type() == Y.type():
raise ValueError('Input images should have the same dtype.')
if not X.shape == Y.shape:
raise ValueError('Input images should have the same dimensions.')
if win is not None: # set win_size
win_size = win.shape[-1]
if not (win_size % 2 == 1):
raise ValueError('Window size should be odd.')
smaller_side = min( X.shape[-2:] )
assert smaller_side > (win_size-1) * (2**4), \
"Image size should be larger than %d due to the 4 downsamplings in ms-ssim"% ((win_size-1) * (2**4))
if weights is None:
weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
weights = torch.FloatTensor(weights).to(X.device, dtype=X.dtype)
if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat(X.shape[1], 1, 1, 1)
levels = weights.shape[0]
mcs = []
for i in range(levels):
ssim_per_channel, cs = _ssim(X, Y,
win=win,
data_range=data_range,
size_average=False,
K=K)
if i<levels-1:
mcs.append(torch.relu(cs))
padding = (X.shape[2] % 2, X.shape[3] % 2)
X = F.avg_pool2d(X, kernel_size=2, padding=padding)
Y = F.avg_pool2d(Y, kernel_size=2, padding=padding)
ssim_per_channel = torch.relu( ssim_per_channel ) # (batch, channel)
mcs_and_ssim = torch.stack( mcs+[ssim_per_channel], dim=0 ) # (level, batch, channel)
ms_ssim_val = torch.prod( mcs_and_ssim ** weights.view(-1, 1, 1), dim=0 )
if size_average:
return ms_ssim_val.mean()
else:
return ms_ssim_val.mean(1)
class SSIM(torch.nn.Module):
def __init__(self,
data_range=255,
size_average=True,
win_size=11,
win_sigma=1.5,
channel=1,
K=(0.01, 0.03),
nonnegative_ssim=False):
r""" class for ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
            channel (int, optional): input channels (default: 1)
            K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu.
"""
super(SSIM, self).__init__()
self.win_size = win_size
self.win = _fspecial_gauss_1d(win_size, win_sigma).repeat(channel, 1, 1, 1)
self.size_average = size_average
self.data_range = data_range
self.K = K
self.nonnegative_ssim = nonnegative_ssim
def forward(self, X, Y):
return ssim(X, Y,
data_range=self.data_range,
size_average=self.size_average,
win=self.win,
K=self.K,
nonnegative_ssim=self.nonnegative_ssim)
class MS_SSIM(torch.nn.Module):
def __init__(self,
data_range=255,
size_average=True,
win_size=11,
win_sigma=1.5,
channel=1,
weights=None,
K=(0.01, 0.03)):
r""" class for ms-ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
            channel (int, optional): input channels (default: 1)
            weights (list, optional): weights for different levels
            K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
"""
super(MS_SSIM, self).__init__()
self.win_size = win_size
self.win = _fspecial_gauss_1d(win_size, win_sigma).repeat(channel, 1, 1, 1)
self.size_average = size_average
self.data_range = data_range
self.weights = weights
self.K = K
def forward(self, X, Y):
return ms_ssim(X, Y,
data_range=self.data_range,
size_average=self.size_average,
win=self.win,
weights=self.weights,
K=self.K)
class SSIM_Loss(SSIM):
def forward(self, img1, img2):
return 1 - super(SSIM_Loss, self).forward(img1, img2)
class MSSSIM_Loss(MS_SSIM):
def forward(self, img1, img2):
return 1 - super(MSSSIM_Loss, self).forward(img1, img2)
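# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# how the functional and module interfaces above might be called. The tensor
# shapes and data_range below are illustrative assumptions only.
if __name__ == '__main__':
    import torch  # already imported at module level; repeated here for clarity

    # two batches of 3-channel images with values in [0, 1]
    x = torch.rand(4, 3, 256, 256)
    y = 0.9 * x + 0.1 * torch.rand(4, 3, 256, 256)

    # functional interfaces
    print('ssim   :', ssim(x, y, data_range=1.0).item())
    print('ms-ssim:', ms_ssim(x, y, data_range=1.0).item())

    # module interface, e.g. 1 - SSIM as a training loss
    criterion = SSIM_Loss(data_range=1.0, channel=3)
    print('ssim loss:', criterion(x, y).item())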
|
from __future__ import annotations
from typing import List, Union, Optional, TYPE_CHECKING
import pandas as pd
from ..parsers import CoreScript, WranglingScript
if TYPE_CHECKING:
from ..models import ModifierModel, FieldModel, SchemaActionModel, ColumnModel
class BaseSchemaAction:
"""Actions inherit from this base class which describes the core functions and methodology for an Action.
Actions should redefine `name`, `title`, `description`, `modifiers` and `structure`, as well as produce a
`transform` function. Everything else will probably remain as defined, but particularly complex Actions should
modify as required.
`structure` can be an empty list, but an Action may be defined by these parameters:
* `modifier`: modifiers, of type `ModifierModel` defined by the ACTION and defining a `transform`.
    * `field`: the specific columns affected by the morph, a `list` of `ColumnModel` or, rarely, `FieldModel`.
A standard script is::
"ACTION > 'destination_column' < [modifier 'source_column', modifier 'source_column']"
Where the structure of the source array is defined by the ACTION.
"""
def __init__(self) -> None:
self.wrangle = WranglingScript()
self.core = CoreScript()
self.name = ""
self.title = ""
self.description = ""
# `structure` defines the format in which an action is written, and validated
# can be - typically - any of `ColumnModel`, `ModifierModel`
# additional terms will require overriding the `has_valid_structure` function
self.structure = []
@property
def modifiers(self) -> Union[None, List[ModifierModel]]:
"""
Describes the ModifierModels for the Action. Typical modifiers are `+` or `-` but the Action can implement any
type of modifier as part of the `transform` function.
As an example::
[
{
"name": "+",
"title": "Add",
"type": "modifier"
},
{
"name": "-",
"title": "Subtract",
"type": "modifier"
}
]
Returns
-------
        None or list of ModifierModel
            ModifierModel representations of the modifiers.
"""
return None
def get_modifier(self, modifier: str) -> Union[ModifierModel, None]:
"""Return a specific set of Modifier definitions in response to an Modifier name.
Parameters
----------
modifier: str
A Modifier name.
Returns
-------
ModifierModel, or None
For the requested Modifier name. Or None, if it doesn't exist.
"""
return next((m for m in self.modifiers if m.name == modifier), None)
@property
def settings(self) -> SchemaActionModel:
"""
        Returns the SchemaActionModel representation of the Action.
        Returns
        -------
        SchemaActionModel
            SchemaActionModel representation of the Action.
"""
from whyqd.models import SchemaActionModel
action_settings = {
"name": self.name,
"title": self.title,
"description": self.description,
"text_structure": self.structure,
}
if self.modifiers:
action_settings["modifiers"] = self.modifiers
return SchemaActionModel(**action_settings)
def transform(
self,
df: pd.DataFrame,
destination: Optional[Union[FieldModel, ColumnModel]] = None,
source: Optional[List[Union[ColumnModel, ModifierModel]]] = None,
) -> pd.DataFrame:
"""
Perform a transformation. This function must be overridden by child Actions and describe a unique
new method.
        .. warning:: Assumes that this is a valid call. Will raise exceptions from inside Pandas if not.
Parameters
----------
df: DataFrame
Working data to be transformed
destination: FieldModel or ColumnModel, default None
Destination column for the result of the Action. If required.
source: list of ColumnModel and / or ModifierModel
List of source columns and modifiers for the action. If required.
Returns
-------
Dataframe
Containing the implementation of the Action
"""
return df
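# ---------------------------------------------------------------------------
# Hedged sketch (not part of whyqd itself): an illustrative subclass showing how
# an Action built on BaseSchemaAction might redefine its metadata and provide a
# `transform`. The action name, structure and column handling are assumptions
# for demonstration only; real Actions follow the project's own structure rules.
class ExampleJoinAction(BaseSchemaAction):
    def __init__(self) -> None:
        super().__init__()
        self.name = "EXAMPLE_JOIN"
        self.title = "Example join"
        self.description = "Concatenate the source columns, as text, into the destination column."
        # illustrative only; real Actions typically list model types such as ColumnModel here
        self.structure = []

    def transform(
        self,
        df: pd.DataFrame,
        destination: Optional[Union[FieldModel, ColumnModel]] = None,
        source: Optional[List[Union[ColumnModel, ModifierModel]]] = None,
    ) -> pd.DataFrame:
        # assumes each source item exposes a `name` attribute matching a column in df
        source_names = [s.name for s in source]
        df[destination.name] = df[source_names].astype(str).agg(" ".join, axis=1)
        return df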
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'loch'."""
from primaires.interpreteur.commande.commande import Commande
class CmdLoch(Commande):
"""Commande 'loch'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "loch", "log")
self.nom_categorie = "navire"
self.aide_courte = "manipule le loch"
self.aide_longue = \
"Cette commande permet d'utiliser le loch présent dans la " \
"salle pour estimer la vitesse du navire."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
personnage.agir("utiliser_loch")
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None or \
salle.navire.etendue is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
loch = salle.loch
if not loch:
personnage << "|err|Il n'y a pas de loch ici.|ff|"
return
personnage << "Vous jetez la corde lestée à la mer."
        personnage.salle.envoyer("{} jette le loch.", personnage)
vitesse_1 = salle.navire.vitesse_noeuds
personnage.etats.ajouter("utiliser_loch")
yield 6
if "utiliser_loch" not in personnage.etats:
return
personnage.etats.retirer("utiliser_loch")
vitesse_2 = salle.navire.vitesse_noeuds
vitesse = (vitesse_2 + vitesse_1) / 2
vitesse = round(vitesse, 1)
vitesse = str(vitesse).replace(".", ",")
navire.donnees["vitesse"] = vitesse
personnage << "Le loch vous donne une vitesse approximative de " \
"{} noeuds.".format(vitesse)
|
#coding=utf-8
import sys
sys.path.append("../configs")
sys.path.append("configs")
import settings
import happybase
import json
import logging
pool = happybase.ConnectionPool(size=settings.hbase_pool_size, \
host=settings.hbase_host, \
table_prefix=settings.hbase_table_prefix,\
protocol='compact')
# conn = happybase.Connection(host=settings.hbase_host,\
# table_prefix=settings.hbase_table_prefix,\
# protocol="compact")
def create_table(table_name):
try:
with pool.connection() as conn:
conn.create_table(table_name, {'index': dict(max_versions=1),'data': dict(max_versions=1)})
except Exception, e:
logging.exception(e)
return False
return True
#row key is imageId,data is mapfileId
def put_index(dictData):
try:
with pool.connection() as conn:
table = conn.table(settings.hbase_table_name)
with table.batch(batch_size=20) as b:
for (key,value) in dictData.items():
data = {'index:mapfileid':value}
imageId = key
b.put(imageId,data)
except Exception, e:
logging.exception(e)
return False
return True
#row key is mapfileId,data is imageId
def put_data(dictData):
try:
with pool.connection() as conn:
table = conn.table(settings.hbase_table_name)
imageIds = dictData.keys()
mapfileId = dictData.values()[0]
table.put(mapfileId,{'data:imageIds':json.dumps(imageIds)})
except Exception, e:
logging.exception(e)
return False
return True
def put(data):
return put_index(data) and put_data(data)
def getImageIds(mapfileId):
try:
with pool.connection() as conn:
table = conn.table(settings.hbase_table_name)
row = table.row(mapfileId)
imageIds = json.loads(row['data:imageIds'])
except Exception, e:
logging.exception(e)
imageIds = None
return imageIds
def getMapFileId(imageId):
try:
with pool.connection() as conn:
table = conn.table(settings.hbase_table_name)
row = table.row(imageId)
logging.info(row)
mapfileId = row['index:mapfileid']
except Exception, e:
logging.exception(e)
mapfileId = None
return mapfileId
def init():
create_table(settings.hbase_table_name)
def _test():
with pool.connection() as conn:
table = conn.table('image')
row = table.row('Video_8582-frame915.jpg')
print row
if __name__ == '__main__':
# init()
# conn = happybase.Connection("172.19.0.2")
# create_table('sxiong3')
# table = conn.table('sxiong3')
# print table
# # table.put(b'row-key', {b'cf:col1': b'value1',b'cf:col2': b'value2'})
# # table.put(b'row-key', {b'cf1:col1': b'value1'}, timestamp=123456789)
# row = table.row(b'row-key')
# print row[b'cf1:col1']
# data = ('2',{'imageid':'1223','videoid':'1111','frameid':'111'})
# print put_index(data)
# print put_data(data)
# imageid2 = '111'
# print test(imageid2)
# table = conn.table(settings.hbase_table_name)
# print type(table.row('1'))
# print table.row('1')
_test()
|
# -*- coding: utf-8 -*-
from abc import abstractmethod, ABC
from typing import Any
class IMqttMessageListener(ABC):
@abstractmethod
def on_message(self, topic: str, message: Any, packet: Any):
pass
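# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal concrete listener
# showing how the IMqttMessageListener interface above can be implemented. The
# print-based handling is an illustrative assumption.
class PrintingMqttMessageListener(IMqttMessageListener):
    def on_message(self, topic: str, message: Any, packet: Any):
        # simply report the incoming topic and payload
        print(f"[{topic}] {message}")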
|
"""
Run the application using Flask's simple webserver via
python runserver.py
command. The default port is 8000. To use a different port, pass it via the --port argument:
python runserver.py --port 5000
"""
import argparse
from factory import create_app
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, help='port to listen to', default=8000)
args = parser.parse_args()
app = create_app()
app.run(port=args.port, debug=True)
|
from keras.layers import LSTM, Embedding, TimeDistributed, Concatenate, Add
from keras.layers import Dropout, Bidirectional, Dense
from keras.models import Model, Input
from keras.metrics import CategoricalAccuracy
from keras.losses import CategoricalCrossentropy
from model.AttentionText.attention_text import CharTagAttention
from model.base_tagger import BaseTagger
class StackedRNNTagger(BaseTagger):
def __init__(
self, word_length=50, char_embed_size=100,
char_rnn_units=400, char_recurrent_dropout=0.33,
recurrent_dropout=0.33, rnn_units=400, embedding_dropout=0.5,
main_layer_dropout=0.5, **kwargs
):
"""
Deep learning based sequence tagger.
        Consists of:
- Char embedding (RNN+Attention)
- RNN
based on postag model of:
https://www.aclweb.org/anthology/K17-3002/
        :param word_length: int, maximum character length in a token
        :param char_embed_size: int, the size of the character-level embedding
:param char_rnn_units: int, RNN units on char level
:param char_recurrent_dropout: float, dropout rate in RNN char level
:param recurrent_dropout: float, dropout rate inside RNN
:param rnn_units: int, the number of rnn units
:param embedding_dropout: float, dropout rate after embedding layer
:param main_layer_dropout: float, dropout rate in between LSTM
"""
super(StackedRNNTagger, self).__init__(**kwargs)
self.word_length = word_length
self.char_embed_size = char_embed_size
self.ed = embedding_dropout
self.rnn_units = rnn_units
self.rd = recurrent_dropout
self.char_rnn_units = char_rnn_units
self.char_rd = char_recurrent_dropout
# self.loss =
self.main_layer_dropout = main_layer_dropout
def __get_char_embedding(self):
"""
Initialize character embedding
"""
word_input_layer = Input(shape=(self.word_length, ))
# +1 for padding
embedding_block = Embedding(
self.n_chars+1, self.char_embed_size,
input_length=self.word_length, trainable=True,
mask_zero=True
)(word_input_layer)
rnn_output = LSTM(
self.char_rnn_units, recurrent_dropout=self.char_rd,
return_sequences=True, return_state=True
)(embedding_block)
embedding_block, h, c = rnn_output
embedding_block = CharTagAttention(
self.char_rnn_units, self.word_length
)(embedding_block)
embedding_block = Concatenate()([embedding_block, c])
embedding_block = Dense(self.embedding_size)(embedding_block)
embedding_block = Model(
inputs=word_input_layer, outputs=embedding_block)
embedding_block.summary()
seq_inp_layer = Input(
shape=(self.seq_length, self.word_length), name="char"
)
embedding_block = TimeDistributed(embedding_block)(seq_inp_layer)
return seq_inp_layer, embedding_block
def init_model(self):
"""
Initialize the network model
"""
        # Word Embedding
input_word_layer = Input(shape=(self.seq_length,), name="word")
pre_trained_word_embed = Embedding(
self.vocab_size+1, self.embedding_size,
input_length=self.seq_length,
embeddings_initializer=self.embedding,
mask_zero=True,
trainable=False
)
pre_trained_word_embed = pre_trained_word_embed(input_word_layer)
learnable_word_embed = Embedding(
self.vocab_size+1, self.embedding_size,
input_length=self.seq_length,
embeddings_initializer="glorot_uniform",
mask_zero=True
)(input_word_layer)
# Char Embedding
input_char_layer, char_embed_block = self.__get_char_embedding()
input_layer = [input_char_layer, input_word_layer]
embed_block = Add()(
[char_embed_block, pre_trained_word_embed, learnable_word_embed]
)
if self.ed > 0:
embed_block = Dropout(self.ed)(embed_block)
self.model = embed_block
# LSTM Layer
self.model = Bidirectional(LSTM(
self.rnn_units, return_sequences=True,
recurrent_dropout=self.rd
))(self.model)
self.model = Dropout(self.main_layer_dropout)(self.model)
self.model = Bidirectional(LSTM(
self.rnn_units, return_sequences=True,
recurrent_dropout=self.rd
))(self.model)
# Dense layer
self.model = Dense(self.n_label+1, activation="relu")(self.model)
out = Dense(self.n_label+1)(self.model)
# out = TimeDistributed(
# Dense(self.n_label+1, activation="softmax")
# )(self.model)
self.model = Model(input_layer, out)
self.model.summary()
self.model.compile(
# Compute loss from logits because the output is not probability
loss=CategoricalCrossentropy(from_logits=True),
            # use CategoricalAccuracy so it can operate on logits
optimizer=self.optimizer, metrics=[CategoricalAccuracy()]
)
def vectorize_input(self, inp_seq):
"""
Prepare vector of the input data
:param inp_seq: list of list of string, tokenized input corpus
:return word_vector: Dictionary, Word and char input vector
"""
input_vector = {
"word": self.get_word_vector(inp_seq),
"char": self.get_char_vector(inp_seq)
}
return input_vector
def init_inverse_indexes(self, X, y):
super(StackedRNNTagger, self).init_inverse_indexes(X, y)
self.init_c2i()
def get_class_param(self):
class_param = {
"label2idx": self.label2idx,
"word2idx": self.word2idx,
"seq_length": self.seq_length,
"word_length": self.word_length,
"idx2label": self.idx2label,
"char2idx": self.char2idx
}
return class_param
@staticmethod
def init_from_config(class_param):
"""
        Initialize a classifier from saved class parameters
        :param class_param: dict, saved class parameters
        :return classifier: Loaded model class
"""
constructor_param = {
"seq_length": class_param["seq_length"],
"word_length": class_param["word_length"],
}
classifier = StackedRNNTagger(**constructor_param)
classifier.label2idx = class_param["label2idx"]
classifier.word2idx = class_param["word2idx"]
classifier.idx2label = class_param["idx2label"]
classifier.n_label = len(classifier.label2idx)
classifier.char2idx = class_param["char2idx"]
return classifier
|
#-*- coding: utf-8 -*-
"""
@author:Bengali.AI
"""
from __future__ import print_function
from .normalizer import Normalizer
|
# Generated by Django 3.1.1 on 2020-10-07 22:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('example', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='profiles/')),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='example.profile')),
],
),
]
|
from typing import List

class Solution:
    def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
        boxTypes.sort(key=lambda x: x[1])  # sort ascending by units per box
        units = 0
while truckSize > 0 and boxTypes:
curr = boxTypes.pop(-1)
while curr[0] > 0 and truckSize > 0:
units += curr[1]
curr[0] -= 1
truckSize -= 1
return units
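# Hedged usage sketch (not part of the original snippet), using the example from
# LeetCode 1710: take the 1 box of 3 units, both boxes of 2 units and 1 box of
# 1 unit, for 3 + 4 + 1 = 8 units on the truck.
if __name__ == "__main__":
    print(Solution().maximumUnits([[1, 3], [2, 2], [3, 1]], 4))  # 8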
|
#lab5-1
# Celsius -> Fahrenheit, Fahrenheit -> Celsius
# Solve using functions
def celtofah(cel):
fah = (9.0 / 5.0) * cel + 32
return fah
def fahtocel(fah):
cel = (5.0 / 9.0) * (fah - 32)
return cel
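# Hedged sanity check (not part of the original lab code): spot-check the two
# conversion functions above at well-known reference points.
assert celtofah(100.0) == 212.0  # boiling point of water
assert fahtocel(32.0) == 0.0     # freezing point of water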
cel = 40.0
fah = 120.0
print('Celsius'+10*' '+ 'Fahrenheit')
for i in range(0,10): # from 40 degrees Celsius downward, one degree at a time
print(cel , 10 * ' ', celtofah(cel))
cel -= 1.0
print('Fahrenheit' + 10 * ' ' + 'Celsius')
for i in range(0,10): # from 120 degrees Fahrenheit downward in steps of 10
print(fah, 10 * ' ', fahtocel(fah))
fah -= 10.0
#lab 5-2
# Given two numbers, treat them as the width and height and find the area of the rectangle
import math
def area(n1, n2):
area = n1 * n2
return area
# Given two numbers, treat the smaller one as the radius and find the area of the circle
def circle(n1, n2):
if n1 < n2:
circle = (n1 ** 2) * math.pi
else :
circle = (n2 ** 2) * math.pi
return circle
# Compute and print the area of the rectangle and of the circle
def main2(n1, n2):
arearectangular = area(n1, n2)
circlearea = circle(n1, n2)
print("The area of rectangular is: ", arearectangular)
print("The area of circle is: ", circlearea)
n1 = eval(input("Enter the number1: "))
n2 = eval(input("Enter the number2: "))
main2(n1, n2)
#lab 5-3
# Guess a random number
import random
def main3():
computer = random.randint(0,99)
GuessNumber(computer)
def GuessNumber(computer):
guesstime = 0
start = 0
end = 100
while guesstime < 100 :
guess = eval(input("Enter your guess number: "))
if guess == computer :
print("Correct :) ")
break
elif guess > computer :
print('Enter between ' , start, ' and ' , guess)
guesstime += 1
end = guess
elif guess < computer :
print('Enter between ' , guess, ' and ' , end)
guesstime += 1
start = guess
main3()
|
# -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openua.views.cancellation import TenderUaCancellationResource
from openprocurement.tender.openeu.utils import cancel_tender
@optendersresource(
name="aboveThresholdEU:Tender Cancellations",
collection_path="/tenders/{tender_id}/cancellations",
path="/tenders/{tender_id}/cancellations/{cancellation_id}",
procurementMethodType="aboveThresholdEU",
description="Tender cancellations",
)
class TenderCancellationResource(TenderUaCancellationResource):
@staticmethod
def cancel_tender_method(request):
return cancel_tender(request)
def cancel_lot(self, cancellation):
tender = self.request.validated["tender"]
self._cancel_lots(tender, cancellation)
cancelled_lots, cancelled_items, cancelled_features = self._get_cancelled_lot_objects(tender)
self._invalidate_lot_bids(tender, cancelled_lots=cancelled_lots, cancelled_features=cancelled_features)
self._cancel_lot_qualifications(tender, cancelled_lots=cancelled_lots)
self._lot_update_check_tender_status(tender)
self._lot_update_check_next_award(tender)
@staticmethod
def _get_cancelled_lot_objects(tender):
cancelled_lots = {i.id for i in tender.lots if i.status == "cancelled"}
cancelled_items = {i.id for i in tender.items if i.relatedLot in cancelled_lots}
cancelled_features = {
i.code
for i in (tender.features or [])
if i.featureOf == "lot" and i.relatedItem in cancelled_lots
or i.featureOf == "item" and i.relatedItem in cancelled_items
}
return cancelled_lots, cancelled_items, cancelled_features
def _lot_update_check_next_award(self, tender):
if tender.status == "active.auction" and all(
i.auctionPeriod and i.auctionPeriod.endDate
for i in self.request.validated["tender"].lots
if i.status == "active"
):
self.add_next_award_method(self.request)
@staticmethod
def _invalidate_lot_bids(tender, cancelled_lots, cancelled_features):
check_statuses = (
"active.tendering",
"active.pre-qualification",
"active.pre-qualification.stand-still",
"active.auction",
)
if tender.status in check_statuses:
def filter_docs(items):
result = [i for i in items
if i.documentOf != "lot"
or i.relatedItem not in cancelled_lots]
return result
for bid in tender.bids:
if tender.status == "active.tendering":
bid.documents = filter_docs(bid.documents)
bid.financialDocuments = filter_docs(bid.financialDocuments)
bid.eligibilityDocuments = filter_docs(bid.eligibilityDocuments)
bid.qualificationDocuments = filter_docs(bid.qualificationDocuments)
bid.parameters = [i for i in bid.parameters if i.code not in cancelled_features]
bid.lotValues = [i for i in bid.lotValues if i.relatedLot not in cancelled_lots]
if not bid.lotValues and bid.status in ["pending", "active"]:
bid.status = "invalid" if tender.status == "active.tendering" else "invalid.pre-qualification"
@staticmethod
def _cancel_lot_qualifications(tender, cancelled_lots):
for qualification in tender.qualifications:
if qualification.lotID in cancelled_lots:
qualification.status = "cancelled"
|
import logging
import os
import io
from unittest import mock
from django.test import TestCase, tag
from django.contrib.auth.models import User
from django.conf import settings
from feeds.models import Feed
from plugins.models import Plugin
from plugins.models import ComputeResource
from plugins.models import PluginParameter, DefaultPathParameter
from plugininstances.models import PluginInstance, PluginInstanceFile
from plugininstances.models import PluginInstanceFilter
from plugininstances.models import swiftclient
from plugininstances.models import PluginAppManager
class ModelTests(TestCase):
def setUp(self):
# avoid cluttered console output (for instance logging all the http requests)
logging.disable(logging.CRITICAL)
self.plugin_fs_name = "simplefsapp"
self.plugin_fs_parameters = {'dir': {'type': 'string', 'optional': True,
'default': "./"}}
self.plugin_ds_name = "simpledsapp"
self.plugin_ds_parameters = {'prefix': {'type': 'string', 'optional': False}}
self.username = 'foo'
self.password = 'foo-pass'
(self.compute_resource, tf) = ComputeResource.objects.get_or_create(
compute_resource_identifier="host")
# create plugins
(plugin_fs, tf) = Plugin.objects.get_or_create(name=self.plugin_fs_name,
type='fs',
compute_resource=self.compute_resource)
(plugin_ds, tf) = Plugin.objects.get_or_create(name=self.plugin_ds_name,
type='ds',
compute_resource=self.compute_resource)
# add plugins' parameters
(plg_param, tf) = PluginParameter.objects.get_or_create(
plugin=plugin_fs,
name='dir',
type=self.plugin_fs_parameters['dir']['type'],
optional=self.plugin_fs_parameters['dir']['optional'])
default = self.plugin_fs_parameters['dir']['default']
DefaultPathParameter.objects.get_or_create(plugin_param=plg_param, value=default)
PluginParameter.objects.get_or_create(
plugin=plugin_ds,
name='prefix',
type=self.plugin_ds_parameters['prefix']['type'],
optional=self.plugin_ds_parameters['prefix']['optional'])
# create user
User.objects.create_user(username=self.username,
password=self.password)
def tearDown(self):
# re-enable logging
logging.disable(logging.DEBUG)
class PluginInstanceModelTests(ModelTests):
def test_save_creates_new_feed_just_after_fs_plugininstance_is_created(self):
"""
        Test whether overridden save method creates a feed just after an 'fs' plugin
instance is created.
"""
# create an 'fs' plugin instance that in turn should create a new feed
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
self.assertEqual(Feed.objects.count(), 1)
self.assertEqual(pl_inst.feed.name,pl_inst.plugin.name)
def test_save_does_not_create_new_feed_just_after_ds_plugininstance_is_created(self):
"""
        Test whether overridden save method does not create a feed just after a 'ds' plugin
instance is created.
"""
# create a 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
# create a 'ds' plugin instance whose previous is the previous 'fs' plugin instance
plugin = Plugin.objects.get(name=self.plugin_ds_name)
PluginInstance.objects.create(plugin=plugin, owner=user, previous=plg_inst,
compute_resource=plugin.compute_resource)
# the new 'ds' plugin instance shouldn't create a new feed
self.assertEqual(Feed.objects.count(), 1)
def test_get_root_instance(self):
"""
Test whether custom get_root_instance method returns the root 'fs' plugin
        instance for a given plugin instance.
"""
# create a 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst_root = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
# create a 'ds' plugin instance whose root is the previous 'fs' plugin instance
plugin = Plugin.objects.get(name=self.plugin_ds_name)
plg_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
previous=plg_inst_root,
compute_resource=plugin.compute_resource)
root_instance = plg_inst.get_root_instance()
self.assertEqual(root_instance, plg_inst_root)
def test_get_descendant_instances(self):
"""
Test whether custom get_descendant_instances method returns all the plugin
instances that are a descendant of a plugin instance
"""
# create a 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst_root = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
# create a 'ds' plugin instance whose previous is the previous 'fs' plugin instance
plugin = Plugin.objects.get(name=self.plugin_ds_name)
plg_inst1 = PluginInstance.objects.create(plugin=plugin, owner=user,
previous=plg_inst_root,
compute_resource=plugin.compute_resource)
# create another 'ds' plugin instance whose previous is the previous 'ds' plugin
# instance
plugin = Plugin.objects.get(name=self.plugin_ds_name)
plg_inst2 = PluginInstance.objects.create(plugin=plugin, owner=user,
previous=plg_inst1,
compute_resource=plugin.compute_resource)
decend_instances = plg_inst_root.get_descendant_instances()
self.assertEqual(len(decend_instances), 3)
self.assertEqual(decend_instances[0], plg_inst_root)
self.assertEqual(decend_instances[1], plg_inst1)
self.assertEqual(decend_instances[2], plg_inst2)
def test_get_output_path(self):
"""
Test whether custom get_output_path method returns appropriate output paths
for both 'fs' and 'ds' plugins.
"""
# create an 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin_fs = Plugin.objects.get(name=self.plugin_fs_name)
pl_inst_fs = PluginInstance.objects.create(plugin=plugin_fs, owner=user,
compute_resource=plugin_fs.compute_resource)
# 'fs' plugins will output files to:
# SWIFT_CONTAINER_NAME/<username>/feed_<id>/plugin_name_plugin_inst_<id>/data
fs_output_path = '{0}/feed_{1}/{2}_{3}/data'.format( self.username,
pl_inst_fs.feed.id,
pl_inst_fs.plugin.name,
pl_inst_fs.id)
self.assertEqual(pl_inst_fs.get_output_path(), fs_output_path)
# create a 'ds' plugin instance
user = User.objects.get(username=self.username)
plugin_ds = Plugin.objects.get(name=self.plugin_ds_name)
pl_inst_ds = PluginInstance.objects.create(plugin=plugin_ds,
owner=user, previous=pl_inst_fs, compute_resource=plugin_ds.compute_resource)
# 'ds' plugins will output files to:
# SWIFT_CONTAINER_NAME/<username>/feed_<id>/...
#/previous_plugin_name_plugin_inst_<id>/plugin_name_plugin_inst_<id>/data
ds_output_path = os.path.join(os.path.dirname(fs_output_path),
'{0}_{1}/data'.format(pl_inst_ds.plugin.name,
pl_inst_ds.id))
self.assertEqual(pl_inst_ds.get_output_path(), ds_output_path)
def test_register_output_files(self):
"""
Test whether custom register_output_files method properly registers a plugin's
output file with the REST API.
"""
# create an 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
output_path = pl_inst.get_output_path()
object_list = [{'name': output_path + '/file1.txt'}]
container_data = ['', object_list]
with mock.patch.object(swiftclient.Connection, '__init__',
return_value=None) as conn_init_mock:
with mock.patch.object(swiftclient.Connection, 'get_container',
return_value=container_data) as conn_get_container_mock:
pl_inst.register_output_files()
conn_init_mock.assert_called_with(user=settings.SWIFT_USERNAME,
key=settings.SWIFT_KEY,
authurl=settings.SWIFT_AUTH_URL,)
conn_get_container_mock.assert_called_with(settings.SWIFT_CONTAINER_NAME,
prefix=output_path, full_listing=True)
self.assertEqual(PluginInstanceFile.objects.count(), 1)
plg_inst_file = PluginInstanceFile.objects.get(plugin_inst=pl_inst)
self.assertEqual(plg_inst_file.fname.name, output_path + '/file1.txt')
@tag('integration')
def test_integration_register_output_files(self):
"""
Test whether custom register_output_files method properly registers a plugin's
output file with the REST API.
"""
# create an 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
# initiate a Swift service connection
conn = swiftclient.Connection(
user=settings.SWIFT_USERNAME,
key=settings.SWIFT_KEY,
authurl=settings.SWIFT_AUTH_URL,
)
# create container in case it doesn't already exist
conn.put_container(settings.SWIFT_CONTAINER_NAME)
# upload file to Swift storage
output_path = plg_inst.get_output_path()
with io.StringIO("test file") as file1:
conn.put_object(settings.SWIFT_CONTAINER_NAME, output_path + '/file1.txt',
contents=file1.read(),
content_type='text/plain')
plg_inst.register_output_files()
self.assertEqual(PluginInstanceFile.objects.count(), 1)
plg_inst_file = PluginInstanceFile.objects.get(plugin_inst=plg_inst)
self.assertEqual(plg_inst_file.fname.name, output_path + '/file1.txt')
# delete file from Swift storage
conn.delete_object(settings.SWIFT_CONTAINER_NAME, output_path + '/file1.txt')
def test_cancel(self):
"""
Test whether custom cancel method cancels the execution of the app corresponding
to a plugin instance.
"""
# create a 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
self.assertEqual(plg_inst.status, 'started')
with mock.patch.object(PluginAppManager, 'cancel_plugin_app_exec',
return_value=None) as manager_cancel_plugin_app_exec_mock:
plg_inst.cancel()
# check that manager's cancel_plugin_app_exec method was called once
manager_cancel_plugin_app_exec_mock.assert_called_once()
self.assertEqual(plg_inst.status, 'cancelled')
def test_run(self):
"""
Test whether custom run method starts the execution of the app corresponding
to a plugin instance.
"""
with mock.patch.object(PluginAppManager, 'run_plugin_app',
return_value=None) as run_plugin_app_mock:
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
self.assertEqual(plg_inst.status, 'started')
parameters_dict = {'dir': './'}
plg_inst.run(parameters_dict)
self.assertEqual(plg_inst.status, 'started')
# check that manager's run_plugin_app method was called with appropriate args
run_plugin_app_mock.assert_called_with(plg_inst,
parameters_dict,
service = 'pfcon',
inputDirOverride = '/share/incoming',
outputDirOverride = '/share/outgoing')
def test_check_exec_status(self):
"""
Test whether custom check_exec_status method checks the execution status of the
app corresponding to a plugin instance.
"""
with mock.patch.object(PluginAppManager, 'check_plugin_app_exec_status',
return_value=None) as check_plugin_app_exec_status_mock:
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
plg_inst.check_exec_status()
# check that manager's check_plugin_app_exec_status method was called once
check_plugin_app_exec_status_mock.assert_called_once()
class PluginInstanceFilterModelTests(ModelTests):
def test_filter_by_root_id(self):
"""
Test whether custom filter_by_root_id method returns the plugin instances in a
queryset with a common root plugin instance.
"""
# create a 'fs' plugin instance
user = User.objects.get(username=self.username)
plugin = Plugin.objects.get(name=self.plugin_fs_name)
plg_inst_root = PluginInstance.objects.create(plugin=plugin, owner=user,
compute_resource=plugin.compute_resource)
# create a 'ds' plugin instance whose previous is the previous 'fs' plugin instance
plugin = Plugin.objects.get(name=self.plugin_ds_name)
plg_inst1 = PluginInstance.objects.create(plugin=plugin, owner=user,
previous=plg_inst_root,
compute_resource=plugin.compute_resource)
# create another 'ds' plugin instance whose previous is the previous 'ds' plugin
# instance
plugin = Plugin.objects.get(name=self.plugin_ds_name)
plg_inst2 = PluginInstance.objects.create(plugin=plugin, owner=user,
previous=plg_inst1,
compute_resource=plugin.compute_resource)
queryset = PluginInstance.objects.all()
value = plg_inst1.id
filter = PluginInstanceFilter()
filtered_queryset = filter.filter_by_root_id(queryset, "", value)
self.assertEqual(len(filtered_queryset), 2)
self.assertEqual(filtered_queryset[0], plg_inst1)
self.assertEqual(filtered_queryset[1], plg_inst2)
|
import traceback
from datetime import timedelta
import pytz
import pendulum
import pandas as pd
from dagster import (
solid,
pipeline,
ModeDefinition,
Output,
OutputDefinition,
)
from repositories.capturas.resources import (
keepalive_key,
timezone_config,
discord_webhook,
)
from repositories.libraries.basedosdados.resources import basedosdados_config
from repositories.helpers.hooks import (
discord_message_on_failure,
discord_message_on_success,
redis_keepalive_on_failure,
redis_keepalive_on_succes,
log_critical,
)
from repositories.capturas.solids import (
create_current_datetime_partition,
get_file_path_and_partitions,
get_raw,
save_raw_local,
save_treated_local,
upload_logs_to_bq,
)
from repositories.libraries.basedosdados.solids import upload_to_bigquery
@solid(
required_resource_keys={"basedosdados_config", "timezone_config"},
output_defs=[
OutputDefinition(name="treated_data", is_required=True),
OutputDefinition(name="error", is_required=False)],
)
def pre_treatment_br_rj_riodejaneiro_stpl_gps(context, data, timestamp, prev_error=None):
    if prev_error is not None:
        # propagate the upstream error and skip treatment entirely
        yield Output(pd.DataFrame(), output_name="treated_data")
        yield Output(prev_error, output_name="error")
        return
error = None
timezone = context.resources.timezone_config["timezone"]
data = data.json()
df = pd.DataFrame(data["veiculos"])
timestamp_captura = pd.to_datetime(timestamp)
df["timestamp_captura"] = timestamp_captura
df["dataHora"] = df["dataHora"].apply(
lambda ms: pd.to_datetime(
pendulum.from_timestamp(ms / 1000.0, timezone).isoformat()
)
)
# Filter data for 0 <= time diff <= 1min
try:
datahora_col = "dataHora"
df_treated = df
try:
df_treated[datahora_col] = df_treated[datahora_col].apply(
lambda x: x.tz_convert(timezone)
)
except TypeError:
df_treated[datahora_col] = df_treated[datahora_col].apply(
lambda x: x.tz_localize(timezone)
)
try:
df_treated["timestamp_captura"] = df_treated["timestamp_captura"].apply(
lambda x: x.tz_convert(timezone)
)
except TypeError:
df_treated["timestamp_captura"] = df_treated["timestamp_captura"].apply(
lambda x: x.tz_localize(timezone)
)
mask = (df_treated["timestamp_captura"] - df_treated[datahora_col]).apply(
lambda x: timedelta(seconds=0) <= x <= timedelta(minutes=1)
)
df_treated = df_treated[mask]
context.log.info(f"Shape antes da filtragem: {df.shape}")
context.log.info(f"Shape após a filtrage: {df_treated.shape}")
if df_treated.shape[0] == 0:
error = ValueError("After filtering, the dataframe is empty!")
df = df_treated
    except Exception:
err = traceback.format_exc()
log_critical(f"Failed to filter STPL data: \n{err}")
yield Output(df, output_name="treated_data")
yield Output(error, output_name="error")
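# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original pipeline): the filtering step above
# keeps only rows whose GPS timestamp ("dataHora") is at most one minute older
# than the capture timestamp. A minimal standalone version of that mask,
# assuming both columns are already timezone-aware datetimes:
def _example_time_diff_mask(df_example: pd.DataFrame) -> pd.Series:
    diff = df_example["timestamp_captura"] - df_example["dataHora"]
    return diff.apply(lambda x: timedelta(seconds=0) <= x <= timedelta(minutes=1))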
@discord_message_on_failure
@discord_message_on_success
@redis_keepalive_on_failure
@redis_keepalive_on_succes
@pipeline(
mode_defs=[
ModeDefinition(
"dev",
resource_defs={
"basedosdados_config": basedosdados_config,
"timezone_config": timezone_config,
"discord_webhook": discord_webhook,
"keepalive_key": keepalive_key,
},
),
],
tags={
"pipeline": "br_rj_riodejaneiro_stpl_gps_registros",
"dagster-k8s/config": {
"container_config": {
"resources": {
"requests": {"cpu": "250m", "memory": "250Mi"},
"limits": {"cpu": "500m", "memory": "500Mi"},
},
}
},
},
)
def br_rj_riodejaneiro_stpl_gps_registros():
filename, partitions = create_current_datetime_partition()
file_path = get_file_path_and_partitions(filename, partitions)
data, timestamp, error = get_raw()
raw_file_path = save_raw_local(data, file_path)
treated_data, error = pre_treatment_br_rj_riodejaneiro_stpl_gps(
data, timestamp, prev_error=error)
upload_logs_to_bq(timestamp, error)
treated_file_path = save_treated_local(treated_data, file_path)
upload_to_bigquery([raw_file_path, treated_file_path], partitions)
|
import KratosMultiphysics
import KratosMultiphysics.ConstitutiveModelsApplication
import MainMaterial
#MainMaterial.Solution("shear_traction_parameters.json","isochoric_ogden_materials.json").Run()
#MainMaterial.Solution("shear_traction_parameters.json","ogden_materials.json").Run()
#MainMaterial.Solution("shear_traction_parameters.json","neohookean_materials.json").Run()
#MainMaterial.Solution("shear_parameters.json","isochoric_ogden_materials.json").Run()
#MainMaterial.Solution("shear_parameters.json","ogden_materials.json").Run()
#MainMaterial.Solution("shear_parameters.json","neohookean_materials.json").Run()
#MainMaterial.Solution("traction_parameters.json","isochoric_ogden_materials.json").Run()
#MainMaterial.Solution("traction_parameters.json","ogden_materials.json").Run()
MainMaterial.Solution("traction_parameters.json","neohookean_materials.json").Run()
|
"""Tests for distutils.dist."""
import distutils.cmd
import distutils.dist
import os
import shutil
import sys
import tempfile
import unittest
from test.test_support import TESTFN
class test_dist(distutils.cmd.Command):
"""Sample distutils extension command."""
user_options = [
("sample-option=", "S", "help text"),
]
def initialize_options(self):
self.sample_option = None
class TestDistribution(distutils.dist.Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
class DistributionTestCase(unittest.TestCase):
def setUp(self):
self.argv = sys.argv[:]
del sys.argv[1:]
def tearDown(self):
sys.argv[:] = self.argv
def create_distribution(self, configfiles=()):
d = TestDistribution()
d._config_files = configfiles
d.parse_config_files()
d.parse_command_line()
return d
def test_command_packages_unspecified(self):
sys.argv.append("build")
d = self.create_distribution()
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_command_packages_cmdline(self):
sys.argv.extend(["--command-packages",
"foo.bar,distutils.tests",
"test_dist",
"-Ssometext",
])
d = self.create_distribution()
# let's actually try to load our test command:
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "distutils.tests"])
cmd = d.get_command_obj("test_dist")
self.assert_(isinstance(cmd, test_dist))
self.assertEqual(cmd.sample_option, "sometext")
def test_command_packages_configfile(self):
sys.argv.append("build")
f = open(TESTFN, "w")
try:
print >>f, "[global]"
print >>f, "command_packages = foo.bar, splat"
f.close()
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "splat"])
# ensure command line overrides config:
sys.argv[1:] = ["--command-packages", "spork", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "spork"])
# Setting --command-packages to '' should cause the default to
# be used even if a config file specified something else:
sys.argv[1:] = ["--command-packages", "", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(), ["distutils.command"])
finally:
os.unlink(TESTFN)
def test_suite():
return unittest.makeSuite(DistributionTestCase)
|
"""Generated message classes for config version v1alpha1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'config'
class ApplyInput(_messages.Message):
r"""Input parameters for preview of apply operation.
Fields:
blueprint: Required. Blueprint to preview.
configController: The Config Controller instance to preview configurations
against. Format:
`projects/{project}/locations/{location}/krmApiHosts/{instance}`.
createConfigController: Optional. If set, then a Config Controller
instance with a default, well-known name will be created as part of the
deployment, if it does not already exist. Note that Blueprints
Controller does not manage this Config Controller instance and only
creates it.
deployment: Deployment to dry-run modification to during preview. For
preview of new deployment this could be left empty. For preview
modifications to existing deployment this must match an existing
deployment, otherwise this will be considered as a new deployment.
Format:
`projects/{project}/locations/{location}/deployments/{deployment}`
pipelineOnly: Optional. If set, then only the Pipeline will run. The
rendered content will still be uploaded to Cloud Storage. No dry-run
will occur, and no preview diff artifacts will be generated.
"""
blueprint = _messages.MessageField('Blueprint', 1)
configController = _messages.StringField(2)
createConfigController = _messages.BooleanField(3)
deployment = _messages.StringField(4)
pipelineOnly = _messages.BooleanField(5)
class ApplyResults(_messages.Message):
r"""Locations of outputs from config application.
Fields:
artifacts: Location of kpt artifacts in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
build: Corresponding Cloud Build run. Format:
`projects/{project}/locations/{location}/builds/{build}`
content: Location of generated manifests in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
logs: Location of logs in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
"""
artifacts = _messages.StringField(1)
build = _messages.StringField(2)
content = _messages.StringField(3)
logs = _messages.StringField(4)
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
"ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
"audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
"DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For
sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
logging. It also exempts jose@example.com from DATA_READ logging, and
aliya@example.com from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example: {
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables
'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from
DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class Binding(_messages.Message):
r"""Associates `members`, or principals, with a `role`.
Fields:
condition: The condition that is associated with this binding. If the
condition evaluates to `true`, then this binding applies to the current
request. If the condition evaluates to `false`, then this binding does
not apply to the current request. However, a different role binding
might grant the same role to one or more of the principals in this
binding. To learn which resources support conditions in their IAM
policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
members: Specifies the principals requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet; with
or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `alice@example.com` .
* `serviceAccount:{emailid}`: An email address that represents a service
account. For example, `my-other-app@appspot.gserviceaccount.com`. *
`group:{emailid}`: An email address that represents a Google group. For
example, `admins@example.com`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `alice@example.com?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `my-other-
app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group that
has been recently deleted. For example,
`admins@example.com?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding. * `domain:{domain}`: The G Suite
domain (primary) that represents all the users of that domain. For
example, `google.com` or `example.com`.
role: Role that is assigned to the list of `members`, or principals. For
example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class Blueprint(_messages.Message):
r"""Contains details surrounding source configurations to be deployed.
Fields:
gcsSource: Required. URI of an object in Google Cloud Storage. Format:
`gs://{bucket}/{object}` URI may also specify an object version. Format:
`gs://{bucket}/{object}#{version}`
gitSource: Required. A set of files in a Git repository.
postprocessors: Optional. Functions to run after the pipeline defined in
the blueprint's kptfile. At most 5 postprocessors can be provided.
preprocessors: Optional. Functions to run before the pipeline defined in
the blueprint's kptfile. At most 5 preprocessors can be provided.
"""
gcsSource = _messages.StringField(1)
gitSource = _messages.MessageField('GitSource', 2)
postprocessors = _messages.MessageField('Function', 3, repeated=True)
preprocessors = _messages.MessageField('Function', 4, repeated=True)
class CancelOperationRequest(_messages.Message):
r"""The request message for Operations.CancelOperation."""
class ConfigProjectsLocationsDeploymentsCreateRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsCreateRequest object.
Fields:
deployment: A Deployment resource to be passed as the request body.
deploymentId: A string attribute.
parent: Required. The parent in whose context the Deployment is created.
The parent value is in the format:
'projects/{project_id}/locations/{location}'.
requestId: An optional request ID to identify requests. Specify a unique
request ID so that if you must retry your request, the server will know
to ignore the request if it has already been completed. The server will
guarantee that for at least 60 minutes since the first request. For
example, consider a situation where you make an initial request and the
request times out. If you make the request again with the same request
ID, the server can check if original operation with the same request ID
was received, and if so, will ignore the second request. This prevents
clients from accidentally creating duplicate commitments. The request ID
must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
"""
deployment = _messages.MessageField('Deployment', 1)
deploymentId = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
requestId = _messages.StringField(4)
class ConfigProjectsLocationsDeploymentsDeleteRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsDeleteRequest object.
Fields:
force: If set to true, any revisions for this deployment will also be
deleted. (Otherwise, the request will only work if the deployment has no
revisions.)
name: Required. The name of this service resource in the format:
'projects/{project_id}/locations/{location}/deployments/{deployment}'.
requestId: An optional request ID to identify requests. Specify a unique
request ID so that if you must retry your request, the server will know
to ignore the request if it has already been completed. The server will
guarantee that for at least 60 minutes after the first request. For
example, consider a situation where you make an initial request and the
request times out. If you make the request again with the same request
ID, the server can check if original operation with the same request ID
was received, and if so, will ignore the second request. This prevents
clients from accidentally creating duplicate commitments. The request ID
must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
"""
force = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
requestId = _messages.StringField(3)
class ConfigProjectsLocationsDeploymentsGetIamPolicyRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsGetIamPolicyRequest object.
Fields:
options_requestedPolicyVersion: Optional. The maximum policy version that
will be used to format the policy. Valid values are 0, 1, and 3.
Requests specifying an invalid value will be rejected. Requests for
policies with any conditional role bindings must specify version 3.
Policies with no conditional role bindings may specify any valid value
or leave the field unset. The policy in the response might use the
policy version that you specified, or it might use a lower policy
version. For example, if you specify version 3, but the policy has no
conditional role bindings, the response uses version 1. To learn which
resources support conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
options_requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
resource = _messages.StringField(2, required=True)
class ConfigProjectsLocationsDeploymentsGetRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsGetRequest object.
Fields:
name: Required. The name of this service resource in the format:
'projects/{project_id}/locations/{location}/deployments/{deployment}'.
"""
name = _messages.StringField(1, required=True)
class ConfigProjectsLocationsDeploymentsListRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsListRequest object.
Fields:
filter: A string attribute.
orderBy: A string attribute.
    pageSize: An integer attribute.
pageToken: A string attribute.
parent: Required. The parent in whose context the Deployments are listed.
The parent value is in the format:
'projects/{project_id}/locations/{location}'.
"""
filter = _messages.StringField(1)
orderBy = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
parent = _messages.StringField(5, required=True)
class ConfigProjectsLocationsDeploymentsPatchRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsPatchRequest object.
Fields:
deployment: A Deployment resource to be passed as the request body.
name: Resource name of the deployment. Format:
`projects/{project}/locations/{location}/deployments/{deployment}`
requestId: An optional request ID to identify requests. Specify a unique
request ID so that if you must retry your request, the server will know
to ignore the request if it has already been completed. The server will
      guarantee that for at least 60 minutes after the first request. For
example, consider a situation where you make an initial request and the
request times out. If you make the request again with the same request
      ID, the server can check if the original operation with the same request
was received, and if so, will ignore the second request. This prevents
clients from accidentally creating duplicate commitments. The request ID
must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
updateMask: Field mask is used to specify the fields to be overwritten in
the Deployment resource by the update. The fields specified in the
update_mask are relative to the resource, not the full request. A field
will be overwritten if it is in the mask. If the user does not provide a
mask then all fields will be overwritten.
"""
deployment = _messages.MessageField('Deployment', 1)
name = _messages.StringField(2, required=True)
requestId = _messages.StringField(3)
updateMask = _messages.StringField(4)
class ConfigProjectsLocationsDeploymentsRevisionsGetRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsRevisionsGetRequest object.
Fields:
name: Required. The name of this service resource in the format: 'projects
/{project_id}/locations/{location}/deployments/{deployment}/revisions/{r
evision}'.
"""
name = _messages.StringField(1, required=True)
class ConfigProjectsLocationsDeploymentsRevisionsListRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsRevisionsListRequest object.
Fields:
filter: Lists the Revisions that match the filter expression. A filter
expression filters the resources listed in the response. The expression
must be of the form '{field} {operator} {value}' where operators: '<',
'>', '<=', '>=', '!=', '=', ':' are supported (colon ':' represents a
HAS operator which is roughly synonymous with equality). {field} can
refer to a proto or JSON field, or a synthetic field. Field names can be
camelCase or snake_case. Examples: - Filter by name: name =
"projects/foo/locations/us-central1/deployments/dep/revisions/bar -
Filter by labels: - Resources that have a key called 'foo' labels.foo:*
- Resources that have a key called 'foo' whose value is 'bar' labels.foo
= bar - Filter by state: - Revisions in CREATING state. state=CREATING
orderBy: Field to use to sort the list.
pageSize: When requesting a 'page' of resources, 'page_size' specifies
number of resources to return. If unspecified or set to 0, all resources
will be returned.
pageToken: Token returned by previous call to 'ListRevisions' which
specifies the position in the list from where to continue listing the
resources.
parent: Required. The parent in whose context the Revisions are listed.
The parent value is in the format:
'projects/{project_id}/locations/{location}/deployments/{deployment}'.
"""
filter = _messages.StringField(1)
orderBy = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
parent = _messages.StringField(5, required=True)
class ConfigProjectsLocationsDeploymentsSetIamPolicyRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class ConfigProjectsLocationsDeploymentsTestIamPermissionsRequest(_messages.Message):
r"""A ConfigProjectsLocationsDeploymentsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class ConfigProjectsLocationsGetRequest(_messages.Message):
r"""A ConfigProjectsLocationsGetRequest object.
Fields:
name: Resource name for the location.
"""
name = _messages.StringField(1, required=True)
class ConfigProjectsLocationsListRequest(_messages.Message):
r"""A ConfigProjectsLocationsListRequest object.
Fields:
filter: A filter to narrow down results to a preferred subset. The
filtering language accepts strings like "displayName=tokyo", and is
documented in more detail in [AIP-160](https://google.aip.dev/160).
name: The resource that owns the locations collection, if applicable.
pageSize: The maximum number of results to return. If not set, the service
selects a default.
pageToken: A page token received from the `next_page_token` field in the
response. Send that page token to receive the subsequent page.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class ConfigProjectsLocationsOperationsCancelRequest(_messages.Message):
r"""A ConfigProjectsLocationsOperationsCancelRequest object.
Fields:
cancelOperationRequest: A CancelOperationRequest resource to be passed as
the request body.
name: The name of the operation resource to be cancelled.
"""
cancelOperationRequest = _messages.MessageField('CancelOperationRequest', 1)
name = _messages.StringField(2, required=True)
class ConfigProjectsLocationsOperationsDeleteRequest(_messages.Message):
r"""A ConfigProjectsLocationsOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class ConfigProjectsLocationsOperationsGetRequest(_messages.Message):
r"""A ConfigProjectsLocationsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class ConfigProjectsLocationsOperationsListRequest(_messages.Message):
r"""A ConfigProjectsLocationsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation's parent resource.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class ConfigProjectsLocationsPreviewsCreateRequest(_messages.Message):
r"""A ConfigProjectsLocationsPreviewsCreateRequest object.
Fields:
parent: Required. The parent in whose context the Preview is created. The
parent value is in the format:
'projects/{project_id}/locations/{location}'.
preview: A Preview resource to be passed as the request body.
requestId: Optional. An optional request ID to identify requests. Specify
a unique request ID so that if you must retry your request, the server
will know to ignore the request if it has already been completed. The
      server will guarantee that for at least 60 minutes after the first
request. For example, consider a situation where you make an initial
request and the request times out. If you make the request again with
      the same request ID, the server can check if the original operation with the
same request ID was received, and if so, will ignore the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is
not supported (00000000-0000-0000-0000-000000000000).
"""
parent = _messages.StringField(1, required=True)
preview = _messages.MessageField('Preview', 2)
requestId = _messages.StringField(3)
class DeleteInput(_messages.Message):
r"""Input parameters for preview of delete operation.
Fields:
deployment: Required. Name of existing deployment to preview its deletion.
Format:
`projects/{project}/locations/{location}/deployments/{deployment}`
"""
deployment = _messages.StringField(1)
class Deployment(_messages.Message):
r"""A Deployment object.
Enums:
ErrorCodeValueValuesEnum: Output only. Code describing any errors that may
have occurred.
StateValueValuesEnum: Output only. Current state of the deployment.
Messages:
LabelsValue: User-defined metadata for the deployment.
Fields:
blueprint: Required. Blueprint to deploy.
configController: Required. Config Controller instance to deploy to.
Format:
`projects/{project}/locations/{location}/krmApiHosts/{instance}`.
createConfigController: Optional. If set, then a Config Controller
instance with a default, well-known name will be created as part of the
deployment, if it does not already exist. Note that Blueprints
Controller does not manage this Config Controller instance and only
creates it.
createTime: Output only. Time the deployment was created.
deleteResults: Output only. Locations of outputs from delete operation.
errorCode: Output only. Code describing any errors that may have occurred.
gitTarget: Optional. If set, then the hydrated blueprint will be uploaded
to the specified Git repository.
labels: User-defined metadata for the deployment.
latestRevision: Output only. Revision that was most recently applied.
Format:
`projects/{project}/locations/{location}/deployments/{deployment}/
revisions/{revision}`
name: Resource name of the deployment. Format:
`projects/{project}/locations/{location}/deployments/{deployment}`
reconcileTimeout: Optional. How long apply attempt should wait for
resource reconciliation on the Config Controller cluster to complete. If
unset, a default value of 5m will be used. A value of 0s indicates that
the Deployment will be ACTIVE as soon as resources are applied
successfully to the cluster and final resource actuation status will
need to be polled on asynchronously.
state: Output only. Current state of the deployment.
stateDetail: Output only. Additional information regarding the current
state.
updateTime: Output only. Time the deployment was last modified.
"""
class ErrorCodeValueValuesEnum(_messages.Enum):
r"""Output only. Code describing any errors that may have occurred.
Values:
ERROR_CODE_UNSPECIFIED: No error code was specified.
REVISION_FAILED: The revision failed (check its error code).
CLUSTER_CREATION_PERMISSION_DENIED: Cluster creation failed due to a
permissions issue.
CLOUD_BUILD_PERMISSION_DENIED: Cloud Build failed due to a permissions
issue.
CLUSTER_CREATION_FAILED: Cluster creation failed for a non-permissions-
related issue.
DELETE_BUILD_API_FAILED: The deletion Cloud Build failed before logs
could be generated.
DELETE_BUILD_RUN_FAILED: The deletion Cloud Build failed after logs
could be generated.
BUCKET_CREATION_PERMISSION_DENIED: A Cloud Storage bucket failed due to
a permissions issue.
BUCKET_CREATION_FAILED: A Cloud Storage bucket failed for a non-
permissions-related issue.
"""
ERROR_CODE_UNSPECIFIED = 0
REVISION_FAILED = 1
CLUSTER_CREATION_PERMISSION_DENIED = 2
CLOUD_BUILD_PERMISSION_DENIED = 3
CLUSTER_CREATION_FAILED = 4
DELETE_BUILD_API_FAILED = 5
DELETE_BUILD_RUN_FAILED = 6
BUCKET_CREATION_PERMISSION_DENIED = 7
BUCKET_CREATION_FAILED = 8
class StateValueValuesEnum(_messages.Enum):
r"""Output only. Current state of the deployment.
Values:
STATE_UNSPECIFIED: The default value. This value is used if the state is
omitted.
CREATING: The deployment is being created.
ACTIVE: The deployment is healthy.
UPDATING: The deployment is being updated.
DELETING: The deployment is being deleted.
FAILED: The deployment has encountered an unexpected error.
SUSPENDED: The deployment is no longer being actively reconciled. This
may be the result of recovering the project after deletion.
"""
STATE_UNSPECIFIED = 0
CREATING = 1
ACTIVE = 2
UPDATING = 3
DELETING = 4
FAILED = 5
SUSPENDED = 6
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""User-defined metadata for the deployment.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
blueprint = _messages.MessageField('Blueprint', 1)
configController = _messages.StringField(2)
createConfigController = _messages.BooleanField(3)
createTime = _messages.StringField(4)
deleteResults = _messages.MessageField('ApplyResults', 5)
errorCode = _messages.EnumField('ErrorCodeValueValuesEnum', 6)
gitTarget = _messages.MessageField('GitTarget', 7)
labels = _messages.MessageField('LabelsValue', 8)
latestRevision = _messages.StringField(9)
name = _messages.StringField(10)
reconcileTimeout = _messages.StringField(11)
state = _messages.EnumField('StateValueValuesEnum', 12)
stateDetail = _messages.StringField(13)
updateTime = _messages.StringField(14)
class DeploymentOperationMetadata(_messages.Message):
r"""Ephemeral metadata content describing the state of a deployment
operation.
Enums:
StepValueValuesEnum: The current step the deployment operation is running.
Fields:
applyResults: Locations of outputs from config application.
pipelineResults: Locations of outputs from kpt pipeline execution.
step: The current step the deployment operation is running.
"""
class StepValueValuesEnum(_messages.Enum):
r"""The current step the deployment operation is running.
Values:
DEPLOYMENT_STEP_UNSPECIFIED: No deployment step was specified.
PREPARING_STORAGE_BUCKET: Checking for existence of a storage bucket and
        creating one in its absence. This can take up to 7 minutes on the
first deployment.
PREPARING_CONFIG_CONTROLLER: Checking for existence of a Config
      Controller instance and creating one in its absence. This can take up
to 20 minutes on the first deployment.
CREATING_REVISION: Creating a revision resource.
RUNNING_PIPELINE: Blueprint is being processed.
RUNNING_APPLY: Blueprint is being applied to Config Controller.
RUNNING_PREVIEW: Blueprint is being previewed with Config Controller.
"""
DEPLOYMENT_STEP_UNSPECIFIED = 0
PREPARING_STORAGE_BUCKET = 1
PREPARING_CONFIG_CONTROLLER = 2
CREATING_REVISION = 3
RUNNING_PIPELINE = 4
RUNNING_APPLY = 5
RUNNING_PREVIEW = 6
applyResults = _messages.MessageField('ApplyResults', 1)
pipelineResults = _messages.MessageField('PipelineResults', 2)
step = _messages.EnumField('StepValueValuesEnum', 3)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for `Empty` is empty JSON object `{}`.
"""
class Expr(_messages.Message):
r"""Represents a textual expression in the Common Expression Language (CEL)
syntax. CEL is a C-like expression language. The syntax and semantics of CEL
are documented at https://github.com/google/cel-spec. Example (Comparison):
title: "Summary size limit" description: "Determines if a summary is less
than 100 chars" expression: "document.summary.size() < 100" Example
(Equality): title: "Requestor is owner" description: "Determines if
requestor is the document owner" expression: "document.owner ==
request.auth.claims.email" Example (Logic): title: "Public documents"
description: "Determine whether the document should be publicly visible"
expression: "document.type != 'private' && document.type != 'internal'"
Example (Data Manipulation): title: "Notification string" description:
"Create a notification string with a timestamp." expression: "'New message
received at ' + string(document.create_time)" The exact variables and
functions that may be referenced within an expression are determined by the
service that evaluates it. See the service documentation for additional
information.
Fields:
description: Optional. Description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.
location: Optional. String indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: Optional. Title for the expression, i.e. a short string describing
its purpose. This can be used e.g. in UIs which allow to enter the
expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class Function(_messages.Message):
r"""A function that can be run to modify blueprint contents.
Messages:
ConfigValue: Optional. KRM resource passed to the function as input. The
entire resource must be no larger than 1024 bytes.
InlineConfigValue: Optional. KRM resource passed to the function as
inlined input. The entire resource must be no larger than 1024 bytes.
Fields:
config: Optional. KRM resource passed to the function as input. The entire
resource must be no larger than 1024 bytes.
gcsConfig: Optional. A Cloud Storage link referencing a KRM yaml file to
use as input to the function. There are no size limitations on this
field. Format: gs://my-bucket/my-directory/my-function-config.yaml
image: Required. Container image to run. Example: `gcr.io/kpt-fn/set-
label`
inlineConfig: Optional. KRM resource passed to the function as inlined
input. The entire resource must be no larger than 1024 bytes.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ConfigValue(_messages.Message):
r"""Optional. KRM resource passed to the function as input. The entire
resource must be no larger than 1024 bytes.
Messages:
AdditionalProperty: An additional property for a ConfigValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ConfigValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class InlineConfigValue(_messages.Message):
r"""Optional. KRM resource passed to the function as inlined input. The
entire resource must be no larger than 1024 bytes.
Messages:
      AdditionalProperty: An additional property for an InlineConfigValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a InlineConfigValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
config = _messages.MessageField('ConfigValue', 1)
gcsConfig = _messages.StringField(2)
image = _messages.StringField(3)
inlineConfig = _messages.MessageField('InlineConfigValue', 4)
class GitSource(_messages.Message):
r"""A set of files in a Git repository.
Fields:
directory: Subdirectory inside the repository. Example: 'staging/my-
package'
ref: Git branch or tag.
repo: Repository URL. Example:
'https://github.com/kubernetes/examples.git'
"""
directory = _messages.StringField(1)
ref = _messages.StringField(2)
repo = _messages.StringField(3)
class GitTarget(_messages.Message):
r"""A Git repository to be used as a deployment target.
Fields:
branch: Git branch.
directory: Subdirectory inside the repository. Example: 'staging/my-
package'
repo: Repository URL. Example:
'https://github.com/kubernetes/examples.git'
"""
branch = _messages.StringField(1)
directory = _messages.StringField(2)
repo = _messages.StringField(3)
class ListDeploymentsResponse(_messages.Message):
r"""A ListDeploymentsResponse object.
Fields:
deployments: List of Deployments.
nextPageToken: A string attribute.
unreachable: Locations that could not be reached.
"""
deployments = _messages.MessageField('Deployment', 1, repeated=True)
nextPageToken = _messages.StringField(2)
unreachable = _messages.StringField(3, repeated=True)
class ListLocationsResponse(_messages.Message):
r"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListRevisionsResponse(_messages.Message):
r"""A response to a 'ListRevisions' call. Contains a list of Revisions.
Fields:
nextPageToken: A token to request the next page of resources from the
'ListRevisions' method. The value of an empty string means that there
are no more resources to return.
revisions: List of Revisions.
unreachable: Locations that could not be reached.
"""
nextPageToken = _messages.StringField(1)
revisions = _messages.MessageField('Revision', 2, repeated=True)
unreachable = _messages.StringField(3, repeated=True)
class Location(_messages.Message):
r"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
displayName: The friendly name for this location, typically a nearby city
name. For example, "Tokyo".
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
locationId = _messages.StringField(3)
metadata = _messages.MessageField('MetadataValue', 4)
name = _messages.StringField(5)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class OperationMetadata(_messages.Message):
r"""Represents the metadata of the long-running operation.
Fields:
apiVersion: Output only. API version used to start the operation.
createTime: Output only. Time the operation was created.
deploymentMetadata: Output only. Metadata about the deployment operation
state.
endTime: Output only. Time the operation finished running.
requestedCancellation: Output only. Identifies whether the user has
requested cancellation of the operation. Operations that have
successfully been cancelled have Operation.error value with a
google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
statusMessage: Output only. Human-readable status of the operation, if
any.
target: Output only. Server-defined resource path for the target of the
operation.
verb: Output only. Name of the verb executed by the operation.
"""
apiVersion = _messages.StringField(1)
createTime = _messages.StringField(2)
deploymentMetadata = _messages.MessageField('DeploymentOperationMetadata', 3)
endTime = _messages.StringField(4)
requestedCancellation = _messages.BooleanField(5)
statusMessage = _messages.StringField(6)
target = _messages.StringField(7)
verb = _messages.StringField(8)
class PipelineResults(_messages.Message):
r"""Locations of outputs from kpt pipeline execution.
Fields:
artifacts: Location of kpt artifacts in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
build: Corresponding Cloud Build run. Format:
`projects/{project}/locations/{location}/builds/{build}`
content: Location of generated manifests in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
logs: Location of Cloud Build logs in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
"""
artifacts = _messages.StringField(1)
build = _messages.StringField(2)
content = _messages.StringField(3)
logs = _messages.StringField(4)
class Policy(_messages.Message):
r"""An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources. A `Policy` is a collection of
`bindings`. A `binding` binds one or more `members`, or principals, to a
single `role`. Principals can be user accounts, service accounts, Google
groups, and domains (such as G Suite). A `role` is a named list of
permissions; each `role` can be an IAM predefined role or a user-created
custom role. For some types of Google Cloud resources, a `binding` can also
specify a `condition`, which is a logical expression that allows access to a
resource only if the expression evaluates to `true`. A condition can add
constraints based on attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies). **JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com", "domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title": "expirable access",
"description": "Does not grant access after Sep 2020", "expression":
"request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: -
user:mike@example.com - group:admins@example.com - domain:google.com -
serviceAccount:my-project-id@appspot.gserviceaccount.com role:
roles/resourcemanager.organizationAdmin - members: - user:eve@example.com
role: roles/resourcemanager.organizationViewer condition: title: expirable
access description: Does not grant access after Sep 2020 expression:
request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features, see the [IAM
documentation](https://cloud.google.com/iam/docs/).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members`, or principals, with a `role`.
Optionally, may specify a `condition` that determines how and when the
`bindings` are applied. Each of the `bindings` must contain at least one
principal. The `bindings` in a `Policy` can refer to up to 1,500
principals; up to 250 of these principals can be Google groups. Each
occurrence of a principal counts towards these limits. For example, if
the `bindings` grant 50 different roles to `user:alice@example.com`, and
not to any other principal, then you can add another 1,450 principals to
the `bindings` in the `Policy`.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. **Important:** If you use IAM Conditions, you must include the
`etag` field whenever you call `setIamPolicy`. If you omit this field,
then IAM allows you to overwrite a version `3` policy with a version `1`
policy, and all of the conditions in the version `3` policy are lost.
version: Specifies the format of the policy. Valid values are `0`, `1`,
and `3`. Requests that specify an invalid value are rejected. Any
operation that affects conditional role bindings must specify version
`3`. This requirement applies to the following operations: * Getting a
policy that includes a conditional role binding * Adding a conditional
role binding to a policy * Changing a conditional role binding in a
policy * Removing any role binding, with or without a condition, from a
policy that includes conditions **Important:** If you use IAM
Conditions, you must include the `etag` field whenever you call
`setIamPolicy`. If you omit this field, then IAM allows you to overwrite
a version `3` policy with a version `1` policy, and all of the
conditions in the version `3` policy are lost. If a policy does not
include any conditions, operations on that policy may specify any valid
version or leave the field unset. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class Preview(_messages.Message):
r"""Preview message contains preview results.
Enums:
ErrorCodeValueValuesEnum: Output only. Code describing any errors that may
have occurred.
StateValueValuesEnum: Output only. Current state of the preview.
Fields:
applyInput: Input parameters for preview of apply operation.
createTime: Output only. Time the preview was created.
deleteInput: Input parameters for preview of delete operation.
errorCode: Output only. Code describing any errors that may have occurred.
name: Output only. Resource name of the preview. Format:
`projects/{project}/locations/{location}/previews/{preview}`
pipelineResults: Output only. Locations of outputs from kpt pipeline
execution.
previewResults: Output only. Locations of outputs from preview operation.
state: Output only. Current state of the preview.
stateDetail: Output only. Additional information regarding the current
state.
"""
class ErrorCodeValueValuesEnum(_messages.Enum):
r"""Output only. Code describing any errors that may have occurred.
Values:
ERROR_CODE_UNSPECIFIED: No error code was specified.
CLUSTER_CREATION_PERMISSION_DENIED: Cluster creation failed due to a
permissions issue.
CLOUD_BUILD_PERMISSION_DENIED: Cloud Build failed due to a permissions
issue.
CLUSTER_CREATION_FAILED: Cluster creation failed for a non-permissions-
related issue.
BUCKET_CREATION_PERMISSION_DENIED: A Cloud Storage bucket failed due to
a permissions issue.
BUCKET_CREATION_FAILED: A Cloud Storage bucket failed for a non-
permissions-related issue.
PIPELINE_BUILD_API_FAILED: The pipeline Cloud Build failed before logs
could be generated.
PIPELINE_BUILD_RUN_FAILED: The pipeline Cloud Build failed after logs
could be generated.
PREVIEW_BUILD_API_FAILED: The preview Cloud Build failed before logs
could be generated.
PREVIEW_BUILD_RUN_FAILED: The preview Cloud Build failed after logs
could be generated.
"""
ERROR_CODE_UNSPECIFIED = 0
CLUSTER_CREATION_PERMISSION_DENIED = 1
CLOUD_BUILD_PERMISSION_DENIED = 2
CLUSTER_CREATION_FAILED = 3
BUCKET_CREATION_PERMISSION_DENIED = 4
BUCKET_CREATION_FAILED = 5
PIPELINE_BUILD_API_FAILED = 6
PIPELINE_BUILD_RUN_FAILED = 7
PREVIEW_BUILD_API_FAILED = 8
PREVIEW_BUILD_RUN_FAILED = 9
class StateValueValuesEnum(_messages.Enum):
r"""Output only. Current state of the preview.
Values:
STATE_UNSPECIFIED: The default value. This value is used if the state is
omitted.
CREATING: The preview is being created.
COMPLETED: The preview is completed.
FAILED: The preview has encountered an error.
"""
STATE_UNSPECIFIED = 0
CREATING = 1
COMPLETED = 2
FAILED = 3
applyInput = _messages.MessageField('ApplyInput', 1)
createTime = _messages.StringField(2)
deleteInput = _messages.MessageField('DeleteInput', 3)
errorCode = _messages.EnumField('ErrorCodeValueValuesEnum', 4)
name = _messages.StringField(5)
pipelineResults = _messages.MessageField('PipelineResults', 6)
previewResults = _messages.MessageField('PreviewResults', 7)
state = _messages.EnumField('StateValueValuesEnum', 8)
stateDetail = _messages.StringField(9)
class PreviewResults(_messages.Message):
r"""Locations of outputs from config preview.
Fields:
artifacts: Location of kpt artifacts in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
build: Name of the corresponding Cloud Build run. Format:
`projects/{project}/locations/{location}/builds/{build}` See https://clo
ud.google.com/build/docs/api/reference/rest/v1/projects.builds/get#query
-parameters for how to get build details.
content: Location of generated preview data in Google Cloud Storage.
Format: `gs://{bucket}/{object}`
logs: Location of logs in Google Cloud Storage. Format:
`gs://{bucket}/{object}`
"""
artifacts = _messages.StringField(1)
build = _messages.StringField(2)
content = _messages.StringField(3)
logs = _messages.StringField(4)
class Revision(_messages.Message):
r"""A child resource of a Deployment generated by a 'CreateDeployment' or
'UpdateDeployment' call. Each Revision contains artifacts pertaining to a
snapshot of a particular Deployment including transformed configurations and
logs.
Enums:
ActionValueValuesEnum: Output only. The type of action that this revision
represents.
ErrorCodeValueValuesEnum: Output only. Code describing any errors that may
have occurred.
StateValueValuesEnum: Output only. Current state of the revision.
Fields:
action: Output only. The type of action that this revision represents.
applyResults: Output only. Locations of outputs from config application.
blueprint: Output only. Blueprint that was deployed.
createTime: Output only. Time the revision was created.
errorCode: Output only. Code describing any errors that may have occurred.
name: Resource name of the revision. Format:
`projects/{project}/locations/{location}/deployments/{deployment}/
revisions/{revision}`
pipelineResults: Output only. Locations of outputs from kpt pipeline
execution.
reconcileTimeout: Optional. How long apply attempt should wait for
resource reconciliation on the Config Controller cluster to complete. If
unset, a default value of 5m will be used. A value of 0s indicates that
the Revision will be APPLIED as soon as resources are applied
successfully to the cluster and final resource actuation status will
need to be polled on asynchronously.
state: Output only. Current state of the revision.
stateDetail: Output only. Additional information regarding the current
state.
updateTime: Output only. Time the revision was last modified.
"""
class ActionValueValuesEnum(_messages.Enum):
r"""Output only. The type of action that this revision represents.
Values:
ACTION_UNSPECIFIED: The default value. This value is used if the action
is omitted.
CREATE: The revision was generated by creating a deployment.
UPDATE: The revision was generated by updating a deployment.
DELETE: The revision was generated by deleting a deployment.
"""
ACTION_UNSPECIFIED = 0
CREATE = 1
UPDATE = 2
DELETE = 3
class ErrorCodeValueValuesEnum(_messages.Enum):
r"""Output only. Code describing any errors that may have occurred.
Values:
ERROR_CODE_UNSPECIFIED: No error code was specified.
BUCKET_CREATION_PERMISSION_DENIED: A Cloud Storage bucket failed due to
a permissions issue. Deprecated in favor of equivalent ErrorCode on
Deployment.
BUCKET_CREATION_FAILED: A Cloud Storage bucket failed for a non-
permissions-related issue. Deprecated in favor of equivalent ErrorCode
on Deployment.
CLOUD_BUILD_PERMISSION_DENIED: Cloud Build failed due to a permissions
issue.
PIPELINE_BUILD_API_FAILED: The pipeline Cloud Build failed before logs
could be generated.
PIPELINE_BUILD_RUN_FAILED: The pipeline Cloud Build failed after logs
could be generated.
APPLY_BUILD_API_FAILED: The apply Cloud Build failed before logs could
be generated.
APPLY_BUILD_RUN_FAILED: The apply Cloud Build failed after logs could be
generated.
"""
ERROR_CODE_UNSPECIFIED = 0
BUCKET_CREATION_PERMISSION_DENIED = 1
BUCKET_CREATION_FAILED = 2
CLOUD_BUILD_PERMISSION_DENIED = 3
PIPELINE_BUILD_API_FAILED = 4
PIPELINE_BUILD_RUN_FAILED = 5
APPLY_BUILD_API_FAILED = 6
APPLY_BUILD_RUN_FAILED = 7
class StateValueValuesEnum(_messages.Enum):
r"""Output only. Current state of the revision.
Values:
STATE_UNSPECIFIED: The default value. This value is used if the state is
omitted.
APPLYING: The revision is being applied.
APPLIED: The revision was applied successfully.
FAILED: The revision could not be applied successfully.
"""
STATE_UNSPECIFIED = 0
APPLYING = 1
APPLIED = 2
FAILED = 3
action = _messages.EnumField('ActionValueValuesEnum', 1)
applyResults = _messages.MessageField('ApplyResults', 2)
blueprint = _messages.MessageField('Blueprint', 3)
createTime = _messages.StringField(4)
errorCode = _messages.EnumField('ErrorCodeValueValuesEnum', 5)
name = _messages.StringField(6)
pipelineResults = _messages.MessageField('PipelineResults', 7)
reconcileTimeout = _messages.StringField(8)
state = _messages.EnumField('StateValueValuesEnum', 9)
stateDetail = _messages.StringField(10)
updateTime = _messages.StringField(11)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
updateMask: OPTIONAL: A FieldMask specifying which fields of the policy to
modify. Only the fields in the mask will be modified. If no mask is
provided, the following default mask is used: `paths: "bindings, etag"`
"""
policy = _messages.MessageField('Policy', 1)
updateMask = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
|
"""
n个数字的无重复排列
使用标志列表判重
使用函数
"""
def perm(i, n):
    """Fill position i with an unused value, then recurse for position i + 1."""
    global count
    for a[i] in range(1, n + 1):
        if flags[a[i]] == 1:  # value already used earlier in this permutation
            continue
        flags[a[i]] = 1       # mark the value as used
        if i == n:            # all positions filled: one complete permutation
            count += 1
            print(a[1:n + 1])
        else:
            perm(i + 1, n)
        flags[a[i]] = 0       # backtrack: release the value
n = int(input("Enter n: "))
count = 0
flags = [0] * (n + 1)   # flags[v] == 1 means value v is already placed
a = [0] * (n + 1)       # a[1..n] holds the current permutation
perm(1, n)
print(f"{count} permutations in total")
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import ffmpeg
import re
from collections import namedtuple
silence_start_re = re.compile(r' silence_start: (?P<start>[0-9]+(\.?[0-9]*))')
silence_end_re = re.compile(r' silence_end: (?P<end>[0-9]+(\.?[0-9]*))')
silence_duration_re = re.compile(r' silence_duration: (?P<duration>[0-9]+(\.?[0-9]*))')
mean_volume_re = re.compile(r' mean_volume: (?P<mean>-?[0-9]+(\.?[0-9]*))')
max_volume_re = re.compile(r' max_volume: (?P<max>-?[0-9]+(\.?[0-9]*))')
# For more info on the silencedetect filter see https://www.ffmpeg.org/ffmpeg-filters.html#silencedetect
DEFAULT_THRESHOLD = '-60dB' # silence threshold in dB
DEFAULT_DURATION = 2 # silence duration in seconds
AudioProcessor = namedtuple('AudioProcessor', ['name', 'with_filter', 'output_processor'])
def with_volumedetect(stream, **kwargs):
"""Adds the ffmpeg volumedetect"""
return ffmpeg.filter(stream, 'volumedetect')
def parse_volume_output(lines):
"""Parses the output of ffmpeg for volume data"""
max_volume = None
mean_volume = None
for line in lines:
max_result = max_volume_re.search(line)
mean_result = mean_volume_re.search(line)
if max_result:
max_volume = float(max_result.group('max'))
elif mean_result:
mean_volume = float(mean_result.group('mean'))
    # Use explicit None checks so legitimate 0.0 dB readings are not discarded.
    if max_volume is not None and mean_volume is not None:
        return mean_volume, max_volume
def with_silencedetect(stream, threshold=DEFAULT_THRESHOLD, duration=DEFAULT_DURATION, **kwargs):
"""Adds the ffmpeg silencedetect filter to detect silence in a stream"""
return ffmpeg.filter(stream, 'silencedetect', n=threshold, d=duration)
def parse_silence_output(lines):
"""Parses the output of ffmpeg for chunks of silence section denoted by a start, end tuples"""
chunk_starts = []
chunk_ends = []
for line in lines:
silence_start = silence_start_re.search(line)
silence_end = silence_end_re.search(line)
if silence_start:
chunk_starts.append(float(silence_start.group('start')))
elif silence_end:
chunk_ends.append(float(silence_end.group('end')))
return list(zip(chunk_starts, chunk_ends))
def execute_ffmpeg(input_file, processors=None, **kwargs):
"""
Run ffmpeg with a set of audio processors to add filters to a call
and process the results into a dict
"""
if processors is None:
processors = [
AudioProcessor('volumedetect', with_volumedetect, parse_volume_output),
AudioProcessor('silencedetect', with_silencedetect, parse_silence_output)
]
stream = ffmpeg.input(input_file)
for with_filter in [ap.with_filter for ap in processors]:
stream = with_filter(stream, **kwargs)
    # ffmpeg-python's run() returns (stdout, stderr) byte strings and raises
    # ffmpeg.Error on a non-zero exit; the filter statistics are printed to stderr.
    try:
        _, err = ffmpeg.output(stream, '-', format='null').run(quiet=True)
    except ffmpeg.Error as error:
        raise RuntimeError(error.stderr.decode('utf-8', errors='replace'))
    output_lines = err.decode('utf-8').splitlines()
return {ap.name: ap.output_processor(output_lines) for ap in processors}
|
import cv2
# Load the image and grab its dimensions.
image = cv2.imread('media/Leaves.jpg')
height, width, channels = image.shape
# Rotate 90 degrees counterclockwise about the image center (scale 1),
# then apply the affine transform.
M = cv2.getRotationMatrix2D((width / 2, height / 2), 90, 1)
dst = cv2.warpAffine(image, M, (width, height))
cv2.imshow('Image', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import functools
from anillo.utils.common import merge_dicts
from urllib.parse import parse_qs
from cgi import parse_header
def wrap_form_params(func):
"""
A middleware that parses the url-encoded body and attach
the result to the request `form_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_query_params` is doing.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/x-www-form-urlencoded":
params = {}
for key, value in parse_qs(request.body.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.form_params = params
return func(request, *args, **kwargs)
return wrapper
def wrap_query_params(func):
"""
A middleware that parses the urlencoded params from the querystring
and attach it to the request `query_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_form_params` is doing.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
params = {}
for key, value in parse_qs(request.query_string.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.query_params = params
return func(request, *args, **kwargs)
return wrapper
|
from flask import Flask
from observatory.instance import SPACE_API
from observatory.lib.cli import BP_CLI
from observatory.rest.charts import BP_REST_CHARTS
from observatory.rest.mapper import BP_REST_MAPPER
from observatory.rest.owners import BP_REST_OWNERS
from observatory.rest.prompt import BP_REST_PROMPT
from observatory.rest.sensor import BP_REST_SENSOR
from observatory.rest.sp_api import BP_REST_SP_API
from observatory.shared import (
errorhandler,
form_drop_mapper,
form_drop_prompt,
form_drop_sensor,
form_drop_space_cam,
form_drop_space_contact_keymasters,
form_drop_space_links,
form_drop_space_membership_plans,
form_drop_space_projects,
form_drop_space_sensors_account_balance,
form_drop_space_sensors_barometer,
form_drop_space_sensors_beverage_supply,
form_drop_space_sensors_door_locked,
form_drop_space_sensors_humidity,
form_drop_space_sensors_network_traffic,
form_drop_space_sensors_power_consumption,
form_drop_space_sensors_radiation_alpha,
form_drop_space_sensors_radiation_beta,
form_drop_space_sensors_radiation_beta_gamma,
form_drop_space_sensors_radiation_gamma,
form_drop_space_sensors_temperature,
form_drop_space_sensors_total_member_count,
form_drop_space_sensors_wind,
form_sort_mapper,
form_sort_prompt,
form_sort_sensor,
script_config_data,
tagline,
)
from observatory.start.environment import ERROR_CODES, MDL_NAME
from observatory.start.extensions import (
BCRYPT,
CSRF_PROTECT,
DB,
LOGIN_MANAGER,
MIGRATE,
REST,
)
from observatory.start.logger import initialize_logging
from observatory.views.main import BLUEPRINT_MAIN
from observatory.views.mgnt import BLUEPRINT_MGNT
from observatory.views.sapi import BLUEPRINT_SAPI
from observatory.views.side import BLUEPRINT_SIDE
from observatory.views.user import BLUEPRINT_USER
def create_app(config_obj):
initialize_logging()
app = Flask(MDL_NAME)
app.config.from_object(config_obj)
register_extensions(app)
register_errorhandlers(app)
register_blueprints(app)
register_template_functions(app)
configure_jinja(app)
return app
def register_extensions(app):
BCRYPT.init_app(app)
CSRF_PROTECT.init_app(app)
DB.init_app(app)
LOGIN_MANAGER.init_app(app)
MIGRATE.init_app(app, DB)
REST.init_app(app)
def register_errorhandlers(app):
for code in ERROR_CODES:
app.errorhandler(code)(errorhandler)
def register_blueprints(app):
app.register_blueprint(BP_CLI)
app.register_blueprint(BLUEPRINT_MAIN)
app.register_blueprint(BLUEPRINT_MGNT)
app.register_blueprint(BLUEPRINT_SAPI)
app.register_blueprint(BLUEPRINT_SIDE)
app.register_blueprint(BLUEPRINT_USER)
app.register_blueprint(BP_REST_CHARTS)
app.register_blueprint(BP_REST_MAPPER)
app.register_blueprint(BP_REST_OWNERS)
app.register_blueprint(BP_REST_PROMPT)
app.register_blueprint(BP_REST_SENSOR)
app.register_blueprint(BP_REST_SP_API)
def register_template_functions(app):
app.jinja_env.globals.update(
{
'space_api': SPACE_API,
**{
func.__name__: func
for func in (
form_drop_mapper,
form_drop_prompt,
form_drop_sensor,
form_drop_space_cam,
form_drop_space_contact_keymasters,
form_drop_space_links,
form_drop_space_membership_plans,
form_drop_space_projects,
form_drop_space_sensors_account_balance,
form_drop_space_sensors_barometer,
form_drop_space_sensors_beverage_supply,
form_drop_space_sensors_door_locked,
form_drop_space_sensors_humidity,
form_drop_space_sensors_network_traffic,
form_drop_space_sensors_power_consumption,
form_drop_space_sensors_radiation_alpha,
form_drop_space_sensors_radiation_beta,
form_drop_space_sensors_radiation_beta_gamma,
form_drop_space_sensors_radiation_gamma,
form_drop_space_sensors_temperature,
form_drop_space_sensors_total_member_count,
form_drop_space_sensors_wind,
form_sort_mapper,
form_sort_prompt,
form_sort_sensor,
script_config_data,
tagline,
)
},
}
)
def configure_jinja(app):
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
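# --- Usage sketch (added; not part of the original module) ---
# A minimal, hedged example of building and running the app for local
# development. The config class below is a hypothetical stand-in; the real
# project defines its own config objects elsewhere.
if __name__ == '__main__':
    class DevConfig:
        DEBUG = True
        SECRET_KEY = 'change-me'                      # placeholder value
        SQLALCHEMY_DATABASE_URI = 'sqlite:///dev.db'  # placeholder value
        SQLALCHEMY_TRACK_MODIFICATIONS = False

    app = create_app(DevConfig)
    app.run(host='127.0.0.1', port=5000)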
|
"""GameModel exceptions."""
class GameModelError(Exception):
    def __init__(self, message: str):
        # Pass the message to Exception so str(exc) and tracebacks show it.
        super().__init__(message)
        self.message = message
class InvalidPlayer(GameModelError):
pass
class InvalidSpace(GameModelError):
pass
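# --- Usage sketch (added; not part of the original module) ---
# Hedged example: callers raise the specific subclasses and can catch the
# shared base class. The validation helper below is hypothetical.
def _require_player_name(name: str) -> None:
    if not name:
        raise InvalidPlayer("player name must not be empty")

if __name__ == "__main__":
    try:
        _require_player_name("")
    except GameModelError as err:
        print(err.message)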
|
from yaetos.etl_utils import ETL_Base, Commandliner, Cred_Ops_Dispatcher
from yaetos.db_utils import pdf_to_sdf
from libs.python_db_connectors.query_oracle import query as query_oracle
from sqlalchemy import types
class Job(ETL_Base):
OUTPUT_TYPES = {
'session_id': types.VARCHAR(16),
'count_events': types.INT(),
}
def transform(self):
cred_profiles = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage)
query_str = """
SELECT session_id, count_events
FROM test_ex5_pyspark_job
where rownum < 200
"""
df = query_oracle(query_str, db=self.db_creds, creds_or_file=cred_profiles)
# TODO: Check to get OUTPUT_TYPES from query_oracle, so not required here.
sdf = pdf_to_sdf(df, self.OUTPUT_TYPES, self.sc, self.sc_sql)
return sdf
if __name__ == "__main__":
args = {'job_param_file': 'conf/jobs_metadata.yml'}
Commandliner(Job, **args)
|
numero = int(input('Enter an integer to see its multiplication table: '))
print('=' * 12)
# range() already drives the counter, so no manual initialisation or
# increment is needed.
for contador in range(1, 11):
    print('{} x {} = {}'.format(numero, contador, numero * contador))
print('=' * 12)
|
import sqlite3
def _seconds_to_str(seconds) -> str:
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return '%02d:%02d:%02d' % (h, m, s)
def check_if_stop_exists(db_filepath, stop_id) -> bool:
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''select id
from stop
where stop.id =:id;''', {"id" : stop_id})
return len(cursor.fetchall()) > 0
def get_stop_name(db_filepath, stop_id) -> str:
'''if there is no match, returns an empty str'''
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''select name
from stop
where stop.id =:id;''', {"id" : stop_id})
result = cursor.fetchone()
if result:
return result[0]
else:
return ''
def get_route_ids_passing_through_stop(db_filepath, stop_id) -> set:
'''if there is no match, returns an empty set'''
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''select route_id
from stop_route
where stop_route.stop_id =:id;''', {"id" : stop_id})
return set([tuple_[0] for tuple_ in cursor.fetchall()])
def get_route_short_name(db_filepath, route_id) -> str:
'''If there is no match, returns an empty str'''
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''SELECT short_name
FROM Route
WHERE Route.id =:id;''', {'id' : route_id})
result = cursor.fetchone()
if result:
return result[0]
else:
return ''
def get_route_long_name(db_filepath, route_id) -> str:
'''If there is no match, returns an empty str'''
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''SELECT long_name
FROM Route
WHERE Route.id =:id;''', {'id' : route_id})
result = cursor.fetchone()
if result:
return result[0]
else:
return ''
def get_latest_service_for_stop_on_trip(db_filepath, route_id,
stop_id) -> str:
'''If there is no match, returns an empty str'''
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''SELECT max(Stop_Trip.departure_time_in_sec)
FROM Stop_Trip, Trip
WHERE Stop_Trip.stop_id = :in_stop_id
AND Stop_Trip.trip_id = Trip.id
AND Trip.route_id = :in_trip_id
;''', {'in_stop_id' : stop_id,
'in_trip_id' : route_id})
result = cursor.fetchone()
if result[0] is None:
return ''
else:
return _seconds_to_str(result[0])
def get_earliest_service_for_stop_on_trip(db_filepath, route_id,
stop_id) -> str:
'''If there is no match, returns an empty str'''
db_connection = sqlite3.connect(db_filepath)
cursor = db_connection.cursor()
cursor.execute('''SELECT min(Stop_Trip.departure_time_in_sec)
FROM Stop_Trip, Trip
WHERE Stop_Trip.stop_id = :in_stop_id
AND Stop_Trip.trip_id = Trip.id
AND Trip.route_id = :in_trip_id
;''', {'in_stop_id' : stop_id,
'in_trip_id' : route_id})
result = cursor.fetchone()
if result[0] is None:
return ''
else:
return _seconds_to_str(result[0])
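# --- Usage sketch (added; not part of the original module) ---
# Hedged example of chaining the helpers above. The database path and stop id
# are hypothetical placeholders; the schema (Stop, Route, Stop_Route, Trip,
# Stop_Trip tables) is assumed to match the queries in this module.
if __name__ == '__main__':
    db_path = 'transit.sqlite3'   # placeholder path
    stop_id = '1234'              # placeholder id
    if check_if_stop_exists(db_path, stop_id):
        print('Stop:', get_stop_name(db_path, stop_id))
        for route_id in get_route_ids_passing_through_stop(db_path, stop_id):
            print(get_route_short_name(db_path, route_id),
                  get_route_long_name(db_path, route_id),
                  'first:', get_earliest_service_for_stop_on_trip(db_path, route_id, stop_id),
                  'last:', get_latest_service_for_stop_on_trip(db_path, route_id, stop_id))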
|
import os
import cv2
import numpy as np
from PIL import Image, ImageFont, ImageDraw, ImageStat
from torchvision import transforms
def get_rows_cols_no_size(no, width, height):
answers = {}
for x in range(100):
for y in range(100):
if x * y == no:
answers[abs(1 - abs(((x * height) / (y * width))))] = [x, y]
return answers[sorted(answers.keys())[0]][1], answers[sorted(answers.keys())[0]][0]
def get_rows_cols_with_size(no, width, height, act_size):
act_h, act_w = act_size[::-1]
ratio = act_h / act_w
answers = {}
for x in range(100):
for y in range(100):
if x * y == no:
answers[abs(((x * height) / (y * width)) - ratio)] = [x, y]
return answers[sorted(answers.keys())[0]][0], answers[sorted(answers.keys())[0]][1]
def get_rows_cols(no, width, height, act_size):
if act_size is None:
return get_rows_cols_no_size(no, width, height)
else:
return get_rows_cols_with_size(no, width, height, act_size)
class ResizeMe(object):
    """Resize an image to `desired_size` (width, height), preserving the
    aspect ratio and padding the remainder with black (letterboxing)."""
    def __init__(self, desired_size):
        self.desired_size = desired_size
    def __call__(self, img):
        img = np.array(img).astype(np.uint8)
        desired_ratio = self.desired_size[1] / self.desired_size[0]
        actual_ratio = img.shape[0] / img.shape[1]
        actual_ratio1 = img.shape[1] / img.shape[0]
if desired_ratio < actual_ratio:
img = cv2.resize(img, (int(self.desired_size[1] * actual_ratio1), self.desired_size[1]), None,
interpolation=cv2.INTER_AREA)
elif desired_ratio > actual_ratio:
img = cv2.resize(img, (self.desired_size[0], int(self.desired_size[0] * actual_ratio)), None,
interpolation=cv2.INTER_AREA)
else:
img = cv2.resize(img, (self.desired_size[0], self.desired_size[1]), None, interpolation=cv2.INTER_AREA)
h, w, _ = img.shape
new_img = np.zeros((self.desired_size[1], self.desired_size[0], 3))
hh, ww, _ = new_img.shape
yoff = int((hh - h) / 2)
xoff = int((ww - w) / 2)
new_img[yoff:yoff + h, xoff:xoff + w, :] = img
return Image.fromarray(new_img.astype(np.uint8))
def normalize_output(img):
img = img - img.min()
img = img / img.max()
return img
def convert(seconds):
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
def weight_images(img, img1, fades=4):
for x in np.linspace(0, 1, fades + 1):
yield cv2.addWeighted(np.array(img), 1 - x, np.array(img1), x, 0)
def get_bar(current, maxx, bar_length=30, bar_load="=", bar_blank="-"):
perc = current / maxx
bar = int(round(bar_length * perc, 0))
blank = int(round(bar_length - (bar_length * perc), 0))
return "[" + bar_load * bar + bar_blank * blank + "]" + f" {round(current / maxx * 100, 2)} % "
def pad_arr(img, size):
    """Pad an array image on all sides by `size` * width pixels."""
    imgchange = Image.fromarray(img)
    pad = int(imgchange.size[0] * size)
    if pad <= 0:
        pad = 1
    # PIL's Image.size is always a (width, height) pair, so check the number
    # of channels on the incoming array instead.
    if img.ndim == 3:
        return np.array(transforms.functional.pad(img=imgchange, padding=pad, fill=(0, 0, 0)))
    else:
        return np.array(transforms.functional.pad(img=imgchange, padding=pad, fill=0))
def intensity_sort(image_layer):
    # Used to sort the mappings by pixel intensity.
    # TODO: check this - the sorting could be better.
    # Several statistics were tried (mean/median/mode of the blurred map and
    # histogram-based measures); blurred brightness plus the median below is
    # the ordering that looks best to the eye so far.
    pixel_mean = {}
    for i in range(image_layer.shape[2]):
        img = image_layer[:, :, i]
        blured = blur(img)
        mean = brightness(blured)
        median = np.median(blured.ravel())
        pixel_mean[i] = mean + median
    pixel_mean = {k: v for k, v in sorted(pixel_mean.items(), key=lambda item: item[1], reverse=True)}
    return image_layer[:, :, list(pixel_mean.keys())]
def brightness(im_file):
im = Image.fromarray(im_file).convert('L')
stat = ImageStat.Stat(im)
return stat.mean[0]
def blur(img):
for x in range(2):
img = cv2.blur(img, (5,5))
return img
def colourize_image(img, colour_type=0):
if colour_type == 0:
base = np.zeros((img.shape[0], img.shape[1], 3)).astype(np.uint8)
base[:, :, 0] = img
base[:, :, 1] = img
base[:, :, 2] = img
return base
else:
return cv2.applyColorMap(img, colour_type, None)
def draw_text(img, text, subtext=None):
## used to draw text onto image
img = Image.fromarray(img)
# set font size relative to output size
size = int(img.size[0] / 25)
smaller_size = int(size*.8)
# draw text using PIL
    # check for a usable font location
    if os.name == "nt":
        font_loc = "C:/Windows/Fonts/Arial.ttf"
    else:
        # Changed this because some people had issues locating the fonts.
        # Possibly still an issue on SageMaker, where there is no easy way to
        # inspect the file system; the last entry is a possible fix and is
        # used here as the fallback if none of the paths exist.
        font_locs = ["/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
                     "/usr/share/fonts/truetype/noto/NotoSans-Regular.ttf",
                     "/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf",
                     "/usr/share/fonts/truetype/humor-sans/Humor-Sans.ttf",
                     "/usr/share/fonts/dejavu/DejaVuSans.ttf"]
        font_loc = font_locs[-1]
        for candidate in font_locs:
            if os.path.isfile(candidate):
                font_loc = candidate
    font = ImageFont.truetype(font_loc, size)
    smaller_font = ImageFont.truetype(font_loc, smaller_size)
# draw with stroke
draw = ImageDraw.Draw(img)
init_w = int(img.size[0] * 0.01)
init_h = int(img.size[1] * 0.01)
text_col = (255, 255, 255)
fill_col = (0, 0, 0)
    draw.text((init_w, init_h), text, text_col, font=font, stroke_width=2, stroke_fill=fill_col)
if subtext is not None:
size_of_sub = font.getsize(text)[1]
sub_h = init_h + size_of_sub + img.size[0] * .005
draw.text((init_w, sub_h),
subtext,
text_col,
font=smaller_font,
stroke_width=2,
stroke_fill=fill_col)
return np.array(img)
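# --- Usage sketch (added; not part of the original module) ---
# Hedged example combining a few of the helpers above. The input path is a
# hypothetical placeholder, and draw_text assumes one of the listed system
# fonts is installed.
if __name__ == '__main__':
    frame = cv2.imread('example.jpg')                        # placeholder path
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized = ResizeMe((640, 360))(Image.fromarray(frame))   # letterbox to 640x360
    labelled = draw_text(np.array(resized), 'demo frame')
    print(get_bar(25, 100))                                  # prints a simple text progress bar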
|
from flask import request, render_template, session
class SSDP_HTML():
endpoints = ["/ssdp", "/ssdp.html"]
endpoint_name = "page_ssdp_html"
endpoint_access_level = 1
endpoint_category = "tool_pages"
pretty_name = "SSDP"
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
return render_template('ssdp.html', request=request, session=session, fhdhr=self.fhdhr)
|