import numpy as np
import torch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = -1
self.sum = 0
self.count = 0
def update(self, val, n=1):
if val is not None:
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MoveAverageMeter(object):
def __init__(self, cls_number, dim, old=0.9, new=0.1):
self.old = old
self.new = new
self.cls = cls_number
self.dim = dim
self.reset()
def reset(self):
self.avg = None
def update(self, val, gt_label):
if self.avg is None:
self.avg = torch.zeros(self.cls, self.dim).type_as(val)
cls_ids = torch.unique(gt_label)
for cls_i in cls_ids:
inds = gt_label == cls_i
self.avg[cls_i, :] = torch.mean(val[inds, cls_i, :], dim=0)
else:
cls_ids = torch.unique(gt_label)
for cls_i in cls_ids:
inds = gt_label == cls_i
self.avg[cls_i, :] = self.old * self.avg[cls_i, :] + self.new * torch.mean(val[inds, cls_i, :], dim=0)
class AveragePrecisionMetric(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self.reset()
def reset(self):
self.num_pos = np.zeros(self.num_classes)
self.all_cls = []
def update(self, labels, preds):
pred_scores = preds.asnumpy()
label_vecs = labels.asnumpy()
# pred_probs = np.where(pred_scores > 0, 1 / (1 + np.exp(-pred_scores)),
# np.exp(pred_scores) / (1 + np.exp(pred_scores))) # more numerical stable
scores_tflag = np.stack([pred_scores, label_vecs], axis=-1)
self.all_cls.append(scores_tflag)
self.num_pos += np.sum(label_vecs, axis=0)
def get(self):
ap = np.zeros(self.num_classes)
all_cls = np.concatenate(self.all_cls, axis=0)
for c in range(self.num_classes):
all_cls_c = all_cls[:, c, :]
arg_sort = np.argsort(all_cls_c[:, 0])[::-1]
all_cls_c = all_cls_c[arg_sort]
num_tp = np.cumsum(all_cls_c[:, 1])
num_fp = np.cumsum(1 - all_cls_c[:, 1])
rec = num_tp / float(self.num_pos[c])
prec = num_tp / np.maximum(num_tp + num_fp, np.finfo(np.float64).eps)
ap[c] = voc_ap(rec, prec)
return ap.mean()
def ious(pred, gt):
pred = pred.astype(float)
gt = gt.astype(float)
numObj = len(gt)
gt = np.tile(gt, [len(pred), 1])
pred = np.repeat(pred, numObj, axis=0)
bi = np.minimum(pred[:, 2:], gt[:, 2:]) - np.maximum(pred[:, :2], gt[:, :2]) + 1
area_bi = np.prod(bi.clip(0), axis=1)
area_bu = (gt[:, 2] - gt[:, 0] + 1) * (gt[:, 3] - gt[:, 1] + 1) + (pred[:, 2] - pred[:, 0] + 1) * (pred[:, 3] - pred[:, 1] + 1) - area_bi
return area_bi / area_bu
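# Hedged worked example (illustrative boxes, not from the original code): with the
# inclusive [x1, y1, x2, y2] convention used above, a 10x10 box compared with itself
# gives IoU 1.0 and disjoint boxes give 0.0:
#   ious(np.array([[0, 0, 9, 9]]), np.array([[0, 0, 9, 9]]))       # -> array([1.])
#   ious(np.array([[0, 0, 9, 9]]), np.array([[20, 20, 29, 29]]))   # -> array([0.])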
def corloc(pred_boxes, ground_truth):
class_corloc = []
gt_bboxes = ground_truth['gt_bboxes']
for c, cls in enumerate(ground_truth['class_names']):
cls_pred_boxes = pred_boxes[pred_boxes[:, 1] == c, :]
cls_gt_bboxes = gt_bboxes[gt_bboxes[:, 1] == c, :]
cls_inds = (ground_truth['gt_labels'][:, c] == 1).nonzero()
cor = 0
for cidx in cls_inds[0]:
pred = cls_pred_boxes[cls_pred_boxes[:, 0] == cidx, 2:6]
if len(pred) > 0:
gt = cls_gt_bboxes[cls_gt_bboxes[:, 0] == cidx, 2:]
if max(ious(pred, gt)) >= 0.5:
cor += 1
class_corloc.append(float(cor)/len(cls_inds[0]))
return sum(class_corloc)/len(class_corloc)
def voc_ap(rec, prec, use_07_metric=False):
"""
average precision calculations
[precision integrated to recall]
:param rec: recall
:param prec: precision
:param use_07_metric: 2007 metric is 11-recall-point based AP
:return: average precision
"""
if use_07_metric:
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
else:
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
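# Hedged usage sketch (not part of the original module): AveragePrecisionMetric.update()
# expects MXNet-style arrays exposing .asnumpy(), so a tiny stand-in wrapper is used here;
# the scores and labels below are made up purely for illustration.
if __name__ == "__main__":
    class _FakeNDArray(object):
        """Minimal stand-in for an MXNet NDArray, providing only asnumpy()."""
        def __init__(self, data):
            self._data = np.asarray(data, dtype=np.float64)
        def asnumpy(self):
            return self._data
    metric = AveragePrecisionMetric(num_classes=2)
    scores = _FakeNDArray([[0.9, 0.1], [0.2, 0.8], [0.7, 0.6]])  # per-class prediction scores
    labels = _FakeNDArray([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # multi-hot ground truth
    metric.update(labels, scores)
    print("mAP:", metric.get())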
|
from leapp.libraries.actor import iscmodel
from leapp.libraries.common import isccfg
from leapp.models import BindFacts
def model_paths(issues_model):
paths = list()
for m in issues_model:
paths.append(m.path)
return paths
def get_facts(cfg):
facts = iscmodel.get_facts(cfg)
assert isinstance(facts, BindFacts)
return facts
def test_simple():
mockcfg = isccfg.MockConfig("""
options {
listen-on port 53 { 127.0.0.1; };
listen-on-v6 port 53 { ::1; };
directory "/var/named";
allow-query { localhost; };
recursion yes;
dnssec-validation yes;
};
zone "." IN {
type hint;
file "named.ca";
};
""", '/etc/named.conf')
facts = get_facts(mockcfg)
assert facts.dnssec_lookaside is None
def test_dnssec_lookaside():
mockcfg = isccfg.MockConfig("""
options {
listen-on port 53 { 127.0.0.1; };
listen-on-v6 port 53 { ::1; };
directory "/var/named";
allow-query { localhost; };
recursion yes;
dnssec-validation yes;
dnssec-lookaside auto;
};
zone "." IN {
type hint;
file "named.ca";
};
""", '/etc/named.conf')
facts = get_facts(mockcfg)
assert '/etc/named.conf' in model_paths(facts.dnssec_lookaside)
def test_listen_on_v6():
present = isccfg.MockConfig("""
options {
listen-on { any; };
listen-on-v6 { any; };
};
""", '/etc/named.conf')
missing = isccfg.MockConfig("""
options {
listen-on { any; };
#listen-on-v6 { any; };
};
""", '/etc/named.conf')
facts = get_facts(present)
assert not facts.listen_on_v6_missing
facts = get_facts(missing)
assert facts.listen_on_v6_missing
|
import numpy as np
import pickle
from ..util import BaseCase
from pygsti.objects import FullGaugeGroupElement, Basis, ExplicitOpModel, TPPOVM, UnconstrainedPOVM
import pygsti.construction as pc
import pygsti.objects.spamvec as sv
class SpamvecUtilTester(BaseCase):
def test_convert_to_vector_raises_on_bad_input(self):
bad_vecs = [
'akdjsfaksdf',
[[], [1, 2]],
[[[]], [[1, 2]]]
]
for bad_vec in bad_vecs:
with self.assertRaises(ValueError):
sv.SPAMVec.convert_to_vector(bad_vec)
with self.assertRaises(ValueError):
sv.SPAMVec.convert_to_vector(0.0) # something with no len()
def test_base_spamvec(self):
raw = sv.SPAMVec(4, "densitymx", "prep")
T = FullGaugeGroupElement(
np.array([[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], 'd'))
with self.assertRaises(NotImplementedError):
raw.todense()
with self.assertRaises(NotImplementedError):
raw.transform(T, "prep")
with self.assertRaises(NotImplementedError):
raw.depolarize(0.01)
class SpamvecBase(object):
def setUp(self):
self.vec = self.build_vec()
ExplicitOpModel._strict = False
def test_num_params(self):
self.assertEqual(self.vec.num_params(), self.n_params)
def test_copy(self):
vec_copy = self.vec.copy()
self.assertArraysAlmostEqual(vec_copy, self.vec)
self.assertEqual(type(vec_copy), type(self.vec))
def test_get_dimension(self):
self.assertEqual(self.vec.get_dimension(), 4)
def test_set_value_raises_on_bad_size(self):
with self.assertRaises(ValueError):
self.vec.set_value(np.zeros((1, 1), 'd')) # bad size
def test_vector_conversion(self):
v = self.vec.to_vector()
self.vec.from_vector(v)
deriv = self.vec.deriv_wrt_params()
# TODO assert correctness
def test_element_accessors(self):
a = self.vec[:]
b = self.vec[0]
#with self.assertRaises(ValueError):
# self.vec.shape = (2,2) #something that would affect the shape??
self.vec_as_str = str(self.vec)
a1 = self.vec[:] # invoke getslice method
# TODO assert correctness
def test_pickle(self):
pklstr = pickle.dumps(self.vec)
vec_pickle = pickle.loads(pklstr)
self.assertArraysAlmostEqual(vec_pickle, self.vec)
self.assertEqual(type(vec_pickle), type(self.vec))
def test_arithmetic(self):
result = self.vec + self.vec
self.assertEqual(type(result), np.ndarray)
result = self.vec + (-self.vec)
self.assertEqual(type(result), np.ndarray)
result = self.vec - self.vec
self.assertEqual(type(result), np.ndarray)
result = self.vec - abs(self.vec)
self.assertEqual(type(result), np.ndarray)
result = 2 * self.vec
self.assertEqual(type(result), np.ndarray)
result = self.vec * 2
self.assertEqual(type(result), np.ndarray)
result = 2 / self.vec
self.assertEqual(type(result), np.ndarray)
result = self.vec / 2
self.assertEqual(type(result), np.ndarray)
result = self.vec // 2
self.assertEqual(type(result), np.ndarray)
result = self.vec**2
self.assertEqual(type(result), np.ndarray)
result = self.vec.transpose()
self.assertEqual(type(result), np.ndarray)
V = np.ones((4, 1), 'd')
result = self.vec + V
self.assertEqual(type(result), np.ndarray)
result = self.vec - V
self.assertEqual(type(result), np.ndarray)
result = V + self.vec
self.assertEqual(type(result), np.ndarray)
result = V - self.vec
self.assertEqual(type(result), np.ndarray)
def test_hessian(self):
self.assertFalse(self.vec.has_nonzero_hessian())
def test_frobeniusdist2(self):
self.vec.frobeniusdist2(self.vec, "prep")
self.vec.frobeniusdist2(self.vec, "effect")
# TODO assert correctness
def test_frobeniusdist2_raises_on_bad_type(self):
with self.assertRaises(ValueError):
self.vec.frobeniusdist2(self.vec, "foobar")
class MutableSpamvecBase(SpamvecBase):
def test_set_value(self):
v = np.asarray(self.vec)
self.vec.set_value(v)
# TODO assert correctness
def test_transform(self):
S = FullGaugeGroupElement(np.identity(4, 'd'))
self.vec.transform(S, 'prep')
self.vec.transform(S, 'effect')
# TODO assert correctness
def test_transform_raises_on_bad_type(self):
S = FullGaugeGroupElement(np.identity(4, 'd'))
with self.assertRaises(ValueError):
self.vec.transform(S, 'foobar')
def test_depolarize(self):
self.vec.depolarize(0.9)
self.vec.depolarize([0.9, 0.8, 0.7])
# TODO assert correctness
class ImmutableSpamvecBase(SpamvecBase):
def test_raises_on_set_value(self):
v = np.asarray(self.vec)
with self.assertRaises(ValueError):
self.vec.set_value(v)
def test_raises_on_transform(self):
S = FullGaugeGroupElement(np.identity(4, 'd'))
with self.assertRaises(ValueError):
self.vec.transform(S, 'prep')
def test_raises_on_depolarize(self):
with self.assertRaises(ValueError):
self.vec.depolarize(0.9)
class FullSpamvecTester(MutableSpamvecBase, BaseCase):
n_params = 4
@staticmethod
def build_vec():
return sv.FullSPAMVec([1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)])
def test_raises_on_bad_dimension_2(self):
with self.assertRaises(ValueError):
sv.FullSPAMVec([[1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)], [0, 0, 0, 0]])
def test_convert(self):
basis = Basis.cast("pp", 4)
conv = sv.convert(self.vec, "full", basis)
# TODO assert correctness
def test_raises_on_invalid_conversion_type(self):
basis = Basis.cast("pp", 4)
with self.assertRaises(ValueError):
sv.convert(self.vec, "foobar", basis)
class TPSpamvecTester(MutableSpamvecBase, BaseCase):
n_params = 3
@staticmethod
def build_vec():
return sv.TPSPAMVec([1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)])
def test_raises_on_bad_initial_element(self):
with self.assertRaises(ValueError):
sv.TPSPAMVec([1.0, 0, 0, 0])
# incorrect initial element for TP!
with self.assertRaises(ValueError):
self.vec.set_value([1.0, 0, 0, 0])
# incorrect initial element for TP!
def test_convert(self):
basis = Basis.cast("pp", 4)
conv = sv.convert(self.vec, "TP", basis)
# TODO assert correctness
class CPTPSpamvecTester(MutableSpamvecBase, BaseCase):
n_params = 4
@staticmethod
def build_vec():
v_tp = np.zeros((4, 1), 'd')
v_tp[0] = 1.0 / np.sqrt(2)
v_tp[3] = 1.0 / np.sqrt(2) - 0.05
return sv.CPTPSPAMVec(v_tp, "pp")
def test_hessian(self):
self.skipTest("Hessian computation isn't implemented for CPTPSPAMVec; remove this skip when it becomes a priority")
self.vec.hessian_wrt_params()
self.vec.hessian_wrt_params([0])
self.vec.hessian_wrt_params([0], [0])
# TODO assert correctness
class StaticSpamvecTester(ImmutableSpamvecBase, BaseCase):
n_params = 0
v_tp = [1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)]
@staticmethod
def build_vec():
return sv.StaticSPAMVec(StaticSpamvecTester.v_tp)
def test_convert(self):
basis = Basis.cast("pp", 4)
conv = sv.convert(self.vec, "static", basis)
# TODO assert correctness
def test_optimize(self):
s = sv.FullSPAMVec(StaticSpamvecTester.v_tp)
sv.optimize_spamvec(self.vec, s)
# TODO assert correctness
class POVMSpamvecBase(ImmutableSpamvecBase):
def test_vector_conversion(self):
with self.assertRaises(ValueError):
self.vec.to_vector()
class ComplementSpamvecTester(POVMSpamvecBase, BaseCase):
n_params = 4
@staticmethod
def build_vec():
v = np.ones((4, 1), 'd')
v_id = np.zeros((4, 1), 'd')
v_id[0] = 1.0 / np.sqrt(2)
tppovm = TPPOVM([('0', sv.FullSPAMVec(v, typ="effect")),
('1', sv.FullSPAMVec(v_id - v, typ="effect"))])
return tppovm['1'] # complement POVM
def test_vector_conversion(self):
with self.assertRaises(ValueError):
self.vec.to_vector()
class TensorProdSpamvecBase(ImmutableSpamvecBase):
def test_arithmetic(self):
with self.assertRaises(TypeError):
self.vec + self.vec
def test_copy(self):
vec_copy = self.vec.copy()
self.assertArraysAlmostEqual(vec_copy.todense(), self.vec.todense())
self.assertEqual(type(vec_copy), type(self.vec))
def test_element_accessors(self):
with self.assertRaises(TypeError):
self.vec[:]
def test_pickle(self):
pklstr = pickle.dumps(self.vec)
vec_pickle = pickle.loads(pklstr)
self.assertArraysAlmostEqual(vec_pickle.todense(), self.vec.todense())
self.assertEqual(type(vec_pickle), type(self.vec))
class TensorProdPrepSpamvecTester(TensorProdSpamvecBase, BaseCase):
n_params = 4
@staticmethod
def build_vec():
v = np.ones((2, 1), 'd')
return sv.TensorProdSPAMVec("prep", [sv.FullSPAMVec(v),
sv.FullSPAMVec(v)])
class TensorProdEffectSpamvecTester(TensorProdSpamvecBase, POVMSpamvecBase, BaseCase):
n_params = 4
@staticmethod
def build_vec():
v = np.ones((4, 1), 'd')
povm = UnconstrainedPOVM([('0', sv.FullSPAMVec(v,typ="effect"))])
return sv.TensorProdSPAMVec("effect", [povm], ['0'])
|
def divide(l, direction):
if direction in ("F", "L"):
return l[: (len(l) // 2)]
elif direction in ("B", "R"):
return l[(len(l) // 2) :]
return None
def seat(code):
row = range(128)
i = 0
while i < 7:
row = divide(row, code[i])
i += 1
column = range(8)
j = 0
while j < 3:
column = divide(column, code[7 + j])
j += 1
return (int(row[0]) * 8) + int(column[0])
def solve(in_file):
with open(in_file) as f:
codes = f.read().splitlines()
ids = [seat(code) for code in codes]
return (set(range(min(ids), max(ids))) - set(ids)).pop()
print(solve("input.txt"))
# print(seat('FBFBBFFRLR')) # want 357
# print(seat('BFFFBBFRRR')) # want 567
# print(seat('FFFBBBFRRR')) # want 119
# print(seat('BBFFBBFRLL')) # want 820
# print(divide(range(128), 'F'))
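# Hedged aside (not part of the original solution): the same seat id can be computed by
# reading the whole code as a 10-bit binary number, since F/L select the lower half (0)
# and B/R the upper half (1); this is just an equivalent restatement of divide()/seat().
def seat_binary(code):
    bits = code.translate(str.maketrans("FBLR", "0101"))
    return int(bits, 2)
assert seat_binary('FBFBBFFRLR') == 357
assert seat_binary('BBFFBBFRLL') == 820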
|
import numpy as np
# Too lazy to copy Q3 ans, so just predefine primes
primes = np.array([2,3,5,7,11,13,17,19])
buckets = np.zeros_like(primes)
def findFactors(x):
factors = []
for i in range(len(primes)):
p = primes[i]
while x % p == 0:
x //= p
factors.append(i)
return factors
for i in range(2,21):
factors = findFactors(i)
print(i, ":", [primes[x] for x in factors])
mybuckets = np.zeros_like(buckets)
for j in factors:
mybuckets[j] += 1
for j in range(len(mybuckets)):
if buckets[j] < mybuckets[j]:
buckets[j] = mybuckets[j]
ans = 1
for i in range(len(primes)):
ans *= primes[i] ** buckets[i]
print(ans)
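# Hedged cross-check (not part of the original script): the answer -- the smallest number
# evenly divisible by every integer from 1 to 20 -- can also be computed with a gcd-based
# lcm, which confirms the prime-exponent buckets above.
from functools import reduce
from math import gcd
assert ans == reduce(lambda a, b: a * b // gcd(a, b), range(2, 21))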
|
#!/usr/bin/env python
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from optparse import OptionParser
import os
import six
import sys
import time
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient import client
from novaclient.exceptions import ClientException
# In Python 3 SafeConfigParser was renamed to ConfigParser, and duplicate
# options raise an error by default (strict=True). For nova it is valid to
# have duplicate option lines, e.g. passthrough_whitelist, which leads to
# issues reading the nova.conf
# https://bugs.launchpad.net/tripleo/+bug/1827775
if six.PY3:
from six.moves.configparser import ConfigParser
config = ConfigParser(strict=False)
else:
from six.moves.configparser import SafeConfigParser
config = SafeConfigParser()
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
logging.basicConfig(stream=sys.stdout, level=loglevel)
LOG = logging.getLogger('nova_wait_for_api_service')
iterations = 60
timeout = 10
nova_cfg = '/etc/nova/nova.conf'
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-k', '--insecure',
action="store_false",
dest='insecure',
default=True,
help='Allow insecure connection when using SSL')
(options, args) = parser.parse_args()
LOG.debug('Running with parameter insecure = %s',
options.insecure)
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
sys.exit(1)
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=config.get('neutron',
'auth_url'),
username=config.get('neutron',
'username'),
password=config.get('neutron',
'password'),
project_name=config.get('neutron',
'project_name'),
project_domain_name=config.get('neutron',
'project_domain_name'),
user_domain_name=config.get('neutron',
'user_domain_name'))
sess = session.Session(auth=auth, verify=options.insecure)
# Wait until this host is listed in the service list
for i in range(iterations):
try:
nova = client.Client('2.11', session=sess,
endpoint_type='internal')
nova.versions.list()
LOG.info('Nova-api service active')
sys.exit(0)
except ClientException:
LOG.info('Waiting for nova-api service')
except Exception:
LOG.exception(
'Error while waiting for nova-api service')
time.sleep(timeout)
sys.exit(1)
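# Hedged usage note (illustrative; paths and deployment layout are assumptions based on a
# typical TripleO setup, not defined in this script): the script is meant to run on a host
# where /etc/nova/nova.conf carries a [neutron] section with keystone credentials, e.g.
#   python nova_wait_for_api_service.py
#   python nova_wait_for_api_service.py -k   # allow insecure SSL connections
# It exits 0 as soon as the nova-api endpoint answers a version listing, and 1 after 60
# attempts spaced 10 seconds apart.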
# vim: set et ts=4 sw=4 :
|
import sys
import pytest
from yt_napari._data_model import InputModel, _store_schema
from yt_napari.schemas._manager import Manager
skip_win = "Schema manager is not for windows."
@pytest.mark.skipif(sys.platform == "win32", reason=skip_win)
def test_schema_version_management(tmp_path):
m = Manager(schema_db=tmp_path)
def get_expected(prefix, vstring):
return tmp_path.joinpath(m._filename(prefix, vstring))
# test with defaults
pfx = m.default_schema_prefix
expected_file = get_expected(pfx, "0.0.1")
m.write_new_schema("any old string")
assert expected_file.is_file()
# run again with defaults, should increment
expected_file = get_expected(pfx, "0.0.2")
m.write_new_schema("any old string")
assert expected_file.is_file()
# test other increments
expected_file = get_expected(pfx, "0.1.2")
m.write_new_schema("any old string", inc_minor=True, inc_micro=False)
assert expected_file.is_file()
expected_file = get_expected(pfx, "1.1.2")
m.write_new_schema("any old string", inc_major=True, inc_micro=False)
assert expected_file.is_file()
# test explicit version
expected_file = get_expected(pfx, "2.0.0")
m.write_new_schema("any old string", version="2.0.0")
assert expected_file.is_file()
# should error without override
with pytest.raises(Exception):
m.write_new_schema("any old string", version="2.0.0")
# provide override, should have new text
new_text = "different string"
m.write_new_schema(new_text, version="2.0.0", overwrite_version=True)
with open(expected_file) as f:
assert "different string" in f.read()
pfx = "new-yt-napari"
expected_file = get_expected(pfx, "0.0.1")
m.write_new_schema("any old string", schema_prefix=pfx)
assert expected_file.is_file()
@pytest.mark.skipif(sys.platform == "win32", reason=skip_win)
def test_schema_generation(tmp_path):
_store_schema(schema_db=tmp_path)
m = Manager(schema_db=tmp_path)
pfx = InputModel._schema_prefix
expected_file = tmp_path.joinpath(m._filename(pfx, "0.0.1"))
file_exists = expected_file.is_file()
assert file_exists
schema_contents = InputModel.schema_json(indent=2)
with pytest.raises(ValueError):
m.write_new_schema(schema_contents, schema_prefix="bad_prefix")
@pytest.mark.skipif(sys.platform == "win32", reason=skip_win)
def test_schema_update_docs(tmp_path):
# directory setup
docsdir = tmp_path / "docs"
docsdir.mkdir()
staticdir = docsdir / "_static"
staticdir.mkdir()
# create a schema.rst with the anchor text
schema_rst = docsdir / "schema.rst"
content = (
"some stuff to put into a file\n" "\n with the special schemalistanchor! \n\n"
)
schema_rst.write_text(content)
# store the schema a number of times
_store_schema(schema_db=tmp_path)
_store_schema(schema_db=tmp_path)
_store_schema(schema_db=tmp_path, inc_micro=False, inc_major=True)
m = Manager(schema_db=tmp_path)
m.update_docs(docsdir)
nfiles = len(list(staticdir.iterdir()))
# should contain all the schema versions plus a copy of the latest
assert nfiles == 4
new_content = schema_rst.read_text()
assert content in new_content # make sure the original is in the new
m = Manager(schema_db=tmp_path)
for fi in m.schema_files:
# check that every schema file is now in the file
assert fi.name in new_content
|
# -*- coding: utf-8 -*-
# Created by lvjiyong on 16/5/6
HOST = '192.168.0.1'
PORT = 8888
|
from ..broker import Broker
class AuthRoleBroker(Broker):
controller = "auth_roles"
def index(self, **kwargs):
"""Lists the available auth roles. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, role_name, description, created_at, updated_at, is_system.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthRole. Valid values are id, role_name, description, created_at, updated_at, is_system. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_roles: An array of the AuthRole objects that match the specified input criteria.
:rtype auth_roles: Array of AuthRole
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified auth role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_role: The auth role identified by the specified id.
:rtype auth_role: AuthRole
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available auth roles matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param description: The description of the role, as displayed in the user interface.
:type description: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: The description of the role, as displayed in the user interface.
:type description: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param is_system: A flag indicating whether or not this is a built-in role. Built-in roles may not be modified.
:type is_system: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param is_system: A flag indicating whether or not this is a built-in role. Built-in roles may not be modified.
:type is_system: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param role_name: The name of the role, as displayed in the user interface.
:type role_name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param role_name: The name of the role, as displayed in the user interface.
:type role_name: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, role_name, description, created_at, updated_at, is_system.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthRole. Valid values are id, role_name, description, created_at, updated_at, is_system. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against auth roles, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: created_at, description, id, is_system, role_name, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_roles: An array of the AuthRole objects that match the specified input criteria.
:rtype auth_roles: Array of AuthRole
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available auth roles matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: created_at, description, id, is_system, role_name, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: The description of the role, as displayed in the user interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this role definition. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_is_system: The operator to apply to the field is_system. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. is_system: A flag indicating whether or not this is a build-in role. Built-in roles may not be modified. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_is_system: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_is_system: If op_is_system is specified, the field named in this input will be compared to the value in is_system using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_is_system must be specified if op_is_system is specified.
:type val_f_is_system: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_is_system: If op_is_system is specified, this value will be compared to the value in is_system using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_is_system must be specified if op_is_system is specified.
:type val_c_is_system: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_role_name: The operator to apply to the field role_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. role_name: The name of the role, as displayed in the user interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_role_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_role_name: If op_role_name is specified, the field named in this input will be compared to the value in role_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_role_name must be specified if op_role_name is specified.
:type val_f_role_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_role_name: If op_role_name is specified, this value will be compared to the value in role_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_role_name must be specified if op_role_name is specified.
:type val_c_role_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, role_name, description, created_at, updated_at, is_system.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthRole. Valid values are id, role_name, description, created_at, updated_at, is_system. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_roles: An array of the AuthRole objects that match the specified input criteria.
:rtype auth_roles: Array of AuthRole
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def create(self, **kwargs):
"""Creates a new auth role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param description: The description of the role, as displayed in the user interface.
:type description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param role_name: The name of the role, as displayed in the user interface.
:type role_name: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created auth role.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created auth role.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created auth role.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_role: The newly created auth role.
:rtype auth_role: AuthRole
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
"""Updates an existing auth role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: The description of the role, as displayed in the user interface. If omitted, this field will not be updated.
:type description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param role_name: The name of the role, as displayed in the user interface. If omitted, this field will not be updated.
:type role_name: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated auth role.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated auth role.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated auth role.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_role: The updated auth role.
:rtype auth_role: AuthRole
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified auth role from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def auth_privileges(self, **kwargs):
"""Shows the associated AuthPrivilege objects for the specified auth role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_privileges: The AuthPrivilege objects associated with the specified auth role.
:rtype auth_privileges: Array of AuthPrivilege
"""
return self.api_request(self._get_method_fullname("auth_privileges"), kwargs)
def add_auth_privileges(self, **kwargs):
"""Associates the AuthPrivilege object with the specified auth role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param auth_privilege_id: The internal NetMRI identifier of the AuthPrivilege to add.
:type auth_privilege_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_role: The AuthRole identified by the specified primary key.
:rtype auth_role: AuthRole
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_privilege: The AuthPrivilege identified by the specified primary key.
:rtype auth_privilege: AuthPrivilege
"""
return self.api_request(self._get_method_fullname("add_auth_privileges"), kwargs)
def remove_auth_privileges(self, **kwargs):
"""Associates the AuthPrivilege object with the specified auth role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this role definition.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param auth_privilege_id: The internal NetMRI identifier of the AuthPrivilege to remove
:type auth_privilege_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_role: The AuthRole identified by the specified primary key.
:rtype auth_role: AuthRole
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_privilege: The AuthPrivilege identified by the specified primary key.
:rtype auth_privilege: AuthPrivilege
"""
return self.api_request(self._get_method_fullname("remove_auth_privileges"), kwargs)
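# Hedged usage sketch (illustrative only; the client construction below is an assumption
# based on typical NetMRI API client usage and is not defined in this module):
#
#   from infoblox_netmri.client import InfobloxNetMRI
#   client = InfobloxNetMRI("netmri.example.com", "admin", "secret")
#   broker = client.get_broker("AuthRole")
#   roles = broker.index(limit=10, sort=["role_name"], dir=["asc"])
#   matches = broker.search(role_name=["Admin"], select=["id", "role_name"])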
|
# -*- coding: utf-8 -*-
import yfinance as yf
import time
import numpy as np
from ..utils.time_utils import *
class Stock:
"""
This class retrieves stock market data through the yfinance API
"""
def __init__(self, name, date, simulation_time, fixed_commission, prop_commission, moving_window, decrease_window, quantity=0):
self.__name = name
self.__owned = True
self.__quantity = quantity
self.__cost_price = 0
# in euros
self.__fixed_commission = fixed_commission
# rate, proportional to the stock price
self.__prop_commission = prop_commission
# parameters
self.__decrease_window = decrease_window
self.__moving_window = moving_window
self.__stock = yf.Ticker(name)
# print(increase_date(date, -(moving_window + decrease_window)))
self.__history = self.__stock.history(
start=increase_date(date, -(moving_window + decrease_window)),
end=increase_date(date, simulation_time+2)
)
self.__historical_data = self.getCloseData()
self.initCostPrice(date)
return
def initCostPrice(self, date):
self.__cost_price = self.getDateValue(date)
return
def show(self, date):
i = 0
while self.getDateValue(increase_date(date, -i)) == None:
i += 1
return "\n----------Stock " + self.__name + "----------\nQuantity : " + str(self.__quantity) + \
"\nPrice : " + str(self.__cost_price) + \
"\nInitial price : " + str(self.getDateValue(increase_date(date, -i))) + \
"\nPrice difference : " + str(self.getQuantity()*(self.getDateValue(
increase_date(date, -i))-self.__cost_price)) + " euros\n"
def getStock(self):
"""
Returns the stock object
"""
return self.__stock
def getInfo(self):
"""
Returns the stock information
"""
return self.__stock.info
def getDateValue(self, date):
"""
Returns the 'Close' value for date "date" (format year-month-day)
"""
if date in self.__history.index:
return self.__history['Close'][date]
else:
return None
def getDateVariation(self, date):
"""
Returns the 'Variation' value for date "date" (format year-month-day)
"""
if date in self.__history.index:
return self.__historical_data['Variation'][date]
else:
# print("!!!!!!!!!!!!!!!!!!!", date)
return None
def getMeanVariation(self, date):
"""
Returns the mean value of the stock variation on a moving_window period before date
"""
mean_var = 0
for i in range(len(self.__historical_data["Variation"].tolist())):
if self.__historical_data.index[i].timestamp() <= time.mktime(time.strptime(date, "%Y-%m-%d")):
if (time.mktime(time.strptime(date, "%Y-%m-%d"))-self.__historical_data.index[i].timestamp()) / (24 * 3600) <= self.__moving_window:
mean_var += 100 * \
self.__historical_data["Variation"].tolist()[i]
else:
break
return np.mean(mean_var)
def getRSI(self, date, nb_days):
historical_data = self.getHistoryToDate(date, nb_days)
pos_var, neg_var = [], []
# Calculation of the RSI
for i in range(len(historical_data["Variation"].tolist())):
if historical_data["Variation"].tolist()[i] > 0:
pos_var.append(
historical_data["Variation"].tolist()[i])
neg_var.append(0)
else:
neg_var.append(historical_data["Variation"].tolist()[i])
pos_var.append(0)
avg_gain, avg_loss = abs(np.mean(pos_var)), abs(np.mean(neg_var))
return 100 * avg_gain / (avg_gain + avg_loss)
def getStoch(self, date, nb_days):
historical_data = self.getHistoryToDate(date, nb_days)
close_values = historical_data["Variation"].tolist()
last_close = close_values[-1]
return 100 * ((last_close - min(close_values)) / (max(close_values) - min(close_values)))
def isDecreasingStock(self, date):
"""
Returns whether or not the stock price has been decreasing for at least "decrease_window"
days and has just increased at date "date"
"""
i = 0
j = 0
while i <= self.__decrease_window:
if self.getDateVariation(increase_date(date, -i-j-1)) != None:
if self.getDateVariation(increase_date(date, -i-j-1)) > 0:
return False
i += 1
else:
j += 1
i = 0
while self.getDateVariation(increase_date(date, -i)) == None:
i += 1
return self.getDateVariation(increase_date(date, -i)) > 0  # variation of the most recent trading day at or before "date"
def getOwned(self):
"""
Returns a boolean indicating whether or not the stock is owned
"""
return self.__owned
def setOwned(self, owned):
"""
Sets a boolean indicating whether or not the stock is owned
"""
self.__owned = owned
def getQuantity(self):
"""
Returns the stock's quantity
"""
return self.__quantity
def setQuantity(self, quantity):
"""
Sets the stock's quantity
"""
self.__quantity = quantity
def getCostPrice(self):
"""
Returns the stock cost price
"""
return self.__cost_price
def setCostPrice(self, cost_price):
"""
Sets the stock cost price
"""
self.__cost_price = cost_price
def getFixedCommission(self):
return self.__fixed_commission
def getPropCommission(self):
return self.__prop_commission
def getName(self):
"""
Returns the stock's name
"""
return self.__name
def getHistory(self):
"""
Returns the history data
"""
return self.__history
def getCloseData(self):
"""
Returns the 'close' column of the history data
and computes the variation between the open and close values.
"""
history = self.__history.copy()
history['Variation'] = 10000 * (
history['Close'] - history['Open'])/history['Close']
history['Variation'] = history['Variation'].astype(int)/10000
return history[['Close', 'Variation']]
def getHistoryToDate(self, date, nb_days):
"""
Returns the last nb_days rows of history up to a given date. For example, if the last 5 days
are needed for a calculation, getHistoryToDate(now, 5) will extract the last 5 entries
"""
return self.__historical_data.loc[increase_date(date, - nb_days):date]
def getFullHistoryToDate(self, date, nb_days):
"""
Same as getHistoryToDate but it returns the columns HIGH LOW OPEN CLOSE
"""
return self.__history[["High", "Low", "Open", "Close"]].loc[increase_date(date, - nb_days):date]
def getSupport(self):
return
def getResistance(self):
return
def getGain(self, date):
"""
Returns the gain earned on the stock
"""
return self.getQuantity()*(self.getDateValue(date)-self.getCostPrice())
def buy(self, quantity, wallet, price):
"""
Whenever the stock is bought, the cost price and the quantity are updated with the
new quantity and price
"""
if not self.getOwned():
self.setOwned(True)
self.setQuantity(quantity)
self.setCostPrice(price)
else:
self.setCostPrice((self.getQuantity()*self.getCostPrice() +
quantity * price)/(quantity+self.getQuantity()))
self.setQuantity(quantity+self.getQuantity())
# Update of the wallet below
wallet.total_commission += quantity * price * self.getPropCommission() + self.getFixedCommission()
wallet.available_cash -= quantity * (price * (
1 + self.getPropCommission()) + self.getFixedCommission())
wallet.total_transaction += 1
return
def sell(self, wallet, price, quantity=None):
"""
Whenever the stock is sold, the quantity is updated with the
new quantity
"""
if quantity == None or quantity > self.getQuantity():
return None
# sell all the stocks
elif quantity == self.getQuantity():
self.setOwned(False)
self.setQuantity(0)
self.setCostPrice(0)
wallet.total_commission += quantity * price * self.getPropCommission() + self.getFixedCommission()
wallet.available_cash += quantity * price * (1 - self.getPropCommission()) - self.getFixedCommission()
wallet.total_transaction += 1
# sell part of the stocks
else:
self.setQuantity(self.getQuantity()-quantity)
wallet.total_commission += quantity * price * self.getPropCommission() + self.getFixedCommission()
wallet.available_cash += quantity * price * (1 - self.getPropCommission()) - self.getFixedCommission()
wallet.total_transaction += 1
return "sold"
|
import logging
import os
import requests
from ....core.reporting.utils import convert_svg_to_png, is_image, SVG_SUFFIX, PNG_SUFFIX
TELEGRAM_BASE_URL = os.environ.get("TELEGRAM_BASE_URL", "https://api.telegram.org")
class TelegramClient:
def __init__(self, chat_id: int, bot_token: str):
self.chat_id = chat_id
self.bot_token = bot_token
def send_message(self, message: str, disable_links_preview: bool = True):
url = f"{TELEGRAM_BASE_URL}/bot{self.bot_token}/sendMessage"
message_json = {
"chat_id": self.chat_id,
"disable_web_page_preview": disable_links_preview,
"parse_mode": "Markdown",
"text": message
}
response = requests.post(url, json=message_json)
if response.status_code != 200:
logging.error(
f"Failed to send telegram message: chat_id - {self.chat_id} reason - {response.reason} {response.text}"
)
def send_file(self, file_name: str, contents: bytes):
file_type = "Photo" if is_image(file_name) else "Document"
url = f"{TELEGRAM_BASE_URL}/bot{self.bot_token}/send{file_type}?chat_id={self.chat_id}"
if file_name.endswith(SVG_SUFFIX):
contents = convert_svg_to_png(contents)
file_name = file_name.replace(SVG_SUFFIX, PNG_SUFFIX)
files = {
file_type.lower(): (file_name, contents)
}
response = requests.post(url, files=files)
if response.status_code != 200:
logging.error(
f"Failed to send telegram file: chat_id - {self.chat_id} reason - {response.reason} {response.text}"
)
|
import ecg_plot
import argparse
import matplotlib.pyplot as plt
import preprocess
import os
import read_ecg
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot ECG from wfdb')
parser.add_argument('path', type=str,
help='Path to the file to be plot.')
parser.add_argument('--save', default="",
help='Save in the provided path. Otherwise just display image.')
parser = preprocess.arg_parse_option(parser)
parser = read_ecg.arg_parse_option(parser)
args = parser.parse_args()
print(args)
ecg, sample_rate, leads = read_ecg.read_ecg(args.path, format=args.fmt)
ecg, sample_rate, leads = preprocess.preprocess_ecg(ecg, sample_rate, leads,
new_freq=args.new_freq,
new_len=args.new_len,
scale=args.scale,
powerline=args.powerline,
use_all_leads=args.use_all_leads,
remove_baseline=args.remove_baseline)
ecg_plot.plot(ecg, sample_rate=sample_rate,
lead_index=leads, style='bw')
    # remove ticks and tick labels
plt.tick_params(
        axis='both',          # changes apply to both axes
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False) # labels along the bottom edge are off
if args.save:
path, ext = os.path.splitext(args.save)
if ext == '.png':
ecg_plot.save_as_png(path)
elif ext == '.pdf':
ecg_plot.save_as_pdf(path)
else:
ecg_plot.show()
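# Example invocation of this script (hypothetical record path; the --fmt and other
# option names are assumed to come from read_ecg.arg_parse_option / preprocess.arg_parse_option):
#   python <this_script>.py data/record0 --fmt wfdb --save ecg.png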
|
"""
ScalyMUCK has several base exceptions for the ScalyMUCK core and
ScalyMUCK modifications that may be loaded into the MUCK server.
This software is licensed under the MIT license.
Please refer to LICENSE.txt for more information.
"""
class ModApplicationError(Exception):
""" Generic exception for ScalyMUCK modifications to subclass in order
to report errors to the error reporting mechanism.
NOTE:
        This should never be explicitly raised by any code. This
is designed to be subclassed for proper exception support.
"""
class WorldArgumentError(ModApplicationError):
""" Raised when using the world API and an invalid argument is specified. """
class ModelArgumentError(ModApplicationError):
""" Raised when a model function is used improperly. """
class DatabaseError(ModApplicationError):
""" Raised when an error occurs in the database. """
|
#!/usr/bin/python
#
# Copyright (c) 2020 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation,
# Inc. ("NetDEF") in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Following tests are covered in the script.
- Verify static routes are blocked by route-map and prefix-list
  applied on BGP nbrs
- Verify static routes when FRR is connected to 2 TORs
"""
import sys
import time
import os
import pytest
import platform
import ipaddress
from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
reset_config_on_routers,
verify_rib,
check_address_types,
step,
create_prefix_lists,
create_route_maps,
verify_prefix_lists,
verify_route_maps,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
clear_bgp_and_verify,
clear_bgp,
)
from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"}
NEXT_HOP_IP = {}
def setup_module(mod):
"""
Set up the pytest environment.
* `mod`: module name
"""
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
json_file = "{}/static_routes_topo4_ebgp.json".format(CWD)
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
    # to start daemons and then start routers
start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
if version_cmp(platform.release(), "4.19") < 0:
error_msg = (
'These tests will not run. (have kernel "{}", '
"requires kernel >= 4.19)".format(platform.release())
)
pytest.skip(error_msg)
# Checking BGP convergence
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
    # API call to verify whether BGP has converged
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
BGP_CONVERGENCE
)
logger.info("Running setup_module() done")
def teardown_module(mod):
"""
Teardown the pytest environment.
* `mod`: module name
"""
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
    # Stop topology and remove tmp files
tgen.stop_topology()
logger.info(
"Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
)
logger.info("=" * 40)
#####################################################
#
# Tests starting
#
#####################################################
def test_static_routes_rmap_pfxlist_p0_tc7_ebgp(request):
"""
    Verify static routes are blocked by route-map & prefix-list applied on BGP
    nbrs
"""
tc_name = request.node.name
write_test_header(tc_name)
tgen = get_topogen()
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
reset_config_on_routers(tgen)
step("Configure holddown timer = 1 keep alive = 3 in all the neighbors")
step("verify bgp convergence before starting test case")
bgp_convergence = verify_bgp_convergence(tgen, topo)
assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, bgp_convergence
)
step(
"Configure 4 IPv4 and 4 IPv6 nbrs with password with mismatch "
" authentication between FRR routers "
)
for addr_type in ADDR_TYPES:
# Api call to modify BGP timers
input_dict = {
"r2": {
"bgp": {
"local_as": "200",
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link0": {"password": "r2"},
"r2-link1": {"password": "r2"},
"r2-link2": {"password": "r2"},
"r2-link3": {"password": "r2"},
}
},
"r3": {
"dest_link": {
"r2-link0": {"password": "r2"},
"r2-link1": {"password": "r2"},
"r2-link2": {"password": "r2"},
"r2-link3": {"password": "r2"},
}
},
}
}
}
},
}
}
}
result = create_router_bgp(tgen, topo, input_dict)
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
clear_bgp(tgen, addr_type, "r2")
step(" All BGP nbrs are down as authentication is mismatch on both" " the sides")
bgp_convergence = verify_bgp_convergence(tgen, topo, expected=False)
assert (
bgp_convergence is not True
), "Testcase {} : " "Failed \n BGP nbrs must be down. Error: {}".format(
tc_name, bgp_convergence
)
step(
"Configure 4 IPv4 and 4 IPv6 nbrs with macthing password "
" authentication between FRR routers "
)
for addr_type in ADDR_TYPES:
input_dict = {
"r2": {
"bgp": {
"local_as": "200",
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link0": {"password": "r1"},
"r2-link1": {"password": "r1"},
"r2-link2": {"password": "r1"},
"r2-link3": {"password": "r1"},
}
},
"r3": {
"dest_link": {
"r2-link0": {"password": "r1"},
"r2-link1": {"password": "r1"},
"r2-link2": {"password": "r1"},
"r2-link3": {"password": "r1"},
}
},
}
}
}
},
}
}
}
result = create_router_bgp(tgen, topo, deepcopy(input_dict))
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
step("All BGP nbrs are up as authentication is matched now")
bgp_convergence = verify_bgp_convergence(tgen, topo)
assert bgp_convergence is True, "Testcase {} : Failed \n " "Error: {}".format(
tc_name, bgp_convergence
)
step("Create prefix list P1 to permit VM3 & deny VM1 v4 & v6 routes")
step("Create prefix list P2 to permit VM6 IPv4 and IPv6 routes")
for addr_type in ADDR_TYPES:
input_dict_2 = {
"r2": {
"prefix_lists": {
addr_type: {
"pf_list_1_{}".format(addr_type): [
{
"seqid": 10,
"network": topo["routers"]["r2"]["links"]["vm3"][
addr_type
],
"action": "permit",
},
{
"seqid": 20,
"network": topo["routers"]["r2"]["links"]["vm1"][
addr_type
],
"action": "deny",
},
],
"pf_list_2_{}".format(addr_type): [
{
"seqid": 10,
"network": topo["routers"]["r2"]["links"]["vm6"][
addr_type
],
"action": "permit",
}
],
}
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Prefix list created with matching networks deny or permit "
"show ip prefix list"
)
result = verify_prefix_lists(tgen, input_dict_2)
assert result is not True, "Testcase {} : Failed \n" " Error: {}".format(
tc_name, result
)
step("Redistribute all the routes (connected, static)")
input_dict_2_r1 = {
"r1": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {"redistribute": [{"redist_type": "static"}]}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2_r1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_2_r2 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {"redistribute": [{"redist_type": "static"}]}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2_r2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_2_r3 = {
"r3": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {"redistribute": [{"redist_type": "static"}]}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2_r3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("configure redistribute connected in Router BGP")
input_dict_2_r1 = {
"r1": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2_r1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_2_r3 = {
"r3": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2_r3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_2 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Apply prefix list P1 on BGP neighbors 1 2 3 4 connected from " "frr r1")
# Configure prefix list to bgp neighbor
input_dict_4 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link0": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
}
]
},
"r2-link1": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
}
]
},
"r2-link2": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
}
]
},
"r2-link3": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
}
]
},
}
}
}
}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Apply prefix list P2 on BGP nbrs 5 & 6 connected from FRR-2")
# Configure prefix list to bgp neighbor
input_dict_4 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"r2-link0": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
}
]
},
"r2-link1": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
}
]
},
"r2-link2": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
}
]
},
"r2-link3": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
}
]
},
}
}
}
}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
clear_bgp_and_verify(tgen, topo, "r2")
step(
"VM1 IPv4 and IPv6 Route which is denied using prefix list is "
"not present on FRR1 side routing table , also not able to "
"ping the routes show ip route"
)
dut = "r1"
protocol = "bgp"
ntwk_r2_vm1 = str(
ipaddress.ip_interface(
u"{}".format(topo["routers"]["r2"]["links"]["vm1"][addr_type])
).network
)
input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}}
result4 = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
assert result4 is not True, (
"Testcase {} : Failed , VM1 route is "
"not filtered out via prefix list. \n Error: {}".format(tc_name, result4)
)
step(
"VM4 and VM6 IPV4 and IPv6 address are present in local and "
"FRR2 routing table show ip bgp show ip route"
)
dut = "r2"
ntwk_r2_vm6 = str(
ipaddress.ip_interface(
u"{}".format(topo["routers"]["r2"]["links"]["vm6"][addr_type])
).network
)
input_dict = {"r3": {"static_routes": [{"network": ntwk_r2_vm6}]}}
result4 = verify_rib(tgen, addr_type, dut, input_dict)
assert result4 is True, "Testcase {} : Failed.\n Error: {}".format(
tc_name, result4
)
step("Remove prefix list from all the neighbors")
input_dict_4 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link0": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
"r2-link1": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
"r2-link2": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
"r2-link3": {
"prefix_lists": [
{
"name": "pf_list_1_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
}
}
}
}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_4 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"r2-link0": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
"r2-link1": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
"r2-link2": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
"r2-link3": {
"prefix_lists": [
{
"name": "pf_list_2_{}".format(
addr_type
),
"direction": "out",
"delete": True,
}
]
},
}
}
}
}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
clear_bgp_and_verify(tgen, topo, "r2")
step("Create RouteMap_1 with prefix list P1 and weight 50")
# Create route map
rmap_dict = {
"r2": {
"route_maps": {
"rmap_pf_list_1_{}".format(addr_type): [
{
"action": "permit",
"set": {"weight": 50},
"match": {
addr_type: {
"prefix_lists": "pf_list_1_{}".format(addr_type)
}
},
}
]
}
}
}
result = create_route_maps(tgen, rmap_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Create RouteMap_2 with prefix list P2 and weight 50")
# Create route map
rmap_dict = {
"r2": {
"route_maps": {
"rmap_pf_list_2_{}".format(addr_type): [
{
"action": "permit",
"set": {"weight": 50},
"match": {
addr_type: {
"prefix_lists": "pf_list_2_{}".format(addr_type)
}
},
}
]
}
}
}
result = create_route_maps(tgen, rmap_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Verify Route-map created verify using show route-map")
# verify rmap_pf_list_1 and rmap_pf_list_2 are present in router r2
input_dict = {
"r2": {
"route_maps": [
"rmap_pf_list_1_{}".format(addr_type),
"rmap_pf_list_2_{}".format(addr_type),
]
}
}
result = verify_route_maps(tgen, input_dict, expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Apply policy RouteMap_1 nbrs 1 2 3 4 to FRR 1")
        # Configure route-map on BGP neighbor
input_dict_4 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link0": {
"route_maps": [
{
"name": "rmap_pf_list_1_"
"{}".format(addr_type),
"direction": "out",
}
]
},
"r2-link1": {
"route_maps": [
{
"name": "rmap_pf_list_1_"
"{}".format(addr_type),
"direction": "out",
}
]
},
"r2-link2": {
"route_maps": [
{
"name": "rmap_pf_list_1_"
"{}".format(addr_type),
"direction": "out",
}
]
},
"r2-link3": {
"route_maps": [
{
"name": "rmap_pf_list_1_"
"{}".format(addr_type),
"direction": "out",
}
]
},
}
}
}
}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Apply policy RouteMap_2 nbrs 5 and 6 to FRR2")
        # Configure route-map on BGP neighbor
input_dict_4 = {
"r2": {
"bgp": {
"address_family": {
addr_type: {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"r2-link0": {
"route_maps": [
{
"name": "rmap_pf_list_2_"
"{}".format(addr_type),
"direction": "out",
}
]
},
"r2-link1": {
"route_maps": [
{
"name": "rmap_pf_list_2_"
"{}".format(addr_type),
"direction": "out",
}
]
},
"r2-link2": {
"route_maps": [
{
"name": "rmap_pf_list_2_"
"{}".format(addr_type),
"direction": "out",
}
]
},
"r2-link3": {
"route_maps": [
{
"name": "rmap_pf_list_2_"
"{}".format(addr_type),
"direction": "out",
}
]
},
}
}
}
}
}
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"After applying to BGP neighbors verify VM1 IPv4 and IPv6 Route"
" which is denied using prefix list is not present on FRR side"
" routing table , also not able to ping the routes show ip route"
" and VM4 and VM6 IPV4 and IPv6 address are present in local and"
" FRR routing table show ip bgp show ip route"
)
dut = "r1"
protocol = "bgp"
ntwk_r2_vm1 = str(
ipaddress.ip_interface(
u"{}".format(topo["routers"]["r2"]["links"]["vm1"][addr_type])
).network
)
input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}}
result4 = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
assert (
result4 is not True
), "Testcase {} : Failed \n" "routes are still present \n Error: {}".format(
tc_name, result4
)
step("vm4 should be present in FRR1")
dut = "r1"
ntwk_r2_vm1 = str(
ipaddress.ip_interface(
u"{}".format(topo["routers"]["r1"]["links"]["vm4"][addr_type])
).network
)
input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}}
result4 = verify_rib(tgen, addr_type, dut, input_dict)
        assert result4 is True, (
            "Testcase {} : Failed, vm4 route is "
            "not present in FRR1 routing table. \n Error: {}".format(tc_name, result4)
        )
step("vm4 should be present in FRR2")
dut = "r2"
ntwk_r2_vm1 = str(
ipaddress.ip_interface(
u"{}".format(topo["routers"]["r1"]["links"]["vm4"][addr_type])
).network
)
input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}}
result4 = verify_rib(tgen, addr_type, dut, input_dict)
        assert result4 is True, (
            "Testcase {} : Failed, vm4 route is "
            "not present in FRR2 routing table. \n Error: {}".format(tc_name, result4)
        )
dut = "r3"
protocol = "bgp"
ntwk_r2_vm6 = str(
ipaddress.ip_interface(
u"{}".format(topo["routers"]["r2"]["links"]["vm6"][addr_type])
).network
)
input_dict = {"r3": {"static_routes": [{"network": ntwk_r2_vm6}]}}
result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result4 is True, "Testcase {} : Failed.\n Error: {}".format(
tc_name, result4
)
write_test_footer(tc_name)
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
|
import math
import main
from graphics import *
window_width = 600
window_height = 400
class Road(object):
def __init__(self, inner_points=None, outer_points=None, distance_check_points=None,
start=None, back_check=None, finish=None, starting_direction=None):
self.win = GraphWin(title="Self-driving car", width=window_width, height=window_height)
self.rw = 50 # Road width
self.road = []
self.lines = []
self.cars = []
self.start = None
self.starting_direction = None
self.margin = 2
self.back_check = None
self.finish = None
self.distance_check = []
if inner_points is not None and outer_points is not None \
and distance_check_points is not None and start is not None \
and back_check is not None and starting_direction is not None \
and finish is not None:
self.inner_points = inner_points
self.outer_points = outer_points
self.distance_check_points = distance_check_points
self.make_lines()
self.start = start[0]
self.back_check = Line(back_check[0], back_check[1])
self.finish = Line(finish[0], finish[1])
self.starting_direction = starting_direction
def make_lines(self):
# Clear lists
self.lines = []
self.distance_check = []
# Road lines
for point_array in [self.inner_points, self.outer_points]:
for i in range(0, len(point_array) - 1):
self.lines.append(Line(point_array[i], point_array[i + 1]))
# Distance check lines
for i in range(0, len(self.distance_check_points) - 1):
self.distance_check.append(Line(self.distance_check_points[i], self.distance_check_points[i + 1]))
def redraw(self):
for line in self.lines + self.distance_check:
try:
line.draw(self.win)
except GraphicsError:
pass
for thing in [self.start, self.finish, self.back_check]:
try:
thing.draw(self.win)
except GraphicsError:
pass
for car in self.cars:
Point(car.x, car.y).draw(self.win)
def reset_win(self):
for item in self.win.items[:]:
item.undraw()
del self.win.items[:]
self.win.update()
# self.win.close()
# self.win = GraphWin(title="Self-driving car", width=window_width, height=window_height)
# Also clear self.cars array, to start with a clean window
self.cars = []
self.redraw()
def test(self, cars):
self.cars = cars
self.reset_cars()
step_time = 1
while self.not_all_cars_collided():
for car in cars:
if car.collide_distance != -1:
continue
car.update_direction(self.get_sensor_data(car))
x_diff = math.cos(car.direction) * step_time * car.speed
y_diff = math.sin(car.direction) * step_time * car.speed
car.add_position(x_diff, y_diff)
if self.car_collided(car):
car.collide_distance = self.collide_distance(car)
if self.point_collides_with_line(self.finish, car.x, car.y) and car.checked:
car.finished = True
car.collide_distance = self.collide_distance(car)
if self.point_collides_with_line(self.back_check, car.x, car.y):
car.checked = True
self.redraw()
def collide_distance(self, car):
if not car.checked:
return 0
distances = []
for line in self.distance_check:
distances.append(distance_to_line_segment(line, car))
index = distances.index(min(distances))
total_distance = 0
for line in self.distance_check[:index]:
total_distance += length_of_line(line)
current_segment = self.distance_check[index]
total_distance += math.sqrt((car.x - current_segment.p1.x) ** 2 + (car.y - current_segment.p1.y) ** 2)
return total_distance
def reset_cars(self):
for car in self.cars:
car.checked = False
car.finished = False
car.set_position(self.start.x, self.start.y)
car.collide_distance = -1
car.direction = self.starting_direction
def not_all_cars_collided(self):
for car in self.cars:
if car.collide_distance == -1:
return True
return False
def car_collided(self, car):
for line in self.lines:
if self.point_collides_with_line(line, car.x, car.y):
return True
return False
def get_sensor_data(self, car):
result = []
        # The car's maximum field of view is 180 degrees, i.e. pi radians
view_angle = 1. * math.pi
num_of_sensors = main.ann.inputNodes
angle_per_sensor = view_angle / (num_of_sensors - 1)
first_sensor_angle = car.direction - view_angle / 2
max_sensor_range = car.sensor_range
for i in range(0, num_of_sensors):
sensor_angle = first_sensor_angle + i * angle_per_sensor
            # Closest wall found so far for this sensor
closest_line = max_sensor_range
for wall in self.lines:
                # Slope of the sensor line
m1 = math.tan(sensor_angle)
                # y-intercept of the sensor line (if y = mx + b, then b = y - mx)
b1 = car.y - m1 * car.x
if wall.p2.x == wall.p1.x:
                    # The wall line is vertical. We must compute the intersection
                    # coordinates differently here, otherwise we would divide by zero.
                    # Take the x of the vertical wall line:
x = wall.p1.x
                    # And compute y by substituting x into the sensor-line formula:
y = m1 * x + b1
else:
                    # Slope of the wall line:
m2 = (wall.p2.y - wall.p1.y) / (wall.p2.x - wall.p1.x)
                    # y-intercept of the wall line:
b2 = wall.p1.y - m2 * wall.p1.x
if m1 - m2 == 0:
                        # The lines are parallel, so there is no intersection
continue
                    # Coordinates of the intersection point of the two lines
x = (b2 - b1) / (m1 - m2)
y = m1 * x + b1
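                    # Worked example: the lines y = 2x + 1 and y = -x + 4 intersect at
                    # x = (4 - 1) / (2 - (-1)) = 1 and y = 3.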
                # Distance from the intersection point to the car
d = math.sqrt((x - car.x) ** 2 + (y - car.y) ** 2)
if d > closest_line:
                    # Not a candidate for the closest line
continue
if out_of_range(wall, x, y):
                    # The sensor line intersects the wall line, but not the wall line *segment* that forms the actual wall
continue
if math.cos(sensor_angle) * (x - car.x) <= 0:
                    # The intersection of the sensor line and the wall line is on the wrong side of the car
                    # (it does not cross the actual sensor-line segment)
continue
                # If this code is reached, all checks passed and this is the shortest distance so far.
closest_line = d
result.append(closest_line)
return [result]
def point_collides_with_line(self, line, x, y):
        # First check whether the car is within the line's bounding range:
if out_of_range(line, x, y):
return False
        # Distance from a point to a line.
        # Line:
        # ax + by + c = 0
        # Point:
        # (p, q)
        # Distance formula:
        # |ap + bq + c| / sqrt(a^2 + b^2)
        # Equation of the line built from two points (s and t):
        # (sy - ty)x + (tx - sx)y + (sx*ty - tx*sy) = 0
        # So:
        # a = sy - ty
        # b = tx - sx
        # c = sx*ty - tx*sy
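        # Worked example: for the segment from s = (0, 0) to t = (2, 0) we get
        # a = 0, b = 2, c = 0, so the point (1, 1) lies at distance |2*1| / sqrt(4) = 1.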
s = line.p1
t = line.p2
        # Compute the distance
a = s.y - t.y
b = t.x - s.x
c = s.x * t.y - t.x * s.y
distance = abs(a * x + b * y + c) / math.sqrt(a ** 2 + b ** 2)
return distance < self.margin
def desired_direction(self, car):
distances = []
for line in self.distance_check:
distances.append(distance_to_line_segment(line, car))
index = distances.index(min(distances))
current_segment = self.distance_check[index]
if math.sqrt((car.x - current_segment.p2.x) ** 2 + (car.y - current_segment.p2.y) ** 2) < 10 \
and index < len(self.distance_check) - 1:
current_segment = self.distance_check[index + 1]
return math.atan2(float(current_segment.p2.y) - float(car.y),
float(current_segment.p2.x) - float(car.x))
def out_of_range(line, x, y):
minx = min(line.p1.x, line.p2.x)
maxx = max(line.p1.x, line.p2.x)
miny = min(line.p1.y, line.p2.y)
maxy = max(line.p1.y, line.p2.y)
return (x < minx or x > maxx) and (y < miny or y > maxy)
def distance_to_line_segment(line, car):
s = line.p1
t = line.p2
if t.x - s.x == 0:
        # The line is vertical
if min(s.y, t.y) <= car.y <= max(s.y, t.y):
return abs(car.x - t.x)
else:
return closest_end_point(line, car.x, car.y)
if t.y - s.y == 0:
        # The line is horizontal
if min(s.x, t.x) <= car.x <= max(s.x, t.x):
return abs(car.y - t.y)
else:
return closest_end_point(line, car.x, car.y)
    # Slope of the line:
m1 = (t.y - s.y) * 1. / (t.x - s.x)
b1 = s.y - m1 * s.x
    # Slope of the line through (car.x, car.y) that is perpendicular to the line segment:
m2 = -1. / m1
b2 = car.y - m2 * car.x
    # Intersection point of the two lines:
x = (b2 - b1) / (m1 - m2)
y = m1 * x + b1
if out_of_range(line, x, y):
return closest_end_point(line, car.x, car.y)
else:
        # Point-to-line distance formula (same as in point_collides_with_line)
a = s.y - t.y
b = t.x - s.x
c = s.x * t.y - t.x * s.y
return abs(a * car.x + b * car.y + c) / math.sqrt(a ** 2 + b ** 2)
def closest_end_point(line, x, y):
# Find the closest of the two end points of the line
d1 = math.sqrt((x - line.p1.x) ** 2 + (y - line.p1.y) ** 2)
d2 = math.sqrt((x - line.p2.x) ** 2 + (y - line.p2.y) ** 2)
return min(d1, d2)
def length_of_line(line):
return math.sqrt((line.p1.x - line.p2.x) ** 2 + (line.p1.y - line.p2.y) ** 2)
|
"""
Broadworks OCI-P Interface Exception Classes
Exception classes used by the API.
"""
import attr
@attr.s(slots=True, frozen=True)
class OCIError(Exception):
"""Base Exception raised by OCI operations.
Attributes:
message: explanation of why it went bang
object: the thing that went bang
"""
message: str = attr.ib()
object = attr.ib(default=None)
def __str__(self):
return f"{self.__class__.__name__}({self.message})"
class OCIErrorResponse(OCIError):
"""
Exception raised when an ErrorResponse is received and decoded.
Subclass of OCIError()
"""
pass
class OCIErrorTimeOut(OCIError):
"""
    Exception raised when nothing is heard back from the server.
Subclass of OCIError()
"""
pass
class OCIErrorUnknown(OCIError):
"""
Exception raised when life becomes too much for the software.
Subclass of OCIError()
"""
pass
# end
|
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.datasets import load_boston
import numpy as np
boston = load_boston()
x = np.array([np.concatenate((v, [1])) for v in boston.data])
y = boston.target
FIT_EN = False
if FIT_EN:
model = ElasticNet(fit_intercept=True, alpha=0.5)
else:
model = LinearRegression(fit_intercept=True)
model.fit(x, y)
p = model.predict(x)
e = p - y
total_error = np.dot(e, e)
rmse_train = np.sqrt(total_error / len(p))
kf = KFold(len(x), n_folds=10)
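# 10-fold cross-validation: refit the model on each training split and accumulate
# squared errors on the held-out fold. (KFold lives in sklearn.model_selection in newer
# scikit-learn releases; sklearn.cross_validation is the legacy module name used here.)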
err = 0
for train, test in kf:
model.fit(x[train], y[train])
    p = model.predict(x[test])
    e = p - y[test]
    err = err + np.dot(e, e)  # shapes now line up: both p and y[test] are 1-D
rmse_10cv = np.sqrt(err / len(x))
print('RMSE on training: {}'.format(rmse_train))
print('RMSE on 10-fold CV: {}'.format(rmse_10cv))
|
# Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from moonshot import Moonshot
from moonshot.commission import PerShareCommission
from quantrocket.fundamental import get_reuters_financials_reindexed_like
class HighMinusLow(Moonshot):
"""
Strategy that buys stocks with high book-to-market ratios and shorts
stocks with low book-to-market ratios.
Specifically:
- calculate book value per share
- rank securities by price-to-book ratio
- buy the TOP_N_PCT percent of stocks with the lowest P/B ratios and short the TOP_N_PCT
percent of stocks with the highest P/B ratios
- rebalance the portfolio according to REBALANCE_INTERVAL
"""
CODE = "hml"
TOP_N_PCT = 10 # Buy/sell the bottom/top decile
REBALANCE_INTERVAL = "M" # M = monthly; see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
def prices_to_signals(self, prices):
# calculate book value per share, defined as:
#
# (Total Assets - Total Liabilities) / Number of shares outstanding
#
# The COA codes for these metrics are 'ATOT' (Total Assets), 'LTLL' (Total
# Liabilities), and 'QTCO' (Total Common Shares Outstanding).
closes = prices.loc["Close"]
financials = get_reuters_financials_reindexed_like(closes, ["ATOT", "LTLL", "QTCO"])
tot_assets = financials.loc["ATOT"].loc["Amount"]
tot_liabilities = financials.loc["LTLL"].loc["Amount"]
shares_out = financials.loc["QTCO"].loc["Amount"]
book_values_per_share = (tot_assets - tot_liabilities)/shares_out
# Calculate and rank by price-to-book ratio
pb_ratios = closes/book_values_per_share
highest_pb_ratio_ranks = pb_ratios.rank(axis=1, ascending=False, pct=True)
lowest_pb_ratio_ranks = pb_ratios.rank(axis=1, ascending=True, pct=True)
top_n_pct = self.TOP_N_PCT / 100
# Get long and short signals and convert to 1, 0, -1
longs = (lowest_pb_ratio_ranks <= top_n_pct)
shorts = (highest_pb_ratio_ranks <= top_n_pct)
longs = longs.astype(int)
shorts = -shorts.astype(int)
# Combine long and short signals
signals = longs.where(longs == 1, shorts)
# Resample using the rebalancing interval.
# Keep only the last signal of the month, then fill it forward
signals = signals.resample(self.REBALANCE_INTERVAL).last()
signals = signals.reindex(closes.index, method="ffill")
return signals
def signals_to_target_weights(self, signals, prices):
weights = self.allocate_equal_weights(signals)
return weights
def target_weights_to_positions(self, weights, prices):
# Enter the position in the period/day after the signal
return weights.shift()
def positions_to_gross_returns(self, positions, prices):
# We'll enter on the open, so our return is today's open to
# tomorrow's open
opens = prices.loc["Open"]
gross_returns = opens.pct_change() * positions.shift()
return gross_returns
class USStockCommission(PerShareCommission):
IB_COMMISSION_PER_SHARE = 0.005
class HighMinusLowAmex(HighMinusLow):
CODE = "hml-amex"
DB = "amex-1d"
COMMISSION_CLASS = USStockCommission
|
# MIT License
# Copyright (c) 2019 Georgios Papachristou
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
import json
import logging
from server.settings import IPSTACK_API
from server.skills.collection.internet import InternetSkills
from server.skills.skill import AssistantSkill
class LocationSkill(AssistantSkill):
@classmethod
def get_current_location(cls, **kwargs):
location_results = cls.get_location()
if location_results:
city, latitude, longitude = location_results
cls.response("You are in {0}".format(city))
@classmethod
def get_location(cls):
try:
send_url = "http://api.ipstack.com/check?access_key=" + IPSTACK_API['key']
geo_req = requests.get(send_url)
geo_json = json.loads(geo_req.text)
latitude = geo_json['latitude']
longitude = geo_json['longitude']
city = geo_json['city']
return city, latitude, longitude
except Exception as e:
if InternetSkills.internet_availability():
                # If there is an error but the internet connection is good, then the location API has a problem
cls.console(error_log=e)
logging.debug("Unable to get current location with error message: {0}".format(e))
return None
|
from .config import add_posenet_config
from .backbone import *
from .checkpoint import *
from .proposal_generator import *
from .roi_heads import *
|
from view.gui.application_settings import initialize_app_settings
from view.gui.flags_search import get_flags_index, query
from view.python_core.flags import FlagsManager
import pprint
def index_creation_test():
"""
Testing creation of flags index
"""
initialize_app_settings()
flags = FlagsManager()
ix = get_flags_index(flags)
return ix
def query_tests():
"""
Testing querying index
"""
ix = index_creation_test()
pprint.pprint(query(index=ix, query_str="movie"))
pprint.pprint(query(index=ix, query_str="color"))
pprint.pprint(query(index=ix, query_str="mv"))
pprint.pprint(query(index=ix, query_str="threshold"))
|
"""
********************************************************************************
compas_rhino.artists
********************************************************************************
.. currentmodule:: compas_rhino.artists
Artists for visualising (painting) COMPAS objects in Rhino.
Primitive Artists
=================
.. autosummary::
:toctree: generated/
:nosignatures:
PointArtist
LineArtist
FrameArtist
Shape Artists
=============
.. autosummary::
:toctree: generated/
:nosignatures:
Data Structure Artists
======================
.. autosummary::
:toctree: generated/
:nosignatures:
MeshArtist
NetworkArtist
VolMeshArtist
"""
from __future__ import absolute_import
from ._artist import _Artist
from ._primitiveartist import * # noqa: F401 F403
from .pointartist import PointArtist
from .lineartist import LineArtist
from .frameartist import FrameArtist
from .artist import * # noqa: F401 F403
from .meshartist import * # noqa: F401 F403
from .networkartist import * # noqa: F401 F403
from .volmeshartist import * # noqa: F401 F403
from compas.geometry import Frame
from compas.geometry import Line
from compas.geometry import Point
_Artist.register(Point, PointArtist)
_Artist.register(Frame, FrameArtist)
_Artist.register(Line, LineArtist)
__all__ = [name for name in dir() if not name.startswith('_')]
|
def nth_smallest(arr, pos):
    # The original body was missing; this is a minimal completion that assumes
    # pos is 1-indexed and returns the pos-th smallest element of arr.
    return sorted(arr)[pos - 1]
|
import pytest
from eth_abi.abi import (
encode_single,
)
from ..common.unit import (
CORRECT_SINGLE_ENCODINGS,
)
@pytest.mark.parametrize(
'typ,python_value,_1,single_type_encoding,_2',
CORRECT_SINGLE_ENCODINGS,
)
def test_encode_single(typ, python_value, _1, single_type_encoding, _2):
actual = encode_single(typ, python_value)
assert actual == single_type_encoding
|
import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
H, W, K = na()
grid = []
for _ in range(H):
grid.append(ns())
total = 0
for i in range(H):
for j in range(W):
if grid[i][j] == '#':
total += 1
ans = 0
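# Brute force over painted rows/columns: for every subset of rows (bit_h) and every
# subset of columns (bit_w), cnt + cnt2 is the number of '#' cells covered (the column
# pass skips rows already counted), and we count the choices that leave exactly K cells.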
for bit_h in range(1 << H):
cnt = 0
for i in range(H):
if bit_h & 1 << i:
for j in range(W):
if grid[i][j] == '#':
cnt += 1
for bit_w in range(1 << W):
cnt2 = 0
for i in range(W):
if bit_w & 1 << i:
for j in range(H):
if bit_h & 1 << j:
continue
if grid[j][i] == '#':
cnt2 += 1
if total - (cnt + cnt2) == K:
ans += 1
print(ans)
|
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
## loads and validates input unit commitment data
from pyomo.environ import *
import os.path
import egret.data.model_data as md
def create_ModelData(dat_file):
'''
Create a ModelData object from a prescient dat file
Parameters
----------
dat_file : str
Path to prescient *.dat file
Returns
-------
egret.model_data.ModelData
Returns a ModelData object with the dat file data specified
'''
return md.ModelData(create_model_data_dict(dat_file))
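# Usage sketch (hypothetical dat-file path; assumes the ModelData object exposes its
# underlying dictionary via the `.data` attribute):
#   model_data = create_ModelData("uc_instance.dat")
#   print(model_data.data['system']['time_period_length_minutes'])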
def create_model_data_dict(dat_file):
abstract_params = AbstractModel()
load_basic_data(abstract_params)
params = abstract_params.create_instance(dat_file)
md_dict = md.ModelData.empty_model_data_dict()
elements = md_dict['elements']
system = md_dict['system']
system['time_indices'] = list(str(t) for t in params.TimePeriods)
system['time_period_length_minutes'] = value(params.TimePeriodLengthMinutes)
system['load_mismatch_cost'] = value(params.LoadMismatchPenalty)
system['reserve_shortfall_cost'] = value(params.ReserveShortfallPenalty)
## These UC param files have the baseMVA factored out
system['baseMVA'] = 1.
bus_dict = dict()
set_reference = True
gen_bus_dict = dict()
renewable_gen_bus_dict = dict()
storage_bus_dict = dict()
for b in sorted(params.Buses):
if set_reference:
system['reference_bus'] = b
system['reference_bus_angle'] = 0.0
set_reference = False
bus_dict[b] = {}
for g in params.ThermalGeneratorsAtBus[b]:
gen_bus_dict[g] = b
for n in params.NondispatchableGeneratorsAtBus[b]:
renewable_gen_bus_dict[n] = b
for s in params.StorageAtBus[b]:
storage_bus_dict[s] = b
elements['bus'] = bus_dict
load_dict = dict()
for b in sorted(params.Buses):
l_d = { 'bus' : b,
'in_service': True,
'p_load':
{'data_type':'time_series',
'values': { str(t) : params.Demand[b,t] for t in params.TimePeriods }
}
}
load_dict[b] = l_d
elements['load'] = load_dict
reserve_dict = { str(t) : value(params.ReserveRequirement[t]) for t in params.TimePeriods }
system['reserve_requirement'] = { 'data_type':'time_series', 'values': reserve_dict }
branch_dict = dict()
for l in sorted(params.TransmissionLines):
b_d = { 'from_bus' : params.BusFrom[l],
'to_bus' : params.BusTo[l],
'reactance' : params.Impedence[l],
'rating_long_term' : params.ThermalLimit[l],
'rating_short_term' : params.ThermalLimit[l],
'rating_emergency' : params.ThermalLimit[l],
'in_service' : True
}
branch_dict[l] = b_d
elements['branch'] = branch_dict
interface_dict = dict()
for i in sorted(params.Interfaces):
        i_d = { 'interface_lines' : list(params.InterfaceLines[i]),
                'interface_from_limit': params.InterfaceFromLimit[i],
                'interface_to_limit': params.InterfaceToLimit[i],
              }
interface_dict[i] = i_d
elements['interface'] = interface_dict
zone_dict = dict()
for z in sorted(params.ReserveZones):
        reserve_dict = { str(t) : params.ZonalReserveRequirement[z,t] for t in params.TimePeriods }
z_d = { 'reserve_requirement' : {'data_type': 'time_series', 'values' : reserve_dict } }
zone_dict[z] = z_d
elements['zone'] = zone_dict
gen_dict = dict()
for g in sorted(params.ThermalGenerators):
g_d = { 'generator_type':'thermal', }
g_d['bus'] = gen_bus_dict[g]
g_d['fuel'] = params.ThermalGeneratorType[g]
g_d['fast_start'] = params.QuickStart[g]
g_d['fixed_commitment'] = (1 if params.MustRun[g] else None)
g_d['in_service'] = True
g_d['zone'] = params.ReserveZoneLocation[g]
g_d['failure_rate'] = params.FailureProbability[g]
g_d['p_min'] = params.MinimumPowerOutput[g]
g_d['p_max'] = params.MaximumPowerOutput[g]
g_d['ramp_up_60min'] = params.NominalRampUpLimit[g]
g_d['ramp_down_60min'] = params.NominalRampDownLimit[g]
g_d['startup_capacity'] = params.StartupRampLimit[g]
g_d['shutdown_capacity'] = params.ShutdownRampLimit[g]
g_d['min_up_time'] = params.MinimumUpTime[g]
g_d['min_down_time'] = params.MinimumDownTime[g]
g_d['initial_status'] = params.UnitOnT0State[g]
g_d['initial_p_output'] = params.PowerGeneratedT0[g]
g_d['startup_cost'] = list(zip(params.StartupLags[g],params.StartupCosts[g]))
g_d['shutdown_cost'] = params.ShutdownFixedCost[g]
p_cost = {'data_type' : 'cost_curve' }
if value(params.PiecewiseType) == "NoPiecewise":
p_cost['cost_curve_type'] = 'polynomial'
p_cost['values'] = { 0 : params.ProductionCostA0[g],
1 : params.ProductionCostA1[g],
2 : params.ProductionCostA2[g],
}
else:
p_cost['cost_curve_type'] = 'piecewise'
p_cost['values'] = list(zip(params.CostPiecewisePoints[g], params.CostPiecewiseValues[g]))
g_d['p_cost'] = p_cost
## NOTE: generators need unique names
gen_dict[g+'_t'] = g_d
for g in sorted(params.AllNondispatchableGenerators):
g_d = { 'generator_type':'renewable', }
g_d['bus'] = renewable_gen_bus_dict[g]
g_d['in_service'] = True
g_d['fuel'] = params.NondispatchableGeneratorType[g]
g_d['p_min'] = { 'data_type':'time_series',
'values': { str(t) : params.MinNondispatchablePower[g,t] for t in params.TimePeriods }
}
g_d['p_max'] = { 'data_type':'time_series',
'values': { str(t) : params.MaxNondispatchablePower[g,t] for t in params.TimePeriods }
}
## NOTE: generators need unique names
gen_dict[g+'_r'] = g_d
elements['generator'] = gen_dict
storage_dict = {}
for s in sorted(params.Storage):
s_d = dict()
s_d['bus'] = storage_bus_dict[s]
s_d['min_discharge_rate'] = params.MinimumPowerOutputStorage[s]
s_d['max_discharge_rate'] = params.MaximumPowerOutputStorage[s]
s_d['min_charge_rate'] = params.MinimumPowerInputStorage[s]
s_d['max_charge_rate'] = params.MaximumPowerInputStorage[s]
s_d['ramp_up_output_60min'] = params.NominalRampUpLimitStorageOutput[s]
s_d['ramp_down_output_60min'] = params.NominalRampDownLimitStorageOutput[s]
s_d['ramp_up_input_60min'] = params.NominalRampUpLimitStorageInput[s]
s_d['ramp_down_input_60min'] = params.NominalRampDownLimitStorageInput[s]
s_d['energy_capacity'] = params.MaximumEnergyStorage[s]
s_d['minimum_state_of_charge'] = params.MinimumSocStorage[s]
s_d['charge_efficiency'] = params.InputEfficiencyEnergy[s]
s_d['discharge_efficiency'] = params.OutputEfficiencyEnergy[s]
s_d['retention_rate_60min'] = params.RetentionRate[s]
s_d['initial_state_of_charge'] = params.StorageSocOnT0[s]
s_d['initial_discharge_rate'] = params.StoragePowerOutputOnT0[s]
s_d['initial_charge_rate'] = params.StoragePowerInputOnT0[s]
storage_dict[s] = s_d
elements['storage'] = storage_dict
return md_dict
def _verify_must_run_t0_state_consistency(model):
    # ensure that the must-run flag and the t0 state are consistent. in particular, make
    # sure that the unit has satisfied its minimum down time condition if UnitOnT0 is negative.
def verify_must_run_t0_state_consistency_rule(m, g):
if value(m.MustRun[g]):
t0_state = value(m.UnitOnT0State[g])
if t0_state < 0:
min_down_time = value(m.MinimumDownTime[g])
if abs(t0_state) < min_down_time:
print("DATA ERROR: The generator %s has been flagged as must-run, but its T0 state=%d is inconsistent with its minimum down time=%d" % (g, t0_state, min_down_time))
return False
return True
model.VerifyMustRunT0StateConsistency = BuildAction(model.ThermalGenerators, rule=verify_must_run_t0_state_consistency_rule)
def _populate_reserve_requirements(model):
def populate_reserve_requirements_rule(m):
reserve_factor = value(m.ReserveFactor)
if reserve_factor > 0.0:
for t in m.TimePeriods:
demand = sum(value(m.Demand[b,t]) for b in sorted(m.Buses))
m.ReserveRequirement[t] = (reserve_factor * demand)
model.PopulateReserveRequirements = BuildAction(rule=populate_reserve_requirements_rule)
def load_basic_data(model):
'''
This loads the model from a dat file
'''
warn_neg_load = False
#
# Parameters
#
##############################################
    # string identifiers for the set of buses.   #
##############################################
model.Buses = Set()
###################
# Load Zones #
###################
#Aggregated loads are distributed in the system based on load coefficient values
model.Zones = Set(initialize=['SingleZone'])
def buildBusZone(m):
an_element = next(m.Zones.__iter__())
if len(m.Zones) == 1 and an_element == 'SingleZone':
for b in m.Buses:
m.BusZone[b] = an_element
else:
print("Multiple buses is not supported by buildBusZone in ReferenceModel.py -- someone should fix that!")
exit(1)
model.BusZone = Param(model.Buses, mutable=True)
model.BuildBusZone = BuildAction(rule=buildBusZone)
model.LoadCoefficient = Param(model.Buses, default=0.0)
def total_load_coefficient_per_zone(m, z):
return sum(m.LoadCoefficient[b] for b in m.Buses if str(value(m.BusZone[b]))==str(z))
model.TotalLoadCoefficientPerZone = Param(model.Zones, initialize=total_load_coefficient_per_zone)
def load_factors_per_bus(m,b):
if (m.TotalLoadCoefficientPerZone[value(m.BusZone[b])] != 0.0):
return m.LoadCoefficient[b]/m.TotalLoadCoefficientPerZone[value(m.BusZone[b])]
else:
return 0.0
model.LoadFactor = Param(model.Buses, initialize=load_factors_per_bus, within=NonNegativeReals)
################################
model.StageSet = Set(ordered=True)
# IMPORTANT: The stage set must be non-empty - otherwise, zero costs result.
def check_stage_set(m):
return (len(m.StageSet) != 0)
model.CheckStageSet = BuildCheck(rule=check_stage_set)
    ## for backwards compatibility (for now)
model.TimePeriodLength = Param(default=1, within=PositiveReals)
def time_period_length_validator(m):
assert(m.TimePeriodLength == 1)
model.TimePeriodLengthIsOne = BuildAction(rule=time_period_length_validator)
    ## IN HOURS; assert that this must be a positive number
model.TimePeriodLengthHours = Param(default=1.0, within=PositiveReals)
## in minutes, assert that this must be a positive integer
model.TimePeriodLengthMinutes = Param(default=60, within=PositiveIntegers)
## sync the two time period lengths depending on the user's specification
def harmonize_times(m):
## the user can only specify a non-default for
## one of the time period lengths
assert( (value(m.TimePeriodLengthHours) == 1.0) or (value(m.TimePeriodLengthMinutes) == 60) )
if value(m.TimePeriodLengthHours) != 1.0:
m.TimePeriodLengthMinutes = int(round(value(m.TimePeriodLengthHours)*60))
if value(m.TimePeriodLengthMinutes) != 60:
m.TimePeriodLengthHours = value(m.TimePeriodLengthMinutes)/60.
model.HarmonizeTimes = BuildAction(rule=harmonize_times)
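    # Example: setting TimePeriodLengthMinutes = 30 (hours left at the default 1.0) yields
    # TimePeriodLengthHours = 0.5; setting TimePeriodLengthHours = 2.0 yields 120 minutes.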
model.NumTimePeriods = Param(within=PositiveIntegers, )
model.InitialTime = Param(within=PositiveIntegers, default=1)
model.TimePeriods = RangeSet(model.InitialTime, model.NumTimePeriods)
# the following sets must must come from the data files or from an initialization function that uses
# a parameter that tells you when the stages end (and that thing needs to come from the data files)
model.CommitmentTimeInStage = Set(model.StageSet, within=model.TimePeriods)
model.GenerationTimeInStage = Set(model.StageSet, within=model.TimePeriods)
##############################################
# Network definition (S)
##############################################
## for older .dat files
model.NumTransmissionLines = Param(default=0)
    def num_transmission_lines_validator(m):
        assert(m.NumTransmissionLines == 0)
    model.NumTransmissionLinesIsZero = BuildAction(rule=num_transmission_lines_validator)
model.TransmissionLines = Set()
model.BusFrom = Param(model.TransmissionLines)
model.BusTo = Param(model.TransmissionLines)
model.Impedence = Param(model.TransmissionLines, within=NonNegativeReals)
model.ThermalLimit = Param(model.TransmissionLines) # max flow across the line
## Interfaces
    ## NOTE: Lines in interfaces should all go "from" the
    ## other network "to" the modeled network
model.Interfaces = Set()
model.InterfaceLines = Set(model.Interfaces, within=model.TransmissionLines)
model.InterfaceFromLimit = Param(model.Interfaces, within=NonNegativeReals)
model.InterfaceToLimit = Param(model.Interfaces, within=NonNegativeReals)
##########################################################
    # string identifiers for the set of thermal generators. #
# and their locations. (S) #
##########################################################
model.ThermalGenerators = Set()
model.ThermalGeneratorsAtBus = Set(model.Buses)
# thermal generator types must be specified as 'N', 'C', 'G', and 'H',
# with the obvious interpretation.
# TBD - eventually add a validator.
model.ThermalGeneratorType = Param(model.ThermalGenerators, within=Any, default='C')
def verify_thermal_generator_buses_rule(m, g):
for b in m.Buses:
if g in m.ThermalGeneratorsAtBus[b]:
return
print("DATA ERROR: No bus assigned for thermal generator=%s" % g)
assert(False)
model.VerifyThermalGeneratorBuses = BuildAction(model.ThermalGenerators, rule=verify_thermal_generator_buses_rule)
model.QuickStart = Param(model.ThermalGenerators, within=Boolean, default=False)
# optionally force a unit to be on.
model.MustRun = Param(model.ThermalGenerators, within=Boolean, default=False)
def nd_gen_init(m,b):
return []
model.NondispatchableGeneratorsAtBus = Set(model.Buses, initialize=nd_gen_init)
def NonNoBus_init(m):
retval = set()
for b in m.Buses:
retval = retval.union([gen for gen in m.NondispatchableGeneratorsAtBus[b]])
return retval
model.AllNondispatchableGenerators = Set(initialize=NonNoBus_init)
model.NondispatchableGeneratorType = Param(model.AllNondispatchableGenerators, within=Any, default='W')
######################
# Reserve Zones #
######################
# Generators are grouped in zones to provide zonal reserve requirements. #
# All generators can contribute to global reserve requirements #
model.ReserveZones = Set()
model.ZonalReserveRequirement = Param(model.ReserveZones, model.TimePeriods, default=0.0, within=NonNegativeReals)
model.ReserveZoneLocation = Param(model.ThermalGenerators, default='None')
#################################################################
# the global system demand, for each time period. units are MW. #
# demand as at busses (S) so total demand is derived #
#################################################################
# at the moment, we allow for negative demand. this is probably
# not a good idea, as "stuff" representing negative demand - including
# renewables, interchange schedules, etc. - should probably be modeled
# explicitly.
# Demand can also be given by Zones
model.DemandPerZone = Param(model.Zones, model.TimePeriods, default=0.0, )
# Convert demand by zone to demand by bus
def demand_per_bus_from_demand_per_zone(m,b,t):
return m.DemandPerZone[value(m.BusZone[b]), t] * m.LoadFactor[b]
model.Demand = Param(model.Buses, model.TimePeriods, initialize=demand_per_bus_from_demand_per_zone, )
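    # Example: a zonal demand of 100 MW and a bus LoadFactor of 0.25 give that bus a
    # demand of 25 MW for the period.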
def calculate_total_demand(m, t):
return sum(value(m.Demand[b,t]) for b in sorted(m.Buses))
model.TotalDemand = Param(model.TimePeriods, initialize=calculate_total_demand)
# at this point, a user probably wants to see if they have negative demand.
def warn_about_negative_demand_rule(m, b, t):
this_demand = value(m.Demand[b,t])
if this_demand < 0.0:
print("***WARNING: The demand at bus="+str(b)+" for time period="+str(t)+" is negative - value="+str(this_demand)+"; model="+str(m.name)+".")
if warn_neg_load:
model.WarnAboutNegativeDemand = BuildAction(model.Buses, model.TimePeriods, rule=warn_about_negative_demand_rule)
##################################################################
# the global system reserve, for each time period. units are MW. #
# NOTE: We don't have per-bus / zonal reserve requirements. they #
# would be easy to add. (dlw oct 2013: this comment is incorrect, I think) #
##################################################################
# we provide two mechanisms to specify reserve requirements. the
# first is a scaling factor relative to demand, on a per time
# period basis. the second is an explicit parameter that specifies
    # the reserve requirement on a per-time-period basis. if the
# reserve requirement factor is > 0, then it is used to populate
# the reserve requirements. otherwise, the user-supplied reserve
# requirements are used.
model.ReserveFactor = Param(within=Reals, default=-1.0, )
model.ReserveRequirement = Param(model.TimePeriods, within=NonNegativeReals, default=0.0, mutable=True )
_populate_reserve_requirements(model)
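    # Example: with ReserveFactor = 0.05 and 1000 MW of total demand in a period, the
    # populated reserve requirement for that period is 50 MW; with the default factor of
    # -1.0 the explicitly supplied ReserveRequirement values are used instead.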
##############################################################
# failure probability for each generator, in any given hour. #
# not used within the model itself at present, but rather #
# used by scripts that read / manipulate the model. #
##############################################################
def probability_failure_validator(m, v, g):
return v >= 0.0 and v <= 1.0
model.FailureProbability = Param(model.ThermalGenerators, validate=probability_failure_validator, default=0.0)
#####################################################################################
# a binary indicator as to whether or not each generator is on-line during a given #
# time period. intended to represent a sampled realization of the generator failure #
# probability distributions. strictly speaking, we interpret this parameter value #
# as indicating whether or not the generator is contributing (injecting) power to #
# the PowerBalance constraint. this parameter is not intended to be used in the #
# context of ramping or time up/down constraints. #
#####################################################################################
model.GeneratorForcedOutage = Param(model.ThermalGenerators * model.TimePeriods, within=Binary, default=False)
####################################################################################
# minimum and maximum generation levels, for each thermal generator. units are MW. #
# could easily be specified on a per-time period basis, but are not currently. #
####################################################################################
# you can enter generator limits either once for the generator or for each period (or just take 0)
model.MinimumPowerOutput = Param(model.ThermalGenerators, within=NonNegativeReals, default=0.0)
def maximum_power_output_validator(m, v, g):
return v >= value(m.MinimumPowerOutput[g])
model.MaximumPowerOutput = Param(model.ThermalGenerators, within=NonNegativeReals, validate=maximum_power_output_validator, default=0.0)
# wind is similar, but max and min will be equal for non-dispatchable wind
model.MinNondispatchablePower = Param(model.AllNondispatchableGenerators, model.TimePeriods, within=NonNegativeReals, default=0.0, )
def maximum_nd_output_validator(m, v, g, t):
return v >= value(m.MinNondispatchablePower[g,t])
model.MaxNondispatchablePower = Param(model.AllNondispatchableGenerators, model.TimePeriods, within=NonNegativeReals, default=0.0, validate=maximum_nd_output_validator)
#################################################
# generator ramp up/down rates. units are MW/h. #
# IMPORTANT: Generator ramp limits can exceed #
# the maximum power output, because it is the #
# ramp limit over an hour. If the unit can #
# fully ramp in less than an hour, then this #
# will occur. #
#################################################
# limits for normal time periods
model.NominalRampUpLimit = Param(model.ThermalGenerators, within=NonNegativeReals, )
model.NominalRampDownLimit = Param(model.ThermalGenerators, within=NonNegativeReals, )
# limits for time periods in which generators are brought on or off-line.
# must be no less than the generator minimum output.
def ramp_limit_validator(m, v, g):
return v >= m.MinimumPowerOutput[g]
## These defaults follow what is in most market manuals
## We scale this for the time period below
def startup_ramp_default(m, g):
return m.MinimumPowerOutput[g]+m.NominalRampUpLimit[g]/2.
def shutdown_ramp_default(m, g):
return m.MinimumPowerOutput[g]+m.NominalRampDownLimit[g]/2.
model.StartupRampLimit = Param(model.ThermalGenerators, within=NonNegativeReals, default=startup_ramp_default, validate=ramp_limit_validator, )
model.ShutdownRampLimit = Param(model.ThermalGenerators, within=NonNegativeReals, default=shutdown_ramp_default, validate=ramp_limit_validator, )
##########################################################################################################
# the minimum number of time periods that a generator must be on-line (off-line) once brought up (down). #
##########################################################################################################
model.MinimumUpTime = Param(model.ThermalGenerators, within=NonNegativeIntegers, default=0)
model.MinimumDownTime = Param(model.ThermalGenerators, within=NonNegativeIntegers, default=0)
#############################################
# unit on state at t=0 (initial condition). #
#############################################
# if positive, the number of hours prior to (and including) t=0 that the unit has been on.
# if negative, the number of hours prior to (and including) t=0 that the unit has been off.
# the value cannot be 0, by definition.
def t0_state_nonzero_validator(m, v, g):
return v != 0
model.UnitOnT0State = Param(model.ThermalGenerators, within=Reals, validate=t0_state_nonzero_validator, )
def t0_unit_on_rule(m, g):
return int(value(m.UnitOnT0State[g]) >= 1)
model.UnitOnT0 = Param(model.ThermalGenerators, within=Binary, initialize=t0_unit_on_rule, )
_verify_must_run_t0_state_consistency(model)
####################################################################
# generator power output at t=0 (initial condition). units are MW. #
####################################################################
def between_limits_validator(m, v, g):
status = (v <= (value(m.MaximumPowerOutput[g]) * value(m.UnitOnT0[g])) and v >= (value(m.MinimumPowerOutput[g]) * value(m.UnitOnT0[g])))
        if not status:
            print("Failed to validate PowerGeneratedT0 value for g="+str(g)+"; new value="+str(v)+", UnitOnT0="+str(value(m.UnitOnT0[g])))
        return status
model.PowerGeneratedT0 = Param(model.ThermalGenerators, within=NonNegativeReals, validate=between_limits_validator, )
###############################################
# startup cost parameters for each generator. #
###############################################
# startup costs are conceptually expressed as pairs (x, y), where x represents the number of hours that a unit has been off and y represents
# the cost associated with starting up the unit after being off for x hours. these are broken into two distinct ordered sets, as follows.
def startup_lags_init_rule(m, g):
return [value(m.MinimumDownTime[g])]
model.StartupLags = Set(model.ThermalGenerators, within=NonNegativeIntegers, ordered=True, initialize=startup_lags_init_rule) # units are hours / time periods.
def startup_costs_init_rule(m, g):
return [0.0]
model.StartupCosts = Set(model.ThermalGenerators, within=NonNegativeReals, ordered=True, initialize=startup_costs_init_rule) # units are $.
# startup lags must be monotonically increasing...
def validate_startup_lags_rule(m, g):
startup_lags = list(m.StartupLags[g])
if len(startup_lags) == 0:
print("DATA ERROR: The number of startup lags for thermal generator="+str(g)+" must be >= 1.")
assert(False)
if startup_lags[0] != value(m.MinimumDownTime[g]):
print("DATA ERROR: The first startup lag for thermal generator="+str(g)+" must be equal the minimum down time="+str(value(m.MinimumDownTime[g]))+".")
assert(False)
for i in range(0, len(startup_lags)-1):
if startup_lags[i] >= startup_lags[i+1]:
print("DATA ERROR: Startup lags for thermal generator="+str(g)+" must be monotonically increasing.")
assert(False)
model.ValidateStartupLags = BuildAction(model.ThermalGenerators, rule=validate_startup_lags_rule)
# while startup costs must be monotonically non-decreasing!
def validate_startup_costs_rule(m, g):
startup_costs = list(m.StartupCosts[g])
        for i in range(0, len(startup_costs)-1):
if startup_costs[i] > startup_costs[i+1]:
print("DATA ERROR: Startup costs for thermal generator="+str(g)+" must be monotonically non-decreasing.")
assert(False)
model.ValidateStartupCosts = BuildAction(model.ThermalGenerators, rule=validate_startup_costs_rule)
def validate_startup_lag_cost_cardinalities(m, g):
if len(m.StartupLags[g]) != len(m.StartupCosts[g]):
print("DATA ERROR: The number of startup lag entries ("+str(len(m.StartupLags[g]))+") for thermal generator="+str(g)+" must equal the number of startup cost entries ("+str(len(m.StartupCosts[g]))+")")
assert(False)
model.ValidateStartupLagCostCardinalities = BuildAction(model.ThermalGenerators, rule=validate_startup_lag_cost_cardinalities)
# for purposes of defining constraints, it is useful to have a set to index the various startup costs parameters.
    # entries are 1-based indices, because they are used as indices into Pyomo sets - which use 1-based indexing.
def startup_cost_indices_init_rule(m, g):
return range(1, len(m.StartupLags[g])+1)
model.StartupCostIndices = Set(model.ThermalGenerators, within=NonNegativeIntegers, initialize=startup_cost_indices_init_rule)
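    # Illustrative sketch only -- never called by the model. One plausible way the
    # (StartupLags, StartupCosts) pairs above could be used: pick the cost of the largest
    # lag that does not exceed the number of hours the unit has been off-line. The
    # constraints that actually consume StartupCostIndices are defined elsewhere.
    def _example_startup_cost(m, g, hours_down):
        lags = list(m.StartupLags[g])    # monotonically increasing; first entry == MinimumDownTime
        costs = list(m.StartupCosts[g])  # monotonically non-decreasing; same length as lags
        chosen = costs[0]
        for lag, cost in zip(lags, costs):
            if hours_down >= lag:
                chosen = cost
        return chosen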
##################################################################################
# shutdown cost for each generator. in the literature, these are often set to 0. #
##################################################################################
model.ShutdownFixedCost = Param(model.ThermalGenerators, within=NonNegativeReals, default=0.0) # units are $.
## BEGIN PRODUCTION COST
## NOTE: For better or worse, we handle scaling this to the time period length in the objective function.
## In particular, this is done in objective.py.
##################################################################################################################
# production cost coefficients (for the quadratic) a0=constant, a1=linear coefficient, a2=quadratic coefficient. #
##################################################################################################################
model.ProductionCostA0 = Param(model.ThermalGenerators, default=0.0) # units are $/hr (or whatever the time unit is).
model.ProductionCostA1 = Param(model.ThermalGenerators, default=0.0) # units are $/MWhr.
model.ProductionCostA2 = Param(model.ThermalGenerators, default=0.0) # units are $/(MWhr^2).
# the parameters below are populated if cost curves are specified as linearized heat rate increment segments.
#
# CostPiecewisePoints represents the power output levels defining the segment boundaries.
# these *must* include the minimum and maximum power output points - a validation check
    # is performed below.
#
# CostPiecewiseValues are the absolute heat rates / costs associated with the corresponding
# power output levels. the precise interpretation of whether a value is a heat rate or a cost
# depends on the value of the FuelCost parameter, specified below.
def piecewise_type_validator(m, v):
return (v == "NoPiecewise") or (v == "Absolute")
    def piecewise_type_init(m):
        # if any generator has a (nonzero) quadratic cost curve, no piecewise data is expected
        for g in m.ThermalGenerators:
            if not (m.ProductionCostA0[g] == 0.0 and m.ProductionCostA1[g] == 0.0 and m.ProductionCostA2[g] == 0.0):
                return "NoPiecewise"
        return "Absolute"
model.PiecewiseType = Param(validate=piecewise_type_validator,initialize=piecewise_type_init, )
def piecewise_init(m, g):
return []
model.CostPiecewisePoints = Set(model.ThermalGenerators, initialize=piecewise_init, ordered=True, within=NonNegativeReals)
model.CostPiecewiseValues = Set(model.ThermalGenerators, initialize=piecewise_init, ordered=True, within=NonNegativeReals)
# a check to ensure that the cost piecewise point parameter was correctly populated.
# these are global checks, which cannot be performed as a set validation (which
# operates on a single element at a time).
def validate_cost_piecewise_points_and_values_rule(m, g):
if value(m.PiecewiseType) == "NoPiecewise":
# if there isn't any piecewise data specified, we shouldn't find any.
if len(m.CostPiecewisePoints[g]) > 0:
print("DATA ERROR: The PiecewiseType parameter was set to NoPiecewise, but piecewise point data was specified!")
return False
# if there isn't anything to validate and we didn't expect piecewise
# points, we can safely skip the remaining validation steps.
return True
else:
# if the user said there was going to be piecewise data and none was
# supplied, they should be notified as to such.
if len(m.CostPiecewisePoints[g]) == 0:
print("DATA ERROR: The PiecewiseType parameter was set to something other than NoPiecewise, but no piecewise point data was specified!")
return False
# per the requirement below, there have to be at least two piecewise points if there are any.
min_output = value(m.MinimumPowerOutput[g])
max_output = value(m.MaximumPowerOutput[g])
points = list(m.CostPiecewisePoints[g])
if min_output not in points:
print("DATA ERROR: Cost piecewise points for generator g="+str(g)+" must contain the minimum output level="+str(min_output))
return False
if max_output not in points:
print("DATA ERROR: Cost piecewise points for generator g="+str(g)+" must contain the maximum output level="+str(max_output))
return False
return True
model.ValidateCostPiecewisePointsAndValues = BuildCheck(model.ThermalGenerators, rule=validate_cost_piecewise_points_and_values_rule)
# Sets the cost of fuel to the generator. Assert that this is 1.0 for this parser, for now
model.FuelCost = Param(model.ThermalGenerators, default=1.0)
def unit_fuel_cost(m,g):
return (m.FuelCost[g] == 1.0)
model.ValidateFuelCost = BuildCheck(model.ThermalGenerators, rule=unit_fuel_cost)
    # Minimum production cost (needed because the Piecewise constraint on ProductionCost
    # has to have a lower bound of 0, so the unit can cost 0 when off); this is added
    # back into the objective if a unit is on
def minimum_production_cost(m, g):
if len(m.CostPiecewisePoints[g]) > 1:
return m.CostPiecewiseValues[g].first() * m.FuelCost[g]
else:
return m.FuelCost[g] * \
(m.ProductionCostA0[g] + \
m.ProductionCostA1[g] * m.MinimumPowerOutput[g] + \
m.ProductionCostA2[g] * (m.MinimumPowerOutput[g]**2))
model.MinimumProductionCost = Param(model.ThermalGenerators, within=NonNegativeReals, initialize=minimum_production_cost, )
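    # Illustrative sketch only -- never called by the model. With PiecewiseType == "Absolute",
    # the production cost at a power level p could be read off the piecewise curve by linear
    # interpolation between consecutive (CostPiecewisePoints, CostPiecewiseValues) pairs,
    # scaled by FuelCost. The actual piecewise formulation lives in the objective code
    # (see the note above about objective.py) and may differ.
    def _example_piecewise_production_cost(m, g, p):
        points = list(m.CostPiecewisePoints[g])
        values_ = list(m.CostPiecewiseValues[g])
        for i in range(len(points) - 1):
            if points[i] <= p <= points[i + 1]:
                slope = (values_[i + 1] - values_[i]) / (points[i + 1] - points[i])
                return value(m.FuelCost[g]) * (values_[i] + slope * (p - points[i]))
        raise ValueError("power level p is outside the piecewise range for generator " + str(g))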
##############################################################################################
# number of pieces in the linearization of each generator's quadratic cost production curve. #
##############################################################################################
model.NumGeneratorCostCurvePieces = Param(within=PositiveIntegers, default=2, )
ModeratelyBigPenalty = 1e3
model.ReserveShortfallPenalty = Param(within=NonNegativeReals, default=ModeratelyBigPenalty, )
#########################################
# penalty costs for constraint violation #
#########################################
BigPenalty = 1e4
model.LoadMismatchPenalty = Param(within=NonNegativeReals, default=BigPenalty, )
## END PRODUCTION COST CALCULATIONS
#
# STORAGE parameters
#
model.Storage = Set()
model.StorageAtBus = Set(model.Buses, initialize=Set())
def verify_storage_buses_rule(m, s):
for b in m.Buses:
if s in m.StorageAtBus[b]:
return
print("DATA ERROR: No bus assigned for storage element=%s" % s)
assert(False)
model.VerifyStorageBuses = BuildAction(model.Storage, rule=verify_storage_buses_rule)
####################################################################################
# minimum and maximum power ratings, for each storage unit. units are MW. #
# could easily be specified on a per-time period basis, but are not currently. #
####################################################################################
# Storage power output >0 when discharging
model.MinimumPowerOutputStorage = Param(model.Storage, within=NonNegativeReals, default=0.0)
def maximum_power_output_validator_storage(m, v, s):
return v >= value(m.MinimumPowerOutputStorage[s])
model.MaximumPowerOutputStorage = Param(model.Storage, within=NonNegativeReals, validate=maximum_power_output_validator_storage, default=0.0)
#Storage power input >0 when charging
model.MinimumPowerInputStorage = Param(model.Storage, within=NonNegativeReals, default=0.0)
def maximum_power_input_validator_storage(m, v, s):
return v >= value(m.MinimumPowerInputStorage[s])
model.MaximumPowerInputStorage = Param(model.Storage, within=NonNegativeReals, validate=maximum_power_input_validator_storage, default=0.0)
###############################################
# storage ramp up/down rates. units are MW/h. #
###############################################
# ramp rate limits when discharging
model.NominalRampUpLimitStorageOutput = Param(model.Storage, within=NonNegativeReals)
model.NominalRampDownLimitStorageOutput = Param(model.Storage, within=NonNegativeReals)
# ramp rate limits when charging
model.NominalRampUpLimitStorageInput = Param(model.Storage, within=NonNegativeReals)
model.NominalRampDownLimitStorageInput = Param(model.Storage, within=NonNegativeReals)
####################################################################################
# minimum state of charge (SOC) and maximum energy ratings, for each storage unit. #
# units are MWh for energy rating and p.u. (i.e. [0,1]) for SOC #
####################################################################################
# you enter storage energy ratings once for each storage unit
model.MaximumEnergyStorage = Param(model.Storage, within=NonNegativeReals, default=0.0)
model.MinimumSocStorage = Param(model.Storage, within=PercentFraction, default=0.0)
################################################################################
# round trip efficiency for each storage unit given as a fraction (i.e. [0,1]) #
################################################################################
model.InputEfficiencyEnergy = Param(model.Storage, within=PercentFraction, default=1.0)
model.OutputEfficiencyEnergy = Param(model.Storage, within=PercentFraction, default=1.0)
model.RetentionRate = Param(model.Storage, within=PercentFraction, default=1.0) ## assumed to be %/hr
########################################################################
# end-point SOC for each storage unit. units are in p.u. (i.e. [0,1]) #
########################################################################
# end-point values are the SOC targets at the final time period. With no end-point constraints
# storage units will always be empty at the final time period.
model.EndPointSocStorage = Param(model.Storage, within=PercentFraction, default=0.5)
############################################################
# storage initial conditions: SOC, power output and input #
############################################################
def t0_storage_power_input_validator(m, v, s):
return (v >= value(m.MinimumPowerInputStorage[s])) and (v <= value(m.MaximumPowerInputStorage[s]))
def t0_storage_power_output_validator(m, v, s):
        return (v >= value(m.MinimumPowerOutputStorage[s])) and (v <= value(m.MaximumPowerOutputStorage[s]))
model.StoragePowerOutputOnT0 = Param(model.Storage, within=NonNegativeReals, validate=t0_storage_power_output_validator, default=0.0)
model.StoragePowerInputOnT0 = Param(model.Storage, within=NonNegativeReals, validate=t0_storage_power_input_validator, default=0.0)
model.StorageSocOnT0 = Param(model.Storage, within=PercentFraction, default=0.5)
return model
|
# pylint:disable=line-too-long
"""
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Tests of PyFunceble.engine.user_agent.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io///en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# pylint: enable=line-too-long
from unittest import TestCase
from unittest import main as launch_tests
import PyFunceble
class TestUserAgent(TestCase):
"""
Tests of PyFunceble.engine.user_agent
"""
# pylint: disable=unnecessary-lambda
def setUp(self):
"""
        Sets up everything needed for the tests.
"""
PyFunceble.load_config()
self.user_agent = PyFunceble.engine.UserAgent()
self.file_instance = PyFunceble.helpers.File(
PyFunceble.abstracts.Infrastructure.USER_AGENT_FILENAME
)
self.file_instance.delete()
def tearDown(self):
"""
        Cleans up everything after the tests.
"""
self.file_instance.delete()
def test_dumped_empty(self):
"""
Tests of the case that the dump is
empty.
"""
self.user_agent.dumped = {}
self.assertRaises(
PyFunceble.exceptions.UserAgentBrowserNotFound,
lambda: self.user_agent.get(),
)
def test_dumped_without_chosen_browser(self):
"""
Tests of the case that the chosen browser
        does not exist.
"""
del self.user_agent.dumped[PyFunceble.CONFIGURATION.user_agent.browser]
self.assertRaises(
PyFunceble.exceptions.UserAgentBrowserNotFound,
lambda: self.user_agent.get(),
)
def test_dumped_without_chosen_platform(self):
"""
Tests of the case that the chosen platform
        does not exist.
"""
del self.user_agent.dumped[PyFunceble.CONFIGURATION.user_agent.browser][
PyFunceble.CONFIGURATION.user_agent.platform
]
self.assertRaises(
PyFunceble.exceptions.UserAgentPlatformNotFound,
lambda: self.user_agent.get(),
)
def test_dumped_does_not_exists(self):
"""
Tests of the case that the chosen user agent
        does not exist.
"""
self.user_agent.dumped[PyFunceble.CONFIGURATION.user_agent.browser][
PyFunceble.CONFIGURATION.user_agent.platform
] = None
self.assertRaises(
PyFunceble.exceptions.UserAgentNotFound, lambda: self.user_agent.get(),
)
def test_dumped_output(self):
"""
Tests of the normal case.
"""
expected = "Hello, World!"
self.user_agent.dumped[PyFunceble.CONFIGURATION.user_agent.browser][
PyFunceble.CONFIGURATION.user_agent.platform
] = "Hello, World!"
self.assertEqual(expected, self.user_agent.get())
def test_custom_output(self):
"""
        Tests of the case that the end-user gives us a custom user agent.
"""
expected = self.user_agent.dumped[PyFunceble.CONFIGURATION.user_agent.browser][
PyFunceble.CONFIGURATION.user_agent.platform
]
actual = self.user_agent.get()
self.assertEqual(expected, actual)
if __name__ == "__main__":
launch_tests()
|
import json
class DashboardEncoder(json.JSONEncoder):
"""Encode dashboard objects."""
def default(self, obj):
to_json_data = getattr(obj, "to_json_data", None)
if to_json_data:
return to_json_data()
return json.JSONEncoder.default(self, obj)
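# Minimal usage sketch (not part of the original module): serialize any grafanalib-style
# object that exposes a to_json_data() method. Plain json.dumps(..., cls=DashboardEncoder)
# works the same way; this wrapper only bundles the keyword argument.
def dashboard_to_json(obj, **kwargs):
    return json.dumps(obj, cls=DashboardEncoder, **kwargs)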
|
from flask import Flask
from flask import jsonify
from providentia.core.brain import ProvidentiaBrain
app = Flask(__name__)
@app.route('/keywords')
def keywords():
brain = ProvidentiaBrain()
data = brain.fetch_top_keywords()
top_keywords = data.get('top_keywords',[])
return jsonify(data={
'entries':list(top_keywords),
'count':len(top_keywords),
'keywords_by_document':list(data.get('keywords_by_document',[]))
})
@app.route('/')
def top_keywords():
brain = ProvidentiaBrain()
data = brain.winsdom()
clusters = data.get('clusters', [])
titles = data.get('titles', [])
k = data.get('keywords',[])
c = {}
keys = []
for key in clusters:
documents = []
for id in clusters[key]:
            documents.append({'id':id,'title':titles[id]})
c[key] = documents
return jsonify(data={ 'keywords':{'entries':k, 'count':len(k)},
'clusters':c,
'titles':{'entries':titles, 'count':len(titles)}})
if __name__ == '__main__':
app.debug = True
app.run()
|
import os
from keras.models import Sequential, load_model
from keras.layers import Activation, Embedding, Dense, TimeDistributed, LSTM, Dropout
MODEL_DIR = './model'
def save_weights(epoch, model):
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
model.save_weights(os.path.join(MODEL_DIR, 'epoch.{}.h5'.format(epoch)))
def load_weights(epoch, model):
model.load_weights(os.path.join(MODEL_DIR, 'epoch.{}.h5'.format(epoch)))
def build_model(batch_size, seq_length, vocab_size):
model = Sequential()
model.add(Embedding(vocab_size, 512, batch_input_shape=(batch_size, seq_length)))
for i in range(3):
model.add(LSTM(256, return_sequences=True, stateful=True))
model.add(TimeDistributed(Dense(vocab_size)))
model.add(Activation('softmax'))
return model
if __name__ == '__main__':
model = build_model(16, 64, 8104)
model.summary()
|
from urllib.parse import parse_qs, urlparse
import os
import requests
import pytest
from trackbelt import search_soundcloud
real_request = requests.request
def mock_request(raw_url, **kwargs):
    """Replay a canned SoundCloud response from disk; on a cache miss, fetch the real
    response, record it next to the tests and fail so the new fixture can be reviewed."""
    url = urlparse(raw_url)
method = 'GET'
assert url.scheme in ('http', 'https')
assert url.netloc == 'soundcloud.com'
assert url.params == ''
assert url.fragment == ''
path = os.path.join(
os.path.dirname(__file__), 'responses', url.netloc,
url.path.strip('/'), url.query, '.'.join((method.lower(), 'html')))
try:
        with open(path, 'rb') as f:
data = f.read()
except FileNotFoundError:
data = real_request(method, raw_url, **kwargs).content
        os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
f.write(data.decode('utf-8'))
pytest.fail(
'Missing test data for "{} {}" have been written to "{}"'.format(
method, raw_url, path))
response = requests.Response()
response._content = data
response.status_code = 200
return response
def test_soundcloud_search_basic(monkeypatch):
monkeypatch.setattr(requests, 'get', mock_request)
result = search_soundcloud('evvy', 'collide (keljet remix)')
assert result == dict(
title='EVVY - Collide (Keljet Remix)',
url='/keljet/evvy-collide-keljet-remix-1',
)
def test_soundcloud_search_empty(monkeypatch):
monkeypatch.setattr(requests, 'get', mock_request)
result = search_soundcloud('nonexistentartist', 'nonexistenttitle')
assert result is None
|
import io
import os
from configparser import ConfigParser
def read_bots():
lista = list()
config = ConfigParser()
config.read('config.env')
    bots = config.sections()
    print("I have " + str(len(bots)) + " bots")
for x in bots:
bot = dict()
info = config.options(x)
if('coin' in info and 'token' in info and 'contract' in info):
try:
coin = str(config.get(x,'coin')).replace('"', "")
token = str(config.get(x,'token')).replace('"', "")
contract = str(config.get(x,'contract')).replace('"', "")
info_ = list()
info_.append(token)
info_.append(contract)
bot[coin] = info_
lista.append(bot)
            except Exception:
print(str(x) + " has invalid information!")
else:
print(str(x) + " has invalid information!")
return lista
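# Hypothetical example of the config.env layout read_bots() expects (section name and values
# below are made up; real files will differ). Each section that defines all of coin, token
# and contract contributes one {coin: [token, contract]} dict to the returned list:
#
#   [example_bot]
#   coin = "examplecoin"
#   token = "telegram-bot-token-goes-here"
#   contract = "0x0000000000000000000000000000000000000000"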
|
from django.contrib import admin
from maestrofinder_app.models import Musician, Request
# Register your models here.
admin.site.register(Musician)
admin.site.register(Request)
|
#!/usr/bin/python3
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from syscall import *
from utils import *
RESULT_UNSUPPORTED_YET = 1
RESULT_UNSUPPORTED_FLAG = 2
RESULT_UNSUPPORTED_AT_ALL = 3
FLAG_RENAME_WHITEOUT = (1 << 2) # renameat2's flag: whiteout source
FLAG_O_ASYNC = 0o20000 # open's flag
# fallocate flags:
F_FALLOC_FL_COLLAPSE_RANGE = 0x08
F_FALLOC_FL_ZERO_RANGE = 0x10
F_FALLOC_FL_INSERT_RANGE = 0x20
# fcntl's flags:
F_SETFD = 2
F_GETLK = 5
F_SETLK = 6
F_SETLKW = 7
F_SETOWN = 8
F_GETOWN = 9
F_SETSIG = 10
F_GETSIG = 11
F_SETOWN_EX = 15
F_GETOWN_EX = 16
F_OFD_GETLK = 36
F_OFD_SETLK = 37
F_OFD_SETLKW = 38
F_SETLEASE = 1024
F_GETLEASE = 1025
F_NOTIFY = 1026
F_ADD_SEALS = 1033
F_GET_SEALS = 1034
FD_CLOEXEC = 1
AT_EMPTY_PATH = 0x1000
# clone() flags set by pthread_create():
# = CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS|
# CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID
F_PTHREAD_CREATE = 0x3d0f00
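# Sanity-check sketch of the magic number above: it is simply the OR of the standard
# <linux/sched.h> CLONE_* flag values listed in the comment. Safe to evaluate here.
assert F_PTHREAD_CREATE == (0x100 | 0x200 | 0x400 | 0x800 |       # VM, FS, FILES, SIGHAND
                            0x10000 | 0x40000 | 0x80000 |         # THREAD, SYSVSEM, SETTLS
                            0x100000 | 0x200000)                  # PARENT_SETTID, CHILD_CLEARTID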
AT_FDCWD_HEX = 0x00000000FFFFFF9C # = AT_FDCWD (hex)
AT_FDCWD_DEC = -100 # = AT_FDCWD (dec)
MAX_DEC_FD = 0x10000000
########################################################################################################################
# realpath -- get the resolved path (it does not resolve links YET)
########################################################################################################################
# noinspection PyShadowingBuiltins
def realpath(path):
len_path = len(path)
assert(len_path > 0 and path[0] == '/')
newpath = "/"
newdirs = []
dirs = path.split('/')
for dir in dirs:
if dir in ("", "."):
continue
if dir == ".." and dirs.index(dir) > 0:
len_newdirs = len(newdirs)
if len_newdirs > 0:
del newdirs[len_newdirs - 1]
continue
newdirs.append(dir)
newpath += "/".join(newdirs)
if path[len_path - 1] == '/':
newpath += "/"
return newpath
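# Illustrative behaviour of realpath() above (hypothetical inputs):
#   realpath("/a/b/../c/./d") -> "/a/c/d"
#   realpath("/a//b/")        -> "/a/b/"
# As noted above, symbolic links are not resolved.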
########################################################################################################################
# ListSyscalls
########################################################################################################################
class ListSyscalls(list):
def __init__(self, pmem_paths, script_mode, debug_mode, verbose_mode):
list.__init__(self)
self.log_anls = logging.getLogger("analysis")
self.script_mode = script_mode
self.debug_mode = debug_mode
self.verbose_mode = verbose_mode
self.print_progress = not (self.debug_mode or self.script_mode)
self.time0 = 0
self.pid_table = []
self.npids = 0
self.last_pid = -1
self.last_pid_ind = 0
self.fd_tables = []
self.cwd_table = []
self.all_strings = ["(stdin)", "(stdout)", "(stderr)"]
self.path_is_pmem = [0, 0, 0]
if pmem_paths:
paths = str(pmem_paths)
else:
paths = ""
self.pmem_paths = paths.split(':')
# add slash at the end and normalize all paths
self.pmem_paths = [realpath(path + '/') for path in self.pmem_paths]
self.all_supported = 1 # all syscalls are supported
self.unsupported = 0
self.unsupported_yet = 0
self.unsupported_rel = 0
self.unsupported_flag = 0
self.list_unsup = []
self.list_unsup_yet = []
self.list_unsup_rel = []
self.list_unsup_flag = []
self.ind_unsup = []
self.ind_unsup_yet = []
self.ind_unsup_rel = []
self.ind_unsup_flag = []
####################################################################################################################
def is_path_pmem(self, string):
string = str(string)
for path in self.pmem_paths:
if string.find(path) == 0:
return 1
return 0
####################################################################################################################
# all_strings_append -- append the string to the list of all strings
####################################################################################################################
def all_strings_append(self, string, is_pmem):
if self.all_strings.count(string) == 0:
self.all_strings.append(string)
self.path_is_pmem.append(is_pmem)
str_ind = len(self.all_strings) - 1
else:
str_ind = self.all_strings.index(string)
return str_ind
####################################################################################################################
@staticmethod
def fd_table_assign(table, fd, val):
fd_table_length = len(table)
assert_msg(fd_table_length + 1000 > fd, "abnormally huge file descriptor ({0:d}), input file may be corrupted"
.format(fd))
for i in range(fd_table_length, fd + 1):
table.append(-1)
table[fd] = val
####################################################################################################################
def print_always(self):
for syscall in self:
syscall.print_always()
####################################################################################################################
# look_for_matching_record -- look for matching record in a list of incomplete syscalls
####################################################################################################################
def look_for_matching_record(self, info_all, pid_tid, sc_id, name, retval):
for syscall in self:
check = syscall.check_read_data(info_all, pid_tid, sc_id, name, retval, DEBUG_OFF)
if check == CHECK_OK:
self.remove(syscall)
return syscall
return -1
####################################################################################################################
# set_pid_index -- set PID index and create a new FD table for each PID
####################################################################################################################
def set_pid_index(self, syscall):
pid = syscall.pid_tid >> 32
if pid != self.last_pid:
self.last_pid = pid
if self.pid_table.count(pid) == 0:
self.pid_table.append(pid)
self.npids = len(self.pid_table)
self.fd_tables.append([0, 1, 2])
self.cwd_table.append(self.cwd_table[self.last_pid_ind])
if self.npids > 1:
self.log_anls.debug("DEBUG WARNING(set_pid_index): added new _empty_ FD table for new PID 0x{0:08X}"
.format(pid))
self.last_pid_ind = len(self.pid_table) - 1
else:
self.last_pid_ind = self.pid_table.index(pid)
syscall.pid_ind = self.last_pid_ind
####################################################################################################################
def set_pid_index_offline(self):
length = len(self)
if not self.script_mode:
print("\nCounting PIDs:")
for syscall in self:
if self.print_progress:
n = self.index(syscall) + 1
print("\r{0:d} of {1:d} ({2:d}%) ".format(n, length, int((100 * n) / length)), end='')
self.set_pid_index(syscall)
if self.print_progress:
print(" done.")
if self.debug_mode:
for pid in self.pid_table:
self.log_anls.debug("PID[{0:d}] = 0x{1:016X}".format(self.pid_table.index(pid), pid))
####################################################################################################################
# arg_is_pmem -- check if a path argument is located on the pmem filesystem
####################################################################################################################
def arg_is_pmem(self, syscall, narg):
if narg > syscall.nargs:
return 0
narg -= 1
if syscall.has_mask(Arg_is_path[narg] | Arg_is_fd[narg]):
str_ind = syscall.args[narg]
if str_ind != -1 and str_ind < len(self.path_is_pmem) and self.path_is_pmem[str_ind]:
return 1
return 0
####################################################################################################################
# check_fallocate_flags -- check if the fallocate flags are supported by pmemfile
####################################################################################################################
@staticmethod
def check_fallocate_flags(syscall):
syscall.unsupported_flag = ""
if syscall.args[1] == F_FALLOC_FL_COLLAPSE_RANGE:
syscall.unsupported_flag = "FALLOC_FL_COLLAPSE_RANGE"
elif syscall.args[1] == F_FALLOC_FL_ZERO_RANGE:
syscall.unsupported_flag = "FALLOC_FL_ZERO_RANGE"
elif syscall.args[1] == F_FALLOC_FL_INSERT_RANGE:
syscall.unsupported_flag = "FALLOC_FL_INSERT_RANGE"
if syscall.unsupported_flag != "":
return RESULT_UNSUPPORTED_FLAG
else:
return RESULT_SUPPORTED
####################################################################################################################
# check_fcntl_flags -- check if the fcntl flags are supported by pmemfile
####################################################################################################################
@staticmethod
def check_fcntl_flags(syscall):
syscall.unsupported_flag = ""
if syscall.args[1] == F_SETFD and (syscall.args[2] & FD_CLOEXEC == 0):
syscall.unsupported_flag = "F_SETFD: not possible to clear FD_CLOEXEC flag"
elif syscall.args[1] == F_GETLK:
syscall.unsupported_flag = "F_GETLK"
elif syscall.args[1] == F_SETLK:
syscall.unsupported_flag = "F_SETLK"
elif syscall.args[1] == F_SETLKW:
syscall.unsupported_flag = "F_SETLKW"
elif syscall.args[1] == F_SETOWN:
syscall.unsupported_flag = "F_SETOWN"
elif syscall.args[1] == F_GETOWN:
syscall.unsupported_flag = "F_GETOWN"
elif syscall.args[1] == F_SETSIG:
syscall.unsupported_flag = "F_SETSIG"
elif syscall.args[1] == F_GETSIG:
syscall.unsupported_flag = "F_GETSIG"
elif syscall.args[1] == F_SETOWN_EX:
syscall.unsupported_flag = "F_SETOWN_EX"
elif syscall.args[1] == F_GETOWN_EX:
syscall.unsupported_flag = "F_GETOWN_EX"
elif syscall.args[1] == F_OFD_GETLK:
syscall.unsupported_flag = "F_OFD_GETLK"
elif syscall.args[1] == F_OFD_SETLK:
syscall.unsupported_flag = "F_OFD_SETLK"
elif syscall.args[1] == F_OFD_SETLKW:
syscall.unsupported_flag = "F_OFD_SETLKW"
elif syscall.args[1] == F_SETLEASE:
syscall.unsupported_flag = "F_SETLEASE"
elif syscall.args[1] == F_GETLEASE:
syscall.unsupported_flag = "F_GETLEASE"
elif syscall.args[1] == F_NOTIFY:
syscall.unsupported_flag = "F_NOTIFY"
elif syscall.args[1] == F_ADD_SEALS:
syscall.unsupported_flag = "F_ADD_SEALS"
elif syscall.args[1] == F_GET_SEALS:
syscall.unsupported_flag = "F_GET_SEALS"
if syscall.unsupported_flag != "":
return RESULT_UNSUPPORTED_FLAG
else:
return RESULT_SUPPORTED
####################################################################################################################
# is_supported -- check if the syscall is supported by pmemfile
####################################################################################################################
def is_supported(self, syscall):
# SyS_fork and SyS_vfork are not supported at all
if syscall.name in ("fork", "vfork"):
return RESULT_UNSUPPORTED_AT_ALL
# SyS_clone is supported only with flags set by pthread_create()
if syscall.name == "clone" and syscall.args[0] != F_PTHREAD_CREATE:
syscall.unsupported_flag = "flags other than set by pthread_create()"
return RESULT_UNSUPPORTED_FLAG
        # the remaining checks apply only if at least one of the paths or file descriptors points at the pmemfile filesystem
if not syscall.is_pmem:
return RESULT_SUPPORTED
# SyS_open and SyS_openat with O_ASYNC flag are not supported
if syscall.has_mask(EM_rfd):
# In order to make the checks faster, first the bit flags are checked (because it is very fast)
# and a syscall name is verified only if all flags are correct (because comparing strings is slower).
if (syscall.is_mask(Arg_is_path[0]) and syscall.args[1] & FLAG_O_ASYNC and syscall.name == "open") or \
(syscall.is_mask(Arg_is_fd[0] | Arg_is_path[1] | EM_fileat) and syscall.args[2] & FLAG_O_ASYNC and
syscall.name == "openat"):
syscall.unsupported_flag = "O_ASYNC"
return RESULT_UNSUPPORTED_FLAG
return RESULT_SUPPORTED
# let's check SyS_*at syscalls
if syscall.is_mask(EM_isfileat):
# SyS_execveat and SyS_name_to_handle_at are not supported
if syscall.name in ("execveat", "name_to_handle_at"):
return RESULT_UNSUPPORTED_AT_ALL
# SyS_renameat2 with RENAME_WHITEOUT flag is not supported
            if syscall.nargs == 5 and syscall.name == "renameat2" and syscall.args[4] & FLAG_RENAME_WHITEOUT:
syscall.unsupported_flag = "RENAME_WHITEOUT"
return RESULT_UNSUPPORTED_FLAG
            # the rest of the SyS_*at syscalls are supported
return RESULT_SUPPORTED
# let's check syscalls with file descriptor as the first argument
if syscall.has_mask(EM_fd_1):
# SyS_fallocate with FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_ZERO_RANGE and FALLOC_FL_INSERT_RANGE flags
# is not supported
if syscall.name == "fallocate":
return self.check_fallocate_flags(syscall)
            # many of the SyS_fcntl flags are not supported
if syscall.name == "fcntl":
return self.check_fcntl_flags(syscall)
# let's check syscalls with pmem path or file descriptor as the first argument
if self.arg_is_pmem(syscall, 1):
# the following syscalls are not supported
if syscall.name in (
"chroot", "execve", "readahead",
"setxattr", "lsetxattr", "fsetxattr",
"listxattr", "llistxattr", "flistxattr",
"removexattr", "lremovexattr", "fremovexattr"):
return RESULT_UNSUPPORTED_AT_ALL
# the SyS_flock syscall is not supported YET
if syscall.name == "flock":
return RESULT_UNSUPPORTED_YET
# SyS_copy_file_range and SyS_splice syscalls are not supported YET
if (self.arg_is_pmem(syscall, 1) or self.arg_is_pmem(syscall, 3)) and\
syscall.name in ("copy_file_range", "splice"):
return RESULT_UNSUPPORTED_YET
# SyS_sendfile and SyS_sendfile64 syscalls are not supported YET
if (self.arg_is_pmem(syscall, 1) or self.arg_is_pmem(syscall, 2)) and\
syscall.name in ("sendfile", "sendfile64"):
return RESULT_UNSUPPORTED_YET
# SyS_mmap syscall is not supported YET
if self.arg_is_pmem(syscall, 5) and syscall.name == "mmap":
return RESULT_UNSUPPORTED_YET
        # the rest of the syscalls are supported
return RESULT_SUPPORTED
####################################################################################################################
def log_print_path(self, is_pmem, name, path):
if is_pmem:
self.log_anls.debug("{0:20s} \"{1:s}\" [PMEM]".format(name, path))
else:
self.log_anls.debug("{0:20s} \"{1:s}\"".format(name, path))
####################################################################################################################
@staticmethod
def log_build_msg(msg, is_pmem, path):
if is_pmem:
msg += " \"{0:s}\" [PMEM]".format(path)
else:
msg += " \"{0:s}\"".format(path)
return msg
####################################################################################################################
def set_first_cwd(self, cwd):
assert_msg(len(self.cwd_table) == 0, "cwd_table is not empty")
self.cwd_table.append(cwd)
####################################################################################################################
def set_cwd(self, new_cwd, syscall):
self.cwd_table[syscall.pid_ind] = new_cwd
####################################################################################################################
def get_cwd(self, syscall):
return self.cwd_table[syscall.pid_ind]
####################################################################################################################
def get_fd_table(self, syscall):
return self.fd_tables[syscall.pid_ind]
####################################################################################################################
# handle_fileat -- helper function of match_fd_with_path() - handles *at syscalls
####################################################################################################################
def handle_fileat(self, syscall, arg1, arg2, msg):
assert_msg(syscall.has_mask(Arg_is_fd[arg1]), "argument #{0:d} is not a file descriptor".format(arg1))
assert_msg(syscall.has_mask(Arg_is_path[arg2]), "argument #{0:d} is not a path".format(arg2))
dirfd = syscall.args[arg1]
if dirfd == AT_FDCWD_HEX:
dirfd = AT_FDCWD_DEC
# check if AT_EMPTY_PATH is set
if (syscall.has_mask(EM_aep_arg_4) and (syscall.args[3] & AT_EMPTY_PATH)) or\
(syscall.has_mask(EM_aep_arg_5) and (syscall.args[4] & AT_EMPTY_PATH)):
path = ""
else:
path = syscall.strings[syscall.args[arg2]]
dir_str = ""
newpath = path
unknown_dirfd = 0
# handle empty and relative paths
if (len(path) == 0 and not syscall.read_error) or (len(path) != 0 and path[0] != '/'):
# get FD table of the current PID
fd_table = self.get_fd_table(syscall)
# check if dirfd == AT_FDCWD
if dirfd == AT_FDCWD_DEC:
dir_str = self.get_cwd(syscall)
# is dirfd saved in the FD table?
elif 0 <= dirfd < len(fd_table):
# read string index of dirfd
str_ind = fd_table[dirfd]
# save string index instead of dirfd as the argument
syscall.args[arg1] = str_ind
# read path of dirfd
dir_str = self.all_strings[str_ind]
elif syscall.has_mask(EM_rfd) and syscall.iret != -1:
unknown_dirfd = 1
if not unknown_dirfd:
if len(path) == 0:
newpath = dir_str
else:
newpath = dir_str + "/" + path
if dir_str != "":
msg += " \"{0:s}\" \"{1:s}\"".format(dir_str, path)
else:
msg += " ({0:d}) \"{1:s}\"".format(dirfd, path)
if not unknown_dirfd:
path = realpath(newpath)
else:
path = newpath
is_pmem = self.is_path_pmem(path)
# append new path to the global array of all strings
str_ind = self.all_strings_append(path, is_pmem)
# save index in the global array as the argument
syscall.args[arg2] = str_ind
syscall.is_pmem |= is_pmem
if is_pmem:
msg += " [PMEM]"
if unknown_dirfd:
self.log_anls.warning("Unknown dirfd : {0:d}".format(dirfd))
return path, is_pmem, msg
####################################################################################################################
# handle_one_path -- helper function of match_fd_with_path() - handles one path argument of number n
####################################################################################################################
def handle_one_path(self, syscall, n):
path = syscall.strings[syscall.args[n]]
if syscall.read_error and len(path) == 0:
is_pmem = 0
else:
# handle relative paths
if len(path) == 0:
path = self.get_cwd(syscall)
elif path[0] != '/':
path = self.get_cwd(syscall) + "/" + path
path = realpath(path)
is_pmem = self.is_path_pmem(path)
syscall.is_pmem |= is_pmem
# append new path to the global array of all strings
str_ind = self.all_strings_append(path, is_pmem)
# save index in the global array as the argument
syscall.args[n] = str_ind
return path, str_ind, is_pmem
####################################################################################################################
# match_fd_with_path -- save paths in the table and match file descriptors with saved paths
####################################################################################################################
def match_fd_with_path(self, syscall):
if syscall.read_error:
self.log_anls.warning("BPF read error occurred, path is empty in syscall: {0:s}".format(syscall.name))
# handle SyS_open or SyS_creat
if syscall.is_mask(EM_fd_from_path):
path, str_ind, is_pmem = self.handle_one_path(syscall, 0)
self.log_print_path(is_pmem, syscall.name, path)
fd_out = syscall.iret
if fd_out != -1:
# get FD table of the current PID
fd_table = self.get_fd_table(syscall)
# add to the FD table new pair (fd_out, str_ind):
# - new descriptor 'fd_out' points at the string of index 'str_ind' in the table of all strings
self.fd_table_assign(fd_table, fd_out, str_ind)
# handle all SyS_*at syscalls
elif syscall.is_mask(EM_isfileat):
msg = "{0:20s}".format(syscall.name)
path, is_pmem, msg = self.handle_fileat(syscall, 0, 1, msg)
fd_out = syscall.iret
# handle SyS_openat
if syscall.has_mask(EM_rfd) and fd_out != -1:
str_ind = self.all_strings_append(path, is_pmem)
# get FD table of the current PID
fd_table = self.get_fd_table(syscall)
# add to the FD table new pair (fd_out, str_ind):
# - new descriptor 'fd_out' points at the string of index 'str_ind' in the table of all strings
self.fd_table_assign(fd_table, fd_out, str_ind)
# handle syscalls with second 'at' pair (e.g. linkat, renameat)
if syscall.is_mask(EM_isfileat2):
path, is_pmem, msg = self.handle_fileat(syscall, 2, 3, msg)
self.log_anls.debug(msg)
# handle SyS_symlinkat (it is a special case of SyS_*at syscalls)
elif syscall.name == "symlinkat":
msg = "{0:20s}".format(syscall.name)
path, str_ind, is_pmem = self.handle_one_path(syscall, 0)
            msg = self.log_build_msg(msg, is_pmem, path)
path, is_pmem, msg = self.handle_fileat(syscall, 1, 2, msg)
self.log_anls.debug(msg)
# handle SyS_dup*
elif syscall.is_mask(EM_fd_from_fd):
# get FD table of the current PID
fd_table = self.get_fd_table(syscall)
fd_in = syscall.args[0]
fd_out = syscall.iret
# is fd_in saved in the FD table?
if 0 <= fd_in < len(fd_table):
# read string index of fd_in
str_ind = fd_table[fd_in]
# save string index instead of fd_in as the argument
syscall.args[0] = str_ind
# read path of fd_in
path = self.all_strings[str_ind]
is_pmem = self.path_is_pmem[str_ind]
syscall.is_pmem |= is_pmem
self.log_print_path(is_pmem, syscall.name, path)
if fd_out != -1:
# add to the FD table new pair (fd_out, str_ind):
# - new descriptor 'fd_out' points at the string of index 'str_ind' in the table of all strings
self.fd_table_assign(fd_table, fd_out, str_ind)
else:
# fd_in is an unknown descriptor
syscall.args[0] = -1
self.log_anls.debug("{0:20s} ({1:d})".format(syscall.name, fd_in))
if fd_out != -1:
self.log_anls.warning("Unknown fd : {0:d}".format(fd_in))
# handle SyS_close
elif syscall.name == "close":
fd_in = syscall.args[0]
# get FD table of the current PID
fd_table = self.get_fd_table(syscall)
# is fd_in saved in the FD table?
if 0 <= fd_in < len(fd_table):
# read string index of fd_in
str_ind = fd_table[fd_in]
# "close" the fd_in descriptor
fd_table[fd_in] = -1
# read path of fd_in
path = self.all_strings[str_ind]
is_pmem = self.path_is_pmem[str_ind]
syscall.is_pmem |= is_pmem
self.log_print_path(is_pmem, syscall.name, path)
else:
self.log_anls.debug("{0:20s} (0x{1:016X})".format(syscall.name, fd_in))
# handle syscalls with a path or a file descriptor among arguments
elif syscall.has_mask(EM_str_all | EM_fd_all):
msg = "{0:20s}".format(syscall.name)
# loop through all syscall's arguments
for narg in range(syscall.nargs):
# check if the argument is a string
if syscall.has_mask(Arg_is_str[narg]):
is_pmem = 0
path = syscall.strings[syscall.args[narg]]
# check if the argument is a path
if syscall.has_mask(Arg_is_path[narg]):
# handle relative paths
if len(path) != 0 and path[0] != '/':
self.all_strings_append(path, 0) # add relative path as non-pmem
path = self.get_cwd(syscall) + "/" + path
# handle empty paths
elif len(path) == 0 and not syscall.read_error:
path = self.get_cwd(syscall)
path = realpath(path)
is_pmem = self.is_path_pmem(path)
syscall.is_pmem |= is_pmem
# append new path to the global array of all strings
str_ind = self.all_strings_append(path, is_pmem)
# save index in the global array as the argument
syscall.args[narg] = str_ind
msg = self.log_build_msg(msg, is_pmem, path)
# check if the argument is a file descriptor
if syscall.has_mask(Arg_is_fd[narg]):
# get FD table of the current PID
fd_table = self.get_fd_table(syscall)
fd = syscall.args[narg]
if fd in (0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF):
fd = -1
# is fd saved in the FD table?
if 0 <= fd < len(fd_table):
# read string index of fd
str_ind = fd_table[fd]
# read path of fd
path = self.all_strings[str_ind]
is_pmem = self.path_is_pmem[str_ind]
syscall.is_pmem |= is_pmem
# save string index instead of fd as the argument
syscall.args[narg] = str_ind
msg = self.log_build_msg(msg, is_pmem, path)
else:
# fd_in is an unknown descriptor
syscall.args[narg] = -1
if fd < MAX_DEC_FD:
msg += " ({0:d})".format(fd)
else:
msg += " (0x{0:016X})".format(fd)
self.log_anls.debug(msg)
self.post_match_action(syscall)
####################################################################################################################
def post_match_action(self, syscall):
# change current working directory in case of SyS_chdir and SyS_fchdir
if syscall.ret == 0 and syscall.name in ("chdir", "fchdir"):
old_cwd = self.get_cwd(syscall)
new_cwd = self.all_strings[syscall.args[0]]
self.set_cwd(new_cwd, syscall)
self.log_anls.debug("INFO: current working directory changed:")
self.log_anls.debug(" from: \"{0:s}\"".format(old_cwd))
self.log_anls.debug(" to: \"{0:s}\"".format(new_cwd))
# add new PID to the table in case of SyS_fork, SyS_vfork and SyS_clone
if syscall.name in ("fork", "vfork", "clone"):
if syscall.iret <= 0:
return
old_pid = syscall.pid_tid >> 32
new_pid = syscall.iret
self.add_pid(new_pid, old_pid)
####################################################################################################################
# add_pid -- add new PID to the table and copy CWD and FD table for this PID
####################################################################################################################
def add_pid(self, new_pid, old_pid):
if self.pid_table.count(new_pid) == 0:
self.pid_table.append(new_pid)
self.npids = len(self.pid_table)
assert_msg(self.pid_table.count(old_pid) == 1, "there is no old PID in the table")
old_pid_ind = self.pid_table.index(old_pid)
self.cwd_table.append(self.cwd_table[old_pid_ind])
self.fd_tables.append(self.fd_tables[old_pid_ind])
else:
# correct the CWD and FD table
pid_ind = self.pid_table.index(new_pid)
old_pid_ind = self.pid_table.index(old_pid)
self.cwd_table[pid_ind] = self.cwd_table[old_pid_ind]
self.fd_tables[pid_ind] = self.fd_tables[old_pid_ind]
self.log_anls.debug("DEBUG Notice(add_pid): copied CWD and FD table from: "
"old PID 0x{0:08X} to: new PID 0x{1:08X}".format(old_pid, new_pid))
####################################################################################################################
def match_fd_with_path_offline(self):
assert_msg(len(self.cwd_table) > 0, "empty CWD table")
if not self.script_mode:
print("\nAnalyzing:")
length = len(self)
for syscall in self:
if self.print_progress:
n = self.index(syscall) + 1
print("\r{0:d} of {1:d} ({2:d}%) ".format(n, length, int((100 * n) / length)), end='')
if not self.has_entry_content(syscall):
continue
self.match_fd_with_path(syscall)
syscall.unsupported_type = self.is_supported(syscall)
if self.print_progress:
print(" done.\n")
####################################################################################################################
def has_entry_content(self, syscall):
if not (syscall.content & CNT_ENTRY): # no entry info (no info about arguments)
if not (syscall.name in ("clone", "fork", "vfork") or syscall.sc_id == RT_SIGRETURN_SYS_EXIT):
self.log_anls.warning("missing info about arguments of syscall: '{0:s}' - skipping..."
.format(syscall.name))
return 0
return 1
####################################################################################################################
def print_unsupported(self, l_names, l_inds):
for name in l_names:
if not self.verbose_mode:
print(" {0:s}".format(name))
else:
list_ind = l_inds[l_names.index(name)]
if len(list_ind):
print(" {0:s}:".format(name))
else:
print(" {0:s}".format(name))
for str_ind in list_ind:
if self.path_is_pmem[str_ind]:
print("\t\t\"{0:s}\" [PMEM]".format(self.all_strings[str_ind]))
else:
print("\t\t\"{0:s}\"".format(self.all_strings[str_ind]))
####################################################################################################################
def print_unsupported_verbose2(self, msg, syscall, end):
print("{0:28s}\t{1:16s}\t".format(msg, syscall.name), end='')
for narg in range(syscall.nargs):
if syscall.has_mask(Arg_is_path[narg] | Arg_is_fd[narg]):
str_ind = syscall.args[narg]
if str_ind != -1:
if self.path_is_pmem[str_ind]:
print(" \"{0:s}\" [PMEM] ".format(self.all_strings[str_ind]), end='')
else:
print(" \"{0:s}\"".format(self.all_strings[str_ind]), end='')
if end:
print()
####################################################################################################################
@staticmethod
def add_to_unsupported_lists(syscall, name, l_names, l_inds):
if l_names.count(name) == 0:
l_names.append(name)
ind = len(l_names) - 1
list_ind = []
l_inds.append(list_ind)
assert_msg(len(l_names) == len(l_inds), "lists lengths are not equal")
else:
ind = l_names.index(name)
list_ind = l_inds[ind]
for narg in range(syscall.nargs):
if syscall.has_mask(Arg_is_path[narg] | Arg_is_fd[narg]):
str_ind = syscall.args[narg]
if str_ind != -1:
if list_ind.count(str_ind) == 0:
list_ind.append(str_ind)
l_inds[ind] = list_ind
####################################################################################################################
def add_to_unsupported_lists_or_print(self, syscall):
if not syscall.unsupported_type:
return
if self.all_supported:
self.all_supported = 0
if syscall.unsupported_type == RESULT_UNSUPPORTED_AT_ALL:
if self.verbose_mode >= 2:
self.print_unsupported_verbose2("unsupported syscall:", syscall, end=1)
else:
self.add_to_unsupported_lists(syscall, syscall.name, self.list_unsup, self.ind_unsup)
elif syscall.unsupported_type == RESULT_UNSUPPORTED_FLAG:
if self.verbose_mode >= 2:
self.print_unsupported_verbose2("unsupported flag:", syscall, end=0)
print(" [unsupported flag:]", syscall.unsupported_flag)
else:
name = syscall.name + " <" + syscall.unsupported_flag + ">"
self.add_to_unsupported_lists(syscall, name, self.list_unsup_flag, self.ind_unsup_flag)
else: # syscall.unsupported_type == RESULT_UNSUPPORTED_YET
if self.verbose_mode >= 2:
self.print_unsupported_verbose2("unsupported syscall yet:", syscall, end=1)
else:
self.add_to_unsupported_lists(syscall, syscall.name, self.list_unsup_yet, self.ind_unsup_yet)
####################################################################################################################
def print_unsupported_syscalls(self):
if self.all_supported:
print("All syscalls are supported.")
return
if self.verbose_mode >= 2:
return
# RESULT_UNSUPPORTED_AT_ALL
if len(self.list_unsup):
print("Unsupported syscalls detected:")
self.print_unsupported(self.list_unsup, self.ind_unsup)
print()
# RESULT_UNSUPPORTED_FLAG
if len(self.list_unsup_flag):
print("Unsupported syscall's flag detected:")
self.print_unsupported(self.list_unsup_flag, self.ind_unsup_flag)
print()
# RESULT_UNSUPPORTED_YET
if len(self.list_unsup_yet):
print("Yet-unsupported syscalls detected (will be supported):")
self.print_unsupported(self.list_unsup_yet, self.ind_unsup_yet)
print()
####################################################################################################################
def print_unsupported_syscalls_offline(self):
for syscall in self:
self.add_to_unsupported_lists_or_print(syscall)
self.print_unsupported_syscalls()
|
from fastapi import APIRouter, status, HTTPException, Depends, Request
from core.multi_database_middleware import get_db_session
from sqlalchemy.orm import Session
from api.schemas.role_schema import UserRoleSchemaRequest, RoleSchemaRequest, RoleSchema, RoleRequestAccessSchema
from models.role_model import RoleModel, UserRoleModel
from core.auth import logged_in_user, admin_user
from typing import List
from api.repository.role_transactions import modify_user_role, create_new_role
from api.repository.access_request_transactions import EmailRequestAccess
router = APIRouter(
prefix="/role",
tags=['Role']
)
@router.get('/all', status_code=status.HTTP_200_OK, response_model=List[RoleSchema])
def get_All_Roles(db: Session= Depends(get_db_session), user = Depends(admin_user)):
role = db.query(RoleModel).filter(RoleModel.role_name!='super-admin').all()
return role
@router.get('/{id}', status_code=status.HTTP_200_OK)
def get_Role_By_Id(id: int, db: Session= Depends(get_db_session), user = Depends(admin_user)):
role = db.query(RoleModel).filter(RoleModel.id==id).first()
    role.user  # access the relationship so the assigned users are loaded before the response is serialized
return role
@router.post('', status_code=status.HTTP_200_OK)
def create_Role(request: RoleSchemaRequest, db: Session= Depends(get_db_session), user = Depends(admin_user)):
return create_new_role(request.role_name, db, user['username'])
@router.delete('/{id}', status_code=status.HTTP_202_ACCEPTED)
def delete_Role_By_Id(id: int, db: Session= Depends(get_db_session), user = Depends(admin_user)):
role = db.query(RoleModel).filter(RoleModel.id==id)
if role.first().role_name == 'super_user':
        raise HTTPException(status_code=status.HTTP_423_LOCKED, detail="Forbidden.")
role.delete(synchronize_session=False)
db.commit()
return 'Role deleted.'
@router.put('/assign', status_code=status.HTTP_202_ACCEPTED)
def assign_Role_To_User(request: UserRoleSchemaRequest, db: Session= Depends(get_db_session), user = Depends(admin_user)):
modify_user_role(request.roles, request.user_id, db, user['username'])
return "Role assigned."
@router.delete('/unassign', status_code=status.HTTP_202_ACCEPTED)
def unassign_Role_To_User(request: UserRoleSchemaRequest, db: Session= Depends(get_db_session), user = Depends(admin_user)):
role = db.query(RoleModel).filter(RoleModel.id==request.role_id)
if role.first().role_name == 'super_user':
        raise HTTPException(status_code=status.HTTP_423_LOCKED, detail="Forbidden.")
user_role = db.query(UserRoleModel).where(
UserRoleModel.user_id==request.user_id,
UserRoleModel.role_id==request.role_id
)
user_role.delete(synchronize_session=False)
db.commit()
return 'Role unassigned.'
@router.post('/request-access', status_code=status.HTTP_200_OK)
def request_Access(request: RoleRequestAccessSchema, db: Session= Depends(get_db_session), user = Depends(logged_in_user)):
print("__________REQUEST_ACCESS___________")
EmailRequestAccess().request_access(db, user['username'], request.message)
return "sent"
|
# coding=utf-8
# ${PROJECTNAME}
# (c) Chris von Csefalvay, 2015.
"""
test_FPS exercises the FPS counter provided by processpathway.
"""
from time import sleep
from unittest import TestCase
from processpathway import FPS
class TestFPS(TestCase):
def setUp(self):
self.fpsc = FPS()
def test_update(self):
self.fpsc.update()
sleep(1)
self.fpsc.update()
self.assertAlmostEqual(self.fpsc.fps, 1.0, places=2)
|
#!/usr/bin/env python3
def main():
#import required modules
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver as wd
from time import sleep
import subprocess
#in case anything goes wrong, simply log the error.
try:
#some options to optimize using chromedriver for automation.
chrome_options = Options()
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--headless')
chrome_options.add_argument("enable-automation")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--dns-prefetch-disable")
#load the driver.
wd = wd.Chrome('./chromedriver', options = chrome_options)
#load the university's login panel
wd.get('http://hamava.arakmu.ac.ir/Education')
#find the elements of the login form that we need to interact with.
user = WebDriverWait(wd,30).until(EC.presence_of_element_located(
(By.XPATH,'/html/body/div[2]/div[2]/div[1]/div[2]/form/div[1]/div/input')))
uspass = WebDriverWait(wd,30).until(EC.presence_of_element_located(
(By.XPATH,'/html/body/div[2]/div[2]/div[1]/div[2]/form/div[2]/div/input')))
button = WebDriverWait(wd,30).until(EC.presence_of_element_located(
(By.XPATH,'/html/body/div[2]/div[2]/div[1]/div[2]/form/div[3]/div/button')))
#enter username and pass followed by clicking the enter button.
user.send_keys('student-id')
uspass.send_keys('student-password')
button.click()
sleep(5)
#retrieving the element that contains the number of notifications regarding the student's status
nots = WebDriverWait(wd,30).until(EC.presence_of_element_located(
(By.XPATH, '//*[@id="notification-button"]/span[2]')))
not_co = nots.text
#for debug purposes
#print(dir(nots))
#print(type(nots))
#print(type(not_co))
# 0 means no notifications, no need to do anything. otherwise, notify me.
if not_co == '0' :
print('no news', not_co)
print('===================================================================')
else :
subprocess.run(['zenity' ,'--error' ,'--title="Score ready???"' ,'--text="{}"'.format(not_co),'--width=200', '--height=100'])
print('===============')
print(not_co)
print('===================================================================')
except Exception as e:
print('ERROR: ', e)
print('===================================================================')
if __name__ == '__main__' :
main()
|
import fileinput
from itertools import count
from utils import parse_nums
def gcd(a, b):
"""Compute the greatest common divisor of a and b"""
while b > 0:
a, b = b, a % b
return a
def lcm(a, b):
    """Compute the lowest common multiple of a and b"""
    return a * b // gcd(a, b)
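# Example (sketch): lcm(4, 6) == 12 and lcm(21, 6) == 42; the nested
# lcm(lcm(x, y), z) call at the bottom combines the three per-axis cycle lengths.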
positions = []
initial = []
velocities = [[0] * 3 for _ in range(4)]
for i, line in enumerate(fileinput.input()):
line = line.strip()
nums = parse_nums(line)
positions.append(list(nums))
initial.append(list(nums))
CYCLES = [None, None, None]
for step in count(start=1):
# Update velocities
for x in range(4):
for y in range(x + 1, 4):
for d in range(3):
if positions[x][d] < positions[y][d]:
velocities[x][d] += 1
velocities[y][d] -= 1
elif positions[x][d] > positions[y][d]:
velocities[x][d] -= 1
velocities[y][d] += 1
# Update positions
for x in range(4):
for d in range(3):
positions[x][d] += velocities[x][d]
if step == 1000:
energy = 0
for pos, vel in zip(positions, velocities):
energy += sum(abs(p) for p in pos) * sum(abs(v) for v in vel)
print "Total energy after 1000 steps:", energy
for d in range(3):
if CYCLES[d] is not None:
continue
for m in range(4):
if positions[m][d] != initial[m][d]:
break
if velocities[m][d] != 0:
break
else:
CYCLES[d] = step
if all(CYCLES):
print "Steps for full cycle:", lcm(lcm(CYCLES[0], CYCLES[1]), CYCLES[2])
break
|
# -*- coding: latin-1 -*-
# Copyright (c) 2008 Pycircuit Development Team
# See LICENSE for details.
""" Test high-level circuit definition
"""
# Branch, Contribution, Parameter and ddt are used below; they are assumed to be
# exported by the same hdl module (adjust this import if they live elsewhere).
from pycircuit.circuit.hdl import Behavioural, Branch, Contribution, Parameter, ddt
import sympy
import numpy as np
def test_resistor():
"""Verify simple resistor model"""
class Resistor(Behavioural):
instparams = [Parameter(name='r', desc='Resistance', unit='ohm')]
@staticmethod
def analog(plus, minus):
b = Branch(plus, minus)
return Contribution(b.I, 1/r * b.V),
res = Resistor(r=1e3)
v1,v2 = sympy.symbols(('v1', 'v2'))
assert res.i([v1,v2]) == [1e-3*(v1-v2), -1e-3*(v1-v2)]
assert np.alltrue(res.G([v1,v2]) ==
np.array([[1e-3, -1e-3], [-1e-3, 1e-3]]))
assert np.alltrue(res.C([v1,v2]) == np.zeros((2,2)))
assert np.alltrue(res.CY([v1,v2]) == np.zeros((2,2)))
def test_capacitor():
"""Verify simple capacitance model"""
class Capacitor(Behavioural):
instparams = [Parameter(name='c', desc='Capacitance', unit='F')]
@staticmethod
def analog(plus, minus):
b = Branch(plus, minus)
return Contribution(b.I, ddt(c * b.V)),
C = sympy.Symbol('C')
cap = Capacitor(c=C)
v1,v2 = sympy.symbols(('v1', 'v2'))
assert cap.i([v1,v2]) == [0, 0]
assert cap.q([v1,v2]) == [C*(v1-v2), -C*(v1-v2)]
assert np.alltrue(cap.C([v1,v2]) ==
np.array([[C, -C], [-C, C]]))
assert np.alltrue(cap.G([v1,v2]) == np.zeros((2,2)))
assert np.alltrue(cap.CY([v1,v2]) == np.zeros((2,2)))
|
expr = str(input("Enter an expression:"))
pilha = 0
for cont in expr:
    if cont == "(":
        pilha += 1  # add 1 for every opening parenthesis
    if cont == ")":
        pilha -= 1  # subtract 1 for every closing parenthesis
    if pilha < 0:  # a negative count means a parenthesis was closed before being opened
        break  # stop at the first error
if pilha == 0:  # any value other than zero means the parentheses are not balanced
    print("Your expression is valid!!!")
else:
    print("Your expression is invalid!!!")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from threathunter_common.util import millis_now
from threathunter_common.util import run_in_thread
from .generator import gen_generator
from .parser_initializer import get_fn_load_parsers
current_generators = []
last_update_ts = 0
def get_current_generators():
"""
获取当前所有的日志解析
:return:
"""
now = millis_now()
# 初始化
if last_update_ts == 0:
load_parsers()
if now - last_update_ts >= 30000:
run_in_thread(load_parsers())
return current_generators
def load_parsers():
"""
更新日志解析列表
:return:
"""
global current_generators
global last_update_ts
# update ts first, as this operation costs time, it may send multiple update command if we don't do this
last_update_ts = millis_now()
try:
parser_fn = get_fn_load_parsers()
if parser_fn:
parsers = parser_fn()
generators = [gen_generator(p) for p in parsers]
current_generators = generators
    except Exception:
        # keep the previously loaded generators if refreshing fails
        pass
def test_parsers(parsers=None):
if not parsers:
parser_fn = get_fn_load_parsers()
if parser_fn:
parsers = parser_fn()
else:
return
generators = [gen_generator(p) for p in parsers]
return generators
|
_FONT = {
32: [3, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63],
33: [3, 63, 63, 63, 51, 51, 51, 51, 51, 51, 63, 51, 63, 63, 63],
34: [5, 1023, 1023, 1023, 819, 819, 819, 1023, 1023, 1023, 1023, 1023, 1023, 1023, 1023],
35: [7, 16383, 16383, 16383, 13119, 13119, 3, 13119, 15567, 3, 15567, 15567, 16383, 16383, 16383],
36: [6, 4095, 4095, 4095, 4047, 3075, 4092, 4092, 3843, 3327, 3327, 3840, 4047, 4095, 4095],
37: [10, 1048575, 1048575, 1048575, 1036047, 1044723, 1044723, 1045263, 986367, 848127, 848127, 986943, 1048575, 1048575, 1048575],
38: [8, 65535, 65535, 65535, 64527, 64755, 64755, 65295, 53040, 49404, 49404, 3075, 65535, 65535, 65535],
39: [3, 63, 63, 63, 51, 51, 51, 63, 63, 63, 63, 63, 63, 63, 63],
40: [4, 255, 255, 63, 207, 207, 243, 243, 243, 243, 243, 207, 207, 63, 255],
41: [4, 255, 255, 252, 243, 243, 207, 207, 207, 207, 207, 243, 243, 252, 255],
42: [5, 1023, 1023, 1023, 975, 0, 819, 252, 1023, 1023, 1023, 1023, 1023, 1023, 1023],
43: [6, 4095, 4095, 4095, 4095, 4095, 3903, 3903, 3, 3903, 3903, 4095, 4095, 4095, 4095],
44: [3, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 51, 51, 60, 63],
45: [3, 63, 63, 63, 63, 63, 63, 63, 0, 63, 63, 63, 63, 63, 63],
46: [3, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 51, 63, 63, 63],
47: [5, 1023, 1023, 255, 831, 831, 831, 975, 975, 975, 1011, 1011, 1011, 1020, 1023],
48: [6, 4095, 4095, 4095, 3843, 3120, 3324, 3324, 3324, 3324, 3120, 3843, 4095, 4095, 4095],
49: [6, 4095, 4095, 4095, 3903, 3843, 3891, 3903, 3903, 3903, 3903, 3903, 4095, 4095, 4095],
50: [6, 4095, 4095, 4095, 3855, 3315, 3327, 3135, 3903, 4047, 4083, 3075, 4095, 4095, 4095],
51: [6, 4095, 4095, 4095, 3840, 3327, 3327, 3855, 3327, 3327, 3327, 3840, 4095, 4095, 4095],
52: [6, 4095, 4095, 4095, 3327, 3135, 3279, 3315, 3324, 0, 3327, 3327, 4095, 4095, 4095],
53: [6, 4095, 4095, 4095, 3075, 4083, 4083, 3843, 3135, 3327, 3327, 3843, 4095, 4095, 4095],
54: [6, 4095, 4095, 4095, 3855, 4047, 4083, 3843, 3315, 3315, 3315, 3855, 4095, 4095, 4095],
55: [6, 4095, 4095, 4095, 3072, 3135, 3903, 4047, 4047, 4035, 4083, 4083, 4095, 4095, 4095],
56: [6, 4095, 4095, 4095, 3855, 3315, 3315, 3855, 3315, 3315, 3315, 3855, 4095, 4095, 4095],
57: [6, 4095, 4095, 4095, 3843, 3324, 3324, 3324, 3075, 3327, 3903, 4035, 4095, 4095, 4095],
58: [3, 63, 63, 63, 63, 63, 63, 51, 63, 63, 63, 51, 63, 63, 63],
59: [3, 63, 63, 63, 63, 63, 63, 51, 63, 63, 63, 51, 51, 60, 63],
60: [6, 4095, 4095, 4095, 4095, 4095, 3903, 4047, 4080, 4080, 4047, 3903, 4095, 4095, 4095],
61: [6, 4095, 4095, 4095, 4095, 4095, 4095, 3075, 4095, 3075, 4095, 4095, 4095, 4095, 4095],
62: [6, 4095, 4095, 4095, 4095, 4095, 4083, 4047, 3135, 3135, 4047, 4083, 4095, 4095, 4095],
63: [5, 1023, 1023, 1023, 963, 828, 831, 975, 963, 1011, 1023, 1011, 1023, 1023, 1023],
64: [11, 4194303, 4194303, 4194303, 4128831, 3948303, 3145935, 3357747, 3358515, 3358515, 3358515, 3932355, 4194255, 4177983, 4194303],
65: [8, 65535, 65535, 65535, 65343, 64527, 64719, 64719, 62451, 61443, 62451, 53244, 65535, 65535, 65535],
66: [7, 16383, 16383, 16383, 15363, 13299, 13299, 15363, 13299, 13299, 13299, 15363, 16383, 16383, 16383],
67: [7, 16383, 16383, 16383, 63, 16335, 16371, 16371, 16371, 16371, 16335, 63, 16383, 16383, 16383],
68: [8, 65535, 65535, 65535, 64515, 62451, 53235, 53235, 53235, 53235, 62451, 64515, 65535, 65535, 65535],
69: [6, 4095, 4095, 4095, 3, 4083, 4083, 3075, 4083, 4083, 4083, 3, 4095, 4095, 4095],
70: [6, 4095, 4095, 4095, 3, 4083, 4083, 3, 4083, 4083, 4083, 4083, 4095, 4095, 4095],
71: [8, 65535, 65535, 65535, 61503, 53199, 65523, 65523, 53235, 53235, 53199, 49215, 65535, 65535, 65535],
72: [8, 65535, 65535, 65535, 53235, 53235, 53235, 49155, 53235, 53235, 53235, 53235, 65535, 65535, 65535],
73: [3, 63, 63, 63, 51, 51, 51, 51, 51, 51, 51, 51, 63, 63, 63],
74: [6, 4095, 4095, 4095, 3327, 3327, 3327, 3327, 3327, 3327, 3327, 3840, 4095, 4095, 4095],
75: [7, 16383, 16383, 16383, 1011, 12531, 15411, 16131, 16131, 15411, 12531, 1011, 16383, 16383, 16383],
76: [6, 4095, 4095, 4095, 4083, 4083, 4083, 4083, 4083, 4083, 4083, 3, 4095, 4095, 4095],
77: [9, 262143, 262143, 262143, 249807, 200655, 209715, 209715, 209715, 212211, 212211, 212979, 262143, 262143, 262143],
78: [8, 65535, 65535, 65535, 53235, 53187, 52995, 53043, 52467, 50163, 50163, 53235, 65535, 65535, 65535],
79: [9, 262143, 262143, 262143, 258111, 249807, 212979, 212979, 212979, 212979, 249807, 258111, 262143, 262143, 262143],
80: [7, 16383, 16383, 16383, 15363, 13299, 13299, 13299, 15363, 16371, 16371, 16371, 16383, 16383, 16383],
81: [9, 262143, 262143, 262143, 258111, 249807, 212979, 212979, 212979, 212979, 249807, 258111, 261375, 246783, 262143],
82: [7, 16383, 16383, 16383, 15363, 13299, 13299, 13299, 15363, 15411, 15603, 12531, 16383, 16383, 16383],
83: [6, 4095, 4095, 4095, 3075, 4092, 4092, 4035, 3135, 3327, 3327, 3840, 4095, 4095, 4095],
84: [6, 4095, 4095, 4095, 3, 3903, 3903, 3903, 3903, 3903, 3903, 3903, 4095, 4095, 4095],
85: [8, 65535, 65535, 65535, 53235, 53235, 53235, 53235, 53235, 53235, 50115, 61455, 65535, 65535, 65535],
86: [8, 65535, 65535, 65535, 16371, 53199, 53199, 53199, 62271, 62271, 64767, 64767, 65535, 65535, 65535],
87: [9, 262143, 262143, 262143, 249852, 249660, 249660, 249036, 249036, 249024, 259011, 259059, 262143, 262143, 262143],
88: [7, 16383, 16383, 16383, 13308, 15603, 16143, 16143, 16143, 15603, 15603, 13308, 16383, 16383, 16383],
89: [7, 16383, 16383, 16383, 1008, 13299, 12483, 15375, 16191, 16191, 16191, 16191, 16383, 16383, 16383],
90: [6, 4095, 4095, 4095, 0, 3327, 3327, 3903, 4047, 4083, 4083, 0, 4095, 4095, 4095],
91: [4, 255, 255, 15, 207, 207, 207, 207, 207, 207, 207, 207, 207, 15, 255],
92: [5, 1023, 1023, 1020, 1011, 1011, 1011, 975, 975, 975, 831, 831, 831, 255, 1023],
93: [4, 255, 255, 240, 243, 243, 243, 243, 243, 243, 243, 243, 243, 240, 255],
94: [7, 16383, 16383, 16383, 16335, 16179, 15408, 15612, 16383, 16383, 16383, 16383, 16383, 16383, 16383],
95: [6, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 0, 4095],
96: [4, 255, 255, 252, 243, 207, 255, 255, 255, 255, 255, 255, 255, 255, 255],
97: [6, 4095, 4095, 4095, 4095, 4095, 3843, 3327, 3075, 3324, 3324, 3075, 4095, 4095, 4095],
98: [7, 16383, 16383, 16383, 16371, 16371, 15363, 13299, 13299, 13299, 12531, 15363, 16383, 16383, 16383],
99: [5, 1023, 1023, 1023, 1023, 1023, 783, 1011, 1011, 1011, 1011, 783, 1023, 1023, 1023],
100: [7, 16383, 16383, 16383, 13311, 13311, 12303, 13299, 13299, 13299, 13251, 12303, 16383, 16383, 16383],
101: [6, 4095, 4095, 4095, 4095, 4095, 3855, 3315, 3075, 4083, 4083, 3087, 4095, 4095, 4095],
102: [4, 255, 255, 255, 15, 243, 3, 243, 243, 243, 243, 243, 255, 255, 255],
103: [6, 4095, 4095, 4095, 4095, 4095, 3087, 3315, 3315, 3315, 3315, 3087, 3327, 3843, 4095],
104: [6, 4095, 4095, 4095, 4083, 4083, 3843, 3315, 3315, 3315, 3315, 3315, 4095, 4095, 4095],
105: [3, 63, 63, 63, 51, 63, 51, 51, 51, 51, 51, 51, 63, 63, 63],
106: [4, 255, 255, 255, 207, 255, 207, 207, 207, 207, 207, 207, 207, 240, 255],
107: [6, 4095, 4095, 4095, 4083, 4083, 3315, 3891, 4035, 3843, 3123, 243, 4095, 4095, 4095],
108: [3, 63, 63, 63, 51, 51, 51, 51, 51, 51, 51, 3, 63, 63, 63],
109: [9, 262143, 262143, 262143, 262143, 262143, 245763, 212211, 212211, 212211, 212211, 212211, 262143, 262143, 262143],
110: [6, 4095, 4095, 4095, 4095, 4095, 3843, 3315, 3315, 3315, 3315, 3315, 4095, 4095, 4095],
111: [7, 16383, 16383, 16383, 16383, 16383, 15375, 13299, 13299, 13299, 13299, 15375, 16383, 16383, 16383],
112: [7, 16383, 16383, 16383, 16383, 16383, 15363, 12531, 13299, 13299, 13299, 15363, 16371, 16371, 16383],
113: [7, 16383, 16383, 16383, 16383, 16383, 12303, 13251, 13299, 13299, 13299, 12303, 13311, 13311, 16383],
114: [4, 255, 255, 255, 255, 255, 3, 243, 243, 243, 243, 243, 255, 255, 255],
115: [5, 1023, 1023, 1023, 1023, 1023, 771, 1011, 963, 783, 831, 771, 1023, 1023, 1023],
116: [5, 1023, 1023, 1023, 1011, 1011, 771, 1011, 1011, 1011, 1011, 783, 1023, 1023, 1023],
117: [6, 4095, 4095, 4095, 4095, 4095, 3315, 3315, 3315, 3315, 3315, 3087, 4095, 4095, 4095],
118: [6, 4095, 4095, 4095, 4095, 4095, 1011, 1011, 3279, 3279, 3087, 3903, 4095, 4095, 4095],
119: [9, 262143, 262143, 262143, 262143, 262143, 64764, 64764, 209715, 209715, 197379, 249807, 262143, 262143, 262143],
120: [6, 4095, 4095, 4095, 4095, 4095, 1020, 3315, 3855, 3855, 3315, 1020, 4095, 4095, 4095],
121: [6, 4095, 4095, 4095, 4095, 4095, 1020, 1011, 3315, 3279, 3087, 3903, 3903, 4032, 4095],
122: [5, 1023, 1023, 1023, 1023, 1023, 0, 831, 975, 1011, 1020, 0, 1023, 1023, 1023],
123: [4, 255, 255, 15, 207, 207, 207, 207, 243, 207, 207, 207, 207, 15, 255],
124: [3, 63, 63, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 63],
125: [4, 255, 255, 240, 243, 243, 243, 243, 207, 243, 243, 243, 243, 240, 255],
126: [6, 4095, 4095, 4095, 4095, 4095, 4095, 4095, 963, 3132, 4095, 4095, 4095, 4095, 4095],
}
|
from django.contrib.syndication.feeds import Feed
from blogserver.apps.blog.models import Post, Category
class LatestPosts(Feed):
title = "Latest Posts"
link = ""
description = "The Latest posts from the blog."
def items(self):
        return Post.live.order_by('-date_published')[:10]
class Categories(Feed):
description = ""
def get_object(self, bits):
if len(bits) != 1:
raise Category.DoesNotExist
return Category.objects.get(slug=bits[0])
def title(self, obj):
return obj.name
def link(self, obj):
return obj.get_absolute_url()
def items(self, obj):
return obj.post_set.all()
|
import requests
import json
from generateReport import creation
def report_link(db,user_id,sheet_name,flag):
user_detail = db.find_one({"user": user_id})
# print("------------------------------------------")
# print(user_detail)
# print("-----------&&&&&&&&&&-------------------------------")
# print(ans["sheets"][sheet_name])
student = user_detail["sheets"][sheet_name]
    ans = []
    count = 1
    for st in student:
        if flag == st["status"]:
            st["sNo"] = count
            ans.append(st)
            count += 1
    data = {
        "title": flag.lower(),
        "items": ans
    }
return creation.createReport(data, sheet_name)
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
from ..types import FormattedText
from ..types import ReplyMarkup
class EditInlineMessageCaption(BaseObject):
"""
Edits the caption of an inline message sent via a bot; for bots only
:param inline_message_id: Inline message identifier
:type inline_message_id: :class:`str`
:param reply_markup: The new message reply markup; pass null if none
:type reply_markup: :class:`ReplyMarkup`
:param caption: New message content caption; pass null to remove caption; 0-GetOption("message_caption_length_max") characters
:type caption: :class:`FormattedText`
"""
ID: str = Field("editInlineMessageCaption", alias="@type")
inline_message_id: str
reply_markup: ReplyMarkup
caption: FormattedText
@staticmethod
def read(q: dict) -> EditInlineMessageCaption:
return EditInlineMessageCaption.construct(**q)
|
import math
from typing import Callable
import torch
import torch.nn as nn
from torchlibrosa.stft import STFT
from bytesep.models.pytorch_modules import Base
def l1(output: torch.Tensor, target: torch.Tensor, **kwargs) -> torch.Tensor:
r"""L1 loss.
Args:
output: torch.Tensor
target: torch.Tensor
Returns:
loss: torch.float
"""
return torch.mean(torch.abs(output - target))
def l1_wav(output: torch.Tensor, target: torch.Tensor, **kwargs) -> torch.Tensor:
r"""L1 loss in the time-domain.
Args:
output: torch.Tensor
target: torch.Tensor
Returns:
loss: torch.float
"""
return l1(output, target)
class L1_Wav_L1_Sp(nn.Module, Base):
def __init__(self):
r"""L1 loss in the time-domain and L1 loss on the spectrogram."""
super(L1_Wav_L1_Sp, self).__init__()
self.window_size = 2048
hop_size = 441
center = True
pad_mode = "reflect"
window = "hann"
self.stft = STFT(
n_fft=self.window_size,
hop_length=hop_size,
win_length=self.window_size,
window=window,
center=center,
pad_mode=pad_mode,
freeze_parameters=True,
)
def __call__(
self, output: torch.Tensor, target: torch.Tensor, **kwargs
) -> torch.Tensor:
r"""L1 loss in the time-domain and on the spectrogram.
Args:
output: torch.Tensor
target: torch.Tensor
Returns:
loss: torch.float
"""
# L1 loss in the time-domain.
wav_loss = l1_wav(output, target)
# L1 loss on the spectrogram.
sp_loss = l1(
self.wav_to_spectrogram(output, eps=1e-8),
self.wav_to_spectrogram(target, eps=1e-8),
)
# sp_loss /= math.sqrt(self.window_size)
# sp_loss *= 1.
# Total loss.
        return wav_loss + sp_loss
def get_loss_function(loss_type: str) -> Callable:
r"""Get loss function.
Args:
loss_type: str
Returns:
loss function: Callable
"""
if loss_type == "l1_wav":
return l1_wav
elif loss_type == "l1_wav_l1_sp":
return L1_Wav_L1_Sp()
else:
raise NotImplementedError
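# Minimal usage sketch (not part of the original module): resolve a loss by name and
# apply it to random tensors. The shapes below are illustrative; "l1_wav" only relies
# on the functions defined above, while "l1_wav_l1_sp" additionally needs torchlibrosa
# and the bytesep Base mixin to be importable.
if __name__ == "__main__":
    loss_fn = get_loss_function("l1_wav")
    est = torch.rand(2, 1, 44100)  # (batch, channels, samples)
    ref = torch.rand(2, 1, 44100)
    print("l1_wav loss:", loss_fn(est, ref).item())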
|
# This script will accept two numbers from user, then add them and give the result.
# Import modules.
import platform
import os
# Clear the screen as per the OS type.
os_name=platform.system()
if os_name == "Windows":
os.system("cls")
elif os_name == "Linux":
os.system("clear")
else:
print(f"The OS is not Windows/Linux. This script is designed only for Windows/Linux. Hence exiting.")
exit()
# Display purpose of the script.
print(f"This script will accept 2 numbers and display their sum.\n")
# Accept user input (use float() rather than eval() so arbitrary code cannot be executed).
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
# Calculate the sum (avoid shadowing the built-in sum()).
total = num1 + num2
# Display the sum.
print(f"\nThe sum of {num1} and {num2} is {total}.\n")
|
#!/usr/bin/env python
import sys
from oppai import *
# prints timing points (just a test for this interface)
ez = ezpp_new()
ezpp(ez, sys.argv[1])
for i in range(ezpp_ntiming_points(ez)):
time = ezpp_timing_time(ez, i)
ms_per_beat = ezpp_timing_ms_per_beat(ez, i)
change = ezpp_timing_change(ez, i)
print("%f | %f beats per ms | change: %d" % (time, ms_per_beat, change))
ezpp_free(ez)
|
import math
from datetime import datetime, timedelta
import sys
from airflow.models import Variable
import pandas as pd
import numpy as np
sys.path.insert(0, '/home/curw/git/DSS-Framework/db_util')
# sys.path.insert(0, '/home/hasitha/PycharmProjects/DSS-Framework/db_util')
from gen_db import CurwFcstAdapter, CurwObsAdapter, CurwSimAdapter
from dss_db import RuleEngineAdapter
COMMON_DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
STATION_TYPE = 'CUrW_WaterLevelGauge'
MME_TAG = 'MDPA'
VARIABLE_TYPE = 'waterlevel'
VARIABLE = 2
UNIT = 2
OBS_VARIABLE = 17
OBS_UNIT = 15
def get_curw_dss_adapter(db_config=None):
if db_config is None:
db_config = Variable.get('db_config', deserialize_json=True)
adapter = RuleEngineAdapter.get_instance(db_config)
return adapter
def get_curw_fcst_adapter(db_config=None):
if db_config is None:
db_config = Variable.get('fcst_db_config', deserialize_json=True)
adapter = CurwFcstAdapter.get_instance(db_config)
return adapter
def get_curw_obs_adapter(db_config=None):
if db_config is None:
db_config = Variable.get('obs_db_config', deserialize_json=True)
adapter = CurwObsAdapter.get_instance(db_config)
return adapter
def get_curw_sim_adapter(db_config=None):
if db_config is None:
db_config = Variable.get('sim_db_config', deserialize_json=True)
adapter = CurwSimAdapter.get_instance(db_config)
return adapter
def calculate_flo2d_rule_accuracy(flo2d_rule, exec_datetime):
print('calculate_flo2d_rule_accuracy|flo2d_rule : ', flo2d_rule)
print('calculate_flo2d_rule_accuracy|execution_date : ', exec_datetime)
flo2d_model = flo2d_rule['model']
flo2d_version = flo2d_rule['version']
print('calculate_flo2d_rule_accuracy|flo2d_model : ', flo2d_model)
flo2d_rule_id = flo2d_rule['rule_info']['id']
accuracy_rule_id = flo2d_rule['rule_info']['accuracy_rule']
observed_days = flo2d_rule['rule_info']['observed_days']
print('calculate_flo2d_rule_accuracy|observed_days : ', observed_days)
sim_tag = 'hourly_run'
print('calculate_flo2d_rule_accuracy|sim_tag : ', sim_tag)
dss_adapter = get_curw_dss_adapter()
accuracy_rule = dss_adapter.get_accuracy_rule_info_by_id(accuracy_rule_id)
print('calculate_flo2d_rule_accuracy|accuracy_rule : ', accuracy_rule)
obs_station_list = format_obs_station_list(accuracy_rule['observed_stations'], accuracy_rule['allowed_error'])
station_result = {}
success_count = 0
if len(obs_station_list) > 0:
for [obs_station, allowed_error] in obs_station_list:
station_error = calculate_station_accuracy(obs_station, flo2d_model, flo2d_version,
exec_datetime, observed_days, sim_tag)
if station_error is not None:
if station_error <= allowed_error:
station_result[obs_station] = True
                    success_count += 1
else:
station_result[obs_station] = False
else:
station_result[obs_station] = False
total_stations = len(station_result.keys())
print('calculate_flo2d_rule_accuracy|total_stations : ', total_stations)
print('calculate_flo2d_rule_accuracy|success_count : ', success_count)
accuracy_percentage = (success_count / total_stations) * 100
print('calculate_flo2d_rule_accuracy|accuracy_percentage : ', accuracy_percentage)
dss_adapter.update_flo2d_rule_accuracy_level(accuracy_percentage, flo2d_rule_id)
print('flo2d rule current accuracy successfully updated.')
def calculate_station_accuracy(obs_station, flo2d_model, flo2d_version,
exec_datetime, observed_days, sim_tag, method='MAD'):
obs_adapter = get_curw_obs_adapter()
obs_station_id = get_obs_station_id(obs_station, obs_adapter, STATION_TYPE)
[tms_start, tms_end] = get_flo2d_ts_start_end(exec_datetime, observed_days)
tms_start = tms_start.strftime('%Y-%m-%d %H:%M:%S')
tms_end = tms_end.strftime('%Y-%m-%d %H:%M:%S')
print('calculate_station_accuracy|[tms_start, tms_end] : ', [tms_start, tms_end])
if obs_station_id is not None:
obs_hash_id = get_obs_station_hash_id(obs_station_id, obs_adapter)
obs_df = get_obs_tms(obs_hash_id, exec_datetime, tms_start, tms_end, obs_adapter)
if obs_df is not None:
fcst_adapter = get_curw_fcst_adapter()
cell_id = get_cell_id(obs_station, flo2d_model, flo2d_version)
if cell_id is not None:
flo2d_station_id = get_matching_flo2d_station(obs_station, cell_id, fcst_adapter)
print('calculate_station_accuracy|flo2d_station_id : ', flo2d_station_id)
if flo2d_station_id is not None:
fcst_adapter = get_curw_fcst_adapter()
flo2d_hash_id = get_flo2d_station_hash_id(flo2d_model, flo2d_version, flo2d_station_id,
exec_datetime,
sim_tag, fcst_adapter)
print('calculate_station_accuracy|flo2d_hash_id : ', flo2d_hash_id)
if flo2d_hash_id is not None:
fcst_df = get_fcst_tms(flo2d_hash_id, exec_datetime, tms_start, tms_end, fcst_adapter)
if fcst_df is not None:
print('calculate_station_accuracy|obs_df : ', obs_df)
print('calculate_station_accuracy|fcst_df : ', fcst_df)
merged_df = obs_df.merge(fcst_df, how='left', on='time')
merged_df['cumulative_observed'] = merged_df['observed'].cumsum()
merged_df['cumulative_forecast'] = merged_df['forecast'].cumsum()
print(merged_df)
merged_df['cum_diff'] = merged_df["cumulative_observed"] - merged_df["cumulative_forecast"]
row_count = len(merged_df.index)
print('row_count : ', row_count)
if method == 'MAD':
print('MAD')
merged_df['abs_cum_diff'] = merged_df['cum_diff'].abs()
                                sum_abs_diff = merged_df['abs_cum_diff'].sum()
print('sum_abs_diff : ', sum_abs_diff)
mean_absolute_deviation = sum_abs_diff / row_count
print('mean_absolute_deviation : ', mean_absolute_deviation)
return mean_absolute_deviation
elif method == 'RMSE':
print('RMSE')
merged_df['diff_square'] = np.power((merged_df['cum_diff']), 2)
root_mean_square_error = math.sqrt(merged_df['diff_square'].sum() / row_count)
print('root_mean_square_error : ', root_mean_square_error)
return root_mean_square_error
else:
print('Invalid method.')
return None
def get_cell_id(station_name, model, version, fcst_adapter=None):
if fcst_adapter is None:
fcst_adapter = get_curw_fcst_adapter()
cell_map = fcst_adapter.get_flo2d_cell_map(model, version)
cell_id = None
if cell_map is not None:
print('get_cell_id|cell_map : ', cell_map)
channel_cell_map = cell_map['CHANNEL_CELL_MAP']
flood_plain_cell_map = cell_map['FLOOD_PLAIN_CELL_MAP']
if station_name in channel_cell_map:
cell_id = channel_cell_map[station_name]
elif station_name in flood_plain_cell_map:
            cell_id = flood_plain_cell_map[station_name]
return cell_id
def format_obs_station_list(obs_stations, allowed_error):
station_list = obs_stations.split(",")
print(station_list)
formatted_list = []
for station in station_list:
station_val = station.split('-')
if len(station_val) == 2:
formatted_list.append([station_val[0], station_val[1]])
else:
formatted_list.append([station_val[0], allowed_error])
print(formatted_list)
return formatted_list
def get_obs_station_id(obs_station, obs_adapter=None, station_type=STATION_TYPE):
if obs_adapter is None:
obs_adapter = get_curw_obs_adapter()
station_id = obs_adapter.get_station_id_by_name(station_type, obs_station)
if station_id is not None:
print('get_obs_station_id|station_id : ', station_id)
return station_id
def get_obs_station_hash_id(obs_station_id, obs_adapter=None):
if obs_adapter is None:
obs_adapter = get_curw_obs_adapter()
hash_id = obs_adapter.get_station_hash_id(obs_station_id, OBS_VARIABLE, OBS_UNIT)
if hash_id is not None:
print('get_obs_station_hash_id|hash_id : ', hash_id)
return hash_id
def get_matching_flo2d_station(obs_station, cell_id, fcst_adapter=None):
fcst_station_name = '{}_{}'.format(cell_id, obs_station)
if fcst_adapter is None:
fcst_adapter = get_curw_fcst_adapter()
flo2d_station_id = fcst_adapter.get_flo2d_station_id_by_name(fcst_station_name)
if flo2d_station_id is not None:
print('get_matching_flo2d_station|flo2d_station_id : ', flo2d_station_id)
return flo2d_station_id
def get_flo2d_station_hash_id(flo2d_model, flo2d_version, flo2d_station_id, exec_date, sim_tag, fcst_adapter=None):
if fcst_adapter is None:
fcst_adapter = get_curw_fcst_adapter()
source_id = fcst_adapter.get_source_id(flo2d_model, flo2d_version)
if source_id is not None:
print('get_flo2d_station_hash_id|source_id : ', source_id)
hash_id = fcst_adapter.get_hash_id_of_station(VARIABLE, UNIT, source_id, flo2d_station_id, sim_tag,
exec_date)
if hash_id is not None:
print('get_flo2d_station_hash_id|hash_id : ', hash_id)
return hash_id
def get_flo2d_ts_start_end(exec_datetime, observed_days):
observed_days = int(observed_days)
exec_datetime = datetime.strptime(exec_datetime, '%Y-%m-%d %H:%M:%S')
print(exec_datetime)
exec_date_str = exec_datetime.strftime('%Y-%m-%d')
exec_date = datetime.strptime(exec_date_str, '%Y-%m-%d')
print(exec_date)
ts_start_date = exec_date - timedelta(days=observed_days)
ts_start_date_str = ts_start_date.strftime('%Y-%m-%d')
print(ts_start_date_str)
gfs_ts_start_utc_str = '{} 00:00:00'.format(ts_start_date_str)
print(gfs_ts_start_utc_str)
gfs_ts_start_utc = datetime.strptime(gfs_ts_start_utc_str, '%Y-%m-%d %H:%M:%S')
return [gfs_ts_start_utc, exec_datetime]
def get_fcst_tms(flo2d_station_hash_id, exec_datetime, tms_start, tms_end, fcst_adapter=None):
if fcst_adapter is None:
fcst_adapter = get_curw_fcst_adapter()
tms_df = fcst_adapter.get_wrf_station_tms(flo2d_station_hash_id, exec_datetime, tms_start, tms_end)
if tms_df is not None:
return format_df_to_time_indexing(tms_df)
def format_df_to_time_indexing(tms_df):
tms_df['time'] = pd.to_datetime(tms_df['time'], format=COMMON_DATE_TIME_FORMAT)
tms_df.set_index('time', inplace=True)
return tms_df
def get_obs_tms(obs_station_hash_id, exec_datetime, tms_start, tms_end, obs_adapter=None):
if obs_adapter is None:
obs_adapter = get_curw_obs_adapter()
tms_df = obs_adapter.get_timeseries_by_id(obs_station_hash_id, tms_start, tms_end)
if tms_df is not None:
return format_df_to_15min_intervals(tms_df)
def format_df_to_15min_intervals(tms_df):
tms_df = format_df_to_time_indexing(tms_df)
min15_ts = pd.DataFrame()
min15_ts['value'] = tms_df['value'].resample('15min', label='right', closed='right').sum()
print(min15_ts)
return min15_ts
if __name__ == "__main__":
# obs_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',
# 'mysql_db': 'curw_obs', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}
# print(len(obs_db_config.keys()))
# sim_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',
# 'mysql_db': 'curw_sim', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}
fcst_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',
'mysql_db': 'curw_fcst', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}
# obs_adapter = get_curw_obs_adapter(obs_db_config)
# sim_adapter = get_curw_sim_adapter(sim_db_config)
fcst_adapter = get_curw_fcst_adapter(fcst_db_config)
# print(get_matching_flo2d_station('Arangala', obs_adapter, sim_adapter))
print(fcst_adapter.get_flo2d_cell_map('FLO2D', 250))
|
import DataAugment
import os
import sys
import glob
import random
import cv2
import numpy as np
from matplotlib import pyplot as plt
# set data augmentation method
# a value of 1 enables the corresponding augmentation method
DataAugmentMethod = {
'_avg_blur' : 0,
'_gaussain_blur' : 1,
'_gaussain_noise' : 1,
'_img_shift' : 1,
'_img_rotation' : 1,
'_img_flip' : 0,
'_img_zoom' : 0,
'_img_contrast' : 1,
'_img_color' : 0
}
# generate data
_generate_quantity = 100
# set the database relative path
database = './data'
# change dir to database path
os.chdir(database)
# get all of the '.jpg' file in the database path
images = os.listdir('.')
images = glob.glob('*.jpg')
# get quantity of '.jpg' file
size = len(images)
print (size)
# check workspace
os.chdir('../')
print(os.getcwd())
# parameter for data augment functions
_max_filiter_size = 5 #for avg_blur and gaussain_blur
_sigma = 0 # for gaussain_blur
_mean = 0 # for gaussain_noise
_var = 0.1 # for gaussain_noise
_x_min_shift_piexl = -20 # for img_shift
_x_max_shift_piexl = 20 # for img_shift
_y_min_shift_piexl = -20 # for img_shift
_y_max_shift_piexl = 20 # for img_shift
_fill_pixel = 255 # for img_shift and img_rotation
_min_angel = -10 # for img_rotation
_max_angel = 10 # for img_rotation
_min_scale = 0.9 # for img_rotation
_max_scale = 1.1 # for img_rotation
_min_zoom_scale = 1 # for img_zoom
_max_zoom_scale = 1 # for img_zoom
_min_s = -10 # for img_contrast
_max_s = 10 # for img_contrast
_min_v = -10 # for img_contrast
_max_v = 10 # for img_contrast
_min_h = -10 # for img_color
_max_h = 10 # for img_color
DataAugmentBase = './Augment/'
for i in images:
generate_quantity = _generate_quantity
while generate_quantity > 0 :
img_dir = database + '/' + i
img = cv2.imread(img_dir)
#print (generate_quantity)
if DataAugmentMethod['_avg_blur'] == 1 :
if random.randint(0, 1) == 1 :
img = DataAugment.avg_blur(img, _max_filiter_size)
#print ('do ab')
if DataAugmentMethod['_gaussain_blur'] == 1 :
if random.randint(0, 1) == 1 :
img = DataAugment.gaussain_blur(img, _max_filiter_size, _sigma)
#print ('do gb')
if DataAugmentMethod['_gaussain_noise'] == 1 :
if random.randint(0, 1) == 1 :
img = DataAugment.gaussain_noise(img, _mean, _var)
#print ('do gn')
if DataAugmentMethod['_img_shift'] == 1 :
if random.randint(0, 1) == 1 :
img = DataAugment.img_shift(img, _x_min_shift_piexl, _x_max_shift_piexl, _y_min_shift_piexl, _y_max_shift_piexl, _fill_pixel)
#print ('do is')
if DataAugmentMethod['_img_rotation'] == 1 :
if random.randint(0, 1) == 1 :
img = DataAugment.img_rotation(img, _min_angel, _max_angel, _min_scale, _max_scale, _fill_pixel)
#print ('do ir')
if DataAugmentMethod['_img_flip'] == 1:
if random.randint(0, 1) == 1 :
img = DataAugment.img_flip(img)
#print ('do if')
if DataAugmentMethod['_img_zoom'] == 1:
if random.randint(0, 1) == 1 :
img = DataAugment.img_zoom(img, _min_zoom_scale, _max_zoom_scale)
#print ('do iz')
if DataAugmentMethod['_img_contrast'] == 1:
if random.randint(0, 1) == 1 :
img = DataAugment.img_contrast(img, _min_s, _max_s, _min_v, _max_v)
#print ('do ic')
if DataAugmentMethod['_img_color'] == 1:
if random.randint(0, 1) == 1 :
img = DataAugment.img_color(img, _min_h, _max_h)
#print ('do ic2')
save_dir = ('_%06d_') % (generate_quantity)
save_dir = DataAugmentBase + save_dir + i
generate_quantity -= 1
img = img.astype(np.uint8)
cv2.imwrite(save_dir, img)
|
import numpy as np
import numpy.random as rd
import tensorflow as tf
from lsnn.guillaume_toolbox import einsum_bij_ki_to_bkj
n_batch = 2
i, j, k = 3, 4, 5
a = rd.rand(n_batch, i, j)
b = rd.rand(k, i)
tf_a = tf.constant(a)
tf_b = tf.constant(b)
prod2 = einsum_bij_ki_to_bkj(tf_a, tf_b)
sess = tf.Session()
np_prod_1 = np.einsum('bij,ki->bkj', a, b)
np_prod_2 = sess.run(prod2)
assert np.allclose(np_prod_1, np_prod_2), 'Mismatch'
print('Prod 1')
print(np_prod_1)
print('Prod 2')
print(np_prod_2)
|
from deephaven.plugin.json import Node
output = Node({
'str': 'bar',
'int': 1,
'float': 3.14,
'None': None,
'True': True,
'False': False,
'empty_list': [],
'empty_tuple': (),
'empty_dict': {},
'list': ['hello', 'world'],
'tuple': ('Devin', 1987),
'dict': {
'foo': 'bar',
'baz': 31337
}
})
|
import numpy as np
import pytest
from shapes.shape import Triangle, Rectangle, QuadrangleBrush, Quadrangle, Ellipse, Curve, \
from_index, index_of
from shapes.shape.shape import crop_bounds
def test_should_leave_bounds_unchanged_if_no_need_to_crop():
bounds = np.array([[50, 60, 50], [50, 60, 51], [50, 60, 52]])
bounds_to_crop = bounds.copy()
w = 100
h = 100
crop_bounds(bounds=bounds_to_crop, w=w, h=h)
assert np.array_equal(bounds, bounds_to_crop)
def test_should_crop_negative_x1s_to_zeros():
bounds = np.array([[-1, 60, 50], [50, 60, 51], [50, 60, 52]])
w = 100
h = 100
crop_bounds(bounds=bounds, w=w, h=h)
assert np.all(bounds >= 0)
def test_should_crop_too_big_x2s_to_max_width():
bounds = np.array([[50, 60, 50], [50, 100, 51], [50, 60, 52]])
w = 100
h = 100
crop_bounds(bounds=bounds, w=w, h=h)
assert np.all(bounds < w)
def test_should_crop_negative_ys_to_zeros():
bounds = np.array([[50, 60, -1], [50, 100, 51], [50, 60, 52]])
w = 100
h = 100
crop_bounds(bounds=bounds, w=w, h=h)
assert np.all(bounds >= 0)
def test_should_crop_too_big_ys_to_max_height():
bounds = np.array([[50, 60, 50], [50, 100, 51], [50, 60, 100]])
w = 100
h = 100
crop_bounds(bounds=bounds, w=w, h=h)
assert np.all(bounds < h)
@pytest.mark.parametrize("x1, x2, y, cropped_x1, cropped_x2, cropped_y", [
(-1, -2, 50, 0, 0, 50),
(-1, 150, 50, 0, 99, 50),
(140, 150, 50, 99, 99, 50),
(-1, 24, -30, 0, 24, 0),
(50, 150, 150, 50, 99, 99),
])
def test_should_crop_when_multiple_variables_are_out_of_bounds(x1, x2, y, cropped_x1, cropped_x2,
cropped_y):
bounds = np.array([[x1, x2, y]])
crop_bounds(bounds=bounds, w=100, h=100)
assert bounds[0][0] == cropped_x1, f'actual x1: {bounds[0][0]}, expected x1: {cropped_x1}'
assert bounds[0][1] == cropped_x2, f'actual x2: {bounds[0][1]}, expected x2: {cropped_x2}'
assert bounds[0][2] == cropped_y, f'actual y: {bounds[0][2]}, expected y: {cropped_y}'
@pytest.mark.parametrize("index, shape_cls", [
(0, Triangle),
(1, Rectangle),
(2, Ellipse),
(3, Quadrangle),
(4, QuadrangleBrush),
(5, Curve)
])
def test_from_index_should_return_proper_shape_class_given_index(index, shape_cls):
cls = from_index(index)
assert cls == shape_cls
@pytest.mark.parametrize("shape_cls, index", [
(Triangle, 0),
(Rectangle, 1),
(Ellipse, 2),
(Quadrangle, 3),
(QuadrangleBrush, 4),
(Curve, 5)
])
def test_index_of_should_return_proper_index_given_shape_class(shape_cls, index):
idx = index_of(shape_cls)
assert idx == index
@pytest.mark.parametrize("shape_cls, expected_params_len", [
(Triangle, 8),
(Rectangle, 7),
(Ellipse, 7),
(Quadrangle, 10),
(QuadrangleBrush, 9),
(Curve, 8)
])
def test_params_len_should_return_proper_value_for_each_shape_class(shape_cls, expected_params_len):
params_len = shape_cls.params_len()
assert params_len == expected_params_len
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Deterministic reconstruction of the j(x) in a JJ from a Fraunhofer pattern.
The method implemented here is based on:
[1] Dynes, R. C. & Fulton, T. A. Supercurrent Density Distribution in
Josephson Junctions. Phys. Rev. B 3, 3015–3023 (1971).
This method has the advantage of being algebraic but can suffer from a lack of
precision due to the finite extent of the measured Fraunhofer pattern.
We need to use the more complex approach of the paper since we are interested
in non-symmetric current distributions.
"""
from typing import Optional, Tuple
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import quad, romb
from scipy.interpolate import interp1d
from scipy.signal import hilbert
from typing_extensions import Literal
from shabanipy.utils.integrate import can_romberg, resample_evenly
def extract_theta(
fields: np.ndarray,
ics: np.ndarray,
f2k: float,
jj_width: float,
method: Optional[Literal["romb", "quad", "hilbert"]] = "hilbert",
) -> np.ndarray:
"""Compute the phase as the Hilbert transform of ln(I_c).
Note the integral expressions for θ(β) implemented here differ from Eq. (5)
[1] by a factor of 2, but give the correct output. (The source of this
discrepancy is as yet unknown. But note the Hilbert transform is usually
defined, as below, with a prefactor 1/π.)
Parameters
----------
fields : np.ndarray
Magnetic field at which the critical current was measured. For ND input
the sweep should occur on the last axis.
ics : np.ndarray
Measured value of the critical current. For ND input the sweep should
occur on the last axis.
f2k : float
Field-to-wavenumber conversion factor (i.e. β / B).
jj_width: float
Width of the junction transverse to the field and current.
method: str, optional
Method used to compute the phase; `romb` and `quad` specify
numerical integration methods (note `quad` is particularly slow), while
`hilbert` uses `scipy.signal.hilbert` to compute the discrete Hilbert
transform.
Returns
-------
np.ndarray
Phase θ, the Hilbert transform of ln(I_c), to be used when rebuilding
the current distribution. The phases are shifted by a factor
`field * jj_width / 2` to give a reconstructed current density centered
about the origin.
"""
# scale B to beta first; then forget about it
fields = fields * f2k
if method == "romb":
theta = _extract_theta_romb(fields, ics)
elif method == "quad":
theta = _extract_theta_quad(fields, ics)
elif method == "hilbert":
theta = _extract_theta_hilbert(ics)
else:
raise ValueError(f"Method '{method}' unsupported")
return theta - fields * jj_width / 2
def _extract_theta_romb(fields: np.ndarray, ics: np.ndarray) -> np.ndarray:
"""Compute Eq. (5) of [1] using Romberg integration."""
if not can_romberg(fields):
fine_fields, fine_ics = resample_evenly(fields, ics)
else:
fine_fields, fine_ics = fields, ics
step = abs(fine_fields[0] - fine_fields[1])
theta = np.empty_like(fields)
for i, (field, ic) in enumerate(zip(fields, ics)):
# don't divide by zero
denom = field ** 2 - fine_fields ** 2
denom[denom == 0] = 1e-9
theta[i] = field / np.pi * romb((np.log(fine_ics) - np.log(ic)) / denom, step)
return theta
def _extract_theta_quad(fields: np.ndarray, ics: np.ndarray) -> np.ndarray:
"""Compute Eq. (5) of [1] using scipy.integrate.quad."""
ics_interp = interp1d(fields, ics, "cubic")
def integrand(b, beta, ic):
# quad will provide b when calling this
return (np.log(ics_interp(b)) - np.log(ic)) / (beta ** 2 - b ** 2)
theta = np.empty_like(fields)
for i, (field, ic) in enumerate(zip(fields, ics)):
theta[i] = (
field
/ np.pi
* quad(
integrand,
np.min(fields),
np.max(fields),
args=(field, ic),
points=[field],
)[0]
)
return theta
def _extract_theta_hilbert(ics: np.ndarray) -> np.ndarray:
"""Compute Eq. (5) of [1] using a discrete Hilbert transform."""
return hilbert(np.log(ics)).imag
def extract_current_distribution(
fields: np.ndarray,
ics: np.ndarray,
f2k: float,
jj_width: float,
jj_points: int,
debug: bool = False,
theta: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""Extract the current distribution from Ic(B).
Parameters
----------
fields : np.ndarray
1D array of the magnetic field at which the critical current was measured.
ics : np.ndarray
1D array of the measured value of the critical current.
f2k : float
Field to wave-vector conversion factor. This can be estimated from the
Fraunhofer periodicity.
jj_width : float
Size of the junction. The current distribution will be reconstructed on
a larger region (2 * jj_width)
jj_points : int
Number of points used to describe the junction inside jj_width.
theta : np.ndarray, optional
Phase distribution to use in the current reconstruction. If None, it
will be extracted from the given Fraunhofer pattern.
Returns
-------
np.ndarray
Positions at which the current density was calculated.
np.ndarray
Current density.
"""
if not can_romberg(fields):
fine_fields, fine_ics = resample_evenly(fields, ics)
else:
fine_fields, fine_ics = fields, ics
fine_ics[np.less_equal(fine_ics, 1e-10)] = 1e-10
if theta is None:
theta = extract_theta(fine_fields, fine_ics, f2k, jj_width)
# scale from B to beta
fine_fields = f2k * fine_fields
step = abs(fine_fields[0] - fine_fields[1])
xs = np.linspace(-jj_width, jj_width, int(2 * jj_points))
j = np.empty(xs.shape, dtype=complex)
for i, x in enumerate(xs):
j[i] = (
1
/ (2 * np.pi)
* romb(fine_ics * np.exp(1j * (theta - fine_fields * x)), step)
)
if debug:
f, axes = plt.subplots(2, 1)
axes[0].plot(f2k * fields, ics)
axes[1].plot(xs, j.real)
plt.show()
return xs, j.real
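# Minimal synthetic-data sketch (not part of the original module): reconstruct j(x)
# from an ideal Fraunhofer pattern of a uniform junction. The f2k and jj_width values
# are illustrative; in practice f2k is estimated from the measured Fraunhofer period.
if __name__ == "__main__":
    jj_width = 1.0
    f2k = 1.0
    fields = np.linspace(-20.0, 20.0, 513)  # 2**9 + 1 points so Romberg integration applies
    ics = np.abs(np.sinc(fields * f2k * jj_width / (2 * np.pi)))  # |sin(beta*w/2) / (beta*w/2)|
    xs, jx = extract_current_distribution(fields, ics, f2k, jj_width, jj_points=64)
    print(xs.shape, jx.shape)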
|
import subprocess
cat = subprocess.Popen(
['cat', 'index.rst'],
stdout=subprocess.PIPE,
)
grep = subprocess.Popen(
['grep', '.. literalinclude::'],
stdin=cat.stdout,
stdout=subprocess.PIPE,
)
cut = subprocess.Popen(
['cut', '-f', '3', '-d:'],
stdin=grep.stdout,
stdout=subprocess.PIPE,
)
end_of_pipe = cut.stdout
print('Included files:')
for line in end_of_pipe:
print(line.decode('utf-8').strip())
|
def quick_sort(arr, low, high):
if len(arr) == 0:
return 'cannot sort an empty array'
if len(arr) == 1:
return arr
if low < high:
p = partition(arr, low, high)
quick_sort(arr, low, p - 1)
quick_sort(arr, p + 1, high)
return arr
def partition(arr, low, high):
pivot = arr[high]
    i = low - 1
for j in range(low, high):
if arr[j] <= pivot:
i += 1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
# print(array)
return (i + 1)
if __name__ == "__main__":
array = [8,4,23,42,16,15]
n = len(array)
quick_sort(array, 0, n-1)
print (f'sorted: {array}')
|
import os
import requests
import urllib.request
import zipfile
TYPE="20w18a:server"
def _clone(url,fp):
urllib.request.urlretrieve(url,fp)
print(f"Requesting release tree...")
json=[e for e in requests.get("https://launchermeta.mojang.com/mc/game/version_manifest.json").json()["versions"] if e["id"]==TYPE.split(":")[0]][0]
print(f"Downloading {json['type']} {json['id']} ({json['releaseTime']})...")
json=requests.get(json["url"]).json()
print(f"{TYPE.split(':')[1].title()} url: {json['downloads'][TYPE.split(':')[1].lower()]['url']}...\n{TYPE.split(':')[1].title()} mapping url: {json['downloads'][TYPE.split(':')[1].lower()+'_mappings']['url']}...\nCreating temporary folder...")
if (os.path.exists("tmp")):
os.system("rm -rf tmp")
os.mkdir("tmp")
os.mkdir(f"tmp/{TYPE.split(':')[1].lower()}")
print(f"Downloading {TYPE.split(':')[1]}...")
_clone(json["downloads"][TYPE.split(":")[1].lower()]["url"],f"./tmp/{TYPE.split(':')[1].lower()}.jar")
print(f"Downloading {TYPE.split(':')[1]} mappings...")
_clone(json["downloads"][TYPE.split(":")[1].lower()+'_mappings']["url"],f"./tmp/{TYPE.split(':')[1].lower()+'_mappings'}.txt")
print("Getting jd-cli version...")
json=requests.get(requests.get("https://api.github.com/repos/kwart/jd-cmd/releases").json()[0]["assets_url"]).json()[1]
print(f"Downloading {json['name']}...")
_clone(json["browser_download_url"],"./tmp/jd-cli.zip")
print("Extracting jd-cli.jar...")
with zipfile.ZipFile("tmp/jd-cli.zip","r") as zf:
zf.extract("jd-cli.jar",path="./tmp/")
os.system(f"java -jar tmp/jd-cli.jar -g INFO -od ./tmp/{TYPE.split(':')[1].lower()}/ ./tmp/{TYPE.split(':')[1].lower()}.jar")
print(f"Reading mappings from ./tmp/{TYPE.split(':')[1].lower()}-mappings.txt...")
m={}
with open(f"./tmp/{TYPE.split(':')[1].lower()}_mappings.txt","r") as f:
f=f.read()
for l in f.split("\n"):
if (l[0]=="#"):
continue
else:
pass
###
|
import pandas as pd
from pdia.utils.createUniqueRunID import createUniqueRunID
from pdia.qc.dropStudents import dropStudents
def dropStudentsWithRepeatedBlock(df,
saveDroppedAs=None,
studentId='BookletNumber',
blockId="BlockCode",
runId="blockRunID",
verbose=True):
"""
Drop students with repeated blocks.
We keep track of whether the same blocks have been run multiple times. This could happen when, for example,
a student started a block, got interrupted, and did another block, and asked by the admin to go back to a block.
But more likely, some intervening "blockID" get inserted into a contiguous blockID, creating the illusion of
a block being run multiple times. The latter case is often salvageable.
So when possible, investigate such cases and come up with a fix.
:param df: input data frame with data from multiple students
:param saveDroppedAs: optionally saving the dropped data to a csv or pickle file. Remember to specify .csv or .pickle
    :param studentId: name of the column containing the student ID info; default to "BookletNumber"
    :param blockId: name of the column containing the block ID; default to "BlockCode"
    :param runId: name of the column containing the run counter of block names; default to "blockRunID"
:param verbose: default to True
:return: a data frame with students having any of these events dropped.
"""
# error checks
assert (isinstance(df, pd.DataFrame))
for v in [studentId, blockId]:
assert (v in df.columns)
if verbose:
print("\ndropStudentsWithRepeatedBlock:")
# compute the blockRunID, and keep only the first line per Student by blockRunID
t2 = df.groupby([studentId]) \
.apply(lambda x: createUniqueRunID(x, var=blockId, runId=runId)) \
.groupby([studentId, runId]).first() \
.reset_index()
# find the # of unique BlockCode != the total number of block runs
idx = t2.groupby([studentId])[blockId].nunique() < t2.groupby([studentId])[runId].max()
# find the studentID: make sure it's a Pandas Series
studentsToDrop = pd.Series(idx[idx == True].index)
if verbose:
print("dropStudentsWithRepeatedBlock:")
return dropStudents(df, studentsToDrop, saveDroppedAs, studentId, verbose)
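# Minimal usage sketch (not part of the original module; data values are made up).
# Student 1 revisits block "A" after block "B" and is therefore dropped.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "BookletNumber": [1, 1, 1, 2, 2],
        "BlockCode": ["A", "B", "A", "A", "B"],
    })
    print(dropStudentsWithRepeatedBlock(demo))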
|
import unittest
from oslash.either import Right, Left
class TestEither(unittest.TestCase):
def test_either_right_map(self) -> None:
a = Right(42).map(lambda x: x * 10)
self.assertEqual(a, Right(420))
def test_either_left_map(self) -> None:
a = Left(42).map(lambda x: x*10)
self.assertEqual(a, Left(42))
def test_either_right_functor_law1(self) -> None:
"""fmap id = id"""
self.assertEqual(Right(3).map(lambda x: x), Right(3))
def test_either_right_functor_law2(self) -> None:
"""fmap (f . g) x = fmap f (fmap g x)"""
def f(x: int) -> int:
return x + 10
def g(x: int) -> int:
return x * 10
self.assertEqual(
Right(42).map(f).map(g),
Right(42).map(lambda x: g(f(x)))
)
def test_either_left_functor_law1(self) -> None:
"""fmap id = id"""
self.assertEqual(Left(3).map(lambda x: x), Left(3))
def test_either_left_functor_law2(self) -> None:
"""fmap (f . g) x = fmap f (fmap g x)"""
def f(x):
return x + 10
def g(x):
return x * 10
self.assertEqual(
Left(42).map(f).map(g),
Left(42).map(lambda x: g(f(x)))
)
def test_right_applicative_1(self) -> None:
a = Right.pure(lambda x, y: x + y).apply(Right(2)).apply(Right(40))
self.assertNotEqual(a, Left(42))
self.assertEqual(a, Right(42))
def test_right_applicative_2(self) -> None:
a = Right.pure(lambda x, y: x + y).apply(Left("error")).apply(Right(42))
self.assertEqual(a, Left("error"))
def test_right_applicative_3(self) -> None:
a = Right.pure(lambda x, y: x + y).apply(Right(42)).apply(Left("error"))
self.assertEqual(a, Left("error"))
def test_either_monad_right_bind_right(self) -> None:
m = Right(42).bind(lambda x: Right(x * 10))
self.assertEqual(m, Right(420))
def test_either_monad_right_bind_left(self) -> None:
"""Nothing >>= \\x -> return (x*10)"""
m = Left("error").bind(lambda x: Right(x * 10))
self.assertEqual(m, Left("error"))
|
from setuptools import find_packages
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='lcmap-merlin',
version='2.3.1',
description='Python client library for LCMAP rasters',
long_description=readme(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: Public Domain',
'Programming Language :: Python :: 3.6',
],
keywords='usgs lcmap eros',
url='http://github.com/usgs-eros/lcmap-merlin',
author='USGS EROS LCMAP',
author_email='',
license='Unlicense',
packages=find_packages(),
install_requires=[
'cytoolz',
'numpy',
'requests',
'python-dateutil',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[test]
extras_require={
'test': ['pytest',
'pytest-cov',
'hypothesis',
'vcrpy',
],
'doc': ['sphinx',
'sphinx-autobuild',
'sphinx_rtd_theme'],
'dev': ['jupyter', 'readline'],
},
# entry_points={
#'console_scripts': [''],
# },
include_package_data=True,
zip_safe=False)
|
# Copyright 1999-2019 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import traceback
import argparse
'''
[output directory]
|______Report.html
|______[database name]
|______odps_ddl
| |______tables
| | |______[table name].sql
| |______partitions
| |______[table name].sql
|______hive_udtf_sql
|______single_partition
| |______[table name].sql
|______multi_partition
|______[table name].sql
'''
def execute(cmd: str, verbose=False) -> int:
    try:
        if verbose:
            print("INFO: executing '%s'" % cmd)
        sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, preexec_fn=os.setsid)
        # communicate() drains both pipes, avoiding the deadlock that wait()
        # can cause when the child fills the stdout/stderr pipe buffers
        stdout, stderr = sp.communicate()
        if verbose:
            print("DEBUG: stdout: " + str(stdout.strip()))
            print("DEBUG: stderr: " + str(stderr.strip()))
            print("DEBUG: returncode: " + str(sp.returncode))
        return sp.returncode
    except Exception as e:
        print("ERROR: executing '%s' failed: %s" % (cmd, e))
        print(traceback.format_exc())
        return 1
def execute_with_retry(odpscmd_path: str, file_path: str, max_retry: int = 5) -> None:
    # run a single DDL file through odpscmd, retrying up to max_retry times
    retry = max_retry
    while retry > 0:
        returncode = execute(
            "%s -f %s" % (odpscmd_path, file_path), verbose=True)
        if returncode == 0:
            break
        else:
            print("INFO: execute %s failed, retrying..." % file_path)
            retry -= 1
    if retry == 0:
        print("ERROR: execute %s failed %d times" % (file_path, max_retry))
def main(root: str, odpscmd_path: str) -> None:
    databases = os.listdir(root)
    for database in databases:
        if database == "report.html":
            continue
        create_table_stmt_dir = os.path.join(
            root, database, "odps_ddl", "tables")
        add_partition_stmt_dir = os.path.join(
            root, database, "odps_ddl", "partitions")
        # create the tables first, then add their partitions
        if os.path.exists(create_table_stmt_dir):
            for create_table_stmt_file in os.listdir(create_table_stmt_dir):
                file_path = os.path.join(
                    create_table_stmt_dir, create_table_stmt_file)
                execute_with_retry(odpscmd_path, file_path)
        if os.path.exists(add_partition_stmt_dir):
            for add_partition_stmt_file in os.listdir(add_partition_stmt_dir):
                file_path = os.path.join(
                    add_partition_stmt_dir, add_partition_stmt_file)
                execute_with_retry(odpscmd_path, file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Run ODPS DDL automatically.')
parser.add_argument(
"--input",
required=True,
help="path to directory generated by meta processor")
parser.add_argument(
"--odpscmd",
required=False,
help="path to odpscmd executable")
args = parser.parse_args()
root = args.input
if args.odpscmd is None:
# get path to odpscmd
script_path = os.path.dirname(os.path.realpath(__file__))
odpscmd_path = os.path.join(
os.path.dirname(script_path), "res", "console", "bin", "odpscmd")
        if not os.path.exists(odpscmd_path):
            print("ERROR: cannot find odpscmd, please specify the path to odpscmd")
            raise SystemExit(1)
else:
odpscmd_path = args.odpscmd
main(root, odpscmd_path)
|
# Testing relative imports inside Rust binary
# Testing absolute imports inside Rust binary
from helloworld.speaker import say_bye
from . import talker
def main():
talker.say_hello()
say_bye()
# In .bzl config, setting python_config.run_module = "helloworld.main" should cause this to run as the entry point
if __name__ == "__main__":
print("Launching HelloWorld from __main__")
main()
|
import telebot
from telebot import types
import json
import db
import configure
bot = telebot.TeleBot(configure.config["token"])
id1 = ""
firstname = ""
lastname = ""
university = ""
faculty = ""
category = ""
skills = ""
portfolio = ""
edit_type = ""
check = False
@bot.message_handler(commands=['start'])
def any_msg(message):
global id1
id1 = str(message.from_user.id)
users = db.get_db(configure.config["db"]["users"])
if id1 in list(users.keys()):
keyboardmain = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Join Group", callback_data="join_group")
button_2 = types.InlineKeyboardButton(text="Create Group", callback_data="create_group")
button_3 = types.InlineKeyboardButton(text="My projects", callback_data="my_projects")
button_4 = types.InlineKeyboardButton(text="Edit Profile", callback_data="edit_profile")
button_5 = types.InlineKeyboardButton(text="Info", callback_data="info")
keyboardmain.add(button_1, button_2, button_3, button_4, button_5)
bot.send_message(message.chat.id, text = f"Firstname: {users[id1]['firstname']}\nLastname: {users[id1]['lastname']}\nUniversity: {users[id1]['university']}\nFaculty: {users[id1]['faculty']}\nCategory: {users[id1]['category']}\nSkills: {users[id1]['skills']}\nPortfolio: {users[id1]['portfolio']}\n", reply_markup=keyboardmain)
else:
keyboardmain = types.InlineKeyboardMarkup(row_width=1)
button_1 = types.InlineKeyboardButton(text="Sign up", callback_data="sign_up")
button_2 = types.InlineKeyboardButton(text="Help", callback_data="help")
button_3 = types.InlineKeyboardButton(text="Contact us", callback_data="contact_us")
keyboardmain.add(button_1, button_2, button_3)
bot.send_message(message.chat.id,"Hi there!\nI am a bot 🤖 that will help you find like-minded people, join a project or create your own.",reply_markup=keyboardmain)
def message_lastname(message):
global firstname
firstname = message.text
bot.send_message(message.chat.id, "Input lastname")
    bot.register_next_step_handler(message, message_university)
def message_university(message):
global lastname
lastname = message.text
bot.send_message(message.chat.id, "Input university")
bot.register_next_step_handler(message, message_faculty)
def message_faculty(message):
global university
university = message.text
bot.send_message(message.chat.id, "Input faculty")
bot.register_next_step_handler(message, message_category)
def message_category(message):
global faculty
faculty = message.text
bot.send_message(message.chat.id, "Input category")
bot.register_next_step_handler(message, message_skills)
def message_skills(message):
global category
category = message.text
bot.send_message(message.chat.id, "Input skills")
bot.register_next_step_handler(message, message_portfolio)
def message_portfolio(message):
global skills
skills = message.text
bot.send_message(message.chat.id, "Input portfolio")
bot.register_next_step_handler(message, message_result)
def message_result(message):
global portfolio
portfolio = message.text
keyboardmain = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Done", callback_data="menu")
button_2 = types.InlineKeyboardButton(text="Cancel", callback_data="welcome")
keyboardmain.add(button_1, button_2)
bot.send_message(message.chat.id, f"Firstname: {firstname}\nLastname: {lastname}\nUniversity: {university}\nFaculty: {faculty}\nCategory: {category}\nSkills: {skills}\nPortfolio: {portfolio}\n",reply_markup=keyboardmain)
bot.register_next_step_handler(message, menu)
def menu(message):
keyboardmain = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Join Group", callback_data="join_group")
button_2 = types.InlineKeyboardButton(text="Create Group", callback_data="create_group")
button_3 = types.InlineKeyboardButton(text="My projects", callback_data="my_projects")
button_4 = types.InlineKeyboardButton(text="Edit Profile", callback_data="edit_profile")
button_5 = types.InlineKeyboardButton(text="Info", callback_data="info")
keyboardmain.add(button_1, button_2, button_3, button_4, button_5)
bot.send_message(message.chat.id,"Name: Nazarii\nCategory: programming\nRating: 99.1%\nPortfolio: https://telegra.ph/haj-04-13-3",reply_markup=keyboardmain)
def edit_profile_back(message):
global edit_type
keyboardmain = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Edit", callback_data="e_" + edit_type)
button_2 = types.InlineKeyboardButton(text="Done", callback_data="edit_profile")
button_3 = types.InlineKeyboardButton(text="Cancel", callback_data="edit_profile")
keyboardmain.add(button_1, button_2, button_3)
bot.send_message(message.chat.id, f"Correct {edit_type} {message.text}?",reply_markup=keyboardmain)
@bot.callback_query_handler(func=lambda call:True)
def callback_inline(call):
if call.data[:2] == "e_":
global edit_type
edit_type = call.data[2:]
# bot.send_message(chat_id=call.message.chat.id, text=f"Enter new {edit_type}")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=f"Enter new {edit_type}")
bot.register_next_step_handler(call.message, edit_profile_back)
if call.data == "sign_up":
bot.send_message(call.message.chat.id, "Input firstname")
bot.register_next_step_handler(call.message, message_lastname)
if call.data == "welcome":
keyboardmain = types.InlineKeyboardMarkup(row_width=1)
button_1 = types.InlineKeyboardButton(text="Sign up", callback_data="sign_up")
button_2 = types.InlineKeyboardButton(text="Help", callback_data="help")
button_3 = types.InlineKeyboardButton(text="Contact us", callback_data="contact_us")
keyboardmain.add(button_1, button_2, button_3)
        bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Hi there!\nI am a bot 🤖 that will help you find like-minded people, join a project or create your own.", reply_markup=keyboardmain)
if call.data == "menu":
global id1
db.push_db(configure.config["db"]["users"], {
id1: {
"firstname": firstname,
"lastname": lastname,
"university": university,
"faculty": faculty,
"category": category,
"skills": skills,
"portfolio": portfolio
}
})
print(db.get_db(configure.config["db"]["users"]))
keyboardmain = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Join Group", callback_data="join_group")
button_2 = types.InlineKeyboardButton(text="Create Group", callback_data="create_group")
button_3 = types.InlineKeyboardButton(text="My projects", callback_data="my_projects")
button_4 = types.InlineKeyboardButton(text="Edit Profile", callback_data="edit_profile")
button_5 = types.InlineKeyboardButton(text="Info", callback_data="info")
keyboardmain.add(button_1, button_2, button_3, button_4, button_5)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Name: Nazarii\nCategory: programming\nRating: 99.1%\nPortfolio: https://telegra.ph/haj-04-13-3", reply_markup=keyboardmain)
if call.data == "edit_profile":
keyboard = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Name", callback_data="e_name")
button_2 = types.InlineKeyboardButton(text="About myself", callback_data="e_am")
button_3 = types.InlineKeyboardButton(text="Category", callback_data="e_category")
button_4 = types.InlineKeyboardButton(text="Portfolio", callback_data="e_portfolio")
backbutton = types.InlineKeyboardButton(text="Back to menu", callback_data="menu")
keyboard.add(button_1,button_2,button_3,button_4,backbutton)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Name: Nazarii\nCategory: programming\nRating: 99.1%\nPortfolio: https://telegra.ph/haj-04-13-3", reply_markup=keyboard)
if call.data == "my_projects":
keyboard = types.InlineKeyboardMarkup()
button_1 = types.InlineKeyboardButton(text="STUDCOM", callback_data="studcom")
backbutton = types.InlineKeyboardButton(text="Back", callback_data="menu")
keyboard.add(button_1,backbutton)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Name: Nazarii\nCategory: programming\nRating: 99.1%\nPortfolio: https://telegra.ph/haj-04-13-3", reply_markup=keyboard)
if call.data == "info":
keyboard = types.InlineKeyboardMarkup()
button_1 = types.InlineKeyboardButton(text="Help", callback_data="help")
button_2 = types.InlineKeyboardButton(text="Contact us", callback_data="contact_us")
backbutton = types.InlineKeyboardButton(text="Back", callback_data="menu")
keyboard.add(button_1,button_2, backbutton)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Name: Nazarii\nCategory: programming\nRating: 99.1%\nPortfolio: https://telegra.ph/haj-04-13-3", reply_markup=keyboard)
if call.data == "join_group":
keyboard = types.InlineKeyboardMarkup()
button_1 = types.InlineKeyboardButton(text="View offers", callback_data="view_offers")
button_2 = types.InlineKeyboardButton(text="Find by kw", callback_data="fbk")
backbutton = types.InlineKeyboardButton(text="Back", callback_data="menu")
keyboard.add(button_1,button_2, backbutton)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Name: Nazarii\nCategory: programming\nRating: 99.1%\nPortfolio: https://telegra.ph/haj-04-13-3", reply_markup=keyboard)
if call.data == "view_offers":
keyboard = types.InlineKeyboardMarkup(row_width=2)
button_1 = types.InlineKeyboardButton(text="Join", callback_data="join")
button_2 = types.InlineKeyboardButton(text="Next", callback_data="next")
backbutton = types.InlineKeyboardButton(text="Cancel", callback_data="join_group")
keyboard.add(button_1,button_2, backbutton)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Name: STUDCOM\nTheme: telegram bot for student communication\nRequired skills: python, PyTelegramBotAPI, json\nSquad: 3/5\nTime limit: 23:59 15.04\nhttps://telegra.ph/info-about-project-04-14 ", reply_markup=keyboard)
if __name__ == "__main__":
bot.polling(none_stop=True)
|
n1 = int(input('Type a number: '))
n2 = int(input('Type another number: '))
s = n1 + n2
print('The sum of {} and {} is {}'.format(n1, n2, s))
|
import numpy as np
import pandas as pd
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.utils.data as data
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn import metrics
import os
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def real_train(train_loader, model, args, optimizer):
# assumes data is already on cuda
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = Variable(data), Variable(target)
if len(target)<args['batch_size']:
continue
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output.contiguous().view(-1,args['num_classes']), target.flatten())
loss.backward()
if args['model'] in ['moo', 'mow','changegate_mow','changegate_moo']:
model.after_backward()
optimizer.step()
return loss.item()
def real_test(test_loader, model, args, save_scores=False):
model.eval()
test_loss = 0
predicted=[]
y_true=[]
predicted_last=[]
y_true_last=[]
predicted_max=[] #y_true_max = y_true_last
predicted_last5=[]
y_true_last5=[]
for data, target in test_loader:
data, target = Variable(data, volatile=True), Variable(target)
if len(target)<args['batch_size']:
continue
output = model(data)
test_loss += F.nll_loss(output.contiguous().view(-1,args['num_classes']), target.flatten(), size_average=False).item()
# AUC
if args['use_cuda']:
output=output.cpu()
target=target.cpu()
predicted=predicted+list(output[:,:,1].data.numpy().flatten())
y_true=y_true+list(target.data.numpy().flatten())
predicted_last=predicted_last+list(output[:,-1,1].data.numpy().flatten())
y_true_last=y_true_last+list(target[:,-1].data.numpy().flatten())
predicted_max=predicted_max+list(torch.max(output[:,:,1],dim=1)[0].data.numpy().flatten())
predicted_last5=predicted_last5+list(output[:,-5:,1].data.numpy().flatten())
y_true_last5=y_true_last5+list(target[:,-5:].data.numpy().flatten())
test_loss /= len(test_loader.dataset)
auc=roc_auc_score(y_true,predicted)
auc_last=roc_auc_score(y_true_last,predicted_last)
auc_max=roc_auc_score(y_true_last,predicted_max)
auc_last5=roc_auc_score(y_true_last5,predicted_last5)
if save_scores:
np.savez(os.path.join(args['savedir'],args['runname']+'_testscores'), pred=predicted_max,y_true=y_true_last,args=args)
torch.save(model.state_dict(), os.path.join(args['savedir'],args['runname'] + ".ckpt"))
return auc,test_loss,auc_last,auc_max,auc_last5
def real_data_search3(args, real_data, Net):
# Import Validation & Test Set
val_data = real_data('val',args)
val_loader = data.DataLoader(val_data, batch_size=args['batch_size'],shuffle=True)
test_data = real_data('test',args)
test_loader = data.DataLoader(test_data, batch_size=args['batch_size'],shuffle=True)
# Setup
    if 'shiftLSTM' in args['model']: args['shiftLSTMk'] = int(args['model'].split()[1])
print('{}'.format(args['model']))
df=pd.DataFrame({'hidden_size':[]})
val_auc_max_all=0
# Training Data
if args['realstart']:
train_data = real_data('train',args)
train_loader = data.DataLoader(train_data, batch_size=args['batch_size'],shuffle=True)
np.save(os.path.join(args['savedir'],args['runname']+'sample_ind'), train_data.sample_ind)
else:
train_data = real_data('train',args,sample_ind=np.load(os.path.join(args['savedir'],args['genrunname']+'sample_ind.npy')))
train_loader = data.DataLoader(train_data, batch_size=args['batch_size'],
shuffle=True)
# Runs
for run in range(args['budget']):
#Draw HP
args['hidden_size']=int(np.random.choice([100,150,300,500,700]))
args['hyp_hidden_size']=int(np.random.choice([25,50,75,100,125,150]))
#Init Model
model = Net(args)
if args['use_cuda']:
model.cuda()
optimizer = optim.Adam(model.parameters()) #optim.SGD(model.parameters(), lr=0.01)
#Train Model
val_auc_max_run, epoch_run=0,0
early_stop=[]
for epoch in range(1, args['epochs'] + 1):
training_loss = real_train(train_loader, model, args, optimizer)
zval_auc,_,zval_auc_last,zval_auc_max,zval_auc_last5 = real_test(val_loader,model,args)
print('-- Run {}, epoch {}, training loss {:.4f}, val auc {:.4f}, val auc last {:.4f}, val auc max {:.4f}'.\
format(run,epoch,training_loss,zval_auc,zval_auc_last,zval_auc_max))
early_stop.append(zval_auc_max)
if zval_auc_max>val_auc_max_all:
val_auc_max_all = zval_auc_max
if zval_auc_max>val_auc_max_run:
val_auc_run, epoch_run, val_auc_run_last, val_auc_max_run= zval_auc, epoch, zval_auc_last, zval_auc_max
if df.shape[0]==0: save_scores=True
elif zval_auc_max>df.val_auc_max.max(): save_scores=True
else: save_scores=False
args['current epoch'] = epoch
args['current run'] = run
args['val_auc_max'] = zval_auc_max
ztest_auc,_,ztest_auc_last,ztest_auc_max,ztest_auc_last5= real_test(test_loader,model,args,save_scores=save_scores)
if len(early_stop)>5:
if abs(early_stop.pop(0)-zval_auc_max)<.001:
break
#Save Run Information
print('Best val auc run {}: {}'.format(run,val_auc_run))
df=df.append(pd.DataFrame({'val_auc':[val_auc_run],'test_auc':[ztest_auc],'val_auc_last':[val_auc_run_last],'val_auc_max':[val_auc_max_run],\
'test_auc_last':[ztest_auc_last],'test_auc_max':[ztest_auc_max],'test_auc_last5':[ztest_auc_last5],\
'hidden_size':[args['hidden_size']],'run':[run],\
'hyp_hidden_size':[args['hyp_hidden_size']],'num_params':[count_parameters(model)]}),sort=False)
df.reset_index(inplace=True,drop=True)
df.to_hdf(os.path.join(args['savedir'],args['runname']+'_data_search.h5'),key='data',mode='w')
df['N'] = args['N']
df['model'] = [args['model']]*df.shape[0]
df['genrunname'] = args['genrunname']
df['batch_size']=args['batch_size']
df['budget']=args['budget']
df['epochs']=args['epochs']
df.to_hdf(os.path.join(args['savedir'],args['runname']+'_data_search.h5'),key='data',mode='w')
return df
def empBSCI_old(stat_bs,stat,ci):
#calculates the empirical bootstrap confidence interval
#stat_bs is the bootstrapped estimate: mean, auroc, aupr etc.
#stat is the non bootstrapped estimate
stat2=stat_bs-stat #this is elementwise
upper=100-(100-ci)/2.0 #upper percentile
lower=(100-ci)/2.0 #lower percentile
upperci=np.percentile(stat2,upper)
lowerci=np.percentile(stat2,lower)
return stat-upperci, stat-lowerci #lower bound, upper bound
def empBSCI(stat_bs, ci):
    #calculates the percentile bootstrap confidence interval
    #stat_bs is the bootstrapped estimate: mean, auroc, aupr etc.
    #ci is the confidence level in percent, e.g. 95
upper=100-(100-ci)/2.0 #upper percentile
lower=(100-ci)/2.0 #lower percentile
upperci=np.percentile(stat_bs,upper)
lowerci=np.percentile(stat_bs,lower)
return lowerci, upperci
def calc_auprc(y_true, y_predict):
from sklearn import metrics
(precisions, recalls, thresholds) = metrics.precision_recall_curve(y_true, y_predict)
return metrics.auc(recalls, precisions)
def do_bootstrap(pred, target, args, mode="auroc", ci=95):
assert mode in ["auroc", "auprc"], "mode must be auroc or auprc"
if mode == 'auroc':
eval_function = roc_auc_score
elif mode == 'auprc':
eval_function = calc_auprc
nrep=1000
#Bootstrap 95% CI
np.random.seed(124)
bootstrap=np.random.choice(len(pred),(nrep,len(pred)),replace=True)
aurocbs=np.empty(nrep)
aurocbs[:]=np.nan
for i in range(nrep):
aurocbs[i]=eval_function(target[bootstrap[i,:]],pred[bootstrap[i,:]])
zauroc=np.argsort(aurocbs) #sorts smallest to largest
auroc=eval_function(target,pred)
auroc_lower,auroc_upper=empBSCI(aurocbs,ci)
return auroc_lower, auroc, auroc_upper
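# Minimal self-check sketch (synthetic data, not taken from any experiment in
# this project): draw noisy scores for binary labels and report the
# bootstrapped AUROC with its 95% CI. `args` is unused inside do_bootstrap, so
# an empty dict is passed.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    target = rng.randint(0, 2, size=500)
    pred = target * 0.6 + rng.rand(500) * 0.4  # informative but noisy scores
    lower, point, upper = do_bootstrap(pred, target, args={}, mode="auroc")
    print("AUROC %.3f (95%% CI %.3f-%.3f)" % (point, lower, upper))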
|
import altair as alt
import pandas as pd
from tweets import tweets
# %%
terms = [
"covid",
"lockdown",
"furlough",
"open",
"close",
"takeaway",
"collect",
"delay",
"supply",
"brexit",
"online",
"deliver",
]
tweets["text"] = tweets.text.str.lower()
for term in terms:
tweets[term] = tweets.text.str.contains(term)
# %%
def yearly(tweets, year, cols, freq="7d"):
tweets_ = tweets.loc[lambda x: x.created_at.dt.year == year]
baseline_month = tweets_.groupby(pd.Grouper(key="created_at", freq=freq)).size()
return (
tweets_.groupby(pd.Grouper(key="created_at", freq=freq))[cols]
.sum()
.melt(var_name="term", ignore_index=False)
.join(baseline_month.rename("base"))
.assign(proportion=lambda x: x.value / x.base)
)
freq = "7d"
cols = ["open", "close"]
y2019 = (
yearly(tweets, 2019, cols, freq=freq)
.loc[lambda x: x.index.month < 12]
.reindex(fill_value=0)
)
y2020 = yearly(tweets, 2020, cols, freq=freq).reindex(fill_value=0)
# %%
data = y2020.assign(
norm_value=lambda x: x.proportion
/ y2019.loc[lambda x: x.index.month < 12].proportion.values
)
# Proportion of a month's tweets mentioning a term, normalised against 2019
chart = (
alt.Chart(data.reset_index())
.mark_line()
.encode(
x=alt.X("created_at:T", title=None),
y=alt.Y("norm_value", title="Frequency relative to month in previous year"),
color=alt.Color("term:N", title="Tweet term"),
tooltip=["term", "created_at", "norm_value"],
)
)
export_chart(chart, "tweets_open_close_norm")
chart
# %%
def foo(tweets, var):
"""Frequency over time, both absolute as proportion."""
baseline = (
tweets.groupby([pd.Grouper(key="created_at", freq="1m")])
.size()
.to_frame("total")
)
return (
tweets.melt(id_vars=[var, "created_at"], value_vars=terms) # .sample(10_000)
.groupby(["variable", var, pd.Grouper(key="created_at", freq="1m")])
.sum()
.join(baseline)
.assign(proportion=lambda x: x.value / x.total)
.reset_index()
)
dta = foo(tweets, "section")
dta_place = foo(
tweets.dropna(subset=["laua"]).loc[lambda x: x.laua.str.startswith("S")], "laua"
)
# %%
chart = (
alt.Chart(dta)
.mark_area()
.encode(
alt.X(
"created_at:T",
axis=alt.Axis(format="%m/%Y"),
title=None,
),
alt.Y(
"proportion:Q",
scale=alt.Scale(type="linear"),
title=["Proportion of tweets"],
),
alt.Color(
"variable:N",
scale=alt.Scale(scheme="category20"),
),
alt.Facet("section:N", columns=2),
tooltip=["created_at", "value", "proportion", "variable", "section"],
)
.properties(width=300, height=100)
.resolve_scale(y="independent")
.interactive(bind_y=False)
)
export_chart(chart, "tweet_section_stack_terms")
chart
# %%
# Streamgraph of tweets by industry
chart = (
alt.Chart(dta)
.mark_area()
.encode(
alt.X(
"created_at:T",
axis=alt.Axis(format="%m/%Y"),
title=None,
),
alt.Y("proportion:Q", title=["Proportion of tweets"]),
alt.Color("section:N", scale=alt.Scale(scheme="category20b")),
alt.Facet("variable:N", columns=2, title="Term contained in tweet"),
tooltip=["created_at", "value", "proportion", "variable", "section"],
)
.properties(width=300, height=100)
.resolve_scale(y="independent")
)
export_chart(chart, "tweet_terms_stack_section")
chart
# %%
# Streamgraph of tweets by region
chart = (
alt.Chart(dta_place.assign(laua=lambda x: x.laua.map(read_lad_name_lookup())))
.mark_area()
.encode(
alt.X(
"created_at:T",
axis=alt.Axis(format="%m/%Y"),
title=None,
),
alt.Y(
"proportion:Q",
stack="zero",
title=["Proportion of tweets"],
),
alt.Color(
"laua:N", scale=alt.Scale(scheme="category20b"), title="Council area"
),
alt.Facet("variable:N", columns=2, title="Term contained in tweet"),
tooltip=["created_at", "value", "proportion", "variable", "laua"],
)
.properties(width=300, height=100)
.resolve_scale(y="independent")
)
export_chart(chart, "tweet_terms_stack_region")
chart
# %%
# Streamgraph of tweets by region
chart = (
alt.Chart(dta_place.assign(laua=lambda x: x.laua.map(read_lad_name_lookup())))
.mark_area()
.encode(
alt.X(
"created_at:T",
axis=alt.Axis(format="%m/%Y"),
title=None,
),
alt.Y(
"proportion:Q",
stack="zero",
title=["Proportion of tweets"],
),
alt.Color("variable:N", scale=alt.Scale(scheme="category20")),
alt.Facet("laua:N", columns=3, title="Term contained in tweet"),
tooltip=["created_at", "value", "proportion", "variable", "laua"],
)
.properties(width=300, height=100)
.resolve_scale(y="independent")
)
export_chart(chart, "tweet_region_stack_terms")
chart
|
import cobra
from scipy import sparse
from straindesign import MILP_LP, parse_constraints, lineqlist2mat, linexpr2dict, linexprdict2mat
from straindesign.names import *
from typing import Dict
# FBA for cobra model with CPLEX
# the user may provide the optional arguments
# constraints: Additional constraints in text form (list of lists)
# A_ineq, b_ineq: Additional constraints in matrix form
# obj: Alternative objective in text form
def fba(model,**kwargs):
# allowed_keys = {'obj','constraints','solver'}
# # set all keys passed in kwargs
# for key,value in kwargs.items():
# if key in allowed_keys:
# locals()[key] = value
# else:
# raise Exception("Key "+key+" is not supported.")
# # set all remaining keys to None
# for key in allowed_keys:
# if key not in kwargs.keys():
# locals()[key] = None
# Check type and size of A_ineq and b_ineq if they exist
reaction_ids = model.reactions.list_attr("id")
if CONSTRAINTS in kwargs:
kwargs[CONSTRAINTS] = parse_constraints(kwargs[CONSTRAINTS],reaction_ids)
A_ineq, b_ineq, A_eq, b_eq = lineqlist2mat(kwargs[CONSTRAINTS], reaction_ids)
if 'obj' in kwargs:
if kwargs['obj'] is not None:
if type(kwargs['obj']) is str:
kwargs['obj'] = linexpr2dict(kwargs['obj'],reaction_ids)
if type(kwargs['obj']) is dict:
c = linexprdict2mat(kwargs['obj'],reaction_ids).toarray()[0].tolist()
if 'solver' in kwargs:
solver = kwargs['solver']
else:
solver = None
# prepare vectors and matrices
A_eq_base = cobra.util.create_stoichiometric_matrix(model)
A_eq_base = sparse.csr_matrix(A_eq_base)
b_eq_base = [0]*len(model.metabolites)
if 'A_eq' in locals():
A_eq = sparse.vstack((A_eq_base, A_eq))
b_eq = b_eq_base+b_eq
else:
A_eq = A_eq_base
b_eq = b_eq_base
if 'A_ineq' not in locals():
A_ineq = sparse.csr_matrix((0,len(model.reactions)))
b_ineq = []
lb = [v.lower_bound for v in model.reactions]
ub = [v.upper_bound for v in model.reactions]
    if 'c' not in locals():
        c = [i.objective_coefficient for i in model.reactions]
    # the underlying LP is a minimization problem, so the objective is negated here
    # and the sign of the optimum is flipped back when the Solution is built below
    c = [-i for i in c]
# build LP
my_prob = MILP_LP( c=c,
A_ineq=A_ineq,
b_ineq=b_ineq,
A_eq=A_eq,
b_eq=b_eq,
lb=lb,
ub=ub,
solver=solver)
x, opt_cx, status = my_prob.solve()
if status not in [OPTIMAL, UNBOUNDED]:
status = INFEASIBLE
fluxes = {reaction_ids[i] : x[i] for i in range(len(x))}
sol = cobra.core.Solution(objective_value=-opt_cx,status=status,fluxes=fluxes)
return sol
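# Usage sketch (the reaction ID is hypothetical; the constraint syntax is assumed
# to follow straindesign's text form, e.g. "<reaction_id> >= <value>"):
#
#   model = cobra.io.read_sbml_model("e_coli_core.xml")
#   sol = fba(model, constraints=["EX_o2_e >= -5"])
#   print(sol.status, sol.objective_value)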
|
# -*- encoding: utf-8 -*-
import json
import requests
from ..consts import DOCKER_HUB_API_ENDPOINT, PER_PAGE
from .config import Config
class DockerHubClient:
""" Wrapper to communicate with docker hub API """
def __init__(self):
self.config = Config()
self.auth_token = self.config.get('auth_token')
def do_request(self, url, method='GET', data={}):
valid_methods = ['GET', 'POST']
if method not in valid_methods:
raise ValueError('Invalid HTTP request method')
headers = {'Content-type': 'application/json'}
if self.auth_token:
headers['Authorization'] = 'JWT ' + self.auth_token
request_method = getattr(requests, method.lower())
if len(data) > 0:
data = json.dumps(data, indent=2, sort_keys=True)
resp = request_method(url, data, headers=headers)
else:
resp = request_method(url, headers=headers)
content = {}
if resp.status_code == 200:
content = json.loads(resp.content.decode())
return {'content': content, 'code': resp.status_code}
def login(self, username=None, password=None, save_config=True):
data = {'username': username, 'password': password}
self.auth_token = None
resp = self.do_request(DOCKER_HUB_API_ENDPOINT + 'users/login/',
'POST', data)
if resp['code'] == 200:
self.auth_token = resp['content']['token']
if save_config:
self.config.set('auth_token', self.auth_token)
return resp['code'] == 200
def get_token(self):
return self.auth_token
def get_repos(self, org, page=1, per_page=PER_PAGE):
url = '{0}repositories/{1}/?page={2}&page_size={3}'. \
format(DOCKER_HUB_API_ENDPOINT, org, page, per_page)
return self.do_request(url)
def get_tags(self, org, repo, page=1, per_page=PER_PAGE):
url = '{0}repositories/{1}/{2}/tags?page={3}&page_size={4}'. \
format(DOCKER_HUB_API_ENDPOINT, org, repo, page, per_page)
return self.do_request(url)
def get_users(self, username):
url = '{0}users/{1}'.format(DOCKER_HUB_API_ENDPOINT, username)
return self.do_request(url)
def get_buildhistory(self, org, repo, page=1, per_page=PER_PAGE):
url = '{0}repositories/{1}/{2}/buildhistory?page={3}&page_size={4}'. \
format(DOCKER_HUB_API_ENDPOINT, org, repo, page, per_page)
return self.do_request(url)
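# Usage sketch (user, password and organisation are placeholders; the Docker Hub
# v2 API returns paginated listings under a "results" key):
#
#   client = DockerHubClient()
#   if client.login('someuser', 'somepassword', save_config=False):
#       repos = client.get_repos('library')
#       for repo in repos['content'].get('results', []):
#           print(repo['name'])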
|
# Copyright 2018-2019 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import time
from ftplib import FTP
from concurrent import futures
import logging
logging.basicConfig()
LOGGER = logging.getLogger('tdm.gfs.noaa')
LOGGER.setLevel(logging.DEBUG)
class noaa_fetcher(object):
NOAA_FTP_SERVER = 'ftp.ncep.noaa.gov'
NOAA_BASE_PATH = '/pub/data/nccf/com/gfs/prod/'
NOAA_DATASET_FOLDER_SIZE = 196608
FETCH_ATTEMPTS = 3
@classmethod
def list_files_in_path(cls, path):
entries = {}
def add_clean_entry(x):
size, name = [x.split()[i] for i in (4, 8)]
entries[name] = {'size': int(size), 'name': name}
with FTP(cls.NOAA_FTP_SERVER) as ftp:
ftp.login()
ftp.cwd(path)
ftp.retrlines('LIST', callback=add_clean_entry)
return entries
@classmethod
def list_available_dataset_groups(cls):
return cls.list_files_in_path(cls.NOAA_BASE_PATH)
def __init__(self, year, month, day, hour):
self.date = datetime.datetime(year, month, day, hour, 0)
self.ds = 'gfs.%s' % self.date.strftime("%Y%m%d%H")
LOGGER.info('Initialized for dataset %s', self.ds)
def is_dataset_ready(self):
available_groups = self.list_available_dataset_groups()
return (self.ds in available_groups and
available_groups[self.ds]['size']
<= self.NOAA_DATASET_FOLDER_SIZE)
def fetch_file(self, ds_path, fname, tdir):
LOGGER.info('Fetching %s/%s into %s', self.ds, fname, tdir)
begin = datetime.datetime.now()
target = os.path.join(tdir, fname)
        with FTP(self.NOAA_FTP_SERVER) as ftp:
            ftp.login()
            ftp.cwd(ds_path)
            cmd = 'RETR %s' % fname
            # write through a context manager so the file handle is closed reliably
            with open(target, 'wb') as fout:
                ftp.retrbinary(cmd, fout.write, blocksize=1024*1024)
dt = datetime.datetime.now() - begin
LOGGER.info('It took %s secs to fetch %s',
dt.total_seconds(), fname)
return target
def fetch(self, res, tdir, pattern='gfs.t%Hz.pgrb2',
nthreads=4, tsleep=300):
        def recover_results(fut_by_fname):
failed = []
for fut in futures.as_completed(fut_by_fname):
fname = fut_by_fname[fut]
try:
res = fut.result()
except Exception as exc:
LOGGER.error('%s generated an exception: %s', fname, exc)
failed.append(fname)
LOGGER.info('adding %s to failed', fname)
else:
LOGGER.info('%s saved in %s', fname, res)
return failed
ds_path = os.path.join(self.NOAA_BASE_PATH, self.ds)
pre = self.date.strftime(pattern) + '.' + res
LOGGER.info('Fetching %s/%s into %s', self.ds, pre, tdir)
while not self.is_dataset_ready():
LOGGER.info('Dataset %s not ready, sleeping for %d sec',
self.ds, tsleep)
time.sleep(tsleep)
files = [f for f in self.list_files_in_path(ds_path)
if f.startswith(pre) and not f.endswith('.idx')]
begin = datetime.datetime.now()
with futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
for i in range(self.FETCH_ATTEMPTS):
fut_by_fname = {executor.submit(self.fetch_file,
ds_path, fname, tdir): fname
for fname in files}
files = recover_results(fut_by_fname)
if len(files) == 0:
dt = datetime.datetime.now() - begin
LOGGER.info('It took %s secs to fetch %s.',
dt.total_seconds(), self.ds)
break
else:
                    LOGGER.info(
                        'At fetch iteration %d of %d, %d files missing.',
                        i + 1, self.FETCH_ATTEMPTS, len(files))
            else:
                LOGGER.error(
                    'Still %d files missing after %d iterations.',
                    len(files), self.FETCH_ATTEMPTS)
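# Usage sketch (resolution string and target directory are assumptions; GFS
# files on the NOAA FTP server are named like gfs.tHHz.pgrb2.<res>.fNNN):
#
#   nf = noaa_fetcher(2019, 1, 1, 6)
#   nf.fetch('0p25', '/tmp/gfs', nthreads=4)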
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import os
import site
from shutil import copy2
from os.path import join as pjoin
files = [
'datatables.css',
'datatables.js'
]
def main():
sitepackages = site.getsitepackages()
static_base_directory = sitepackages[0] if os.name != 'nt' else sitepackages[1]
destination = pjoin(static_base_directory, 'notebook', 'static')
dir_path = os.path.dirname(os.path.realpath(__file__))
for file in files:
full_path = pjoin(dir_path, file)
print(f'copying file {file} to {destination}')
copy2(full_path, destination)
if __name__ == '__main__':
main()
|
import armapy
from .data import Data
# TODO: implement SED
class SED:
data = None
seds = None
def __init__(self, data):
if type(data) is not Data:
data = Data(data)
self.data = data
def __download_properties__(self):
self.data.head.download_SVO_properties()
def __calc_sed__(self):
pass
|
import warnings
import numpy as np
import pandas as pd
from typing import Tuple
warnings.filterwarnings("ignore")
url = "https://data.rivm.nl/covid-19/COVID-19_aantallen_gemeente_per_dag.csv"
def get_data(url: str = url) -> pd.DataFrame:
"""
Get data from url
url: url to csv
"""
return pd.read_csv(url, sep=";", parse_dates=["Date_of_publication"])[
["Date_of_publication", "Total_reported"]
]
def new_cases(df: pd.DataFrame) -> pd.DataFrame:
"""
Processes data to get new case counts
df: pandas DataFrame
"""
# Only get total cases based on date of publication
df = df.groupby(df["Date_of_publication"].dt.date).sum().reset_index()
# Rename columns
df.columns = ["date", "cases"]
# Set date as index
df = df.set_index("date")
return df
def smooth_cases(df: pd.DataFrame, window: int = 7, cutoff: int = 25) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Smooth new case data
    df: pandas DataFrame
    window: rolling window used for smoothing
    cutoff: start the series where smoothed new cases exceed this value
    """
    # Calculate smoothed new cases
    smoothed = (
        df.rolling(window, win_type="gaussian", min_periods=1, center=True)
        .mean(std=2)
        .round()
    )
# Get start index when new cases > cutoff
idx_start = np.searchsorted(smoothed.values.flatten(), cutoff)
# Filter smoothed and original based on cutoff
smoothed = smoothed.iloc[idx_start:]
original = df.loc[smoothed.index]
return original, smoothed
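# End-to-end sketch: download the RIVM data, aggregate to daily new cases and
# smooth them. Guarded because it needs network access.
if __name__ == "__main__":
    cases = new_cases(get_data())
    original, smoothed = smooth_cases(cases)
    print(smoothed.tail())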
|
#!/usr/bin/env python3
from bpylist import bplist
import sqlite3
import urllib.request
import os
chat_sqlite = 'iphonex/Line.sqlite'
dload_dir = '_stickers'
os.makedirs(dload_dir, exist_ok=True)  # make sure the download directory exists
date_limit = ('1529848800000',) # before 25/06/2018 00:00 (1 month anniv. + 2d)
query = 'SELECT ZCONTENTMETADATA FROM `ZMESSAGE` WHERE `ZCHAT` = 3 AND `ZCONTENTTYPE` = 7 AND ZTIMESTAMP < ? ORDER BY `ZTIMESTAMP` ASC;'
# do query
conn = sqlite3.connect(chat_sqlite)
c = conn.cursor()
c.execute(query, date_limit)
rows = c.fetchall()
c.close()
print('rows =', len(rows))
# iterate through results
for row in rows:
blob = row[0]
bp = bplist.parse(blob)
key_index = bp['$objects'][1]['NS.keys']
obj_index = bp['$objects'][1]['NS.objects']
keys = [bp['$objects'][i] for i in key_index]
obj = [bp['$objects'][i] for i in obj_index]
STKVER_index = keys.index('STKVER')
STKPKGID_index = keys.index('STKPKGID')
STKID_index = keys.index('STKID')
STKVER = obj[STKVER_index]
STKPKGID = obj[STKPKGID_index]
STKID = obj[STKID_index]
print()
#print('blob =', blob)
print('bplist =', bp)
print('STKVER =', STKVER)
print('STKPKGID =', STKPKGID)
print('STKID =', STKID)
# download sticker as STKVER_STKPKGID_STKID.png
urllib.request.urlretrieve('http://dl.stickershop.line.naver.jp/products/0/0/' + STKVER + '/' + STKPKGID + '/android/stickers/' + STKID + '.png',
dload_dir + '/' + STKVER + '_' + STKPKGID + '_' + STKID + '.png')
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def add_config_options_to_parser(parser):
parser.add_argument(
'--config_path',
type=str,
default='configs/train_transformer.yaml',
help="the yaml config file path.")
parser.add_argument(
'--batch_size', type=int, default=32, help="batch size for training.")
parser.add_argument(
'--epochs',
type=int,
default=10000,
help="the number of epoch for training.")
parser.add_argument(
'--lr',
type=float,
default=0.001,
help="the learning rate for training.")
parser.add_argument(
'--save_step',
type=int,
default=500,
help="checkpointing interval during training.")
parser.add_argument(
'--image_step',
type=int,
default=2000,
help="attention image interval during training.")
parser.add_argument(
'--max_len',
type=int,
default=400,
help="The max length of audio when synthsis.")
parser.add_argument(
'--transformer_step',
type=int,
default=160000,
help="Global step to restore checkpoint of transformer.")
parser.add_argument(
'--vocoder_step',
type=int,
default=90000,
help="Global step to restore checkpoint of postnet.")
parser.add_argument(
'--use_gpu',
type=int,
default=1,
help="use gpu or not during training.")
parser.add_argument(
'--use_data_parallel',
type=int,
default=0,
help="use data parallel or not during training.")
parser.add_argument(
'--stop_token',
type=int,
default=0,
help="use stop token loss in network or not.")
parser.add_argument(
'--data_path',
type=str,
default='./dataset/LJSpeech-1.1',
help="the path of dataset.")
parser.add_argument(
'--checkpoint_path',
type=str,
default=None,
help="the path to load checkpoint or pretrain model.")
parser.add_argument(
'--save_path',
type=str,
default='./checkpoint',
help="the path to save checkpoint.")
parser.add_argument(
'--log_dir',
type=str,
default='./log',
help="the directory to save tensorboard log.")
parser.add_argument(
'--sample_path',
type=str,
default='./sample',
help="the directory to save audio sample in synthesis.")
|
from __future__ import absolute_import
from subprocess import Popen, PIPE
import tempfile
import shutil
import socket
import os
import json
class DisposableConsul(object):
DEFAULT_CONSUL_BIN = 'consul'
def __init__(self, consul_bin=DEFAULT_CONSUL_BIN):
self.consul_bin = consul_bin
self.temp_dir = None
self.consul = None
self.config = None
@staticmethod
def find_unused_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[-1]
sock.close()
return port
def start(self):
self.temp_dir = tempfile.mkdtemp()
self.config = self.generate_config()
config_file = os.path.join(self.temp_dir, 'config.json')
with open(config_file, 'w') as f:
json.dump(self.config, f)
self.consul = Popen([self.consul_bin, 'agent', '-server',
'-data-dir', self.temp_dir,
'-bootstrap-expect', '1',
'-config-file', config_file], stdout=PIPE, stderr=PIPE)
while True:
            line = self.consul.stdout.readline().decode('utf-8', 'ignore').strip()
if 'New leader elected' in line:
break
def generate_config(self):
return {
'ports': {
k: self.find_unused_port() for k in
('dns', 'http', 'https', 'rpc', 'serf_lan', 'serf_wan', 'server')
}
}
def stop(self):
self.consul.terminate()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
@property
def http_port(self):
return self.config['ports']['http']
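# Usage sketch (requires a `consul` binary on the PATH; the agent is started,
# queried for its HTTP port and torn down again on exit):
#
#   with DisposableConsul() as consul:
#       print("consul HTTP API listening on port", consul.http_port)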
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ########################## #
# #
# Copyright 2020 Greg Caufield <greg@embeddedcoffee.ca> #
# #
# This file is part of MonkeyPack Package Manager #
# #
# The MIT Licence #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to #
# deal in the Software without restriction, including without limitation the #
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or #
# sell copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #
# IN THE SOFTWARE. #
# #
# ############################################################################ #
from typing import Optional, Any
from mbget.version import Version
class Dependency(object):
def __init__(self, package_name: str):
self.__package_name = package_name
self.__required_version: Optional[Version] = None
self.__repo: Optional[str] = None
self.__barrel_name: Optional[str] = None
pass
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Dependency):
return False
return (
other.__package_name == self.__package_name
and other.__required_version == self.__required_version
and other.__repo == self.__repo
)
@property
def package_name(self) -> str:
return self.__package_name
@property
def version(self) -> Optional[Version]:
return self.__required_version
def set_version(self, version: str) -> None:
self.__required_version = Version(version)
@property
def repo(self) -> Optional[str]:
return self.__repo
def set_repo(self, repo: str) -> None:
self.__repo = repo
@property
def barrel_name(self) -> Optional[str]:
return self.__barrel_name
def set_barrel_name(self, name: str) -> None:
self.__barrel_name = name
|
"""
Given an array of integers and a target integer sum, return whether there exists a pair of integers in the array which adds up to the sum.
See if you can come up with an O(n^2) solution first. Then see if you can come up with an O(n log n) one.
"""
import time
import random
import numpy as np
import matplotlib.pyplot as plt
DEBUG=False
def debug(_str):
if DEBUG: print(_str)
#returns Boolean indicating whether _list contains a pair that sums to _target
#
def brute_force_sum_exists(_list, _target):
found = False
for i in range(len(_list)-1):
for j in range(i+1, len(_list)):
if _list[i] + _list[j] == _target:
found = True
debug("Found a solution! Using %s and %s, at indices %s and %s" %
(_list[i], _list[j], i, j))
return(found)
#returns Boolean indicating whether _list contains a pair that sums to _target
#
def optimized_sum_exists(_list, _target):
found = False
slist = sorted(_list)
i=0
while i < len(slist) and not(found):
needed_addend = _target - slist[i]
debug("Going to start searching for %s, so that %s+%s=%s" % (needed_addend, slist[i], needed_addend, _target))
#Remove the other addend from the list, lest the binary search think
# that it can use the same value twice (without it actually
# appearing twice in the list)
pivot = slist[i]
slist.remove(pivot)
#If we get a non-None value, then we have a pair summing to _target
if binary_search(slist, needed_addend) != None:
found = True
debug("Found a solution for %s+%s=%s" % (pivot, needed_addend, _target))
i+=1
#Add the pivoting addend back into the list
slist.insert(i-1, pivot)
if not(found): debug("No solution exists")
return(found)
#returns the index of _target, if it appears in _sorted_list.
#returns None if _target is not present in _sorted_list
def binary_search(_sorted_list, _target):
if len(_sorted_list) == 1:
if _sorted_list[0] == _target:
return 0
else:
return None
elif len(_sorted_list) > 1:
midpoint = len(_sorted_list)//2
if _sorted_list[midpoint] == _target:
return midpoint
#midpoint of list is greater than target - recurse on first half of list
elif _sorted_list[midpoint] > _target:
return binary_search(_sorted_list[:midpoint], _target)
#midpoint of list is smaller than target - recurse on second half
else:
return binary_search(_sorted_list[midpoint:], _target)
else:
#list is empty, so the value can't be present
return None
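#Alternative approach (not part of the original exercise): a hash-set scan gives
# an O(n) expected-time check by remembering every value seen so far and asking
# whether the complement needed to reach _target has already appeared.
def hashset_sum_exists(_list, _target):
    seen = set()
    for value in _list:
        if _target - value in seen:
            debug("Found a solution for %s+%s=%s" % (_target - value, value, _target))
            return True
        seen.add(value)
    return False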
def main():
#How many times to test the algorithms
NUM_TRIALS = 1000
#Create a list of random integers
LIST_SIZE = 1000
MAX_INT = 100
random_list = []
for i in range(LIST_SIZE):
random_list.append(random.randint(1,MAX_INT))
#Set the targets in advance so that the algs are checking for the same ones
random_targets = [random.randint(1,MAX_INT) for i in range(NUM_TRIALS)]
brute_force_answers = []
brute_force_times = []
efficient_answers = []
efficient_times = []
#O(n^2) solution: Consider each pair of numbers exactly once
#Note: doesn't matter whether or not the list is sorted in advance
print("Starting brute-force sum-find algorithm")
for i in range(NUM_TRIALS):
startOne = time.time()
brute_force_answers.append(brute_force_sum_exists(random_list, random_targets[i]))
finishOne = time.time()
brute_force_times.append(finishOne - startOne)
#O(n logn) solution:
# sort the array - O(n logn)
# for each element i: - O(n)
# take target_sum-i to decide what element j would pair with i - O(1)
# binary search for j - O(log n)
#Note: since we need the list to be sorted, we measure that time too.
# Strictly speaking, we could just sort it once at the beginning.
print("Starting efficient sum-find algorithm")
for j in range(NUM_TRIALS):
startTwo = time.time()
efficient_answers.append(optimized_sum_exists(random_list, random_targets[j]))
finishTwo = time.time()
efficient_times.append(finishTwo - startTwo)
try:
assert(brute_force_answers == efficient_answers)
except AssertionError:
for x in range(NUM_TRIALS):
if brute_force_answers[x] != efficient_answers[x]:
print("Algorithms differed when looking for %s. (Results: %s, %s)" % (random_targets[x], brute_force_answers[x], efficient_answers[x]))
#Create a little histogram to show the average efficiency of each alg
colors=['red','blue']
labels=["brute force", "efficient"]
plt.hist([brute_force_times, efficient_times], 100, density=True, histtype='bar', color=colors, label=labels)
plt.legend(prop={'size': 10})
plt.show()
if __name__ == "__main__":
    main()
|
class HashItem:
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable:
def __init__(self):
self.size = 256
self.slots = [None for i in range(self.size)]
self.count = 0
def _hash(self, key):
mult = 1
hv = 0
for ch in key:
hv += mult * ord(ch)
mult += 1
return hv % self.size
def put(self, key, value):
item = HashItem(key, value)
h = self._hash(key)
while self.slots[h]:
            if self.slots[h].key == key:
break
h = (h+1) % self.size
if self.slots[h] is None:
self.count += 1
self.slots[h] = item
def get(self, key):
h = self._hash(key)
while self.slots[h]:
            if self.slots[h].key == key:
return self.slots[h].value
h = (h+1) % self.size
return None
def __setitem__(self, key, value):
self.put(key, value)
def __getitem__(self, key):
return self.get(key)
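# Usage sketch: the __setitem__/__getitem__ methods above make the table usable
# with normal subscript syntax; a missing key simply returns None.
if __name__ == "__main__":
    ht = HashTable()
    ht["good"] = "eggs"
    ht["better"] = "ham"
    print(ht["good"], ht["better"], ht["missing"])  # -> eggs ham None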
|
from topic_model import Topic
def test_topic_serialization():
root = Topic('root', layer=0)
c1 = root.add_child('c1', [("foo", 0.5), ("bar", 0.3)])
c1_1 = c1.add_child('c1-1', [("baz", 0.5)])
c2 = root.add_child('c2', [("yolo", 0.5)])
stored = root.store_recursively()
topic_dict = Topic.restore_topics(stored)
root2 = topic_dict['root']
assert root.topic_id == root2.topic_id
def test_topic_model_stats():
#topic_model.topic_stats('../data/arxiv-topics.json.bz2')
pass
|
import logging
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog
from ui.Ui_Preferences import Ui_PreferencesDialog
def _bool(settings, directive, fallback):
value = settings.value(directive)
if isinstance(value, bool):
return value
logging.debug(f"_bool(): {directive!r} is {value!r}")
if value == "true":
return True
elif value == "false":
return False
return fallback
class PreferencesDialog(QDialog, Ui_PreferencesDialog):
def __init__(self, settings, parent=None):
super(PreferencesDialog, self).__init__(parent)
self.setupUi(self)
self.setAttribute(Qt.WA_DeleteOnClose)
self.settings = settings
self.readSettings()
self.accepted.connect(self.saveSettings)
def readSettings(self):
s = self.settings
self.startOnLoginCheckBox.setChecked(_bool(s, "preferences/startup", False))
self.copyLinkToClipboardCheckBox.setChecked(
_bool(s, "preferences/copyToClipboard", True))
self.showUploadCompleteNotificationCheckBox.setChecked(
_bool(s, "preferences/showNotification", True))
self.openLinkInBrowserCheckBox.setChecked(
_bool(s, "preferences/openInBrowser", False))
def saveSettings(self):
s = self.settings
s.setValue("preferences/startup", self.startOnLoginCheckBox.isChecked())
s.setValue("preferences/copyToClipboard", self.copyLinkToClipboardCheckBox.isChecked())
s.setValue("preferences/showNotification",
self.showUploadCompleteNotificationCheckBox.isChecked())
s.setValue("preferences/openInBrowser", self.openLinkInBrowserCheckBox.isChecked())
|
import argparse
def argparse_config_train():
parser = argparse.ArgumentParser('')
parser.add_argument('--num-epoch', type=int, default=30,
help='number of training epochs')
parser.add_argument('--save-epoch', type=int, default=10,
help='frequency of model saving')
parser.add_argument('--train-shot', type=int, default=15,
help='number of support examples per training class')
parser.add_argument('--val-shot', type=int, default=5,
help='number of support examples per validation class')
parser.add_argument('--train-query', type=int, default=6,
help='number of query examples per training class')
parser.add_argument('--val-episode', type=int, default=2000,
help='number of episodes per validation')
parser.add_argument('--val-query', type=int, default=15,
help='number of query examples per validation class')
parser.add_argument('--train-way', type=int, default=5,
help='number of classes in one training episode')
parser.add_argument('--test-way', type=int, default=5,
help='number of classes in one test (or validation) episode')
parser.add_argument('--nfeat', type=int, default=640,
help='number of feature dimension')
parser.add_argument('--nKall', type=int, default=-1,
help='number of all classes')
parser.add_argument('--nKbase', type=int, default=0,
help='number of base classes')
parser.add_argument('--nTestBase', type=int, default=0,
help='number of query examples per testing class')
parser.add_argument('--epoch_size', type=int, default=1000,
help='number of episodes per epoch')
parser.add_argument('--avg-pool', default=False, action='store_true',
help='whether to do average pooling in the last layer of ResNet models')
parser.add_argument('--milestones', type=list, default=[10,20,25,30],
                        help='learning rate decay milestones (in number of epochs)')
parser.add_argument('--lambdalr', type=list, default=[0.1,0.006,0.0012,0.00024],
help='learning rates to be used between above milestones')
parser.add_argument('--save-path', default='./experiments/tmp',
help='path to save log outputs')
parser.add_argument('--gpu', default='0',
help='choose which gpu to be used')
parser.add_argument('--network', type=str, default='Pretrain',
help='choose which embedding network to use. ResNet, ConvNet')
parser.add_argument('--head', type=str, default='ProtoNet',
help='choose which classification head to use. SEGA, ProtoNet')
parser.add_argument('--weight_generator_type', type=str, default='feature_averaging',
help='choose which weight generator type to use (for SEGA)')
parser.add_argument('--dataset', type=str, default='miniImageNet',
help='choose dataset to use. miniImageNet, tieredImageNet, CIFAR_FS, CUB')
parser.add_argument('--semantic_path', type=str, default='No semantic to be used',
help='semantic path for current dataset.')
parser.add_argument('--episodes-per-batch', type=int, default=8,
help='batch size')
parser.add_argument('--embnet_pretrainedandfix', default=False, action='store_true',
help='whether to load the feature extractor and fix it during the second stage.')
parser.add_argument('--pretrian_embnet_path', type=str, default=None,
help='feature extractor path.')
args = parser.parse_known_args()[0]
return args
|
#!/usr/bin/env python
import rospy
from light_classification.tl_classifier import TLClassifier
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from message_filters import ApproximateTimeSynchronizer, Subscriber
import cv2
import numpy as np
import yaml
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
self.bridge = CvBridge()
self.use_ground_truth = rospy.get_param("~use_ground_truth", default=False)
self.tl_consideration_distance = rospy.get_param("/tl_consideration_distance", 100)
config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.safe_load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
if not self.use_ground_truth:
self.light_classifier = TLClassifier()
base_waypoints_sub = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
self.tss = ApproximateTimeSynchronizer([Subscriber("/vehicle/traffic_lights", TrafficLightArray),
Subscriber("/image_color", Image),
Subscriber("/current_pose", PoseStamped)], 30, 0.1)
self.tss.registerCallback(self.light_image_pose_cb)
rospy.spin()
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
"""
# test get_waypoint_distance method; TODO: move into rostest
idx1 = 0
idx2 = 5
rospy.logerr("distance between %d and %d: %f", idx1, idx2, self.get_waypoint_distance(idx1, idx2))
idx1 = 5
idx2 = 0
rospy.logerr("distance between %d and %d: %f", idx1, idx2, self.get_waypoint_distance(idx1, idx2))
idx1 = 50
idx2 = 60
rospy.logerr("distance between %d and %d: %f", idx1, idx2, self.get_waypoint_distance(idx1, idx2))
idx1 = 60
idx2 = 50
rospy.logerr("distance between %d and %d: %f", idx1, idx2, self.get_waypoint_distance(idx1, idx2))
idx1 = len(self.waypoints.waypoints) - 50
idx2 = 50
rospy.logerr("distance between %d and %d: %f", idx1, idx2, self.get_waypoint_distance(idx1, idx2))
idx1 = 50
idx2 = len(self.waypoints.waypoints) - 50
rospy.logerr("distance between %d and %d: %f", idx1, idx2, self.get_waypoint_distance(idx1, idx2))
"""
def light_image_pose_cb(self, traffic_lights_msg, image_msg, pose_msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
            traffic_lights_msg (TrafficLightArray): ground-truth state and position of all traffic lights (from the simulator)
            image_msg (Image): image from car-mounted camera
            pose_msg (PoseStamped): current pose of the vehicle
"""
self.pose = pose_msg
self.lights = traffic_lights_msg.lights
self.camera_image = image_msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
# TODO: move into common package (might be cool to create a map-class: map = Map(waypoints), map.get_waypoint_distance(..))
    # TODO: in simulation, this takes a non-trivial amount of resources and slows down the whole pipeline
def get_closest_waypoint_idx(self, pose):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
closest_idx = 0
closest_dist = float("inf")
for idx, wp in enumerate(self.waypoints.waypoints):
dx = wp.pose.pose.position.x - pose.position.x
dy = wp.pose.pose.position.y - pose.position.y
dist = dx*dx + dy*dy
if dist < closest_dist:
closest_idx = idx
closest_dist = dist
return closest_idx
    # TODO: move into common package (might be cool to create a map-class: map = Map(waypoints), map.get_waypoint_distance(..))
def get_waypoint_distance(self, wp_idx1, wp_idx2):
"""
        Calculates the shortest distance between wp1 and wp2, measured as the accumulated distance between the waypoints
        traversed from wp1 to wp2. This assumes the waypoints form a closed loop. If the distance is negative, wp_idx2 is behind wp_idx1.
Args:
wp_idx1 (int): waypoint index of start point
wp_idx2 (int): waypoint index of end point
Returns:
            float: signed shortest distance between wp1 and wp2 on the waypoints loop
"""
assert wp_idx1 >= 0 and wp_idx2 >= 0 and wp_idx1 < len(self.waypoints.waypoints) and wp_idx2 < len(self.waypoints.waypoints)
distance = 0
# the signed index-distance
idx_dist = wp_idx2 - wp_idx1
# handle the cases where we measure over the beginning of the waypoints loop
        # such that we always return the shortest distance on the loop
if idx_dist < -len(self.waypoints.waypoints) / 2:
idx_dist = len(self.waypoints.waypoints) + idx_dist
elif idx_dist > len(self.waypoints.waypoints) / 2:
idx_dist = -len(self.waypoints.waypoints) + idx_dist
# the direction (forward vs. backwards)
dir = np.sign(idx_dist)
if dir < 0:
wp_idx1, wp_idx2 = wp_idx2, wp_idx1
idx = wp_idx1
while idx != wp_idx2:
p1 = self.waypoints.waypoints[idx].pose.pose.position
p2 = self.waypoints.waypoints[(idx+1) % len(self.waypoints.waypoints)].pose.pose.position
dist = np.sqrt((p2.x - p1.x)**2 + (p2.y - p1.y)**2)
distance += dist
idx += 1
if idx >= len(self.waypoints.waypoints):
idx = 0
return distance * dir
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if(self.use_ground_truth):
return light['light'].state
#Get classification
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
return self.light_classifier.get_classification(light, cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if not self.waypoints:
rospy.loginfo_throttle(5, "waiting for waypoints callback")
return -1, TrafficLight.UNKNOWN
# find the wp idx of the vehicle
car_closest_wp_idx = self.get_closest_waypoint_idx(self.pose.pose)
rospy.logdebug("Closest wp to the car's position is %d", car_closest_wp_idx)
# find the wp idx of the lights and their distance to the car
relevant_lights = []
for idx,l in enumerate(self.lights):
# find the corresponding stop-line (this assumes that the traffic lights and stop lines have the same index in the arrays)
tl_stop_line_pose = Pose()
tl_stop_line_pose.position.x = self.config['stop_line_positions'][idx][0]
tl_stop_line_pose.position.y = self.config['stop_line_positions'][idx][1]
# find closest wp to stop-line
tl_stop_line_wp_idx = self.get_closest_waypoint_idx(tl_stop_line_pose)
distance_to_car = self.get_waypoint_distance(car_closest_wp_idx, tl_stop_line_wp_idx)
# only consider TLs in front of the car's position, and ones not too far ahead
if distance_to_car < 0 or distance_to_car > self.tl_consideration_distance:
rospy.logdebug("Ignoring TL, its behind the car or too far ahead (%.2fm)", distance_to_car)
continue
tl = {
'light': l,
'stop_line_wp_idx': tl_stop_line_wp_idx,
'distance': distance_to_car,
'state': None
}
relevant_lights.append(tl)
# check if we have any relevant TLs ahead
if not len(relevant_lights):
rospy.logdebug("No relevant TL found")
return -1, TrafficLight.UNKNOWN
        # sort by distance and select the closest one
relevant_lights.sort(key=lambda x: x['distance'])
next_relevant_tl = relevant_lights[0]
rospy.logdebug("Next relevant TL is %.2fm ahead at wp %d.", next_relevant_tl['distance'], next_relevant_tl['stop_line_wp_idx'])
# find its state
next_relevant_tl['state'] = self.get_light_state(next_relevant_tl)
return next_relevant_tl['stop_line_wp_idx'], next_relevant_tl['state']
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def decay_lr(opt, max_iter, start_iter, initial_lr):
"""Decay learning rate linearly till 0."""
coeff = -initial_lr / (max_iter - start_iter)
for pg in opt.param_groups:
pg['lr'] += coeff
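# Hedged usage sketch (assumption, not part of the original training code):
# decay_lr is meant to be called once per iteration after `start_iter`, so the
# learning rate reaches roughly 0 at `max_iter`. `_demo_decay_schedule` and the
# SGD setup below are hypothetical illustrations, not the original loop.
def _demo_decay_schedule(params, max_iter=1000, start_iter=100, initial_lr=1e-3):
    import torch
    opt = torch.optim.SGD(params, lr=initial_lr)
    for it in range(max_iter):
        # ... forward / backward / opt.step() would happen here ...
        if it >= start_iter:
            decay_lr(opt, max_iter, start_iter, initial_lr)
    return opt.param_groups[0]['lr']  # ends close to 0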
def getGradNorm(model):
total_norm = 0
for name, p in model.named_parameters():
try:
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
        except AttributeError:
            # p.grad is None for parameters that received no gradient
            print(name)
total_norm = total_norm ** (1. / 2)
return total_norm
def plot_losses(g_losses, d_losses, grad_normG, grad_normD, dir_path):
plt.plot(g_losses)
plt.title('g loss')
plt.savefig(dir_path + "/g_losses.jpg")
plt.clf()
plt.plot(d_losses)
plt.title('d loss')
plt.savefig(dir_path + "/d_losses.jpg")
plt.clf()
plt.plot(grad_normG)
plt.title('G norm (square root of sum G norm)')
plt.savefig(dir_path + "/grad_normG.jpg")
plt.clf()
plt.plot(grad_normD)
plt.title('D norm (square root of sum D norm)')
plt.savefig(dir_path + "/grad_normD.jpg")
plt.clf()
    np.save(dir_path + '/Dloss.npy', d_losses)
    np.save(dir_path + '/Gloss.npy', g_losses)
    np.save(dir_path + '/Dgram.npy', grad_normD)
    np.save(dir_path + '/Ggram.npy', grad_normG)
def saveproj(y, y_hat, i ,dir_path):
plt.scatter(y_hat.data.numpy(), np.zeros(y_hat.shape[0]), label='Fake', s=100)
plt.scatter(y.data.numpy(), np.zeros(y.shape[0]), label='Real', s=50)
plt.title('Disc_projection')
plt.legend()
plt.savefig(dir_path + '/dprojection_epoch%05d.pdf'%(i), bbox_inches='tight')
plt.clf()
# EMA trick
def soft_copy_param(ema_netG, netG, beta):
netG_para = netG.state_dict()
for name, param in ema_netG.named_parameters():
param.data *= beta
param.data += (1-beta) * netG_para[name].cpu().data
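# Hedged usage sketch (assumption, not part of the original code): in a typical
# GAN training loop the EMA copy is refreshed right after each generator update.
# `_demo_ema_step` and beta=0.999 are hypothetical illustrations.
def _demo_ema_step(ema_netG, netG, beta=0.999):
    # ema_params <- beta * ema_params + (1 - beta) * current_params
    soft_copy_param(ema_netG, netG, beta)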
|
import graphene
from catalog.schema import Query as CatalogQuery
from enrollment.schema import Query as EnrollmentQuery
from forms.schema import Query as FormsQuery
from grades.schema import Query as GradesQuery
from playlist.schema import Query as PlaylistQuery
from user.schema import Query as UserQuery
from user.schema import Mutation as UserMutation
from scheduler.schema import Query as SchedulerQuery
from scheduler.schema import Mutation as SchedulerMutation
class Query(
CatalogQuery, EnrollmentQuery, FormsQuery,
GradesQuery, PlaylistQuery, UserQuery, SchedulerQuery
):
ping = graphene.String()
def resolve_ping(self, info):
return 'pong!'
class Mutation(
UserMutation, SchedulerMutation
):
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
|
"""
Purified fields for Django forms.
"""
from django import forms
from purifier import HTMLPurifier
class PurifyedCharField(forms.CharField):
"""
Extendable django.forms.CharField
    Adds a named argument `white_list` - a dict of allowed tags and attributes
"""
    def __init__(self, white_list=None, *args, **kwargs):
        # avoid a mutable default argument; fall back to an empty white list
        self._white_list = white_list or {}
super(PurifyedCharField, self).__init__(*args, **kwargs)
def clean(self, value):
value = super(PurifyedCharField, self).clean(value)
if value:
purifier = HTMLPurifier(self._white_list)
value = purifier.feed(value)
return value
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Produce an HTML page called VTKColorSeriesPatches.html showing the available
color series in vtkColorSeries.
It also shows how to select the text color based on luminance.
In this case Digital CCIR601 is used which gives less weight to the
red and blue components of a color.
"""
from vtkmodules.vtkCommonColor import (
vtkColorSeries,
vtkNamedColors
)
def main():
ncpt = HTMLTableMaker()
res = ncpt.MakeHTMLTable()
f = open("VTKColorSeriesPatches.html", "w", newline="\n")
f.write(res)
f.close()
class HTMLToFromRGBAColor:
@staticmethod
def RGBToHTMLColor(rgb):
"""
Convert an [R, G, B] list to #RRGGBB.
:param: rgb - The elements of the array rgb are unsigned chars (0..255).
:return: The html color.
"""
hexcolor = "#" + ''.join(['{:02x}'.format(x) for x in rgb])
return hexcolor
@staticmethod
def HTMLColorToRGB(colorString):
"""
Convert #RRGGBB to a [R, G, B] list.
:param: colorString a string in the form: #RRGGBB where RR, GG, BB are hexadecimal.
The elements of the array rgb are unsigned chars (0..255).
:return: The red, green and blue components as a list.
"""
colorString = colorString.strip()
if colorString[0] == '#':
colorString = colorString[1:]
if len(colorString) != 6:
raise ValueError("Input #%s is not in #RRGGBB format" % colorString)
r, g, b = colorString[:2], colorString[2:4], colorString[4:]
r, g, b = [int(n, 16) for n in (r, g, b)]
return [r, g, b]
@staticmethod
def RGBToLumaCCIR601(rgb):
"""
RGB -> Luma conversion
Digital CCIR601 (gives less weight to the R and B components)
:param: rgb - The elements of the array rgb are unsigned chars (0..255).
:return: The luminance.
"""
Y = 0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2]
return Y
class ColorStructures:
"""
Holds the color series id, name and colors.
"""
cs_colors = dict()
max_colors = 0
def __init__(self):
cs = vtkColorSeries()
sizes = list()
for i in range(0, cs.GetNumberOfColorSchemes()):
cs.SetColorScheme(i)
sizes.append(cs.GetNumberOfColors())
vc = list()
for j in range(0, cs.GetNumberOfColors()):
vc.append(cs.GetColor(j))
self.cs_colors[i] = [cs.GetColorSchemeName(), vc]
self.max_colors = max(sizes)
class HTMLTableMaker:
"""
This class creates HTML Tables displaying all the colors in
the class vtkNamedColors grouped by various categories.
"""
def __init__(self):
self.cs = ColorStructures()
self.nc = vtkNamedColors()
self.htmlRGBA = HTMLToFromRGBAColor()
@staticmethod
def MakeHTMLStyle():
s = ' <style>\n'
s += '\n'
s += ' body {\n'
s += ' background-color: snow\n'
s += ' }\n'
s += ' h1 {text-align:left;}\n'
s += ' h2 {text-align:left;}\n'
s += ' h3 {text-align:left;}\n'
s += ' h4 {text-align:left;}\n'
s += ' h5 {text-align:left;}\n'
s += ' h6 {text-align:left;}\n'
s += '\n'
s += ' p {text-align:left;}\n'
s += '\n'
s += ' table {\n'
s += ' font-family: arial, sans-serif;\n'
s += ' border-collapse: collapse;\n'
s += ' font-size: medium;\n'
s += ' padding: 4px;\n'
s += ' }\n'
s += '\n'
s += ' th {\n'
s += ' background: LightSteelBlue;\n'
s += ' font-size: medium;\n'
s += ' }\n'
s += '\n'
s += ' th[colspan]:not([colspan="1"]) {\n'
s += ' background: LightSteelBlue;\n'
s += ' font-size: medium;\n'
s += ' text-align : center;\n'
s += ' vertical-align : top;\n'
s += ' }\n'
s += '\n'
s += ' tr {\n'
s += ' background: MintCream;\n'
s += ' vertical-align : top;\n'
s += ' }\n'
s += '\n'
s += ' td {\n'
s += ' background: MintCream;\n'
s += ' border: 1px solid #dddddd;\n'
s += ' text-align: left;\n'
s += ' padding: 8px;\n'
s += ' font-family: monospace;\n'
s += ' font-size: medium;\n'
s += ' font-weight: bold;\n'
s += ' }\n'
s += '\n'
s += ' td[colspan]:not([colspan="1"]) {\n'
s += ' text-align : center;\n'
s += ' }\n'
s += '\n'
s += ' .cour {\n'
s += ' font-family: Courier;\n'
s += ' }\n'
s += '\n'
s += ' html, body {\n'
s += ' height: 100%;\n'
s += ' }\n'
s += '\n'
s += ' html {\n'
s += ' display: table;\n'
s += ' margin: auto;\n'
s += ' }\n'
s += '\n'
s += ' body {\n'
s += ' display: table-cell;\n'
s += ' vertical-align: middle;\n'
s += ' }\n'
s += '\n'
s += ' thead {color: DarkGreen;}\n'
s += ' tbody {color: MidnightBlue;}\n'
s += ' tfoot {color: SaddleBrown;}\n'
s += '\n'
s += ' </style>\n'
return s
def MakeHTMLHeader(self):
s = '<!DOCTYPE html>\n'
s += '<html lang="en">\n'
s += '<head>\n'
s += '<meta charset="UTF-8" />\n'
s += '<title>vtkColorSeries</title>\n'
s += self.MakeHTMLStyle()
s += '</head>\n'
return s
def MakeTableHeader(self):
s = '<tr>\n'
s += '<th>Index</th>\n'
s += '<th colspan="' + str(self.cs.max_colors) + '">Name</th>\n'
s += '</tr>\n'
s += '<tr>\n'
s += '<th></th>\n'
s += '<th colspan="' + str(self.cs.max_colors) + '">Colors in the Series</th>\n'
s += '</tr>\n'
return s
def MakeTD1(self, idx, name):
s = '<tr>\n'
s += '<td>'
s += '<b>' + str(idx) + '</b>'
s += '</td>\n'
s += '<td colspan="' + str(self.cs.max_colors) + '">'
s += '<b>' + name + '</b>'
s += '</td>\n'
s += '</tr>\n'
return s
def MakeTD2(self, rgbs):
s = '<tr>\n'
s += '<td></td>\n'
cnt = 0
for p in rgbs:
ss = '{:3d} '.format(cnt)
ss = ss.replace(' ', ' ')
y = self.htmlRGBA.RGBToLumaCCIR601(p)
textColor = '#000000' # Black
if y < 255 / 2.0:
textColor = '#ffffff' # White
s += '<td style="background:' + self.htmlRGBA.RGBToHTMLColor(p) + ';color:'
s += textColor + '">' + ss + '</td>\n'
cnt += 1
if cnt < self.cs.max_colors:
s += '<td colspan="' + str(self.cs.max_colors - cnt) + '">   </td>\n'
s += '</tr>\n'
return s
def MakeTable(self):
res = self.MakeTableHeader()
for idx, v in self.cs.cs_colors.items():
name = v[0]
res += self.MakeTD1(idx, name)
res += self.MakeTD2(v[1])
return res
def MakeHTMLTable(self):
res = self.MakeHTMLHeader()
res += '<body>\n'
res += '<h1>Color series available in vtkColorSeries</h1>\n'
res += '<table>\n'
res += self.MakeTable()
res += '</table>\n'
res += '</body>\n'
return res
if __name__ == "__main__":
main()
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.urls import reverse
from homeschool.referrals.forms import ReferralForm
from .forms import ProfileForm
@login_required
def settings_dashboard(request):
"""A dashboard of all the user's settings."""
if request.method == "POST":
form = ProfileForm(request.POST, instance=request.user.profile)
if form.is_valid():
form.save()
messages.add_message(
request, messages.SUCCESS, "Your settings updated successfully."
)
return redirect(reverse("settings:dashboard"))
else:
form = ProfileForm(instance=request.user.profile)
return render(
request,
"users/settings_dashboard.html",
{"form": form, "nav_link": "settings", "referral_form": ReferralForm()},
)
|
import os
import logging
import subprocess
from glob import glob
from time import sleep
from argparse import ArgumentParser
from configparser import ConfigParser
def main():
# Syntax
parser = ArgumentParser()
parser.add_argument('-m', dest='mAp', type=float)
parser.add_argument('-e', dest='eps', type=float)
parser.add_argument('-z', dest='zlims', type=int, nargs=2)
parser.add_argument('-n', dest='batch_size', type=int, default=1)
args = parser.parse_args()
# Configure logger
logging.basicConfig(format='[ submitJobs ][ %(levelname)s ]: %(message)s',
level=logging.DEBUG)
# Determine how many jobs to submit at a time
logging.info('Submitting jobs in batches of %s' % args.batch_size)
# Relevant dirs
nfsdir = '/nfs/slac/g/ldmx/users/jmlazaro'
pwd = f'{nfsdir}/samples/v3/4gev/vissig/pro' # bc home isn't home
lheout = f'{pwd}/lhes/{args.mAp}/reformatted'
rootout = f'{nfsdir}/samples/v3/4gev/vissig/train/{args.mAp}'
if not os.path.exists(lheout): os.makedirs(lheout)
if not os.path.exists(rootout):
print(f'Making {rootout}')
os.makedirs(rootout)
# Command that will be used to submit jobs to the batch system
batch_command = ('bsub '
+ '-W 150 '
#+ '-q short '
#+ '-W 60 '
+ '-n 3 '
+ '-R "select[centos7] span[hosts=1]" '
+ f'singularity run --home {nfsdir} {nfsdir}/ldmx_pro_visdecay.sif {nfsdir} '
+ f'fire {pwd}/vis.py '
+ f'-m {args.mAp} '
+ f'-e {args.eps} '
+ f'-z {args.zlims[0]} {args.zlims[1]} '
+ f'--lheout {lheout} '
+ f'--rootout {rootout} '
)
# Build list of complete commands
job_commands = [] # Remnant from more complex batch system but still could be useful for debugging
for fil in sorted(
glob(f'{pwd}/lhes/{args.mAp}/split/*'),
key=lambda x: int( x.split('_p')[1].split('.lhe')[0] )
):
job_commands.append(batch_command
+ f'-i {fil} '
+ f'-r {len(job_commands)}'
)
#job_commands = job_commands[:2]
# Submit them
for command in job_commands:
print(command)
subprocess.Popen(command, shell=True).wait()
# If a batch of jobs has been submitted, don't submit another batch
# until all jobs are running.
if (job_commands.index(command) + 1)%args.batch_size == 0:
# Initially, wait 10s before submitting other jobs. This lets the
# batch system catch up so we get an accurate count of pending
# jobs.
sleep(10)
# Check how many jobs are pending
cjobs = int(
subprocess.check_output('bjobs -p | wc -l', shell=True)
)
print('cjobs: %s' % cjobs)
while cjobs != 0:
logging.info('%s jobs are still pending' % cjobs)
sleep(30)
cjobs = int(
subprocess.check_output('bjobs -p | wc -l', shell=True)
)
continue
if __name__ == '__main__': main()
|
# ! /usr/bin/python3
# -*- coding:utf-8 -*-
from conf.base import BaseDB, engine
import sys
from sqlalchemy import (
Column,
Integer,
String,
DateTime
)
class Users(BaseDB):
"""table for users
"""
__tablename__ = "users"
    # Table schema: id, phone, password, createTime
id = Column(Integer, primary_key=True)
phone = Column(String(50), nullable=False)
password = Column(String(50), nullable=True)
createTime = Column(DateTime, nullable=True)
def __init__(self, phone, password, create_time):
self.phone = phone
self.password = password
self.createTime = create_time
def initdb():
BaseDB.metadata.create_all(engine)
if __name__ == '__main__':
print("Initialize database")
initdb()
# Database table definition and initialization
|
# Lesson 4.13 - Timing Python operations
import numpy as np
import time
def test_run():
t1 = time.time()
print ("ML4T")
t2 = time.time()
print("The time taken by print statement is ", t2 - t1, " seconds")
if __name__ == "__main__":
test_run()
|
import os
import csv
from collections import Counter
vote_count = 0
khan = 0
correy = 0
li = 0
otooley = 0
py_poll = "election_data.csv"
votes = []
runners = []
with open(py_poll, newline="") as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
next(csvreader)
for row in csvreader:
votes.append(row[0])
runners.append(row[2])
candidate_votes = zip(votes, runners)
all_votes = dict(Counter(runners))
#winner = ""
winner_count = 0
for key, value in all_votes.items():
if value > winner_count:
winner_count = value
winner = key
for row in candidate_votes:
if row[1] == "Khan":
khan = khan + 1
elif row[1] == "Correy":
correy = correy + 1
elif row[1] == "Li":
li = li + 1
else:
otooley = otooley + 1
vote_count = len(votes)
khan_r = {"Name": "Khan", "Result": khan, "Percent": round(khan/vote_count,5)*100}
correy_r = {"Name": "Correy", "Result": correy, "Percent": round(correy/vote_count,5)*100}
li_r = {"Name": "Li", "Result": li, "Percent": round(li/vote_count,5)*100}
otooley_r = {"Name": "O'Tooley", "Result": otooley, "Percent": round(otooley/vote_count,5)*100}
print("Election Results")
print("-------------------------")
print("Total Votes: " + str(vote_count) + "")
print("-------------------------")
print(khan_r.get("Name") + ": " + "{:.3f}".format(khan_r.get("Percent")) + "% (" + str(khan_r.get("Result")) + ")")
print(correy_r.get("Name") + ": " + "{:.3f}".format(correy_r.get("Percent")) + "% (" + str(correy_r.get("Result")) + ")")
print(li_r.get("Name") + ": " + "{:.3f}".format(li_r.get("Percent")) + "% (" + str(li_r.get("Result")) + ")")
print(otooley_r.get("Name") + ": " + "{:.3f}".format(otooley_r.get("Percent")) + "% (" + str(otooley_r.get("Result")) + ")")
print("-------------------------")
print("Winner: "+ winner)
print("-------------------------")
text_file = open("Election_Results.txt", "w")
text_file.write("Election Results\n")
text_file.write("-------------------------\n")
text_file.write("Total Votes: " + str(vote_count) + "\n")
text_file.write("-------------------------\n")
text_file.write(khan_r.get("Name") + ": " + "{:.3f}".format(khan_r.get("Percent")) + "% (" + str(khan_r.get("Result")) + ")\n")
text_file.write(correy_r.get("Name") + ": " + "{:.3f}".format(correy_r.get("Percent")) + "% (" + str(correy_r.get("Result")) + ")\n")
text_file.write(li_r.get("Name") + ": " + "{:.3f}".format(li_r.get("Percent")) + "% (" + str(li_r.get("Result")) + ")\n")
text_file.write(otooley_r.get("Name") + ": " + "{:.3f}".format(otooley_r.get("Percent")) + "% (" + str(otooley_r.get("Result")) + ")\n")
text_file.write("-------------------------\n")
text_file.write("Winner: "+ winner + "\n")
text_file.write("-------------------------\n")
text_file.close()
|
import os
import json
import boto3
from os import listdir, environ, path
from os.path import isfile, join
from zipfile import ZipFile
DEPLOY_TAG = 'latest' # datetime.now().strftime("%Y%m%d%H%M")
ECS_URI = environ.get('ECS_REPOSITORY_URI')
BATCH_JOB_ROLE = 'city-scrapers-batch-job-role'
SPIDER_PATH = join(
path.dirname(path.dirname(path.abspath(__file__))),
'city_scrapers',
'spiders'
)
ENV_VARS = [
'SCRAPY_SETTINGS_MODULE',
'AIRTABLE_API_KEY',
'CITY_SCRAPERS_AIRTABLE_BASE_KEY',
'CITY_SCRAPERS_AIRTABLE_DATA_TABLE',
'CITY_SCRAPERS_AIRTABLE_GEOCODE_TABLE',
'CITY_SCRAPERS_GOOGLE_API_KEY',
'SENTRY_DSN',
'MAPZEN_API_KEY'
]
batch = boto3.client('batch')
iam = boto3.resource('iam')
lambda_client = boto3.client('lambda')
spider_names = [
path.splitext(f)[0]
for f in listdir(SPIDER_PATH)
if isfile(join(SPIDER_PATH, f)) and f != '__init__.py'
]
def create_job_definitions():
"""
Register all job definitions.
"""
active_job_defs = batch.describe_job_definitions(status='ACTIVE')['jobDefinitions']
print('deregistering all current job definitions')
for job in active_job_defs:
batch.deregister_job_definition(jobDefinition=job['jobDefinitionArn'])
future_job_defs = spider_names
job_role_arn = iam.Role(BATCH_JOB_ROLE).arn
for job_def in future_job_defs:
print('creating job def {}'.format(job_def))
batch.register_job_definition(
jobDefinitionName=job_def,
type='container',
containerProperties={
'image': '{0}:{1}'.format(ECS_URI, DEPLOY_TAG),
'vcpus': 1,
'memory': 768,
'command': ['scrapy', 'crawl', job_def],
'jobRoleArn': job_role_arn,
'environment': [{'name': v, 'value': environ.get(v)} for v in ENV_VARS],
'readonlyRootFilesystem': False,
'privileged': False,
},
retryStrategy={'attempts': 3}
)
def update_lambda_function(name):
with ZipFile('{}.zip'.format(name), 'w') as zf:
for f in listdir(join(path.dirname(__file__), name)):
zf.write(join(path.dirname(__file__), name, f), path.basename(f))
with open('{}.zip'.format(name), 'rb') as zf:
zip_buffer = zf.read()
os.remove('{}.zip'.format(name))
lambda_client.update_function_code(FunctionName=name, ZipFile=zip_buffer)
create_job_definitions()
update_lambda_function('city-scrapers-status')
|
from . import conversion
from . import measure
|
from math import sqrt
N = int(input())
nums = sorted(int(input()) for _ in range(N))
diff = nums[-1] - nums[0]
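# If every number leaves the same remainder modulo M, then M divides every
# pairwise difference, and in particular the largest one (max - min). The
# divisors of `diff` are therefore the only candidates; each candidate is then
# verified against all numbers below.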
cands = set(elem
for div in range(1, int(sqrt(diff))+1)
if diff % div == 0
for elem in (div, diff // div))
Ms = sorted(cand
for cand in cands
if cand > 1
if all(num % cand == nums[0] % cand for num in nums))
print(' '.join(str(m) for m in Ms))
|
class OpenMLStudy(object):
'''
An OpenMLStudy represents the OpenML concept of a study. It contains
the following information: name, id, description, creation date,
creator id and a set of tags.
According to this list of tags, the study object receives a list of
OpenML object ids (datasets, flows, tasks and setups).
Can be used to obtain all relevant information from a study at once.
Parameters
----------
id : int
the study id
name : str
the name of the study (meta-info)
description : str
brief description (meta-info)
creation_date : str
date of creation (meta-info)
creator : int
openml user id of the owner / creator
tag : list(dict)
The list of tags shows which tags are associated with the study.
Each tag is a dict of (tag) name, window_start and write_access.
data : list
a list of data ids associated with this study
tasks : list
a list of task ids associated with this study
flows : list
a list of flow ids associated with this study
setups : list
a list of setup ids associated with this study
'''
def __init__(self, id, name, description, creation_date, creator,
tag, data, tasks, flows, setups):
self.id = id
self.name = name
self.description = description
self.creation_date = creation_date
self.creator = creator
self.tag = tag
self.data = data
self.tasks = tasks
self.flows = flows
self.setups = setups
|
"""
API input/output manipulation utility functions
"""
import json
from alignment.PyHELM_simple import HelmObj
def extract_helm_from_json(input_json):
"""Extracts the HELM strings out of a JSON array.
:param input_json: JSON array of Peptide objects
:return: output_helm: string (extracted HELM strings separated by a newline)
"""
output_helm = ""
for peptide in input_json:
if len(output_helm) > 0:
output_helm += "\n"
output_helm += peptide["HELM"]
return output_helm
def json_output(helm_data, json_input):
"""
Creates JSON output from the decoded and aligned HELM sequence and JSON input.
Returns JSON array of aligned sequences.
:param helm_data: Dictionary of the aligned subpeptides
:param json_input: Original JSON input
:return:
"""
# If helm_data is empty, the function will return an empty json_list
json_list = []
if len(helm_data) > 0: # pylint: disable=too-many-nested-blocks
# The following cycle iterates over the helm_data dictionary, where keys are the names of subpeptides
for key in helm_data:
# peptides variable stores the sequences of subpeptides of the given key
peptides = [x.strip("\n") for x in helm_data[key].split(f"> {key}\n")[1:]]
index = 0
"""
The following cycle iterates over the items of the original JSON input.
            At the beginning, it checks whether the current entity contains the given subpeptide.
            If the subpeptide exists in the current sequence, the "if" branch is executed, which creates
            a separate item for the JSON output.
"""
for entity in json_input:
"""
Differentiate between aligned input (output of "/align" endpoint) and raw input.
                If the input is not already aligned, we only need to extract the polymer whose name equals the current key (subpeptide).
                If the input is already aligned, we extract the polymer whose name equals both the key and the PolymerID.
"""
polymer_in_entity = entity.get('PolymerID', None)
if not polymer_in_entity or polymer_in_entity == key:
sequence = extract_subpeptide(key, entity['HELM'])
else:
sequence = None
# If there is a subpeptide with a given key, we can add it to the output
if sequence is not None:
output_json = "{"
output_json += '"PolymerID":"' + key + '", '
output_json += '"AlignedSubpeptide":"' + sequence + '", '
output_json += '"HELM":"' + entity['HELM'] + '", '
output_json += '"ID":"' + entity["ID"] + '", '
output_json += '"AlignedSeq":"' + peptides[index] + '", '
"""
                    The following cycle iterates over the aligned subsequence and, with the help of the "extract_monomer"
function adds separate monomers to the JSON output in the following format:
"PEPTIDE1_1": "d1Nal",
"""
if key.startswith("CHEM"):
output_json += f'"{key}_1":"' + sequence[1:-1] + '"' + ", "
else:
for ind, el in enumerate(extract_monomer(peptides[index])):
output_json += f'"{key}_{ind + 1}":"' + el + '"' + ", "
output_json = output_json[:-2]
output_json += "}"
index += 1
json_list.append(json.loads(output_json))
return json_list
def extract_subpeptide(peptide_name, helm_string):
"""Extracts the sub-peptide sequence, given the HELM string and the name of the sub-peptide.
:param peptide_name:
:param helm_string:
:return:
"""
helm_obj = HelmObj()
helm_obj.parse_helm(helm_string)
# The following cycle goes through all the subpeptides of the HELM sequence.
# If the name of the subpeptide equals the peptide_name, the function returns sequence of the subpeptide
for poly in helm_obj.polymers:
if poly.name == peptide_name:
return poly.data
return None
def extract_monomer(aligned_sequence):
"""
    Generator function. Yields the monomers of the aligned sequence. Every symbol counts as a separate
    monomer unless it is enclosed in square brackets; a bracketed group is processed as a single monomer.
    Currently works with sequences where monomers are not separated by dots, e.g.
    [ClAc]FRYLY[Ahp]FCGKK[NH2]
    If monomers are ever separated by dots, this function will need to be updated.
:param aligned_sequence:
:return:
"""
non_natural = False
monomer = ''
    # These variables track the number of opened and closed square brackets.
# This is needed for correct parsing of sequences, which have SMILES as monomers
opened = 0
closed = 0
for el in aligned_sequence:
if monomer != "" and not non_natural:
monomer = ""
if el == "[" and opened == 0:
non_natural = True
opened += 1
elif el == "[":
opened += 1
monomer += el
elif not non_natural:
yield el
# Yield the monomer, only if the current symbol is closing bracket,
# and number of opened brackets is exactly one more than the number of closed ones
elif el == "]" and opened == closed + 1:
non_natural = False
opened = 0
closed = 0
yield monomer
elif el == "]":
monomer += el
closed += 1
else:
monomer += el
    assert not non_natural, f"Invalid notation of non-natural amino acids in the following sequence: {aligned_sequence}"
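# Hedged illustration (assumption, not part of the original module): a tiny helper
# showing how extract_monomer tokenises a sequence that mixes natural and bracketed
# monomers. For "[ClAc]FRYLY[Ahp]F" it returns ['ClAc', 'F', 'R', 'Y', 'L', 'Y', 'Ahp', 'F'].
def _demo_extract_monomer(seq="[ClAc]FRYLY[Ahp]F"):
    return list(extract_monomer(seq))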
def extract_aligned_sequences(aligned_array):
"""
Transforms aligned sequences into FASTA format
:param aligned_array: array of aligned sequences
:return: string, aligned sequences in FASTA format
"""
aligned_seqs = ''
for element in aligned_array:
if len(aligned_seqs) > 0:
aligned_seqs += '\n'
aligned_seqs += f"> {element['PolymerID']}\n"
aligned_seqs += element['AlignedSeq']
return aligned_seqs
def escape_double_quotes_in_input(input_data):
""" Adds escape character in front of double quotes in HELM string
:param input_data: JSON array of input data
:return: JSON array
"""
if "aligned_sequences" and "new_sequences" in input_data:
for ind, element in enumerate(input_data["aligned_sequences"]):
helm = element["HELM"].replace('"', '\\"')
input_data["aligned_sequences"][ind]["HELM"] = helm
for ind, element in enumerate(input_data["new_sequences"]):
helm = element["HELM"].replace('"', '\\"')
input_data["new_sequences"][ind]["HELM"] = helm
else:
for ind, element in enumerate(input_data):
helm = element["HELM"].replace('"', '\\"')
input_data[ind]["HELM"] = helm
return input_data
|
#!/usr/bin/env python
# coding=utf-8
################################################################################
import sys, os
curr_path = os.path.dirname(__file__)
parent_path = os.path.dirname(curr_path)
sys.path.append(parent_path) # add current terminal path to sys.path
################################################################################
import gym
import torch
import datetime
from utils import save_results, make_dir
from plot import plot_rewards
from agent import DQN
################################################################################
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # obtain current time
################################################################################
class DQNConfig:
def __init__(self):
self.algo = "DQN" # name of algo
self.env = 'CartPole-v0'
self.result_path = curr_path + "/outputs/" + self.env + \
'/' + curr_time + '/results/' # path to save results
self.model_path = curr_path + "/outputs/" + self.env + \
'/' + curr_time + '/models/' # path to save models
        self.train_eps = 300 # max training episodes
self.eval_eps = 50 # number of episodes for evaluating
self.gamma = 0.95
self.epsilon_start = 0.90 # start epsilon of e-greedy policy
self.epsilon_end = 0.01
self.epsilon_decay = 500
self.lr = 0.0001 # learning rate
self.memory_capacity = 100000 # capacity of Replay Memory
self.batch_size = 64
self.target_update = 4 # update frequency of target net
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # check gpu
self.hidden_dim = 256 # hidden size of net
################################################################################
def env_agent_config(cfg, seed = 1):
env = gym.make(cfg.env)
env.seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = DQN(state_dim, action_dim, cfg)
return env, agent
################################################################################
def train(cfg, env, agent):
print('Start to train !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
    ma_rewards = [] # moving average reward
for i_ep in range(cfg.train_eps):
state = env.reset()
done = False
ep_reward = 0
while True:
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
ep_reward += reward
agent.memory.push(state, action, reward, next_state, done)
state = next_state
agent.update()
if done:
break
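        # periodically sync the target network with the policy network
        # to stabilise the bootstrapped Q-value targets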
if (i_ep + 1) % cfg.target_update == 0:
agent.target_net.load_state_dict(agent.policy_net.state_dict())
if (i_ep + 1) % 10 == 0:
print('Episode:{}/{}, Reward:{}'.format(i_ep + 1, cfg.train_eps, ep_reward))
rewards.append(ep_reward)
# save ma rewards
if ma_rewards:
ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
else:
ma_rewards.append(ep_reward)
print('Complete training!')
return rewards, ma_rewards
################################################################################
def eval(cfg, env, agent):
print('Start to eval !')
print(f'Env:{cfg.env}, Algorithm:{cfg.algo}, Device:{cfg.device}')
rewards = []
ma_rewards = [] # moving average rewards
for i_ep in range(cfg.eval_eps):
ep_reward = 0 # reward per episode
state = env.reset()
while True:
action = agent.predict(state)
next_state, reward, done, _ = env.step(action)
state = next_state
ep_reward += reward
if done:
break
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1] * 0.9 + ep_reward * 0.1)
else:
ma_rewards.append(ep_reward)
        if (i_ep + 1) % 10 == 0:
print(f"Episode:{i_ep + 1}/{cfg.eval_eps}, reward:{ep_reward:.1f}")
    print('Complete evaluation!')
return rewards, ma_rewards
################################################################################
if __name__ == "__main__":
cfg = DQNConfig()
# train
env,agent = env_agent_config(cfg, seed = 1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(cfg.result_path, cfg.model_path)
agent.save(path = cfg.model_path)
save_results(rewards, ma_rewards, tag = 'train', path = cfg.result_path)
plot_rewards(rewards, ma_rewards, tag = 'train', algo = cfg.algo, path = cfg.result_path)
# eval
env, agent = env_agent_config(cfg, seed = 10)
agent.load(path = cfg.model_path)
rewards, ma_rewards = eval(cfg, env, agent)
save_results(rewards, ma_rewards, tag = 'eval', path = cfg.result_path)
plot_rewards(rewards, ma_rewards, tag = 'eval', env = cfg.env, algo = cfg.algo, path = cfg.result_path)
|
from flask import Blueprint, jsonify, request
usuarios_app = Blueprint('usuarios_app',__name__,template_folder='templates')
@usuarios_app.route('/usr', methods=['POST'])
def cadastrar():
    # Minimal placeholder body (assumption): the original implementation was
    # truncated here; echo the posted JSON back with a 201 status.
    dados = request.get_json(silent=True) or {}
    return jsonify(dados), 201
|
#Submitted by thr3s0ld
class Solution:
def longestDupSubstring(self, S: str) -> str:
def check(mid, roll):
a = 0
for i in range(mid):
a = (a * 26 + ans[i]) % roll
dic = {a}
aL = pow(26, mid, roll)
for pos in range(1, n - mid + 1):
a = (a * 26 - ans[pos - 1] * aL + ans[pos + mid - 1]) % roll
if a in dic:
return pos
dic.add(a)
return -1
n = len(S)
ans = [ord(i) - ord('a') for i in S]
lo, hi = 1, n
pos = -1
roll = 2**63 - 1
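        # Note (added observation): 2**63 - 1 is not prime; a large prime modulus
        # such as (1 << 61) - 1 is often preferred for Rabin-Karp rolling hashes
        # to reduce the chance of collisions.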
        # binary search over candidate substring lengths
while lo <= hi:
mid = (lo + hi) // 2
cur = check(mid, roll)
if cur != -1:
lo = mid + 1
pos = cur
else:
hi = mid - 1
return S[pos: pos + lo - 1]
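# Hedged usage sketch (assumption, not part of the original submission): the
# longest substring of "banana" that occurs at least twice is "ana".
def _demo_longest_dup_substring():
    return Solution().longestDupSubstring("banana")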
|
import numpy as np
import pandas as pd
import sklearn.preprocessing
from sensai.data_transformation import DataFrameTransformer, RuleBasedDataFrameTransformer, DataFrameTransformerChain, DFTNormalisation
class TestDFTTransformerBasics:
class TestDFT(DataFrameTransformer):
def _fit(self, df: pd.DataFrame):
pass
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return pd.DataFrame({"foo": [1, 2], "baz": [1, 2]})
class RuleBasedTestDFT(RuleBasedDataFrameTransformer):
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df
testdf = pd.DataFrame({"foo": [1, 2], "bar": [1, 2]})
def test_basicProperties(self):
testdft = self.TestDFT()
assert not testdft.isFitted()
assert testdft.info()["changeInColumnNames"] is None
testdft.fit(self.testdf)
assert testdft.isFitted()
assert all(testdft.apply(self.testdf) == pd.DataFrame({"foo": [1, 2], "baz": [1, 2]}))
assert testdft.info()["changeInColumnNames"] is not None
# testing apply with no change in columns
testdft.apply(pd.DataFrame({"foo": [1, 2], "baz": [1, 2]}))
assert testdft.info()["changeInColumnNames"] == "none"
def test_ruleBasedAlwaysFitted(self):
assert self.RuleBasedTestDFT().isFitted()
def test_emptyChainFitted(self):
assert DataFrameTransformerChain().isFitted()
def test_combinationFittedIffEachMemberFitted(self):
        # if one of the transformers is not fitted, the combination is not fitted either
dftChain = DataFrameTransformerChain(self.TestDFT(), self.RuleBasedTestDFT())
assert dftChain.dataFrameTransformers[1].isFitted()
assert not dftChain.isFitted()
dftChain.fit(self.testdf)
assert dftChain.isFitted()
assert dftChain.dataFrameTransformers[0].isFitted()
        # if all transformers are fitted, the combination is also fitted, even if fit was not called
dftChain = DataFrameTransformerChain([self.RuleBasedTestDFT(), self.RuleBasedTestDFT()])
assert dftChain.isFitted()
class TestDFTNormalisation:
def test_multiColumnSingleRuleIndependent(self):
arr = np.array([1, 5, 10])
df = pd.DataFrame({"foo": arr, "bar": arr*100})
dft = DFTNormalisation([DFTNormalisation.Rule(r"foo|bar", transformer=sklearn.preprocessing.MaxAbsScaler(), independentColumns=True)])
df2 = dft.fitApply(df)
assert np.all(df2.foo == arr/10) and np.all(df2.bar == arr/10)
def test_multiColumnSingleRule(self):
arr = np.array([1, 5, 10])
df = pd.DataFrame({"foo": arr, "bar": arr*100})
dft = DFTNormalisation([DFTNormalisation.Rule(r"foo|bar", transformer=sklearn.preprocessing.MaxAbsScaler(), independentColumns=False)])
df2 = dft.fitApply(df)
assert np.all(df2.foo == arr/1000) and np.all(df2.bar == arr/10)
def test_arrayValued(self):
arr = np.array([1, 5, 10])
df = pd.DataFrame({"foo": [arr, 2*arr, 10*arr]})
dft = DFTNormalisation([DFTNormalisation.Rule(r"foo|bar", transformer=sklearn.preprocessing.MaxAbsScaler(), arrayValued=True)])
df2 = dft.fitApply(df)
assert np.all(df2.foo.iloc[0] == arr/100) and np.all(df2.foo.iloc[-1] == arr/10)
|